python-watcher-4.0.0/0000775000175000017500000000000013656752352014527 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/0000775000175000017500000000000013656752352015274 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/requirements.txt0000664000175000017500000000072113656752270020557 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. openstackdocstheme>=1.20.0 # Apache-2.0 sphinx>=1.8.0,!=2.1.0,!=3.0.0 # BSD sphinxcontrib-pecanwsme>=0.8.0 # Apache-2.0 sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD reno>=2.7.0 # Apache-2.0 sphinxcontrib-apidoc>=0.2.0 # BSD os-api-ref>=1.4.0 # Apache-2.0 python-watcher-4.0.0/doc/notification_samples/0000775000175000017500000000000013656752352021506 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/notification_samples/action-execution-start.json0000664000175000017500000000242113656752270027010 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionExecutionPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.execution.start", 
"publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/action-cancel-end.json0000664000175000017500000000241613656752270025647 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCancelPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.cancel.end", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/audit-update.json0000664000175000017500000000477213656752270025000 0ustar zuulzuul00000000000000{ "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:51:38.722986 ", "payload": { "watcher_object.name": "AuditUpdatePayload", "watcher_object.data": { "name": "my_audit", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.name": "StrategyPayload", "watcher_object.data": { "name": "dummy", "parameters_spec": { "properties": { "para2": { "default": "hello", "type": "string", "description": "string parameter example" }, "para1": { "maximum": 10.2, "default": 3.2, 
"minimum": 1.0, "description": "number parameter example", "type": "number" } } }, "updated_at": null, "display_name": "Dummy strategy", "deleted_at": null, "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "created_at": "2016-11-04T16:25:35Z" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "scope": [], "created_at": "2016-11-04T16:51:21Z", "uuid": "f1e0d912-afd9-4bf2-91ef-c99cd08cc1ef", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.name": "GoalPayload", "watcher_object.data": { "efficacy_specification": [], "updated_at": null, "name": "dummy", "display_name": "Dummy goal", "deleted_at": null, "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "created_at": "2016-11-04T16:25:35Z" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "parameters": { "para2": "hello", "para1": 3.2 }, "deleted_at": null, "state_update": { "watcher_object.name": "AuditStateUpdatePayload", "watcher_object.data": { "state": "ONGOING", "old_state": "PENDING" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "interval": null, "updated_at": null, "state": "ONGOING", "audit_type": "ONESHOT" }, "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "priority": "INFO", "event_type": "audit.update", "message_id": "697fdf55-7252-4b6c-a2c2-5b9e85f6342c" } python-watcher-4.0.0/doc/notification_samples/action_plan-cancel-start.json0000664000175000017500000000352413656752270027251 0ustar zuulzuul00000000000000{ "event_type": "action_plan.cancel.start", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { 
"created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "SUCCEEDED", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "fault": null, "state": "CANCELLING", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } python-watcher-4.0.0/doc/notification_samples/action-update.json0000664000175000017500000000301613656752270025135 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionUpdatePayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionStateUpdatePayload", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" } }, "state": "ONGOING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "created_at": 
"2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.update", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/infra-optim-exception.json0000664000175000017500000000112113656752270026614 0ustar zuulzuul00000000000000{ "event_type": "infra-optim.exception", "payload": { "watcher_object.data": { "exception": "NoAvailableStrategyForGoal", "exception_message": "No strategy could be found to achieve the server_consolidation goal.", "function_name": "_aggregate_create_in_db", "module_name": "watcher.objects.aggregate" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "priority": "ERROR", "publisher_id": "watcher-api:fake-mini" } python-watcher-4.0.0/doc/notification_samples/action_plan-execution-error.json0000664000175000017500000000443113656752270030021 0ustar zuulzuul00000000000000{ "event_type": "action_plan.execution.error", "publisher_id": "infra-optim:node0", "priority": "ERROR", "message_id": "9a45c5ae-0e21-4300-8fa0-5555d52a66d9", "payload": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanActionPayload", "watcher_object.data": { "fault": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "exception_message": "TEST", "module_name": "watcher.tests.notifications.test_action_plan_notification", "function_name": "test_send_action_plan_action_with_error", "exception": "WatcherException" } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "created_at": "2016-10-18T09:52:05Z", "strategy_uuid": 
"cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "created_at": "2016-10-18T09:52:05Z", "name": "TEST", "updated_at": null, "display_name": "test strategy", "parameters_spec": {}, "deleted_at": null } }, "updated_at": null, "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.data": { "parameters": {}, "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "created_at": "2016-10-18T09:52:05Z", "scope": [], "updated_at": null, "audit_type": "ONESHOT", "interval": null, "deleted_at": null, "state": "PENDING" } }, "global_efficacy": [], "state": "ONGOING" } }, "timestamp": "2016-10-18 09:52:05.219414" } python-watcher-4.0.0/doc/notification_samples/audit-planner-start.json0000664000175000017500000000437113656752270026303 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { 
"parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.planner.start", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/notification_samples/service-update.json0000664000175000017500000000161413656752270025322 0ustar zuulzuul00000000000000{ "payload": { "watcher_object.name": "ServiceUpdatePayload", "watcher_object.namespace": "watcher", "watcher_object.data": { "status_update": { "watcher_object.name": "ServiceStatusUpdatePayload", "watcher_object.namespace": "watcher", "watcher_object.data": { "old_state": "ACTIVE", "state": "FAILED" }, "watcher_object.version": "1.0" }, "last_seen_up": "2016-09-22T08:32:06Z", "name": "watcher-service", "sevice_host": "controller" }, "watcher_object.version": "1.0" }, "event_type": "service.update", "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" }python-watcher-4.0.0/doc/notification_samples/audit-strategy-end.json0000664000175000017500000000437013656752270026116 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", 
"parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.strategy.end", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/notification_samples/audit-strategy-start.json0000664000175000017500000000437213656752270026507 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": 
"bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.strategy.start", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/notification_samples/action_plan-execution-start.json0000664000175000017500000000356013656752270030027 0ustar zuulzuul00000000000000{ "event_type": "action_plan.execution.start", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanActionPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", 
"watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "PENDING", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "fault": null, "state": "ONGOING", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } python-watcher-4.0.0/doc/notification_samples/audit-delete.json0000664000175000017500000000433513656752270024753 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "DELETED", "updated_at": null, "deleted_at": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": 
"75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditDeletePayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.delete", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/notification_samples/action_plan-execution-end.json0000664000175000017500000000356013656752270027440 0ustar zuulzuul00000000000000{ "event_type": "action_plan.execution.end", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanActionPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "SUCCEEDED", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": 
"76be87bd-3422-43f9-93a0-e85a577e3061", "fault": null, "state": "ONGOING", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } python-watcher-4.0.0/doc/notification_samples/audit-strategy-error.json0000664000175000017500000000522713656752270026503 0ustar zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": "test_send_audit_action_with_error", "module_name": "watcher.tests.notifications.test_audit_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", 
"default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.strategy.error", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/notification_samples/action-execution-end.json0000664000175000017500000000242113656752270026421 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionExecutionPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "SUCCEEDED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.execution.end", "publisher_id": "infra-optim:node0", "timestamp": 
"2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/action_plan-cancel-error.json0000664000175000017500000000437513656752270027252 0ustar zuulzuul00000000000000{ "event_type": "action_plan.cancel.error", "publisher_id": "infra-optim:node0", "priority": "ERROR", "message_id": "9a45c5ae-0e21-4300-8fa0-5555d52a66d9", "payload": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.data": { "fault": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "exception_message": "TEST", "module_name": "watcher.tests.notifications.test_action_plan_notification", "function_name": "test_send_action_plan_cancel_with_error", "exception": "WatcherException" } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "created_at": "2016-10-18T09:52:05Z", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "created_at": "2016-10-18T09:52:05Z", "name": "TEST", "updated_at": null, "display_name": "test strategy", "parameters_spec": {}, "deleted_at": null } }, "updated_at": null, "deleted_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.version": "1.0", "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.data": { "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "created_at": "2016-10-18T09:52:05Z", "scope": [], "updated_at": null, "audit_type": "ONESHOT", "interval": null, "deleted_at": null, "state": "SUCCEEDED" } }, 
"global_efficacy": [], "state": "CANCELLING" } }, "timestamp": "2016-10-18 09:52:05.219414" } python-watcher-4.0.0/doc/notification_samples/action-cancel-error.json0000664000175000017500000000325413656752270026233 0ustar zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCancelPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "module_name": "watcher.tests.notifications.test_action_notification", "exception": "WatcherException", "exception_message": "TEST", "function_name": "test_send_action_cancel_with_error" } }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "FAILED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.cancel.error", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/action-create.json0000664000175000017500000000236013656752270025117 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCreatePayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", 
"input_parameters": { "param2": 2, "param1": 1 }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "PENDING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "event_type": "action.create", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/action_plan-delete.json0000664000175000017500000000352213656752270026131 0ustar zuulzuul00000000000000{ "publisher_id": "infra-optim:node0", "timestamp": "2016-10-18 09:52:05.219414", "payload": { "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "created_at": "2016-10-18T09:52:05Z", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "name": "my_audit", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "interval": null, "audit_type": "ONESHOT", "scope": [], "updated_at": null, "deleted_at": null, "state": "PENDING", "created_at": "2016-10-18T09:52:05Z", "parameters": {} }, "watcher_object.version": "1.0", "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher" }, "global_efficacy": {}, "updated_at": null, "deleted_at": null, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "created_at": "2016-10-18T09:52:05Z", "name": "TEST", "display_name": "test 
strategy", "deleted_at": null, "updated_at": null, "parameters_spec": {} }, "watcher_object.version": "1.0", "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher" }, "state": "DELETED" }, "watcher_object.version": "1.0", "watcher_object.name": "ActionPlanDeletePayload", "watcher_object.namespace": "watcher" }, "event_type": "action_plan.delete", "message_id": "3d137686-a1fd-4683-ab40-c4210aac2140", "priority": "INFO" } python-watcher-4.0.0/doc/notification_samples/action_plan-update.json0000664000175000017500000000416413656752270026154 0ustar zuulzuul00000000000000{ "payload": { "watcher_object.version": "1.0", "watcher_object.data": { "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.version": "1.0", "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "scope": [], "created_at": "2016-10-18T09:52:05Z", "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "interval": null, "updated_at": null, "state": "PENDING", "deleted_at": null, "parameters": {} }, "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload" }, "created_at": "2016-10-18T09:52:05Z", "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "updated_at": null, "state_update": { "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanStateUpdatePayload" }, "state": "ONGOING", "deleted_at": null, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.data": { "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "display_name": "test strategy", "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "deleted_at": null, "parameters_spec": {} }, "watcher_object.namespace": "watcher", "watcher_object.name": 
"StrategyPayload" }, "global_efficacy": {} }, "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanUpdatePayload" }, "publisher_id": "infra-optim:node0", "priority": "INFO", "timestamp": "2016-10-18 09:52:05.219414", "event_type": "action_plan.update", "message_id": "0a8a7329-fd5a-4ec6-97d7-2b776ce51a4c" } python-watcher-4.0.0/doc/notification_samples/action_plan-create.json0000664000175000017500000000350413656752270026132 0ustar zuulzuul00000000000000{ "publisher_id": "infra-optim:node0", "payload": { "watcher_object.version": "1.0", "watcher_object.data": { "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.version": "1.0", "watcher_object.data": { "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "display_name": "test strategy", "name": "TEST", "updated_at": null, "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "deleted_at": null }, "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload" }, "created_at": null, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.version": "1.0", "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "scope": [], "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "parameters": {}, "interval": null, "deleted_at": null, "state": "PENDING", "created_at": "2016-10-18T09:52:05Z", "updated_at": null }, "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "deleted_at": null, "state": "RECOMMENDED", "updated_at": null }, "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCreatePayload" }, "priority": "INFO", "message_id": "5148bff1-ea06-4ad6-8e4e-8c85ca5eb629", "event_type": "action_plan.create", "timestamp": "2016-10-18 09:52:05.219414" } 
python-watcher-4.0.0/doc/notification_samples/action-execution-error.json0000664000175000017500000000326113656752270027007 0ustar zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionExecutionPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ExceptionPayload", "watcher_object.data": { "module_name": "watcher.tests.notifications.test_action_notification", "exception": "WatcherException", "exception_message": "TEST", "function_name": "test_send_action_execution_with_error" } }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "FAILED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy":[], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.execution.error", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/action_plan-cancel-end.json0000664000175000017500000000352113656752270026657 0ustar zuulzuul00000000000000{ "event_type": "action_plan.cancel.end", "payload": { "watcher_object.namespace": "watcher", "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "audit_uuid": 
"10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "scope": [], "audit_type": "ONESHOT", "state": "SUCCEEDED", "parameters": {}, "interval": null, "updated_at": null } }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "fault": null, "state": "CANCELLED", "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": null, "name": "TEST", "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "parameters_spec": {}, "display_name": "test strategy", "updated_at": null } }, "updated_at": null } }, "priority": "INFO", "message_id": "3984dc2b-8aef-462b-a220-8ae04237a56e", "timestamp": "2016-10-18 09:52:05.219414", "publisher_id": "infra-optim:node0" } python-watcher-4.0.0/doc/notification_samples/action-delete.json0000664000175000017500000000236013656752270025116 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionDeletePayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "DELETED", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": {}, "created_at": "2016-10-18T09:52:05Z", 
"updated_at": null, "state": "ONGOING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "event_type": "action.delete", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/audit-planner-error.json0000664000175000017500000000522613656752270026277 0ustar zuulzuul00000000000000{ "priority": "ERROR", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": "test_send_audit_action_with_error", "module_name": "watcher.tests.notifications.test_audit_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": 
null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.planner.error", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/notification_samples/audit-planner-end.json0000664000175000017500000000436713656752270025721 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "ONGOING", "updated_at": null, "deleted_at": null, "fault": null, "goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": 
"1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditActionPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.planner.end", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/notification_samples/action-cancel-start.json0000664000175000017500000000242113656752270026232 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "ActionCancelPayload", "watcher_object.data": { "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "input_parameters": { "param2": 2, "param1": 1 }, "fault": null, "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "action_plan": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.name": "TerseActionPlanPayload", "watcher_object.data": { "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "global_efficacy": [], "created_at": "2016-10-18T09:52:05Z", "updated_at": null, "state": "CANCELLING", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "deleted_at": null } }, "parents": [], "action_type": "nop", "deleted_at": null } }, "event_type": "action.cancel.start", "publisher_id": "infra-optim:node0", "timestamp": "2017-01-01 00:00:00.000000", "message_id": "530b409c-9b6b-459b-8f08-f93dbfeb4d41" } python-watcher-4.0.0/doc/notification_samples/audit-create.json0000664000175000017500000000433513656752270024754 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "watcher_object.data": { "name": "my_audit", "audit_type": "ONESHOT", "parameters": { "para2": "hello", "para1": 3.2 }, "state": "PENDING", "updated_at": null, "deleted_at": null, 
"goal_uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "goal": { "watcher_object.data": { "uuid": "bc830f84-8ae3-4fc6-8bc6-e3dd15e8b49a", "name": "dummy", "updated_at": null, "deleted_at": null, "efficacy_specification": [], "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy goal" }, "watcher_object.name": "GoalPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "interval": null, "scope": [], "strategy_uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "strategy": { "watcher_object.data": { "parameters_spec": { "properties": { "para2": { "type": "string", "default": "hello", "description": "string parameter example" }, "para1": { "description": "number parameter example", "maximum": 10.2, "type": "number", "default": 3.2, "minimum": 1.0 } } }, "name": "dummy", "uuid": "75234dfe-87e3-4f11-a0e0-3c3305d86a39", "updated_at": null, "deleted_at": null, "created_at": "2016-11-04T16:25:35Z", "display_name": "Dummy strategy" }, "watcher_object.name": "StrategyPayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "created_at": "2016-11-04T16:29:20Z", "uuid": "4a97b9dd-2023-43dc-b713-815bdd94d4d6" }, "watcher_object.name": "AuditCreatePayload", "watcher_object.version": "1.0", "watcher_object.namespace": "watcher" }, "publisher_id": "infra-optim:localhost", "timestamp": "2016-11-04 16:31:36.264673 ", "event_type": "audit.create", "message_id": "cbcf9f2c-7c53-4b4d-91ec-db49cca024b6" } python-watcher-4.0.0/doc/source/0000775000175000017500000000000013656752352016574 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/man/0000775000175000017500000000000013656752352017347 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/man/watcher-status.rst0000664000175000017500000000402413656752270023056 0ustar zuulzuul00000000000000============== watcher-status ============== ----------------------------------------- CLI interface for Watcher status commands ----------------------------------------- 
Synopsis ======== :: watcher-status [] Description =========== :program:`watcher-status` is a tool that provides routines for checking the status of a Watcher deployment. Options ======= The standard pattern for executing a :program:`watcher-status` command is:: watcher-status [] Run without arguments to see a list of available command categories:: watcher-status Categories are: * ``upgrade`` Detailed descriptions are below: You can also run with a category argument such as ``upgrade`` to see a list of all commands in that category:: watcher-status upgrade These sections describe the available categories and arguments for :program:`Watcher-status`. Upgrade ~~~~~~~ .. _watcher-status-checks: ``watcher-status upgrade check`` Performs a release-specific readiness check before restarting services with new code. For example, missing or changed configuration options, incompatible object states, or other conditions that could lead to failures while upgrading. **Return Codes** .. list-table:: :widths: 20 80 :header-rows: 1 * - Return code - Description * - 0 - All upgrade readiness checks passed successfully and there is nothing to do. * - 1 - At least one check encountered an issue and requires further investigation. This is considered a warning but the upgrade may be OK. * - 2 - There was an upgrade status check failure that needs to be investigated. This should be considered something that stops an upgrade. * - 255 - An unexpected error occurred. **History of Checks** **2.0.0 (Stein)** * Sample check to be filled in with checks as they are added in Stein. **3.0.0 (Train)** * A check was added to enforce the minimum required version of nova API used. 
python-watcher-4.0.0/doc/source/man/watcher-decision-engine.rst0000664000175000017500000000124213656752270024572 0ustar zuulzuul00000000000000======================= watcher-decision-engine ======================= --------------------------------------- Service for the Watcher Decision Engine --------------------------------------- :Author: openstack@lists.launchpad.net :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== watcher-decision-engine [options] DESCRIPTION =========== :ref:`Watcher Decision Engine ` OPTIONS ======= **General options** .. include:: general-options.rst FILES ===== **/etc/watcher/watcher.conf** Default configuration file for Watcher Decision Engine .. include:: footer.rst python-watcher-4.0.0/doc/source/man/watcher-api.rst0000664000175000017500000000107113656752270022303 0ustar zuulzuul00000000000000=========== watcher-api =========== --------------------------- Service for the Watcher API --------------------------- :Author: openstack@lists.launchpad.net :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== watcher-api [options] DESCRIPTION =========== watcher-api is a server daemon that serves the Watcher API OPTIONS ======= **General options** .. include:: general-options.rst FILES ===== **/etc/watcher/watcher.conf** Default configuration file for Watcher API .. 
include:: footer.rst python-watcher-4.0.0/doc/source/man/footer.rst0000664000175000017500000000016213656752270021375 0ustar zuulzuul00000000000000BUGS ==== * Watcher bugs are tracked in Launchpad at `OpenStack Watcher `__ python-watcher-4.0.0/doc/source/man/general-options.rst0000664000175000017500000000453513656752270023215 0ustar zuulzuul00000000000000 **-h, --help** Show the help message and exit **--version** Print the version number and exit **-v, --verbose** Print more verbose output **--noverbose** Disable verbose output **-d, --debug** Print debugging output (set logging level to DEBUG instead of default WARNING level) **--nodebug** Disable debugging output **--use-syslog** Use syslog for logging **--nouse-syslog** Disable the use of syslog for logging **--syslog-log-facility SYSLOG_LOG_FACILITY** syslog facility to receive log lines **--config-dir DIR** Path to a config directory to pull \*.conf files from. This file set is sorted, to provide a predictable parse order if individual options are over-ridden. The set is parsed after the file(s) specified via previous --config-file, arguments hence over-ridden options in the directory take precedence. This means that configuration from files in a specified config-dir will always take precedence over configuration from files specified by --config-file, regardless to argument order. **--config-file PATH** Path to a config file to use. Multiple config files can be specified by using this flag multiple times, for example, --config-file --config-file . Values in latter files take precedence. **--log-config-append PATH** **--log-config PATH** The name of logging configuration file. It does not disable existing loggers, but just appends specified logging configuration to any other existing logging options. Please see the Python logging module documentation for details on logging configuration files. The log-config name for this option is depcrecated. 
**--log-format FORMAT** A logging.Formatter log message format string which may use any of the available logging.LogRecord attributes. Default: None **--log-date-format DATE_FORMAT** Format string for %(asctime)s in log records. Default: None **--log-file PATH, --logfile PATH** (Optional) Name of log file to output to. If not set, logging will go to stdout. **--log-dir LOG_DIR, --logdir LOG_DIR** (Optional) The directory to keep log files in (will be prepended to --log-file) python-watcher-4.0.0/doc/source/man/watcher-applier.rst0000664000175000017500000000112213656752270023163 0ustar zuulzuul00000000000000=============== watcher-applier =============== ------------------------------- Service for the Watcher Applier ------------------------------- :Author: openstack@lists.launchpad.net :Copyright: OpenStack Foundation :Manual section: 1 :Manual group: cloud computing SYNOPSIS ======== watcher-applier [options] DESCRIPTION =========== :ref:`Watcher Applier ` OPTIONS ======= **General options** .. include:: general-options.rst FILES ===== **/etc/watcher/watcher.conf** Default configuration file for Watcher Applier .. include:: footer.rst python-watcher-4.0.0/doc/source/man/index.rst0000664000175000017500000000031413656752270021205 0ustar zuulzuul00000000000000==================== Watcher Manual Pages ==================== .. toctree:: :glob: :maxdepth: 1 watcher-api watcher-applier watcher-db-manage watcher-decision-engine watcher-status python-watcher-4.0.0/doc/source/man/watcher-db-manage.rst0000664000175000017500000001467513656752270023363 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher-db-manage: ================= watcher-db-manage ================= The :command:`watcher-db-manage` utility is used to create the database schema tables that the watcher services will use for storage. 
It can also be used to upgrade (or downgrade) existing database tables when migrating between different versions of watcher. The `Alembic library `_ is used to perform the database migrations. Options ======= This is a partial list of the most useful options. To see the full list, run the following:: watcher-db-manage --help .. program:: watcher-db-manage .. option:: -h, --help Show help message and exit. .. option:: --config-dir Path to a config directory with configuration files. .. option:: --config-file Path to a configuration file to use. .. option:: -d, --debug Print debugging output. .. option:: -v, --verbose Print more verbose output. .. option:: --version Show the program's version number and exit. .. option:: upgrade, downgrade, stamp, revision, version, create_schema, purge The :ref:`command ` to run. Usage ===== Options for the various :ref:`commands ` for :command:`watcher-db-manage` are listed when the :option:`-h` or :option:`--help` option is used after the command. For example:: watcher-db-manage create_schema --help Information about the database is read from the watcher configuration file used by the API server and conductor services. This file must be specified with the :option:`--config-file` option:: watcher-db-manage --config-file /path/to/watcher.conf create_schema The configuration file defines the database backend to use with the *connection* database option:: [database] connection=mysql://root@localhost/watcher If no configuration file is specified with the :option:`--config-file` option, :command:`watcher-db-manage` assumes an SQLite database. .. _db-manage_cmds: Command Options =============== :command:`watcher-db-manage` is given a command that tells the utility what actions to perform. These commands can take arguments. Several commands are available: .. _create_schema: create_schema ------------- .. program:: create_schema .. option:: -h, --help Show help for create_schema and exit. 
This command will create database tables based on the most current version. It assumes that there are no existing tables. An example of creating database tables with the most recent version:: watcher-db-manage --config-file=/etc/watcher/watcher.conf create_schema downgrade --------- .. program:: downgrade .. option:: -h, --help Show help for downgrade and exit. .. option:: --revision The revision number you want to downgrade to. This command will revert existing database tables to a previous version. The version can be specified with the :option:`--revision` option. An example of downgrading to table versions at revision 2581ebaf0cb2:: watcher-db-manage --config-file=/etc/watcher/watcher.conf downgrade --revision 2581ebaf0cb2 revision -------- .. program:: revision .. option:: -h, --help Show help for revision and exit. .. option:: -m , --message The message to use with the revision file. .. option:: --autogenerate Compares table metadata in the application with the status of the database and generates migrations based on this comparison. This command will create a new revision file. You can use the :option:`--message` option to comment the revision. This is really only useful for watcher developers making changes that require database changes. This revision file is used during database migration and will specify the changes that need to be made to the database tables. Further discussion is beyond the scope of this document. stamp ----- .. program:: stamp .. option:: -h, --help Show help for stamp and exit. .. option:: --revision The revision number. This command will 'stamp' the revision table with the version specified with the :option:`--revision` option. It will not run any migrations. upgrade ------- .. program:: upgrade .. option:: -h, --help Show help for upgrade and exit. .. option:: --revision The revision number to upgrade to. 
This command will upgrade existing database tables to the most recent version, or to the version specified with the :option:`--revision` option. If there are no existing tables, then new tables are created, beginning with the oldest known version, and successively upgraded using all of the database migration files, until they are at the specified version. Note that this behavior is different from the :ref:`create_schema` command that creates the tables based on the most recent version. An example of upgrading to the most recent table versions:: watcher-db-manage --config-file=/etc/watcher/watcher.conf upgrade .. note:: This command is the default if no command is given to :command:`watcher-db-manage`. .. warning:: The upgrade command is not compatible with SQLite databases since it uses ALTER TABLE commands to upgrade the database tables. SQLite supports only a limited subset of ALTER TABLE. version ------- .. program:: version .. option:: -h, --help Show help for version and exit. This command will output the current database version. purge ----- .. program:: purge .. option:: -h, --help Show help for purge and exit. .. option:: -d, --age-in-days The number of days (starting from today) before which we consider soft deleted objects as expired and should hence be erased. By default, all objects soft deleted are considered expired. This can be useful as removing a significant amount of objects may cause a performance issues. .. option:: -n, --max-number The maximum number of database objects we expect to be deleted. If exceeded, this will prevent any deletion. .. option:: -t, --goal Either the UUID or name of the goal to purge. .. option:: -e, --exclude-orphans This is a flag to indicate when we want to exclude orphan objects from deletion. .. option:: --dry-run This is a flag to indicate when we want to perform a dry run. This will show the objects that would be deleted instead of actually deleting them. 
This command will purge the current database by removing both its soft deleted and orphan objects. python-watcher-4.0.0/doc/source/strategies/0000775000175000017500000000000013656752352020746 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/strategies/basic-server-consolidation.rst0000664000175000017500000000627713656752270026743 0ustar zuulzuul00000000000000================================== Basic Offline Server Consolidation ================================== Synopsis -------- **display name**: ``Basic offline consolidation`` **goal**: ``server_consolidation`` .. watcher-term:: watcher.decision_engine.strategy.strategies.basic_consolidation.BasicConsolidation Requirements ------------ Metrics ******* The *basic* strategy requires the following metrics: ============================ ============ ======= =========================== metric service name plugins comment ============================ ============ ======= =========================== ``compute.node.cpu.percent`` ceilometer_ none need to set the ``compute_monitors`` option to ``cpu.virt_driver`` in the nova.conf. ``cpu_util`` ceilometer_ none cpu_util has been removed since Stein. ============================ ============ ======= =========================== .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. 
watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ====================== ====== ============= =================================== parameter type default Value description ====================== ====== ============= =================================== ``migration_attempts`` Number 0 Maximum number of combinations to be tried by the strategy while searching for potential candidates. To remove the limit, set it to 0 ``period`` Number 7200 The time interval in seconds for getting statistic aggregation from metric data source ====================== ====== ============= =================================== Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 server_consolidation --strategy basic $ openstack optimize audit create -a at1 -p migration_attempts=4 External Links -------------- None. python-watcher-4.0.0/doc/source/strategies/node_resource_consolidation.rst0000664000175000017500000000436313656752270027266 0ustar zuulzuul00000000000000==================================== Node Resource Consolidation Strategy ==================================== Synopsis -------- **display name**: ``Node Resource Consolidation Strategy`` **goal**: ``Server Consolidation`` .. watcher-term:: watcher.decision_engine.strategy.strategies.node_resource_consolidation.NodeResourceConsolidation Requirements ------------ None. Metrics ******* None Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. 
watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ======================================= parameter type default Value description ==================== ====== ======================================= ``host_choice`` String The way to select the server migration destination node, The value auto means that Nova schedular selects the destination node, and specify means the strategy specifies the destination. ==================== ====== ======================================= Efficacy Indicator ------------------ None Algorithm --------- For more information on the Node Resource Consolidation Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/train/approved/node-resource-consolidation.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 server_consolidation \ --strategy node_resource_consolidation $ openstack optimize audit create \ -a at1 -p host_choice=auto External Links -------------- None. python-watcher-4.0.0/doc/source/strategies/workload-stabilization.rst0000664000175000017500000001361113656752270026175 0ustar zuulzuul00000000000000============================================= Watcher Overload standard deviation algorithm ============================================= Synopsis -------- **display name**: ``Workload stabilization`` **goal**: ``workload_balancing`` .. 
watcher-term:: watcher.decision_engine.strategy.strategies.workload_stabilization.WorkloadStabilization Requirements ------------ Metrics ******* The *workload_stabilization* strategy requires the following metrics: ============================ ============ ======= ============================= metric service name plugins comment ============================ ============ ======= ============================= ``compute.node.cpu.percent`` ceilometer_ none need to set the ``compute_monitors`` option to ``cpu.virt_driver`` in the nova.conf. ``hardware.memory.used`` ceilometer_ SNMP_ ``cpu_util`` ceilometer_ none cpu_util has been removed since Stein. ``memory.resident`` ceilometer_ none ============================ ============ ======= ============================= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _SNMP: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#snmp-based-meters Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ===================== ============================= parameter type default Value description ==================== ====== ===================== ============================= ``metrics`` array |metrics| Metrics used as rates of cluster loads. ``thresholds`` object |thresholds| Dict where key is a metric and value is a trigger value. ``weights`` object |weights| These weights used to calculate common standard deviation. 
Name of weight contains meter name and _weight suffix. ``instance_metrics`` object |instance_metrics| Mapping to get hardware statistics using instance metrics. ``host_choice`` string retry Method of host's choice. There are cycle, retry and fullsearch methods. Cycle will iterate hosts in cycle. Retry will get some hosts random (count defined in retry_count option). Fullsearch will return each host from list. ``retry_count`` number 1 Count of random returned hosts. ``periods`` object |periods| These periods are used to get statistic aggregation for instance and host metrics. The period is simply a repeating interval of time into which the samples are grouped for aggregation. Watcher uses only the last period of all received ones. ==================== ====== ===================== ============================= .. |metrics| replace:: ["cpu_util", "memory.resident"] .. |thresholds| replace:: {"cpu_util": 0.2, "memory.resident": 0.2} .. |weights| replace:: {"cpu_util_weight": 1.0, "memory.resident_weight": 1.0} .. |instance_metrics| replace:: {"cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used"} .. |periods| replace:: {"instance": 720, "node": 600} Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator Algorithm --------- You can find description of overload algorithm and role of standard deviation here: https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/sd-strategy.html How to use it ? --------------- .. 
code-block:: shell $ openstack optimize audittemplate create \ at1 workload_balancing --strategy workload_stabilization $ openstack optimize audit create -a at1 \ -p thresholds='{"memory.resident": 0.05}' \ -p metrics='["memory.resident"]' External Links -------------- - `Watcher Overload standard deviation algorithm spec `_ python-watcher-4.0.0/doc/source/strategies/workload_balance.rst0000664000175000017500000000533613656752270024775 0ustar zuulzuul00000000000000=================================== Workload Balance Migration Strategy =================================== Synopsis -------- **display name**: ``Workload Balance Migration Strategy`` **goal**: ``workload_balancing`` .. watcher-term:: watcher.decision_engine.strategy.strategies.workload_balance.WorkloadBalance Requirements ------------ None. Metrics ******* The *workload_balance* strategy requires the following metrics: ======================= ============ ======= ========================= metric service name plugins comment ======================= ============ ======= ========================= ``cpu_util`` ceilometer_ none cpu_util has been removed since Stein. ``memory.resident`` ceilometer_ none ======================= ============ ======= ========================= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. 
watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ============== ====== ============= ==================================== parameter type default Value description ============== ====== ============= ==================================== ``metrics`` String 'cpu_util' Workload balance base on cpu or ram utilization. choice: ['cpu_util', 'memory.resident'] ``threshold`` Number 25.0 Workload threshold for migration ``period`` Number 300 Aggregate time period of ceilometer ============== ====== ============= ==================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Workload Balance Migration Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/workload-balance-migration-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 workload_balancing --strategy workload_balance $ openstack optimize audit create -a at1 -p threshold=26.0 \ -p period=310 -p metrics=cpu_util External Links -------------- None. python-watcher-4.0.0/doc/source/strategies/outlet_temp_control.rst0000664000175000017500000000534013656752270025602 0ustar zuulzuul00000000000000================================= Outlet Temperature Based Strategy ================================= Synopsis -------- **display name**: ``Outlet temperature based strategy`` **goal**: ``thermal_optimization`` .. watcher-term:: watcher.decision_engine.strategy.strategies.outlet_temp_control Requirements ------------ This strategy has a dependency on the host having Intel's Power Node Manager 3.0 or later enabled. 
Metrics ******* The *outlet_temperature* strategy requires the following metrics: ========================================= ============ ======= ======= metric service name plugins comment ========================================= ============ ======= ======= ``hardware.ipmi.node.outlet_temperature`` ceilometer_ IPMI ========================================= ============ ======= ======= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#ipmi-based-meters Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ============== ====== ============= ==================================== parameter type default Value description ============== ====== ============= ==================================== ``threshold`` Number 35.0 Temperature threshold for migration ``period`` Number 30 The time interval in seconds for getting statistic aggregation from metric data source ============== ====== ============= ==================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Outlet Temperature Based Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/outlet-temperature-based-strategy.html How to use it ? --------------- .. 
code-block:: shell $ openstack optimize audittemplate create \ at1 thermal_optimization --strategy outlet_temperature $ openstack optimize audit create -a at1 -p threshold=31.0 External Links -------------- - `Intel Power Node Manager 3.0 `_ python-watcher-4.0.0/doc/source/strategies/zone_migration.rst0000664000175000017500000001246413656752270024532 0ustar zuulzuul00000000000000============== Zone migration ============== Synopsis -------- **display name**: ``Zone migration`` **goal**: ``hardware_maintenance`` .. watcher-term:: watcher.decision_engine.strategy.strategies.zone_migration.ZoneMigration Requirements ------------ Metrics ******* None Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Storage cluster data model is also required: .. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migrate`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``volume_migrate`` - .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ======================== ======== ============= ============================== parameter type default Value description ======================== ======== ============= ============================== ``compute_nodes`` array None Compute nodes to migrate. ``storage_pools`` array None Storage pools to migrate. ``parallel_total`` integer 6 The number of actions to be run in parallel in total. ``parallel_per_node`` integer 2 The number of actions to be run in parallel per compute node. 
``parallel_per_pool`` integer 2 The number of actions to be run in parallel per storage pool. ``priority`` object None List prioritizes instances and volumes. ``with_attached_volume`` boolean False False: Instances will migrate after all volumes migrate. True: An instance will migrate after the attached volumes migrate. ======================== ======== ============= ============================== The elements of compute_nodes array are: ============= ======= =============== ============================= parameter type default Value description ============= ======= =============== ============================= ``src_node`` string None Compute node from which instances migrate(mandatory). ``dst_node`` string None Compute node to which instances migrate. ============= ======= =============== ============================= The elements of storage_pools array are: ============= ======= =============== ============================== parameter type default Value description ============= ======= =============== ============================== ``src_pool`` string None Storage pool from which volumes migrate(mandatory). ``dst_pool`` string None Storage pool to which volumes migrate. ``src_type`` string None Source volume type(mandatory). ``dst_type`` string None Destination volume type (mandatory). ============= ======= =============== ============================== The elements of priority object are: ================ ======= =============== ====================== parameter type default Value description ================ ======= =============== ====================== ``project`` array None Project names. ``compute_node`` array None Compute node names. ``storage_pool`` array None Storage pool names. ``compute`` enum None Instance attributes. |compute| ``storage`` enum None Volume attributes. |storage| ================ ======= =============== ====================== .. |compute| replace:: ["vcpu_num", "mem_size", "disk_size", "created_at"] .. 
|storage| replace:: ["size", "created_at"] Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.HardwareMaintenance.get_global_efficacy_indicator Algorithm --------- For more information on the zone migration strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/zone-migration-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 hardware_maintenance --strategy zone_migration $ openstack optimize audit create -a at1 \ -p compute_nodes='[{"src_node": "s01", "dst_node": "d01"}]' External Links -------------- None python-watcher-4.0.0/doc/source/strategies/uniform_airflow.rst0000664000175000017500000000621413656752270024704 0ustar zuulzuul00000000000000================================== Uniform Airflow Migration Strategy ================================== Synopsis -------- **display name**: ``Uniform airflow migration strategy`` **goal**: ``airflow_optimization`` .. watcher-term:: watcher.decision_engine.strategy.strategies.uniform_airflow.UniformAirflow Requirements ------------ This strategy has a dependency on the server having Intel's Power Node Manager 3.0 or later enabled. Metrics ******* The *uniform_airflow* strategy requires the following metrics: ================================== ============ ======= ======= metric service name plugins comment ================================== ============ ======= ======= ``hardware.ipmi.node.airflow`` ceilometer_ IPMI ``hardware.ipmi.node.temperature`` ceilometer_ IPMI ``hardware.ipmi.node.power`` ceilometer_ IPMI ================================== ============ ======= ======= .. _ceilometer: http://docs.openstack.org/admin-guide/telemetry-measurements.html#ipmi-based-meters Cluster data model ****************** Default Watcher's Compute cluster data model: .. 
watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ====================== ====== ============= =========================== parameter type default Value description ====================== ====== ============= =========================== ``threshold_airflow`` Number 400.0 Airflow threshold for migration Unit is 0.1CFM ``threshold_inlet_t`` Number 28.0 Inlet temperature threshold for migration decision ``threshold_power`` Number 350.0 System power threshold for migration decision ``period`` Number 300 Aggregate time period of ceilometer ====================== ====== ============= =========================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Uniform Airflow Migration Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/newton/implemented/uniform-airflow-migration-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 airflow_optimization --strategy uniform_airflow $ openstack optimize audit create -a at1 -p threshold_airflow=410 \ -p threshold_inlet_t=29.0 -p threshold_power=355.0 -p period=310 External Links -------------- - `Intel Power Node Manager 3.0 `_ python-watcher-4.0.0/doc/source/strategies/host_maintenance.rst0000664000175000017500000000414613656752270025023 0ustar zuulzuul00000000000000=========================== Host Maintenance Strategy =========================== Synopsis -------- **display name**: ``Host Maintenance Strategy`` **goal**: ``cluster_maintaining`` .. 
watcher-term:: watcher.decision_engine.strategy.strategies.host_maintenance.HostMaintenance Requirements ------------ None. Metrics ******* None Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ==================================== parameter type default Value description ==================== ====== ==================================== ``maintenance_node`` String The name of the compute node which need maintenance. Required. ``backup_node`` String The name of the compute node which will backup the maintenance node. Optional. ==================== ====== ==================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Host Maintenance Strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/queens/approved/cluster-maintenance-strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audit create \ -g cluster_maintaining -s host_maintenance \ -p maintenance_node=compute01 \ -p backup_node=compute02 \ --auto-trigger External Links -------------- None. python-watcher-4.0.0/doc/source/strategies/actuation.rst0000664000175000017500000000413513656752270023471 0ustar zuulzuul00000000000000============= Actuator ============= Synopsis -------- **display name**: ``Actuator`` **goal**: ``unclassified`` .. 
watcher-term:: watcher.decision_engine.strategy.strategies.actuation.Actuator Requirements ------------ Metrics ******* None Cluster data model ****************** None Actions ******* Default Watcher's actions. Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameters are: ==================== ====== ===================== ============================= parameter type default Value description ==================== ====== ===================== ============================= ``actions`` array None Actions to be executed. ==================== ====== ===================== ============================= The elements of actions array are: ==================== ====== ===================== ============================= parameter type default Value description ==================== ====== ===================== ============================= ``action_type`` string None Action name defined in setup.cfg(mandatory) ``resource_id`` string None Resource_id of the action. ``input_parameters`` object None Input_parameters of the action(mandatory). ==================== ====== ===================== ============================= Efficacy Indicator ------------------ None Algorithm --------- This strategy create an action plan with a predefined set of actions. How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 unclassified --strategy actuator $ openstack optimize audit create -a at1 \ -p actions='[{"action_type": "migrate", "resource_id": "56a40802-6fde-4b59-957c-c84baec7eaed", "input_parameters": {"migration_type": "live", "source_node": "s01"}}]' External Links -------------- None python-watcher-4.0.0/doc/source/strategies/index.rst0000664000175000017500000000010613656752270022603 0ustar zuulzuul00000000000000Strategies ========== .. 
toctree:: :glob: :maxdepth: 1 ./* python-watcher-4.0.0/doc/source/strategies/noisy_neighbor.rst0000664000175000017500000000461313656752270024521 0ustar zuulzuul00000000000000============== Noisy neighbor ============== Synopsis -------- **display name**: ``Noisy Neighbor`` **goal**: ``noisy_neighbor`` .. watcher-term:: watcher.decision_engine.strategy.strategies.noisy_neighbor.NoisyNeighbor Requirements ------------ Metrics ******* The *noisy_neighbor* strategy requires the following metrics: ============================ ============ ======= ======================= metric service name plugins comment ============================ ============ ======= ======================= ``cpu_l3_cache`` ceilometer_ none Intel CMT_ is required ============================ ============ ======= ======================= .. _CMT: http://www.intel.com/content/www/us/en/architecture-and-technology/resource-director-technology.html .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate Planner ******* Default Watcher's planner: .. 
watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ==================== ====== ============= ============================ parameter type default Value description ==================== ====== ============= ============================ ``cache_threshold`` Number 35.0 Performance drop in L3_cache threshold for migration ==================== ====== ============= ============================ Efficacy Indicator ------------------ None Algorithm --------- For more information on the noisy neighbor strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/noisy_neighbor_strategy.html How to use it ? --------------- .. code-block:: shell $ openstack optimize audittemplate create \ at1 noisy_neighbor --strategy noisy_neighbor $ openstack optimize audit create -a at1 \ -p cache_threshold=45.0 External Links -------------- None python-watcher-4.0.0/doc/source/strategies/vm_workload_consolidation.rst0000664000175000017500000000612313656752270026752 0ustar zuulzuul00000000000000================================== VM Workload Consolidation Strategy ================================== Synopsis -------- **display name**: ``VM Workload Consolidation Strategy`` **goal**: ``vm_consolidation`` .. watcher-term:: watcher.decision_engine.strategy.strategies.vm_workload_consolidation.VMWorkloadConsolidation Requirements ------------ Metrics ******* The *vm_workload_consolidation* strategy requires the following metrics: ============================ ============ ======= ========================= metric service name plugins comment ============================ ============ ======= ========================= ``cpu_util`` ceilometer_ none cpu_util has been removed since Stein. ``memory.resident`` ceilometer_ none ``memory`` ceilometer_ none ``disk.root.size`` ceilometer_ none ============================ ============ ======= ========================= .. 
_ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``migration`` - .. watcher-term:: watcher.applier.actions.migration.Migrate * - ``change_nova_service_state`` - .. watcher-term:: watcher.applier.actions.change_nova_service_state.ChangeNovaServiceState Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ====================== ====== ============= =================================== parameter type default Value description ====================== ====== ============= =================================== ``period`` Number 3600 The time interval in seconds for getting statistic aggregation from metric data source ====================== ====== ============= =================================== Efficacy Indicator ------------------ .. watcher-func:: :format: literal_block watcher.decision_engine.goal.efficacy.specs.ServerConsolidation.get_global_efficacy_indicator Algorithm --------- For more information on the VM Workload consolidation strategy please refer to: https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html How to use it ? --------------- .. 
code-block:: shell $ openstack optimize audittemplate create \ at1 server_consolidation --strategy vm_workload_consolidation $ openstack optimize audit create -a at1 External Links -------------- *Spec URL* https://specs.openstack.org/openstack/watcher-specs/specs/mitaka/implemented/zhaw-load-consolidation.html python-watcher-4.0.0/doc/source/strategies/saving_energy.rst0000664000175000017500000000507013656752270024341 0ustar zuulzuul00000000000000====================== Saving Energy Strategy ====================== Synopsis -------- **display name**: ``Saving Energy Strategy`` **goal**: ``saving_energy`` .. watcher-term:: watcher.decision_engine.strategy.strategies.saving_energy.SavingEnergy Requirements ------------ This feature will use Ironic to do the power on/off actions, therefore this feature requires that the ironic component is configured. And the compute node should be managed by Ironic. Ironic installation: https://docs.openstack.org/ironic/latest/install/index.html Cluster data model ****************** Default Watcher's Compute cluster data model: .. watcher-term:: watcher.decision_engine.model.collector.nova.NovaClusterDataModelCollector Actions ******* .. list-table:: :widths: 30 30 :header-rows: 1 * - action - description * - ``change_node_power_state`` - .. watcher-term:: watcher.applier.actions.change_node_power_state.ChangeNodePowerState Planner ******* Default Watcher's planner: .. 
watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ====================== ====== ======= ====================================== parameter type default description Value ====================== ====== ======= ====================================== ``free_used_percent`` Number 10.0 a rational number, which describes the the quotient of min_free_hosts_num/nodes_with_VMs_num ``min_free_hosts_num`` Int 1 an int number describes minimum free compute nodes ====================== ====== ======= ====================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the Energy Saving Strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html How to use it ? --------------- step1: Add compute nodes info into ironic node management .. code-block:: shell $ ironic node-create -d pxe_ipmitool -i ipmi_address=10.43.200.184 \ ipmi_username=root -i ipmi_password=nomoresecret -e compute_node_id=3 step 2: Create audit to do optimization .. code-block:: shell $ openstack optimize audittemplate create \ at1 saving_energy --strategy saving_energy $ openstack optimize audit create -a at1 \ -p free_used_percent=20.0 External Links -------------- None python-watcher-4.0.0/doc/source/strategies/strategy-template.rst0000664000175000017500000000540313656752270025154 0ustar zuulzuul00000000000000============= Strategy name ============= Synopsis -------- **display name**: **goal**: Add here a complete description of your strategy Requirements ------------ Metrics ******* Write here the list of metrics required by your strategy algorithm (in the form of a table). If these metrics requires specific Telemetry plugin or other additional software, please explain here how to deploy them (and add link to dedicated installation guide). 
Example: ======================= ============ ======= ======= metric service name plugins comment ======================= ============ ======= ======= compute.node.* ceilometer_ none one point every 60s vm.cpu.utilization_perc monasca_ none power ceilometer_ kwapi_ one point every 60s ======================= ============ ======= ======= .. _ceilometer: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html#openstack-compute .. _monasca: https://github.com/openstack/monasca-agent/blob/master/docs/Libvirt.md .. _kwapi: https://kwapi.readthedocs.io/en/latest/index.html Cluster data model ****************** Default Watcher's cluster data model. or If your strategy implementation requires a new cluster data model, please describe it in this section, with a link to model plugin's installation guide. Actions ******* Default Watcher's actions. or If your strategy implementation requires new actions, add the list of Action plugins here (in the form of a table) with a link to the plugin's installation procedure. ======== ================= action description ======== ================= action1_ This action1 ... action2_ This action2 ... ======== ================= .. _action1 : https://github.com/myrepo/watcher/plugins/action1 .. _action2 : https://github.com/myrepo/watcher/plugins/action2 Planner ******* Default Watcher's planner. or If your strategy requires also a new planner to schedule built actions in time, please describe it in this section, with a link to planner plugin's installation guide. Configuration ------------- If your strategy use configurable parameters, explain here how to tune them. Efficacy Indicator ------------------ Add here the Efficacy indicator computed by your strategy. Algorithm --------- Add here either the description of your algorithm or link to the existing description. How to use it ? --------------- .. code-block:: shell $ Write the command line to create an audit with your strategy. 
External Links -------------- If you have written papers, blog articles .... about your strategy into Watcher, or if your strategy is based from external publication(s), please add HTTP links and references in this section. - `link1 `_ - `link2 `_ python-watcher-4.0.0/doc/source/strategies/storage_capacity_balance.rst0000664000175000017500000000354513656752270026474 0ustar zuulzuul00000000000000======================== Storage capacity balance ======================== Synopsis -------- **display name**: ``Storage Capacity Balance Strategy`` **goal**: ``workload_balancing`` .. watcher-term:: watcher.decision_engine.strategy.strategies.storage_capacity_balance.StorageCapacityBalance Requirements ------------ Metrics ******* None Cluster data model ****************** Storage cluster data model is required: .. watcher-term:: watcher.decision_engine.model.collector.cinder.CinderClusterDataModelCollector Actions ******* Default Watcher's actions: .. list-table:: :widths: 25 35 :header-rows: 1 * - action - description * - ``volume_migrate`` - .. watcher-term:: watcher.applier.actions.volume_migration.VolumeMigrate Planner ******* Default Watcher's planner: .. watcher-term:: watcher.decision_engine.planner.weight.WeightPlanner Configuration ------------- Strategy parameter is: ==================== ====== ============= ===================================== parameter type default Value description ==================== ====== ============= ===================================== ``volume_threshold`` Number 80.0 Volume threshold for capacity balance ==================== ====== ============= ===================================== Efficacy Indicator ------------------ None Algorithm --------- For more information on the storage capacity balance strategy please refer to: http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/storage-capacity-balance.html How to use it ? --------------- .. 
code-block:: shell $ openstack optimize audittemplate create \ at1 workload_balancing --strategy storage_capacity_balance $ openstack optimize audit create -a at1 \ -p volume_threshold=85.0 External Links -------------- None python-watcher-4.0.0/doc/source/_static/0000775000175000017500000000000013656752352020222 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/_static/.placeholder0000664000175000017500000000000013656752270022472 0ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/conf.py0000775000175000017500000001203113656752270020072 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys from watcher import version as watcher_version from watcher import objects objects.register_all() # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('../../')) sys.path.insert(0, os.path.abspath('../')) sys.path.insert(0, os.path.abspath('./')) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. 
extensions = [ 'oslo_config.sphinxext', 'sphinx.ext.viewcode', 'sphinxcontrib.httpdomain', 'sphinxcontrib.pecanwsme.rest', 'stevedore.sphinxext', 'ext.term', 'ext.versioned_notifications', 'oslo_config.sphinxconfiggen', 'openstackdocstheme', 'sphinx.ext.napoleon', 'sphinxcontrib.rsvgconverter', ] wsme_protocols = ['restjson'] config_generator_config_file = [( '../../etc/watcher/oslo-config-generator/watcher.conf', '_static/watcher')] sample_config_basename = 'watcher' # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Watcher' copyright = u'OpenStack Foundation' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. # The full version, including alpha/beta/rc tags. release = watcher_version.version_info.release_string() # The short X.Y version. version = watcher_version.version_string # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['watcher.'] exclude_patterns = [ # The man directory includes some snippet files that are included # in other documents during the build but that should not be # included in the toctree themselves, so tell Sphinx to ignore # them when scanning for input files. 'man/footer.rst', 'man/general-options.rst', 'strategies/strategy-template.rst', 'image_src/plantuml/README.rst', ] # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). add_module_names = True suppress_warnings = ['app.add_directive'] # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # -- Options for man page output -------------------------------------------- # Grouping the document tree for man pages. # List of tuples 'sourcefile', 'target', u'title', u'Authors name', 'manual' man_pages = [ ('man/watcher-api', 'watcher-api', u'Watcher API Server', [u'OpenStack'], 1), ('man/watcher-applier', 'watcher-applier', u'Watcher Applier', [u'OpenStack'], 1), ('man/watcher-db-manage', 'watcher-db-manage', u'Watcher Db Management Utility', [u'OpenStack'], 1), ('man/watcher-decision-engine', 'watcher-decision-engine', u'Watcher Decision Engine', [u'OpenStack'], 1), ] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme_path = ["."] # html_theme = '_theme' html_theme = 'openstackdocs' # html_static_path = ['static'] # html_theme_options = {} # Output file base name for HTML help builder. htmlhelp_basename = '%sdoc' % project html_last_updated_fmt = '%Y-%m-%d %H:%M' #openstackdocstheme options repository_name = 'openstack/watcher' bug_project = 'watcher' bug_tag = '' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'doc-watcher.tex', u'%s Documentation' % project, u'OpenStack Foundation', 'manual'), ] # If false, no module index is generated. latex_domain_indices = False latex_elements = { 'makeindex': '', 'printindex': '', 'preamble': r'\setcounter{tocdepth}{3}', } # Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 latex_use_xindy = False # Example configuration for intersphinx: refer to the Python standard library. 
# intersphinx_mapping = {'http://docs.python.org/': None} python-watcher-4.0.0/doc/source/install/0000775000175000017500000000000013656752352020242 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/install/get_started.rst0000664000175000017500000000203713656752270023302 0ustar zuulzuul00000000000000============================================ Infrastructure Optimization service overview ============================================ The Infrastructure Optimization service provides flexible and scalable optimization service for multi-tenant OpenStack based clouds. The Infrastructure Optimization service consists of the following components: ``watcher`` command-line client A CLI to communicate with ``watcher-api`` to optimize the cloud. ``watcher-api`` service An OpenStack-native REST API that accepts and responds to end-user calls by processing them and forwarding to appropriate underlying watcher services via AMQP. ``watcher-decision-engine`` service It runs audit and return an action plan to achieve optimization goal specified by the end-user in audit. ``watcher-applier`` service It executes action plan built by watcher-decision-engine. It interacts with other OpenStack components like nova to execute the given action plan. ``watcher-dashboard`` Watcher UI implemented as a plugin for the OpenStack Dashboard. python-watcher-4.0.0/doc/source/install/common_configure.rst0000664000175000017500000000502113656752270024322 0ustar zuulzuul000000000000002. Edit the ``/etc/watcher/watcher.conf`` file and complete the following actions: * In the ``[database]`` section, configure database access: .. code-block:: ini [database] ... connection = mysql+pymysql://watcher:WATCHER_DBPASS@controller/watcher?charset=utf8 * In the `[DEFAULT]` section, configure the transport url for RabbitMQ message broker. .. code-block:: ini [DEFAULT] ... 
control_exchange = watcher transport_url = rabbit://openstack:RABBIT_PASS@controller Replace the RABBIT_PASS with the password you chose for OpenStack user in RabbitMQ. * In the `[keystone_authtoken]` section, configure Identity service access. .. code-block:: ini [keystone_authtoken] ... www_authenticate_uri = http://controller:5000 auth_url = http://controller:5000 memcached_servers = controller:11211 auth_type = password project_domain_name = default user_domain_name = default project_name = service username = watcher password = WATCHER_PASS Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service. * Watcher interacts with other OpenStack projects via project clients, in order to instantiate these clients, Watcher requests new session from Identity service. In the `[watcher_clients_auth]` section, configure the identity service access to interact with other OpenStack project clients. .. code-block:: ini [watcher_clients_auth] ... auth_type = password auth_url = http://controller:5000 username = watcher password = WATCHER_PASS project_domain_name = default user_domain_name = default project_name = service Replace WATCHER_PASS with the password you chose for the watcher user in the Identity service. * In the `[api]` section, configure host option. .. code-block:: ini [api] ... host = controller Replace controller with the IP address of the management network interface on your controller node, typically 10.0.0.11 for the first node in the example architecture. * In the `[oslo_messaging_notifications]` section, configure the messaging driver. .. code-block:: ini [oslo_messaging_notifications] ... driver = messagingv2 3. Populate watcher database: .. code-block:: ini su -s /bin/sh -c "watcher-db-manage --config-file /etc/watcher/watcher.conf upgrade" python-watcher-4.0.0/doc/source/install/install-ubuntu.rst0000664000175000017500000000162613656752270023766 0ustar zuulzuul00000000000000.. 
_install-ubuntu: Install and configure for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Infrastructure Optimization service for Ubuntu 16.04 (LTS). .. include:: common_prerequisites.rst Install and configure components -------------------------------- 1. Install the packages: .. code-block:: console # apt install watcher-api watcher-decision-engine \ watcher-applier # apt install python-watcherclient .. include:: common_configure.rst Finalize installation --------------------- Start the Infrastructure Optimization services and configure them to start when the system boots: .. code-block:: console # systemctl enable watcher-api.service \ watcher-decision-engine.service \ watcher-applier.service # systemctl start watcher-api.service \ watcher-decision-engine.service \ watcher-applier.service python-watcher-4.0.0/doc/source/install/install-rdo.rst0000664000175000017500000000203013656752270023216 0ustar zuulzuul00000000000000.. _install-rdo: Install and configure for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Infrastructure Optimization service for Red Hat Enterprise Linux 7 and CentOS 7. .. include:: common_prerequisites.rst Install and configure components -------------------------------- 1. Install the packages: .. code-block:: console # sudo yum install openstack-watcher-api openstack-watcher-applier \ openstack-watcher-decision-engine .. include:: common_configure.rst Finalize installation --------------------- Start the Infrastructure Optimization services and configure them to start when the system boots: .. 
code-block:: console # systemctl enable openstack-watcher-api.service \ openstack-watcher-decision-engine.service \ openstack-watcher-applier.service # systemctl start openstack-watcher-api.service \ openstack-watcher-decision-engine.service \ openstack-watcher-applier.service python-watcher-4.0.0/doc/source/install/next-steps.rst0000664000175000017500000000026213656752270023105 0ustar zuulzuul00000000000000.. _next-steps: Next steps ~~~~~~~~~~ Your OpenStack environment now includes the watcher service. To add additional services, see https://docs.openstack.org/queens/install/. python-watcher-4.0.0/doc/source/install/install.rst0000664000175000017500000000102013656752270022432 0ustar zuulzuul00000000000000.. _install: Install and configure ~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the Infrastructure Optimization service, code-named watcher, on the controller node. This section assumes that you already have a working OpenStack environment with at least the following components installed: Identity Service, Compute Service, Telemetry data collection service. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 2 install-rdo.rst install-ubuntu.rst python-watcher-4.0.0/doc/source/install/verify.rst0000664000175000017500000001752313656752270022307 0ustar zuulzuul00000000000000.. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Infrastructure Optimization service. .. note:: Perform these commands on the controller node. 1. Source the ``admin`` project credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc 2. List service components to verify successful launch and registration of each process: .. 
code-block:: console $ openstack optimize service list +----+-------------------------+------------+--------+ | ID | Name | Host | Status | +----+-------------------------+------------+--------+ | 1 | watcher-decision-engine | controller | ACTIVE | | 2 | watcher-applier | controller | ACTIVE | +----+-------------------------+------------+--------+ 3. List goals and strategies: .. code-block:: console $ openstack optimize goal list +--------------------------------------+----------------------+----------------------+ | UUID | Name | Display name | +--------------------------------------+----------------------+----------------------+ | a8cd6d1a-008b-4ff0-8dbc-b30493fcc5b9 | dummy | Dummy goal | | 03953f2f-02d0-42b5-9a12-7ba500a54395 | workload_balancing | Workload Balancing | | de0f8714-984b-4d6b-add1-9cad8120fbce | server_consolidation | Server Consolidation | | f056bc80-c6d1-40dc-b002-938ccade9385 | thermal_optimization | Thermal Optimization | | e7062856-892e-4f0f-b84d-b828464b3fd0 | airflow_optimization | Airflow Optimization | | 1f038da9-b36c-449f-9f04-c225bf3eb478 | unclassified | Unclassified | +--------------------------------------+----------------------+----------------------+ $ openstack optimize strategy list +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ | UUID | Name | Display name | Goal | +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ | 98ae84c8-7c9b-4cbd-8d9c-4bd7c6b106eb | dummy | Dummy strategy | dummy | | 02a170b6-c72e-479d-95c0-8a4fdd4cc1ef | dummy_with_scorer | Dummy Strategy using sample Scoring Engines | dummy | | 8bf591b8-57e5-4a9e-8c7d-c37bda735a45 | outlet_temperature | Outlet temperature based strategy | thermal_optimization | | 8a0810fb-9d9a-47b9-ab25-e442878abc54 | vm_workload_consolidation | VM Workload Consolidation Strategy | server_consolidation | | 
1718859c-3eb5-45cb-9220-9cb79fe42fa5 | basic | Basic offline consolidation | server_consolidation | | b5e7f5f1-4824-42c7-bb52-cf50724f67bf | workload_stabilization | Workload stabilization | workload_balancing | | f853d71e-9286-4df3-9d3e-8eaf0f598e07 | workload_balance | Workload Balance Migration Strategy | workload_balancing | | 58bdfa89-95b5-4630-adf6-fd3af5ff1f75 | uniform_airflow | Uniform airflow migration strategy | airflow_optimization | | 66fde55d-a612-4be9-8cb0-ea63472b420b | dummy_with_resize | Dummy strategy with resize | dummy | +--------------------------------------+---------------------------+---------------------------------------------+----------------------+ 4. Run an action plan by creating an audit with dummy goal: .. code-block:: console $ openstack optimize audit create --goal dummy +--------------+--------------------------------------+ | Field | Value | +--------------+--------------------------------------+ | UUID | e94d4826-ad4e-44df-ad93-dff489fde457 | | Created At | 2017-05-23T11:46:58.763394+00:00 | | Updated At | None | | Deleted At | None | | State | PENDING | | Audit Type | ONESHOT | | Parameters | {} | | Interval | None | | Goal | dummy | | Strategy | auto | | Audit Scope | [] | | Auto Trigger | False | +--------------+--------------------------------------+ $ openstack optimize audit list +--------------------------------------+------------+-----------+-------+----------+--------------+ | UUID | Audit Type | State | Goal | Strategy | Auto Trigger | +--------------------------------------+------------+-----------+-------+----------+--------------+ | e94d4826-ad4e-44df-ad93-dff489fde457 | ONESHOT | SUCCEEDED | dummy | auto | False | +--------------------------------------+------------+-----------+-------+----------+--------------+ $ openstack optimize actionplan list +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ | UUID | Audit | State | Updated At | 
Global efficacy | +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | RECOMMENDED | None | None | +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ $ openstack optimize actionplan start ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 +---------------------+--------------------------------------+ | Field | Value | +---------------------+--------------------------------------+ | UUID | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | | Created At | 2017-05-23T11:46:58+00:00 | | Updated At | 2017-05-23T11:53:12+00:00 | | Deleted At | None | | Audit | e94d4826-ad4e-44df-ad93-dff489fde457 | | Strategy | dummy | | State | ONGOING | | Efficacy indicators | [] | | Global efficacy | {} | +---------------------+--------------------------------------+ $ openstack optimize actionplan list +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ | UUID | Audit | State | Updated At | Global efficacy | +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ | ba9ce6b3-969c-4b8e-bb61-ae24e8630f81 | e94d4826-ad4e-44df-ad93-dff489fde457 | SUCCEEDED | 2017-05-23T11:53:16+00:00 | None | +--------------------------------------+--------------------------------------+-----------+---------------------------+-----------------+ python-watcher-4.0.0/doc/source/install/index.rst0000664000175000017500000000241713656752270022106 0ustar zuulzuul00000000000000============= Install Guide ============= .. toctree:: :maxdepth: 2 get_started.rst install.rst verify.rst next-steps.rst The Infrastructure Optimization service (Watcher) provides flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. 
Watcher provides a complete optimization loop including everything from a metrics receiver, complex event processor and profiler, optimization processor and an action plan applier. This provides a robust framework to realize a wide range of cloud optimization goals, including the reduction of data center operating costs, increased system performance via intelligent virtual machine migration, increased energy efficiency and more! Watcher also supports a pluggable architecture by which custom optimization algorithms, data metrics and data profilers can be developed and inserted into the Watcher framework. Check the documentation for watcher optimization strategies at `Strategies `_. Check watcher glossary at `Glossary `_. This chapter assumes a working setup of OpenStack following the `OpenStack Installation Tutorial `_. python-watcher-4.0.0/doc/source/install/common_prerequisites.rst0000664000175000017500000001252213656752270025251 0ustar zuulzuul00000000000000Prerequisites ------------- Before you install and configure the Infrastructure Optimization service, you must create a database, service credentials, and API endpoints. 1. Create the database, complete these steps: * Use the database access client to connect to the database server as the ``root`` user: .. code-block:: console # mysql * Create the ``watcher`` database: .. code-block:: console CREATE DATABASE watcher CHARACTER SET utf8; * Grant proper access to the ``watcher`` database: .. code-block:: console GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \ IDENTIFIED BY 'WATCHER_DBPASS'; GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \ IDENTIFIED BY 'WATCHER_DBPASS'; Replace ``WATCHER_DBPASS`` with a suitable password. * Exit the database access client. .. code-block:: console exit; 2. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc 3. To create the service credentials, complete these steps: * Create the ``watcher`` user: .. 
code-block:: console $ openstack user create --domain default --password-prompt watcher User Password: Repeat User Password: +---------------------+----------------------------------+ | Field | Value | +---------------------+----------------------------------+ | domain_id | default | | enabled | True | | id | b18ee38e06034b748141beda8fc8bfad | | name | watcher | | options | {} | | password_expires_at | None | +---------------------+----------------------------------+ * Add the ``admin`` role to the ``watcher`` user: .. code-block:: console $ openstack role add --project service --user watcher admin .. note:: This command produces no output. * Create the watcher service entities: .. code-block:: console $ openstack service create --name watcher --description "Infrastructure Optimization" infra-optim +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Infrastructure Optimization | | enabled | True | | id | d854f6fff0a64f77bda8003c8dedfada | | name | watcher | | type | infra-optim | +-------------+----------------------------------+ 4. Create the Infrastructure Optimization service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ infra-optim public http://controller:9322 +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | Infrastructure Optimization | | enabled | True | | id | d854f6fff0a64f77bda8003c8dedfada | | name | watcher | | type | infra-optim | +-------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ infra-optim internal http://controller:9322 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 225aef8465ef4df48a341aaaf2b0a390 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | d854f6fff0a64f77bda8003c8dedfada | | service_name | watcher | | service_type | infra-optim | | url | http://controller:9322 | +--------------+----------------------------------+ $ openstack endpoint create --region RegionOne \ infra-optim admin http://controller:9322 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | 375eb5057fb546edbdf3ee4866179672 | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | d854f6fff0a64f77bda8003c8dedfada | | service_name | watcher | | service_type | infra-optim | | url | http://controller:9322 | +--------------+----------------------------------+ python-watcher-4.0.0/doc/source/images/0000775000175000017500000000000013656752352020041 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/images/watcher_db_schema_diagram.png0000664000175000017500000022012713656752270025660 0ustar zuulzuul00000000000000‰PNG  IHDR‹È:Çx5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gUÛzTXtplantumlxœÍWÛnÚ@}·Ä?Œè H 
$Ð!”^’¶)¤Q¡}©*k±2’Ùµö‚‚ªþ{Ç`Â5ÄAJ‚ŸìcÏì9sYŸ+´uÃ(çÝRˆ0D{«B“>‹•ÇÈ+9/ˆ„10P"2Ðh@¡{ðæòò=_Eh6áoξ®Ia¾J‹Ô#ל®;G!Ô¡c5ÉÁï“ÚŸé²Cœ/×NÒåL‰±ÿ€û} D0öMŒ%÷–”dàUçûõ… Tˆa‹Œ=é¢Hô"LâOÞ 4 ‹¡/,£?ñm—†˜F‡™BŒp» î·œóþ%Îft«? ܳ5Ù”îÓ‚‡Ð8¢ç 6š ,»™P»Ìè' öQáB²¾Å!oÄ>­SPªÞx 8ç`7%9œnØšâ4…SÜqµº4Šq=Ë_W‰ý M˜q¼ Ãq95r´Ë뫬;Õ#mò¿Z5Û f‹†ûPWA’>W•ÜYÓ)ÕY´Ú"o&Qû¤õÓ˜iiWÙdDªÇ 8%ûÙê&ÛÛ]¹¢O'8}y¥l¸+ͪ"vÖRæ?*‰Ä;»ÉHpÔ &ç ¥_GއæyÖÙá$YØ6…¸Ã¹Äõ¢¦àÅ OKJôQHî2ÜŸ‰>NrA²¯Õå,}ÅÄLyâ‘D>Ÿ„ˆ å|«ŒÝ°Ì¾¬o¥ïâÅ_xÏŸüuä¡Q*A~~¤Î3òRi¤„o8^®œ7 ?•ñ•5Ù¿» ¹8ugØi×ÏÞП2ųô¥uGi¸csÞ9ÊpòéÝ0Âþl·`Ä!)©³òÉÛBÇIh åTªõÊYýô|¼èÂq¹R+z…Ï7-0ÊiNfþ9áì¹Ä]Ñ»#Q趋й€NZθ#ÒJQÚ‰¾(Û‰•àj§‡¸Éu¸4Pï¶W)•Ê~µrثԼIw÷Á‡u5’Nÿ¢€IDATxÚì½ \u¾øïñ\.iaF\ò碙ÊÏx‘=˜¹Ef–¹jhfQYòg¹µ­Zj^×ÍÌk–ÞHËÍ5ŸRÀP¸À"‹Kh¬âS™ZFK®¡±ÈŠbH@îÿSÓßì9sæ<‡9ç¼ß¯Ï‹×0gžÎÌ|¿ß÷™™ïgºüÀLta† €¡† €¡† €¡‚'i>{¾¦d?Až‰æú 5Ái €¡‚¤®Éè2Œ ÏÄßvî§PœÆ*8Õ˜}¶<¥¦$ƒ ˆŽ‹ÏRS:öüù?»·d‡›doERfýÄP[êÊÊG%\3öÔÖ"êuV)ÝŒK–môésõ‰Û•á Ê.¿ü2÷–ìp“ì­¨SʬïjËþòQ ¢§®%PöhÙCOSËa¨€¡r ÕúùQÛÖN µ‘Û½{]LÌAAe½ÊOíô‡eN™r4~ƒE¬à Ò´›á<×=-u­ÑÞ™ïdIq8^wKw%tË”«k4˜@wERf}ÞP¿Z¶¼àš±î$P»n/õÅPCå9ÔºsâÄví5Tõr‹öJRŸ>Wçæ¾ÑÜ\!Ãò×^£(±k×Y‘¦Ý ç¹îiiïªî™ïª/Ú+Yº×PÕç¹ùõhoÉ7ÉÞŠ:¥Ìú¶¡~ÿíÞü¿×êÙS'sS|óRJþµã<ܻߴ]]1TÀPý·/rrüäÉwŸ?ÿg‰)SîQë)åY·úúR;R}/$¤§úÈÚÔ©¿°mããïWZ}iídb"M»ÎsÝÓRyUÎdís¨öÎ|W Õ^ɲ%Kž™0!F‘ÎãÇÿÇöÙn'—ìp“ì­¨SʬoêéuïFŒw/{Ú Ônx°º`#u† ª÷åCu²åv&OdcãG ±Ý¤eZºt–zOSÆÏœ§Üœ>ý!iÂÕn¿ýû_еoß°+æÚ6Š™™¯EFö“åDE à.?M»IÎs{§¥hppU_~Ý3ßUCµW²ìIjDD¸«è販î-Ù™ tWÔ)eÖ· uÿƒ3Ž<<Ûíçªýú7{縗žÂ^^5Å AÚå\w]ßO?ÍÒž$¶ .ì-C ÕÃÞ»vœ©éÜx×NeežTvìv ÕÃ…ÚÃç¹ç£ãJ–÷–Yß6Ô¢÷×¾¼ÜmC­{}eÁàûÝHOaBÍD1|xTié»êsò¯ögŒn‚ {‹ÅPCõë0~_¹½šÎ¥÷•'%M“š«®n§öž&AÓîÉBíóÜóÑq%Ëʬojf€Qç?‡†êLç?Ý y!ÔL«VÍ×>à!ÿj U÷ál{‹ÅPCõ÷(lfÉÍq®¥,¹é‘²Ø§œ\þŠsCC{Y¤¶R“Ô4í.Ô}ž{>lK–Ú•^öZ2§±©Ú&ãj Õ™jöÒS8Ì !Š)gŽò³2`ðˆˆñb1TÀPý=Z÷~8î—Òx7¯ÝàTÚç›)¹c*iŸ1Tï*ÔœçœÆ\Cýé××¶÷ªn'óB$$ÄÊœ´´dÛnyö®¡gÀPCõkI-lfAĸS¿qô꼈qeŸ¢Ù¦i÷ÆBÍyÎiì#Ï¡žØ¦çPW¿ãð9TÝ Næ…(-}7*j@tô@™×jÝö‹¡†Jü¿gõòûÿ¢p`ì‘GæHõ§üF¿”žQ·$íØã/ì<1ÿgcIõLÓîí…šóœÓØÛOãýSgµ¥/ÿÑ:ìËo/ƒ3y!$ú÷¿FB÷"«m‚ {‹ÅPC%þ©×óé¢MûýBÑ “3†Ë>—{ï´YÕMÞ£™ iç<'üá4>½ý½Â~ãÝ4Ô쬷>L>T 0T‚ i§PœÆí@­ïX«wJ9i¨ß¬^•?`?Ã0T 1#šv 5ÁiÜÞ«¼÷»‚ÿ3V7š¡¶dmÙ>d"±`¨@cF4íj‚Ó¸C¢_ž"çAŸ¥¦x²i§P>pwŠ¡þ@mlBÉЇJ –µEô´dô42T`¨Ð!µAž 5íá§qgÝ 
ø)Z?£—úþ#;ë›Õ«¶™X÷kôC…»†úßˤ" ¢ãâóåË 5ÁiÜ Ôf¼Ptó”Ìnÿ›@mÀ¸½‰sH †¡†J4íj‚Ó˜ÀPC%‚¦ 8 0T‚ Ê¦PœÆœÆ*€Ëµ€í;‚ ‚ðê¦BM˜ù4&išg%ÃPÁÍÆL·ÝrؘO@[Hдw¢¡R¨ ¯; ÎJ†¡‚G/·Ð˜„]C¥Py •¤i¾›• C…¶^n©¬Ì2äZ‹¥ûüù‰êH{Í’v‚%KžéÑ#8,ìʬ¬eŒŠü[SS-‹Mwþr‹ ,]:«µu_aáʈˆpÛ&0.n̶m¯Ë@ié»ÒJ)#ÇŒ¹MÖbµ.Ý)åÓéÓjlüˆŠ€ i§P*¡‚ßjPEÚ$hn®p©1Sæ’P¯ˆhg·Xº«W_Ô t×¥;¥ Ë4ÔM;…šÀP 0T3ã‘ÒDÕ×—ZÍn¯1³’§ßšv 5¡rþ`¨À]~—oÚŽ”f©®n§z›/!!¶©iOMMq|üýV7/ž¡½!h;%•AÓN¡&üÓPÉJ¡†úSG‡¨¨Ý’“ã]êTa;rÁ‚§”»{2\[["­”üž™ùš¶S…¬+)išüUFêNImBдS¨ ®¡b¨*ø¡vn¬^ýâðáQr‚¦BMp“•C µóCÊ¿„¶´M;…šà4&+† ê?ý ô h6šv 5áo†JV ¸†J×P ÂG •¬*ø¯¡RD ÂÇ •BMøÌ]~²R`¨à†ê^âmTVæM˜#¿P»ÅÄܘ›û†Ã…¸Qhofe¥DFö“*):z`aáJuš´´äðð«ºjë2ªÂß •BMxKO)²R`¨€¡vàåiWRRæÈÏÐÖÖ}ååbcGvD9×.-1q’üä•Õåç§…„ôTFnÚôŠT@R qéˆà*…šàa²R`¨`FCuø¤ŽíHÝdδ 2‹üÕ½:¢^í°Êßa7Ä¥$EEoË”Êð°a×k/½Ð˜>Ö´S¨)Ô*Y)0TðGCué‘ mH+Òsòä»gÏ~LZ{Û Íßaœ7D7-ˆÕ&)Ídhh¯O?ÍRÆv“í ²XU@4f„ß*…š %))0TðzCu©[¥UÔÕíܶíõÔÔ磣>÷Üãº«Óæï0΢»%¶›$ÓHC8bD´Ú˜­]»PäoTÔ3C¥P\C%0T0—¡ª ƒÔøjm®;²]35?²Xº´Î´¯N6fj¦ \w]_Û‘4f„Ï4íj 5†J`¨àõ†Ú¿ÿ5+¤IX²äµ6×Ùö‚“&ݵ{÷:Y¬´=©©ÏËZlÓ¸Ô˜é¦ÑN™”4íÂ…2årËM7E*#Ÿ{îñ5k^R.· :˜ÆŒð±¦BM¡ÆPÛžÏP±mÛëQQ¤ÔŒ]Q±I÷yn‡#1Tð5CÍÉYvepp¶ÝÒiœøÃ™²* ¤”ö€€®RòG¾õÀtÛô.5fºiA´SJs%_D>jÔ-j?ßúúRie3d^uhÌŸiÚ)Ôj µ-áù S§þâ³Ï²e`Õªùê —J"† >h¨¾Ú´ nT3‚¦BM˜ç4ÖÍö Œ—_#}ú\ŸŸ¦ŽY¼x†ü í%S.Z4]–fÊ55Åò“F~óhûÒ™0C…8±î£2*`¨Þ¶iA”1î-‡f‰ÀP)Ô„y Õ6Ûƒ›Ô‘Š¡Ê”EEo‡„ôT‡Õ ââÆlÛöº ”–¾+§– 3T(!vûôÓSÔ Ätƒ‚,&ÄœÒqR÷-Þºïû¦³¡R¨9Íc¨ë×/I ²÷L§îpmm‰Hª(fDDxfækfËP¡û†*eØ*m…îH |ÓP;º³¤3×H<ßqÒÞ[¼•Ð¾ï›ÆŒðº¦BM¡æ‡–g¨ /?ø‘¡vJgÉÎí8iðo«÷}Ó˜Þh¨j 5†ê“*Ì–¶C…ŽmÌ:«³d'vœ´÷ïK6ïû¦1#¼ÑP)Ôj •ÀPÁë µÓ;Kz¾ã¤½·x›ª$AÓN¡¦Pc¨† ªG;KvnÇIÝ·xë¾ï›ÆŒð C¥PS¨½º§¡†ÚÎ%©n<ßqR÷-Þºïû¦–$|ÃP)Ôj3Tgf÷|Z´mÛ^Š çäˆÑò£Q·¡Ã‘*ø…¡ÒqÒ´}$ šv 5…CíPCõ|Z´©SñÙgÙ2°jÕ|õÑÝÕÙÛ 0T:NvrI‚¦BM¡ösCµÍ;vâÄöaî ì&eX)ŸÊ‰¡\ìWö°Mmf’´hÚ|ºÏgc¨€¡¡„© Õ6ïØØ±?_¸ði¿cÆÜ¦Œ :t(óÒ|gÎŒsÞä<ŸM ±Û§Ÿž¢N ¦d™0!æäÉBƒ‘*`¨A`¨§q'ªn45«ƒz 299~ذëŸ|ò’’Õ®šœ‡Ó¢)±iÓ+C‡>þÏVY/æÏOÔ¦EÓ‰¡‚?j[Î{žë"*Ó÷ÂøÊœñ¡wþ|0í[Ou U¢ªê«VÍ5ê–¸¸1îüžI‹&ñÖ[ó"#ûÕÔën†wåJÃPÁÓ™{}} ê÷|¿Nå6SxøUJ*uõ™*©¿¤®”ßôê‹ÍÖ…“ð[Cu^Œ:èŒõóþ×~n¨ºwùgÏ~ìôé2ðÉ'[zô¾d“Úì’iÒ¢¥¤Ì±§§²ò¥´iÑtGb¨À5T4TÏ÷ëÜ´é•!C®ÕæÁ‘HLœ$õ—lC~~ZHHO®‡a¨^t Õkôóþ×~n¨'Nl>r¤»4‡¯j”Ê·¾¾´Ï3ý:ÕgÚ¼® 'áu†ª[(l ¸®¡Ú;çÛh¨m?·}¦ÿµî†©î¨[:¬' Õ¸’ÄP 
L}—_·ƒ¤ñÍ#™¬¢b“ÔŒÉÉñêÈÝ»×EGŒ‰¹Q{ÏÚ™ Zwi UÚÚ„„ئ¦=ÒœÄÇßï|aó|¿Îçž{\yþoíÚ…C‡VF&%M»p¡L¹†jÚ.œ„×Ýå·-º\f·½Ëo­ t ‹½»üîÛ>ÙÿZwÃ䫬¥-YòŒ¡Ú«'ïòW’>ðÖSC¯ï)5@~ëë* nIùT&–‘2£íøë×/’ê288HÛ:JˆžN˜ãj­»4‡†Z[["õ¯Òÿ#3ó5ç+>Ï÷묯/•ÆCÖ(“©«[ »Òä]8 3÷”²-Ôº…B·€Ÿ]†¡BÖöz,ÿ¨š0!Æê­3º} ­&“ÊZmDAbïn©{‹Dwãm·ÍøÇ¢“ßEw^{oÊq¦¿§A–––Òsòä»gÏ~¬¨èmí3FÚí”a«—ÊoY©Ã»üº[h»t¿½÷ÙãוÙ~k7zæ|çµC÷X¿ŒÇ g«y ÕÕιº»Z·8ÜáºgŽÛm¹îÖÊbKJVËÝ7TyË©‚¡ú¡újÊ :°°×cÑá£ÓVJ§ÛÇÐj²Ý»×©—Q##ûÙ{jÍ^MÛÇÌíõÁ´÷Vñ¶|—Þ6îLOㆹ®n§´m©©ÏGG|î¹Ç4T±e;¼†ªÛaÓj?è~;{ï|·=μë\;Æž¹_ÇIí°w¬_hnгÕ$†êFç\í3.w¸î™ã¶¡ênmmm‰¡C«o"õÆSCí8CõLNC2'b¨Ð±†ê|E«Óích;Ù¨Q·ˆx®TûQ:ÙÝÒž¡êöÁl£¡:ì/éÐPÝ»³£;qcãGê’o¿ýí]~19í]~u¯_¿È¡¡:³\2TÛ᪡º×3×Þ×±×Ö™>µàg«I ÕιÚ)‹€±Òu„¡Úní§Ÿf……]~•öé¯;U0Ts^C%ß † f¹Ëï|Euxܸ;¬îëö1´l÷îuÑÑcbn”Æ¥î–öîòÛn¼íJ[V'¿‹¶¿¤Ã»üm4ÔI“î’% Í^jêóýû_£>©®Ü —¿2¬>ì¯Î«ŒwÃPm÷ƒKwùu„KwùÝè™kðuìu§u¦°Ã[·=[½ÂPu¿‚vWÛÛ-Nj»ßå·ÚZùÙ5 ¨èíÜÜ7nº)R>òÒSCõ˜¡vh^7¦䌕ÓxÅŠ¹mY¸ÃN#*x±¡ºÔcQ>y²pøð(m¯Ý>†¶“IˆžN˜cpÆëv·Ôí©»ñ¶+5nYü.Úþ’Ú¾D2¯Ò#DíõåÐPíÕ²e@ÚKÙK²IAA–Ñ£o=p ]fãÆÿºîº¾²ù+ú˜2å‡Ï¡:³t¿ì|92RÛNëÝ) jR‡=syC :l¯;­3}„{Ý÷lõ CÕý Ú]mo·8i¨ºgŽÛ=¥l·61q’œ-ʧòNþõÒSCõ[CUŸi¶ê­ïöÂ;ëº/† ¾s'¥µu_|üýnäMôùÛ.›6½¢Íu@xoWº˜pªmª]NcÝÄ,iiÉò‹By6Ã^¶7^^¯›{Ä^cd°p—Ò¶\ú1!¿¶æwcË&“r5³† þh¨Êƒäjššís‘‘ýJKߥ]¤iÇP NcƒÄ,Ó§?ÔØø‘3Ù\zy½îSIaoá®^pѾÔÔ½-wÆP]Íì¡‚?^C¥#$AÓŽ¡œÆÎœÆºÝ^››+Œó<¸÷òzÝž¶ápám1T÷¶Ü¡¡º‘ÙC • hÚ)Ô§± †ê0σ{/¯wÒP.ÜUC-/ß=°-[îÐPÝÈì¡‚_*é<šv 5Áilp$f1È–àÞËë¼Ëïpá.•”={6FFöS{J¹·åm4Ô6fœÀPÁ;3Òy„*…šÓ¸OcƒÄ,ÙÜ{y½nîÛp¸p'{J)·Ô­:ò»·åm4Ô6fœÀPÆÌ¯Óy4íj 5§1á¥ç † nÖ¤ó0:‚¦BM¡ÆP üËPIçaþtM;…šB¡’jCÿ2TÒy˜?AÓN¡¦Pc¨† *é<̕΃ i§PS¨1TÏç”à1e :ÓPIçaþtM;…šB¡¶‹V’žC¯1TÒy˜?AÓN¡¦Pc¨>i¨ÞžžCžõ!šv 5á_§±îo˜7ï—òÛ`Èkå’qN ÒSttz hÌ‚¦BM`¨? 
¨–Œw‡3O›ž¢ãÒS`¨à³é<šv 5Áiì’¡ªó‚‚,=öHOáô*p¹… hÚ)Ô„ƪ'YeipÒPIOáô*˜®1#Aø˜¡R¨ ³Æýû_SP°B,mÉ’gܸËOz ¤§ÀPÁË3Òy4í>f¨jNcÏŸÆ99Ëî ²2Tñ¶ÀÀnrôµ=¥lsJžÂé)0T 1óëtM;…šBÍiÌ‘òÒóC7kÒy˜?AÓN¡¦Pc¨êÍn* üÚPIçažtM;…šB¡’žC •tæJçAдS¨)Ô*¡‚*é<̟΃ i§PS¨ýÜPÛñ9ižsÅPÁ; •tæOçAдS¨)Ô*™(L˜‰C…¬HçaþtM;…šB¡’‰Â„×}1Tðô³>Üæ {€BMxÝi¬› B7y‚Á§d¢èÐL*xº1£¯"Aø˜¡R¨ ¯;uÑMžàä§d¢h÷L*xº1#AÐ šBÍiܹ§±n÷;Ýä N~J&ŠvÏD¡‚‡3:K„*…šð=CµMžàä§d¢h÷L*xAcFgI‚ð1C¥Ps›ó.¿mòãOÉDÑq™(0T 1ãµÝM;…šBíw=¥lDè&O0þ”L—‰C…¬è,Ék» kÚ)Ôj§&È6^o¨t–äµÝ„5íj 5†J`¨àõ†JgI^ÛMøXÓN¡¦Pc¨d¢ÀPÁg •Î’æé,IдS¨)Ô*¡wùé,Ék» ¼ËO¡¦P{¯¡’7 Cì)EgI^ÛMøXO) 5…Cõ@V y3† þh¨Ap{” 0T C • hÚ)Ô§±ƒÓØëÙ˜1·Éü‚Ó§?ÔØø×PC%C%8;ó46[Þ4cCµÍJæÞzí}™†»ü࿆JgI‚ð1C¥P¾g¨•7ÍØPG:¿^‡_C •GÑ Â¯ •BÍilλü•7ÍUCu¸^õ.ÿâÅ3œü‚*`¨4f¡R¨9;¹§”Iò¦9ÓSÊv¤3ë2äZYoRÒ4ùëÌÄPÁ •Î’æï,IдS¨)Ô8{yÑMÓÞÿ×;TC-ø¿ó§ {*è,IcFøXO) 5…Ú»NãÏRSd„×…8 \»¤úáø9¼ññm½î–Sgëw×Uåwª÷v–$hÚ¹øD¡öíÓ˜ðêÀPÁ)Nm/Ëí3þã¹+OdìøŸðq[ºØÿ²Ÿ7fÞÞY’ iÇP)Ô¾z7×7ümç~% SWoyéõÿùʆ¹‹eÕ„×ÄÚ *8¸Å`VjNØØéEòï'óWeõ¼+;tLóÙó\n! • Lxk)++ËÍÍݼyó:ðBäÀÉᓃXUU…¡Â?q¦üpÉèßž¬Qþýó¤¤~9™õA`¨a~C=xð ¬%;;[\g=xrÈäÀÉᓃX]]¡‚R¹ä„Ž¡1# • ¼ÂPeù{÷î•åææn¯B™89|rkkk1T0ºã/•ËÙƒŸÓ_ÒÛãÈ«¯²lãdöZ ïíÜÚŽ†J¡&¼ú4ÖR]]-«8xð`YYY1xrÈäÀÉᓃxîÜ9 ìr¡êÔ‘Ekè/IæìÜÚŽ†J^}k³©­­Å©ªª:^…29prøä 655a¨~͉ô¢‹§ÏØû´á‹“- ô—ôú8ñ‡FbÄãì Ûȸ⮌¹5#pDÆÕ÷fNš‘ùÛe^Ô¹µ] P>pƒÏƒ¡ú¢ž{â_.¼aê¹cUî-þ’ÞÁªÕé]‡gô™þow¬ëwì+6ü29CyqZ×[Óÿõç=ï|oÊ3fÛQö:·¶;jðÓ0Tðb.ž>³cxBYÜo¬.‘ºý%½£kä}O*7ÚÒÃǼ÷èvˆ5¿û}†æÕ¾?쨠Û3º ß3mÃÒ4“wnmw(Ôà§1`¨à­|³cONØØ£KÖ]jimËrè/édX~þÓ£`?ŸqÕè-éì+2M´ûüÜÏŸÈ\’fÚέí…|à4 ¼’ê¼RÑÓ¯³w¶Ã¢è/izògþצt»â[[z.˜¿œÝb…쓌n·ýࣗÝY˜ú{/êÜÚþõ…¼ÿ4 ¼’–†F5¡¿¤ùÙzof·Û¶ßùƒ¡þË­[¯º7囹ì+>;tx‹å‰ß¿n›ì¢CÛþè-[Û 5øÀi *˜šÓ;ö(—N«óJåo媭ʿgʳs¬(¾ã‡§ueà«Ù½ïu»û `¨`ÄÖžw)zúß &IͺìNvŽ'Ò‹”]$üem^þ€.Tb·`¨Ðž|{²F”ëóåéÊ¿ª~}–òž £_V´44ª»H8ºdÝ©íeì ÚŸK-­‡¼Ó.¢Àë8²höÐkõë›{ÄÀØEV”NLb'`¨Ð±´44~8~ÎŽá ¯Œ¦þp¥ö_­¡Ú~ ÿøñ Tv† ˆXiÑÍÓv?8·õâwì øÇÏV²Œi>{ž€¡BGQ¸2Àf¥¶1!?† íÀ…ªS¹}Æÿõýbv@{ÑÒÐÈEh ÚDÃ'Ù íHóÙó…7L=¼àv† `.ž>“6¶òí­ì À,œ=øyvï{É“ €¡˜ˆ3凷^qw]ÅQv† vi½øIÁx†²½PÞ‰J¢ ôiih,˜´ë¾gIz ±ÊØm¡©ö,;CšÏž/ºyÚþËÐSÀPC³èé‘EkØ€¡† fÑÓ/Wç°+C LÁ×Ù;ÑSÀPͽ¦0TÀPÍÅŽá $IÀPÀeÈ6ÕqÔ”ìßzÅÝçŽU±+0T³PùöÖ‚Á5Ÿ=Ï®ÀPÌBů^-=£¥¡‘]€¡ú>ªNíM\LÒSó³ë¾gÌJe?`¨>η'kò"b?_žÎ®0?- 
{â_æ÷$†êË4Ÿ=¿ýúGHË*˜‚–†ÆÃ>™¿Š]*t>­¿Ûuß³è)´#d› ÚÄ™òÃÏ]É~€v¤ã2öwéB½*¸k¨]þU=š9räHxxøo¼á»½©ö,ç†  Õ¤UÏÿnØÞ½{ölÙâû¼¥¡1'lì™òÜ~*¸f¨/^|ú駃Dä_Õß|óMñÅ€€e̼yóBBB,Ë£>ÚÐÐ`%—ß}÷]||¼|ºlÙ2u¼ üþ÷¿ïÛ·o``àСC?þøc{†úá‡öéÓçƒ>Ð~d»Ò‘#GjöÔ©Sáááêö˜Šê¼Ò¼ˆØ U§80TpÁP“’’bccÏýȤI“ä_ÕŸxâ Õü^zé%™ìÌ™3---‰‰‰3gδ2Ôääd™]¦—åLžê*Žò²DðŒ¡Ú>‡öÕW_)Ã2ªZ£¨¡º„>}úüõ¯U†¿ýöÛË/¿ÜÊPe9_ýµ2,ZCU¯ËФêj@@ÀöíÛ­ÆÛ[éðáÃ322dàË/¿¯ýî;óúߥ–Ö]÷=»Æ2NB Õ›¸Pu*/"–‡Õ CQ²Mé^Cµ©¤Õx+»UoýkMTw±öÆ[ܼysïÞ½ËÊÊœYéÿøÇÈÈHˆóÍ7M¾óå÷gÁà‡¾ÎÞÉy€¡zÒtÞ0õËÕ9ì ðD ׳ð°0õ:¥Õ5Tíd}úôùÛßþf°Lí5TY «†ªxgHHˆüu¸RaèС/¿ürDDÄ÷ßoþ=_¸’”´ª×P:1‰Ûй†ªFRRRCCÃßÿþwíÓAFF†6á`¨ú¬\¹2444(((>>^Í0ÕîX,–¾}ûîÛ·€¡€CGrSñùòôcËÞc?`¨~’±L·'krûŒ¯-=Ä®ÀP0T0 }¿8Àô•ÀP;™ºŠ£õ‡+Ù€¡‚BYÜoöÄ¿Ì~ÀP;oOÖäx :¯”]*(4ÕžÍ Kµ€¡v%£g|{žý€¡˜…½‰‹Éအ˜ˆ¦Ú³Ù½ï­)ÙÏ®ÀPÛ¿a'€Ù Û”·ð—µyùhihdW`¨í†´+Û¯ä›{Ø`*ÈØïEì~pî'óW±0Ôvcoâbi]Ø€¡‚Û4|q2ë²;ëW²+0Ôv ®âhNØØ‹§Ï°+C…¶püÍ-G­a?`¨måRKká Sy}`¨ªY8²hMéÄ$ö`¨ª)h>{>Àßž¬aW† €¡š…K-­ì0-d› CðEšjÏò`¨`"ªóJ‹nžÆsD€¡€‰Ø1<áËÕ9ìÀPÁ)Ϋb'@GSWq4·Ïø–†Fv`¨à€š’ýÒfpë <Àîç[öû0T0BÄ4ÀÕy¥ì ð È6åíÔ®Ì Û|ö<»0T°Ë_Öæ}0òWìðÈØï˜•züÍ-ìÀPAŸÖ‹ßåö¦ü0»0TðdÍÃcE€¡‚]öÏXV:1‰ý`EóÙó5%û sÆGÓ²ÌÍõ Ôj[¹ÔÒšÛg|Ã'ÙVH[›ÑeA.Åßvî§öÀP¡c õ³å)5%A8ŒÏRS0T uÁ—«s¨|CõG.ž>“Ýû^ºÍb¨nÄWË–\3Ö™«§VWR·_7á«Íï°1TcêW~½“Ê0Täã¹+÷ÏXÆ~ÀPÝ蹟ÿï÷Z={ÚÅ>ÿôLêK)ù׎s¯w¿,Êcsµ×ìª{† €¡ú)ÍgÏo½âîsǪت«qzÝ;…ã­®:i¨;nx°º`£·*¡`¨žãÛ“5‡¼Ã~ÀP݈ýÎ8òðl{·ò퉩Ç~ý›½‰sŒWQP°"*j€ÅÒ=""|Íš—ÑTQÕ3--9,ìÊ€€®òïÉ“…“'ßd ì6vìÏkjŠuç’˜7ï—!!=eá>:öüù?+#›šöÄÇß/#CC{-]:K»ãm7•ÀP0TÀP=Eî¯}y¹Û†Z÷úÊ‚Á÷¯¢wï+ŠŠÞ–Í™3ãt¯†Ê¿O<1^5Å!C®Ý½{]kë¾ææŠääxÑMݹ,x*6vdmm‰L–˜8I]¸Ì2iÒ]²´úúR1][Cµ7£î¦b¨*† ªG#3`¸’–ß=C½”þƒ¾¯¢oß°´´äêê"ƒûõò¯ø¢îìâ©=zëÎÕ§ÏÕ'NlW†/\(»üò˔ᰰ+Ož,T†eÀÖPíͨ»©*† €¡†êѽaÐaß¡¡J84ÔC‡2§L¹G,pРˆ‚‚ö UûïîÝëbbn ²(kWnýëÎe±tWÂx2ÛÝu7Cm/C½ÔÒZWq”Z0T µƒ¯¡n{ß¡¡ª±kך°°+1Ô>}®ÎÍ}£¹¹B†å¯îƒ¤ÊdÊ#ªV¡½†zâÄvÝk¨º3ên*†Ú^†Ú|ö|NØX’↠€¡:zuðÄ6=‡ºú‡Ï¡ÆÇß_UõEûBBz*#ƒƒƒNŸÞaÏPe2õyЩS¡~j5×’%ÏL˜£Èèñãÿ£>®šœ?yòÝçÏÿYbÊ”{l ÕÞŒº›Š¡¶ã]þCÏ¿Á‹ECõqZ%Øj›úòOÕ–¾üG_Xè°/fæk‘‘ý»EE Po§¦>¯Üa×5T™¬ÿkºöí¶bÅ\õS«¹׌ˆ—)££ÊŠ”‘%$ÄÊÅ2—.%¶kÑQwS1Ôv4ÔúÕ¹}Æ_ji¥. 
Õg9ºdÝ¡çß`?`¨mʇºý½Â~ãÝ4Ô쬷>ì^>TEeež˜(Ú<=¥JFÏøëûÅÔE€¡ú,…7L¥Û†ÚÖwJ5ïËï;ÖêRNê7«WåpóRIIÓΟÿs]ÝÎØØ‘2Ì6¡~³cÏŽá ÔE€¡ú&gÊSËc¨í_½÷»‚ÿ3¶iÍz— µ%kËö!¿ÚüŽ9¿ÔŠsCC{Yâãïolüˆ£lªlSy± _œ¤: ÕÙ?cÙçËÓÙj»DyÜŒ’nÙ°É «Ô?uáÏÝúá} eq¿f×a¨nP¸’gèCõAšjÏn½âîæ³çÙj»DKãÞÇ&” }¸yí‡zÚ’µEô´dô4™‹]‡¡†ú_®Î)Ÿº€ý€¡¶¯¤–?6³ ß¸S/.µ«§ÙY߬^µ}ÈIJ¸_£§*`¨ÿDu^)}¤0Ôy&uó;ù׎+ŒœxäÉäÚWßP’ù_Úö~Ý;ï}aáŽ[Î0δϞ*† ªÏöî?]´iÿŒŠnž’Ùm¸ì. Ó½‰sª 6š³ç>¡`¨€¡† *`¨ás†ZWqôâé3ÔK€¡†Š¡„Y uoââ#‹ÖP/† ê†úùòiw ‚pŸ¥¦tè5Ôœ°±—ZZ©šCõb¤o½øg@Û • —¢ãžCýÓíÿQWJÕªóuöÎ’Ñ38Úáê/«)É Âa|¾|Y‡êçËÓw?8—ª 0T/¦|ê‚£KÖq´ÝPy• LÒ—ÿâé3Ù½ïå y€¡z+Mµgßÿ×Ûé÷Š¡„e›’ßÞÇ–½GíªWr"½èO·ÿ‡C%3Ô U§¾=YCíªWÂ-þN1Ô.]ºà(†JÆ~ U‡K-­9ac¾8Éáï8CÕ•Q‡†j<‚K`¨ªÏÒT{öð‚w8ö&¼†Š¡*üƒŒýÐÑ×P++ó† ¹Öbé>~¢:Òžkj'X²ä™=‚îÌÊJQƨȿ55Å£FÝØmРˆòò ëÒR>MKK–…tE› Cÿ2ÔqãîX¸ðiX´hºó×Pe`éÒY­­û WFD„Ûzm\ܘmÛ^—ÒÒwE=•‘cÆÜ&k±Z—î”òéôé56~„3*† ~g¨AAMhn®pÉP•¹$ÔËœÚÙ-–îê%UuÝuéN)Ã2 ÂDø°¡ž;VÅkóC µ Õx¤xg}}©Õìö ÕvJi%|ÞP??ç/kó¨¦C µ=ïòÛŽ׬«Û©Þ»OHˆmjÚSSS¿Õ]þÅ‹ghïòÛN‰¡>o¨Õy¥ŒüÕ`¨€¡ê÷”ŠŠØ-99Þ¥žR¶#,xJ¹e/õµ%¢žòoDDxfækÚžR²®¤¤iòW©;%†Jø¼¡^jiÝzÅÝdï Õ 8‘^$ÁQ÷€¡vn¬^ýâðáQøáç=¥Êâ~süÍ-ÔT€¡š]÷=ûÕÆŽºªÅÒ]B›XŠ üÖP¹ÑªÐÒИuÙMµg9êj¨]¼ ´‰ðálSÙ½ï=w¬ŠÊ 0TóR[z¨ð†©rß¾†Jª–SÛË.T¢² Õ¼Y´¦âW¯rÈ1T‚ c?†j>ù«ê¼R¹Wê”)÷(/‚rò#ƒé]š† 0T Õs\jiÍí3ž7¬x‹¡qâÄööÞxšÈÈ~ÕÕE(¡`¨…‡±š7ï—ÊåOqGuù¡.S]‚vääÉwoÜø_ÊH_õâ®Áw±L»d‚0Û5Ô¼ˆXR÷† þn¨ÕÕEƒEX,Ý/žÒ³¹¹BFÖ×—Žys``·›nŠ{>ë²;é%àIC=t(SÑÊË/¿læÌ8ån¾—†/}Ÿ U~¥o½ânnô†jjKýéöÿàH{ÌP»@—.¨aÂlSåS[ö`¨¦àËÕ9f¥r¤=y • ê©íeŸÌ_EŪ)(Ÿº òí­iï2T.C*†êËl¿þ‘ºŠ£iÓª®Œºd¨••y&ÄY»ÅÄܘ›û†Ã…¸aÀÚÛ÷YY)‘‘ý,–îÑÑ WªÓ¤¥%+¯P§ä¦?¡`¨Ö\<}fëwÓMÊ·¯¡Š,¦¤ÌijÚÓÚº¯¼|ƒò"Óv¿«]Zb⤚šbY]~~ZHHOõ UC†\+ºÌõ`CÀP¨«8ÊC¨b¨Z-Ó^P4)n'†g±tŸ??Ñ¥ «2Kmm‰î%Oõ¦üMKK »2  «ÕÚ—,y¦G`ù(++Å`Kt×^Tô¶L© v½öz*†J`¨*x½¡ŽwÇÂ…OËÀ¢EÓ]²:QÏž“'ß={öc¢Œö¶aúô‡ÔŒúÚµ/]:«µuŸÈeDD¸2r̘Ûd¬¶Äj“÷ íõé§YʘÀÀn²ýAA–Aƒ"ÊË7`¨† €¡‚ת¸˜¢ 47W¸juuu;·m{=5õùèèÏ=÷¸îê”wAÙ®]Y©„ryÕÞ–Øn’L#v;bD´j¨k×.”ù5C%0T Ìe¨ªí‰Æ©Š¦;²] UÆÆ,–îBìŒ4;i¨ª˜*×]××v$†J˜ÍP/µ´zþ ª/ÀPÁ µÿk Vˆç-YòŒªhº#Û~—Ò¤»vï^'‹¡LM}^Ö¢>ŸZW·Ó CUïò/^Y†êŽŸÓîýºü3æ¬Ö9þÆžxgŒñ[O—,y¦Gà°°+³²R´ãºöésµèš:fñâÁÁA¡¡½dÊE‹¦+Ã99Ë• DïFº%0°›ÕûÅÒ½¶¶D÷’§z Sþ¦¥%ËF*ï»ÒæIµÝ~%Oª,vþüDÝŒªÃ†]¯^%µ¢¢·ev Cõ C¥> 
Õ¾\ÓîùMûû^ݰ½{÷†……I³×é×Pe“–.ÕÚºOŒ-""ÜêÓŠŠMêHÅPeJ¸žê°:A\ܘmÛ^—ÒÒwER·QOYàäÉwÏžý˜,PW exúô‡?²Íäo»ý껦´o½Ò.M4zᧃ‚,V2­4á¢ÝŸ~š…¡b¨>c¨Ô‡€¡š·F¾xñâÓO?ü#2 ÿª¿ùæ›R?(cæÍ›b±X}ôц†«e~÷Ýwñññòihhè²eËÔñ2ðûßÿ¾oß¾C‡ýøãím؇~اOŸ>ø@û‘íJGŽ©­²O:®nO;ªòj{ åò¤D~~š¨›òŒ¦:R;¥î\Ê{¡Ô‘NF]ÝN±ÛÔÔ磣>÷Ü㺆ÚÜ\¡û®)Û-õTFÊ,ö uíÚ…ÊK§¢¢h·DyEêˆÑ*†êÆêÏõ!`¨æª‘“’’bccÏýȤI“ä_uâ'žxB­é^zé%™ìÌ™3---‰‰‰3gδZfrr²Ì.ÓËr&Ož¬­‘¥2•¿ÿþû×_}ذaº–——'µ¶U}­»R©¸£¢¢Ôi¦OŸ. @G\Cµ íµk×E׌ß;ªC­¯/m£46~¤fη·F‡oCuh¨×]×Wk«¶›¡‰¡b¨n¨—ZZݾ×D}ª‰ Õö¹«°°°¯¾úJ–ùůN,U¡:¯üšÿë_ÿª ûí·—_~¹U,Ëùú믕aÐÖÈêu©”u7, `ûöíVãí­tøðá2ðå—_J=þÝwßyÆP{ô®¨Ø$’—œAÆÅIHˆmjÚSSS¿ó60iÒ]»w¯“Õ‰P¦¦>ß¿ÿ5ªòÖÕítÃPÕ»ü‹ÏÐò¹ç_³æ%åêСƒ•‘IIÓ.\(S®¡ÞtS$†Š¡šÇPÏ”Îðõa»Ô‡€¡šëªÕHµÆ´oU›«·º´5¯îbí·¹yóæÞ½{—••9³Ò?þñ‘‘‘2ÿæ›oº½C\5Ôõ뉤Ù{ŽSw¸¶¶D$U´2""<3ó5ç{J¬ˆ‰¹1  kPeôè[HWÆ/Xð”ò䀫†ªô” ì&Ò©^ ÕNY__:nܲF™L]ØjXØ•²ÆQ£nÑvóÇP1T3ÜåÏî}ï…ªSÔ‡m¯C5—¡Êo}õw¹Õ5«ŸïûÛß –©½f tµFVêÙùëp¥ÂСC_~ù刈ˆï¿ÿ¾# Õ·cõê‡j£hb¨ª uOüË'Ò‹¨Û^†j.CUž—:wî\CCCll¬ö¹+íd¯¾úê„ ”:÷‹/¾ëV“Ér&OžÜð#S¦Lq£FÊÊÊz÷î½yófã• [·n•¹¤™lËñCCµXºKh»ê+cÜ[Ɔ¡vº¡~¾<ݽןR†ê2ŸÌ_Õz±ýŸ%²×wuæÌ™–™>}ºú “íÄR?Êoô€€€èèhµ÷¨:™,'!!!00P~÷/[¶Ìàî˜ñ†;v,<<|ùòå+²³³•[*Aø³¡ž;VU0ø!êöׇ€¡: ùìù¬Ëîôê¯ðå—_J5Ú¡«˜ ÕuG o˜ê[ž””ÔÐÐð÷¿ÿ]{w¬#ÈÈÈÐ&Xñ˜¡º÷Ö{ð’Êʼ b‚‚,ÝbbnÌÍ}Ãá·so_içJKK¿JÉ>«ŒÉÊJ‰Œìg±tލ¾ûÊj. 
õð‚wÎüœú0ÔŽåDzÑîçzã–¯\¹2444(((>>^ͨÒîX,–¾}ûîÛ·Ïó†ê·×PÅ SRæ45íimÝW^¾!6vdG|;íÒ6mzeÈkµ©$'ÕÔË6äç§…„ôäB5†J}ØŽõ!`¨ø|yúþ¤\ö†jü û¬¬”Aƒ"”RJQåÓyó~)#µje•õ)  kŸ>W‹`‰çi_Ú$ÖÕ·oXUÕt½mÉ’gzô »RÖ«»4íûWƒƒƒBC{É”‹MW†sr–+ˆÞuK``7«—š:T=ùRµµ%º—<ÕK˜ò7--Y6Ry…•v§Ùn¿ñî•6ìzõ*©m½-³c¨*†ê9DOER9´k¨ãÆÝ±páÓö^a/æwèP¦â|3gÆ©Ÿª³Èìöä©¢b“x­ ˆ,ªÆ&š›kïÊâÒ¥³DaÅØ”u—¦ªL)ÒSV'ˆ‹³mÛë2PZú®Hªó^"ê) œ<ùîÙ³“ÚËÿ:}úCÙ>a»ýê»ìe––d±’iEˆE»?ý4 CÅP0Tϱë¾g«óJ9´k¨Æ/MNŽ6ìú'Ÿ| ¤dµÖ“ÔYdv«YòóÓD¶”§*•«Œ»w¯S/£FFö;zt›=CU+¡Ì¨»4«)uçRòüÿoºï®.©I]ÝN±ÛÔÔ磣>÷Ü㺆*_\÷á]Û-qøþU1Ôµk*ï Ð^lV®7‹òŽ¡b¨ªç8µ½¬©ö,‡Ö̆*QUõ‡U«æuK\Üg 54´×®]kÁRGÊì¢}……+Õ…8ùv+Ý¥9|»•j}}i¥±ñ#5ª½5:|»•ÃÝ{Ýu}µ¶j»Ú‘*† €¡‚_ªñ]þÙ³;}z‡ |òÉ–=‚¹Ë/“UTl-KNŽWGîÞ½.:z`LÌÚ{ÖΪîÒªxpBBlSÓžššâøøû÷’I“î’M•Õ‰P¦¦>ß¿ÿ5ªòÖÕítÃPÕ»ü‹ÏÐò¹çWð]»váСƒ•‘IIÓ.\(S®¡ÞtS$†Š¡`¨à_†ZY™5 0°›®®Z5¿oß0ù42²Ÿòd§ò©L,#eFÛžRë×/­ Ò*¯„èé„ 1Nöp7^šCC­­-IU:xef¾æ|O©‚‚²]ƒ‚,£Gßzà@º2~Á‚§”'\5T¥§”ì+‘Nõj¨vÊúúR±|Y£L¦®Nl5,ìJYã¨Q·h»ùc¨ªPùöVî>† >n¨žI,ÕÚº/>þ~{O úC¬^ýâðáQmM CöÄ¿üÕÆª5ÀPCµNÉ䯫íÓÓ_õO×Q¾¾¶«¾2ƽåàŽêñ7·˜•Jµ*`¨"mÚ@‰ ÕIÎüÜK߯*€© • 0T·¹ÔÒºõŠ»[/~GÍjûsüÍ-µ¥‡8®*ѹáü³­æy V·wš“ìä·0y_þ?ÝþßìØCÍjûóáø9_gïä¸b¨>ÖHÛ‹„˜  K``·˜˜ssßP§Y»váu×õU^ʺ~ý"í¼S¦Ü£ëaVo^í\CU6Ãbé5`ÅŠ¹Î,ª}7C=¼àcËÞ£f µýù`ä¯D›8®ª¯jdd¿””9MM{Z[÷•—oˆ©ŒÏÊJ=ýä“-J–Ùþý¯Q_ +óÊ¿Êkf÷°N1TõU´"©o½5ÏÇCmøâd]ÅQj6ÀPÛŸ‚Á;VÅqítC•{Þ¼_Z,݇ ¹Ö*õf@@×>}®ÎÏOSǤ¥%‡…]©}û¨í4‹Ï í%âµhÑte8'g¹2AMMñ¨Q·(—•¾íVWm'°]µ,Y>UÒ*éîuCw2Ýï«»Ò'¶­¼pÕyÑѾת¶¶ÄvYfqñïÔeX}¯©Ì»~ý¢‘#ovÆÃdä’%Ïôè,»Eu\Ûm–1Æ]/_Mþʰ6K«láüù‰ê”º;AwJíöÈ”êËZµ+Õn†“€u¿‘n–Y{9hmÏFÛcmü}ÉØàõŠ»›Ïžç¸šÁPm_¥FEÅ&Ñ;uÊéÓjlüÈx1ÔÖÖ}EEo‡„ôT‡Õ ââÆ(™ÿKKß3°õ {hW-Ö«\hɘ93ΞñèN¦û}uW:aBŒh™òÎR7.ï‰R˘<ùîÙ³“= Íc ]¦ [½9V„¯°p¥Ã»ü2°té,™]&Vw¯í6ûsåûÊß1cn3x‘˜îN0~嘲ý¶ï†µÝ g®\ê~#ç U÷l4xù™î÷ÅPüÚP›jÏf÷¾—ƒjCU_¯ªR~~š´ÙÊ50íS™FÑÞ4ª—h‡Õ ”73)hçÒ œîÚU''Çvý“O>PR²ÚÀxt'Óý¾º+µRI7nš×ÕíJM}>:zàsÏ=ŠÎ*W%^CÕݽVÛ¬Ž‘ï«ª¤¬Q©½èk»t§th¨¶›á¤¡Ú~#' ÕølÔkãï‹¡øµ¡Ö®,üÕ´†Úk×®5ŠØó$‡Óè‹Ô×—Xà ”¨ªúêUóGº%.nŒôØNfÏPmWÚvCU£±ñ#Uãn¿ýí]~±gí]~e@6xýúE Uw÷¶ÅPmw‚CC-/ß þÝ^†j;¬º£îi¦Ø; Õöûb¨~m¨ÍgÏ“*Å<†j{'´GàŠŠMÒº''ÇÛó$‡Óè‹)&$Ä65í©©)Ž¿_u…ººX­zöìÇNŸÞ¡ô4’Ͱ'º“Ù»Ëo»Rù´-wù'Mºk÷îu2»èQjêóýû_£>®<~ eX¹×¬WÚn³Kwùuw‚Á]þ={6FFöS{Jl†ö»d¨²ß VÈ¢–,yÆÀPíÆwùm¿/† à׆ ¦2TiÔ»EE 
P{“¬_¿Hšüàà {O:3îpmm‰˜Ò{)3ó5eä‚O)·\íM`µêU«æ÷í&Û,z¤êmèN¦û}uWzòdáðáQ®ö”’%+âU117ÊìAA–Ñ£o=p ]fãÆÿºîº¾²ù+ú˜2å‡Ï¡ÚÎh»Í'Nl—1JO©ªª?¨ýŸäëËH­ÏéîÝ)Õ;ãVù 6C{ˆv2Óçä, »RN3cCµw6Úkãï‹¡`¨`C%A}{ŦM¯¨Û ƒwÞòN©váÀ¬Trö† ¾i¨^¡ ^¡G=zGFö+-}å­§ž¼ý€¡‚ϪÏx§UàX„Ϫ”ëÇÏ¡~ 0T‚ÀPÍBóÙó¹}Æ_ji¥Š 0TÞtJ`¨f!'lì…ªSTq€¡¶ÇßÜR[zˆƒêc†ê’tºj¨nL/X,Ý£¢¬X1·- ¡šÇÏùjcU`¨íCÉèäCÅP;ÚPÕ×`Z%Er{á()†j¶¢}dÑšO毢Š µ}(üPýáJª õĉí#FDkSWÊ@ZZrXØ•Êë|jjŠGº%0°Û Aåå´º&ôésu~~šÕ…F{sUVæ r­ÅÒ}þüD‡—- îŒ5Z½÷H›Ê-×]—v-^<#88(4´WVVÊ¢Eӕ᜜åÊË$0Ô¶Ðzñ;žC µÝÈ‹ˆåÙ)“ê„ 1V¯ÿßš>ý¡ÆÆÔWï(éîKKßÁ²š½¢bSDD¸­ÀéÎ¥û‚"ƒ°·pW¯wjßïÞ–;c¨²–¢¢·CBzªÃêò÷!¡`¨¦`ëw7Ÿ=ÏA5ƒ¡ê¾B½¹¹B;z‰Q}Iz~~š˜–råU©8ݹt_ònÞCuo˪ºµÃêìË$0T Õ,HÕÏ5³¡ZMP__j5Whh¯]»Ö(ò§ûNݹœ4T‡ wÕPËË7DGlË–;4TãWw,“ÀP0T Cµn‰Ç»Ãö.¿v‚¸¸1 ±MM{jjŠããïWߟTQ±IæÒ¾®]<¬®n§Á\NÞåw¸p— uÏž‘‘ýÔžRîmy Õ`™† €¡š…éEQ“êÉ“…ÇGYõ”ÒNP[["‚%ž™ùš2rýúE¢zÁÁAZ×\°à)åv¶½¹*+ó¢¢vÓÚ¡m8\¸“=¥”[êVùÝÛò6ªÁ2 C ù _3Ô†/NRˆ *A`¨&"ÀdG 0Ôv ‹¥»UàF†ê*»î{¶:¯”Š0TÀP C5 Ç–½Ç›¥C µ=ßÿÉ«D µœÚ^V2z`¨€¡vi/élK¢S'§,–îQQV¬˜ÛÆïe ’¡v:ÍgÏg÷¾—ן†Ú&ÎüüÀ¬TŽ(†ê1CU_sj•yÊí…£¤ªÙØ~ý#uG©ëCuŸ¯³w–NL√ÄPuówÊÀ¼y¿´XºrmeežšÍTþ•‘óç'ZÍеOŸ«óóÓ¬.4Ê¿55Å£FÝØmРˆòò Æ‹ÒÝ<ƒ…;™Uûr)‘Ô¶l¹ÃĨ‹Ï í•••²hÑte8'g¹2Á2 µ-ÈÏþÚÒCÔu€¡ºÏ_Öæí‰™#jrCU_þ4nÜμª¢bSDD¸í2ãâÆlÛöº ”–¾+ZæÒË¥.ÜÕë­­ûlÓ¸´åΪ¬¥¨èížê°º|ƒe*†Š¡b¨ UyjssEPE)êHuÊüü41-å•TòW÷5÷ê…IuÝEنÅ·ÅPÝÛr‡†ª¾?V;¬În°LCÀP1T õ€•<‰N¹a¨¡¡½víZc;»ÖóêëK­Vꤡ:\¸«†Z^¾!:z`[¶¼¯?5X&¡`¨*†úSôïMAÁ ±´%Kžqã.Á›döääx­‡ÕÕíTïk'$Ä65í©©)Ž¿ß¥»üî’¡îÙ³12²ŸÚSʽ-o£¡,“ÀP0T Cý)rr–‡…]de¨âmÝ¢¢h{JÉ¿2R«të×/Õ“Ùµ®¹`ÁSÊíl®­--“#"Â33_3X”m8\¸“=¥”[êVùÝÛò6ªÁ2 CíL.ž>sîXGÔ$†J6%‚ÀP0TðCµíðNªÉ µ©öìÙƒŸS݆ >k¨žqb«@’ Õ%Δ&Õ4`¨€¡†j®k¨y±Tw€¡‚¯j;¾Ë”ç\ Õód÷¾W<•0TÀPݑζä4urzÁbé5`ÅŠ¹müÖ¶ Ùª9Ùuß³ßìØC*`¨&5Tõ¦VI¦Ü^8JŠ¡šßP÷ÏXöùòtj<ÀPÝ¡¶ôЗ«s8¢&1ÔÊʼ!C®µXºÏŸŸ¨JXMMñ¨Q·v4(¢¼|ƒ•¢Ù~ju}QwvÝÙsÁ€€®}ú\ŸŸf»p's jß#%’êäÂu·ÜaÔÅ‹g…†öÊÊJY´hº2œ“³Ü`gjG z*’Jª;±ßT†ªûz§¸¸1Û¶½.¥¥ïŠWÙ¾ÉàS{8ù)5**6ED„·ýškkë>ÛÌö®»åΪ¬¥¨èížê°º|ƒejûRWq´òí­Ôx€¡b¨^o¨AA1*hn®Ð¾üS½²ÐÕöåòŸÚ›@wE¶‘ŸŸ&'sÙ[x[ ÕáÂu·Ü¡¡*ßËjXÝ`™† €¡b¨ª †Z__jOûŒ?µ7“†Úk×®5ŠYê¾MÔUC-/ß=ÐÉ…ënyßtj°LCÀP1T Õ…»ü ±MM{jjŠããï·½Ëoû©xX]ÝNƒ œ¼ËߣGpEÅ&1Èääx­ä© 
wÉP÷ìÙÙOí)åpáº[ÞFC5X&¡`¨*†ªßS**j@``7­´ÕÖ–ˆW‰ºED„gf¾f¥\ºŸ.Xð”r;ÛÞº+²õë‰GiEV»p'{J)·Ô­:ò;\¸î–·ÑP –I`¨*†Š¡òN)‚ÀP0T=¾8Y[zˆ#Š¡† €¡†j.CµXº[bD`¨m¤õâwG—¬£Ò |ÇPÛñ­Q¼Ÿ‰ÀP;‹Ü>ã/µ´Rï† j¾õÔcÞL¾a¨Û¯¤®â(õ`¨€¡b¨†jʧ.øjcõ`¨àņZY™7dȵK÷ùóUÛÓ}•¼Á§Î¼Ý^wEN¦ŽR–,y¦Gà°°+³²RÚ²^{_0--YΛŸ¯6Ô#‹Öð(*`¨à݆j/c¿í«äüÔÞNfì76Ô¥Kgµ¶î+,\©¾õÞázÇŒ¹MÖèäœ>ý¡ÆÆð3« õËÕ9dô ÕešjÏŒ#jCµ÷ÖSÛWÉ;ù©½ œ|멱¡Ú¾õÞ½õÚû 2 rFx»¡žÚ^V2zõ`¨®ÑÒИuÙQ“ªí«äüÔÞíb¨Æ#_¯Ã¯@`¨Þk¨RÇVç•Rï†ê2RõsDMb¨öîòÛ¾JÞøS‡o·o—»üººl¼^õ.ÿâÅ3œü‚AÆ~4ÔÌn·‘®Ï$†ZY™5 0°[rr¼ñ«ä?uøv{ݹÚSÊv¤3ë2äZYoRÒ4ùëÌ$ À 5/"öBÕ)ª Õbõꇽ CÅP1T³¼^U›XŠ 0T CÅP CÀPÍÍ™òí¿ã b¨¡`¨€¡"áû†ÚÒÐx`V*U`¨€¡†j.µ´fv»[U€¡‚êg©)Òîz&ªÿ°ñëÜu[™ãøŠå¾ýåXŸÌ^ëc_J ‹·ßå/üPýáJj?ÀPÁì†J„KáÕ†ºë¾g¿ÎÞIí*˜—æú†¿íܯDaêê-/½¾ñ?_Ù0w±4ÀíOÿ6óÆ¸Œà;2þåÖšön·eôýE»-Ü›#ㆇ;¦ùø×ü·Û8î]‡g„Þ“9iFæo—ùÈW[»Á{ uoââÊ··Rû† ÞAYYYnnîæÍ›×µ‘Õk6ÌziSÌ´ôËb2º ×^yJÿ×Û7Ì}eß³aÞ’ô»~Ø!WöíoúÞ½‰?IêGÿç—|oÊ3ëßúo|;),Rd¤àTUUyQI?ºdÝÇsWRã†êµ¥‡Ž¿¹…ƒÚ)•ßÕŸƒRwìt„Sk‹´ƒ >O}ép•ŒøW]pÙœeÕZ–WL*Y7p¨>ÆçïãPýŒ²ádlо“òË­Ÿ-Ïóã…­°N×þËý8T€Öç|å!Á±§g,Sÿ­Î+õã…% ¼n’‡ç¼Ç8þßœ®‘PÿUnNh1‡ºµÏhÆp¨àçœ\]À…>t€”>q*ø3Gæ­Íï6”› |9žÌîð ã8Tð[N®.È‹Œûºê Cnp½îÊž s¸É¡å‘ÍV{Ÿ ü‡š’=9–—W1à6;Nª7‹qhaþöÐsµ¥ûÀ¡:˹òGæ­åwõ~êŽ{zfKCMáÆõÆ-÷þöøâ E :p¨. 
‡õ£–¤¾öožªËû*~WŸø¥÷¨)Ù£$”í˜lüáÃÜ1€CõRxÈüžSk‹È³¥çøâwª÷²ñ‡ûqšr€Ksl‚ï,À¡z/E÷:W~€Ÿü•ƒ3–}2u ã8T_bÇÀI_lþŸü•Êq³xtp¨>Fmé~žÂñ6®ž=·-voV÷ÖéŸ-Ïc‡ à>W¯Ý?êàŒe …G(yd—[+h]ʆÿ‘s€CßÞ“ñì¹¹zöœ˜~ÆÁœ ûŽpPÔ¬lÿÕ‹Õy¥ŒàPÁ'9úößzG-Ìõº+ù݆r;Dó±kÌL’n|’Sk‹¶Üû[^þ­Â…}Gr,HßL|2u‰2€Cß£ä‘ g µøüýâÜðWÏžc(<ÎÑë+ÇÍb‡ à2g,“#%’È{œ3[Êd`À¡º™€JùÈÔ£ Ö3žå¾#…?É8ÕxÈT®×]áY=#C*&•qª ì™0‡‡Lýy­m×vïNùrk ¥XKp¨>É‘yk?ž¼ˆ_·å©;vZ¿ŸÖââàÁƒaaaóçÏ÷þ>”¶‚u»9VÖRj³sjmQ…u:¿n óùûÅù݆Ú\NõÚ3IjÇvíÚe±XÖ¯÷û¤¾Í+~šÕ²–àP›‹så¶ÅŽã×mI.8žc ·”á¾ÿêÕ«ãÇþ)È¿jå Èž¸}ûöÊ”)S¦„„„Ž1¢®®Î¦Ík×®Y­Vù444tΜ9êt)¼óÎ;={öüøãííûwìØ¾mÛ6íGú™ÆÆÆjÍÁ™3gÂÂÂÔþ´$¼&½Å*k)ÕÃÔ×^È ȯےžwjm‘“ûþää丸¸Kß3dÈùW­üÌ3ϨûÔiÓ¦IµsçÎ]¿~}̘1III6m¦¤¤È×¥¾´¯Ý÷Ën[¾øí·ßÎ;·wïÞ†ËËË`ã g*!::Z­“˜˜(V£U†ºÂ:>¾î{ÜØ÷ eee;w^³fùL…7Ê·V¬XÁÏê”þ:ù“©KXK›rtZ>2• p¨à'õêÕ«IIIß“˜˜¨Þ-§¯,{âÈÈÈöíÛÇÄĨÏ)«Õ¤Ñ£G„„„Ì™3Çä:¬yÇ>6oÞ<“™ ›7oV.¡‚P_{!¿Ûкc§YKÝ£¦doM*€'Nœv³Î">>~ãFîºó+ìÝ?ÍZê —WÜ=Œµp¨¶$''×ÕÕýóŸÿÔ^‡mÖ­[§}•ké7§kx¯àP]æo=ÇÜÔ;ÝÂgžLX´hQhhhPPÕjUßÝãq#""vïÞͯ¬¥ÿ÷ëÍï 8T×(¸{˜âó{ÜžæX\:\ÅP€úÆ.ª³ì8©¦d¿±¹^wE|ÿá9ï1­ÂÉÕ{_Ê`‡êÃȾ/åYÊG¦òúÃVäPÚ {¹»À#ì|rrݱӌµ9º`ý®13ù=…Øý-÷þÖäåüÐÜH}[‚qh>N,ÍÉ X_{¡À¡6_n­àa)Oq¾ò칿®:ÃP´"O^tpÆ2Æ¡YÙ3aNÉ#HÜ€Cm.ÄNåXð{„såÎl)cZ— ëôÏ–ç1ÍÊë;NânjóšT~cðÊG¦âP[€ëuWò» =±4‡¡À¡€J™ðåÖ Æ¡øætMÑý£¸Öïã‹7~2u ã8T€¶KÃ…Ëx¦Ã{ÒRx3'W”Le‡ Þ—[+¶ÅŽc‡ -/–sjK÷oí3šq*´ÇoÜùädÆLøºêL^dã8T—i¼zûÉ\¥¶tÿæÎ¿¼t¸Š¡¾9]“>qªËäXˆ†òK;OÃ…Ëy‘q¼Õ@åëª3ÛbÇqß‹9þ_×®7ã8T—)ýu2¯šw‰]cfr}À†²á,™Ê=çÊ0€Cu™ƒ3–}¿¸oDrˆ»cà$ÖÀ¡‚g8<ç½ÂŸŽl¼z¡p›ëuW îvruA›š’=ÛbDZ&Õý ¿·Ê¥ÃU9–Ü×à©­©Í¾Æ_üo=Çj8Tw8±4§|d*¿·Ê–{Û–Ïúx–/·Vä†üºêL\vYê¼È8ÖÀ¡ºCMÉž­}Fó{«ÂÀ³J[Ám38T׸^wåýÿõ™`Àwá4•÷Sa.ÃŒÕò» ½xà8?9ø"WϞ˱ `‡êoì|r2w^‚­~nÐpárMÉ‚ð§h¸XǦ 8TãPÚŠý¯Ìç'_¤îØéünC—Ýùºv½ Ÿâ«¹p¨~Gu^éÞ—2Úì/}ãz#ïÛòi³Å+'Ýs¨ŸÎK¯)YG¾Ÿf¤ãP‡ ~ÈÔ·I…CmƒUví7nì%_/·­Ã¡üpWßm(çP}—“« x¡/•À¡f,-++Û·oßÑ£G«««/]ºÄ–8TðUĘŠ=å)1_ÿëk/08T¢;ÔõÓææææïÚµKLjmm-[:àPÁWÙÿÊüÒ_'ó.XÀ¡ß6ì>û—•{F¼XÔã×ï<¨<|S>`WÜø³Yï|{m7Cäåuõ^_³fÍæÍ›Å¤îÛ·¯ººš-p¨à“\Øw$¿ÛPN¿•8¹bqþU9ðào&ÖNŸ×˜µö¿×¯¿±vÝù´ÌÃOÿ~k÷¸ü°ÇNfοñ-cå½uÕä™+V¬“š››[VVVUUÅ–8TðI*¬Ó«óJÀ¡¶å¸~eWyüï îpfÊâJíÅ—ÿ5gË/{ø?¯_þˆAóN‡šš.uåÊ•ëׯ/..>zô([:àPý„Ks¾®:Óv–—‹û€CÅžîxÔZò“aõËVšØS%®¯ÊÚÑë·%½†_¯«ÀâPp¨-GùÈÔ£ 
ÖóÃàPÛH”K{*ÖÓ¡=UâÆÚubRË)Ä¡àP[ôj…u:?<µMÜ{ºæí‚ˆÇ9{js&uË?ùç…ÍݽvíÚa=q¨N9T¿Ïd}lц‚ $w&¯´o±ÿ•ùŸ¿_Ì8àP]}r?¿Ëãú{OÛÙG{OjþI -ãPõ‡ 8TM'òJ{;Ÿœ|jmã€Cu)Îev¬?EêŒC•ØÚ=®úýw[ø*‡ `æPÉdMWÚ«Ø;N¶MƇêRì™ðûƒO½lr5ßИªqøéßïz2Ñ|§OÆÇ?ÐaÀ€Ÿ×ÔMõßúú «up`àM¡¡·Îžý’Í©S­WV¦,ŒŽî&õ##Ö-›†CÅ¡B[w¨<[@W‡ŠCõõ(êùdíôyn;Ôói™ÿö„ù,î¹çÎ;W46înh¨LI±Šû4w¨RgÈ_\¼X*!‡Wù;wþaQÑb)ˆ÷MJŽ~âP‡ŠC%È+CÅ¡úvdw裼–ß=‡zcíw¬ó³ŸÚ±c°¹C ûÑ©S[”rUÕjD„%33¥ººýÄ¡‡JWÚë(ºÔ¹òŒÕ¥Å7fßÜ¡J8t¨;w®è×ï¾  @¥öí`îPN·©°vB£:ÝÜ£GdAÁBô‡ 8ÔÖÑôïrFeíyþ•¢ŸÄ¿Ø÷_9£»Ú5z¢Loî§J òJ{-y‘qm*ÓÕWΡ†‡ßž›;¿¡¡RÊòWõ—bUÿ¥Ø—/ÿÝís¨jlß¾Ìb¹ ýÄ¡µ4ýäš·óÿm@a×A9£ŸùýÖ{‡æw}\êàóÈ+ݹ°ïHãÕkŒÕµûPïO¨M›ß¤ûP»4ŸEHÈ-ê}¢#G>®úË^½¢Þ|3YLªL1b€Í}¨âY%ââbõÆ488èìÙ­jûVë`1²ŠC•y¡Ÿ8TÀ¡¶¨¦—3úÉßüøqÇ9£» *ûÍï¤>n…À¡:~–lJ“žåúEóY,ìÚõŽöíaY¸p²j4÷ïÏîÙóneú[oMÑ>Ë?fÌ€€b7õÏòKdd¼x“úovöQQ]¤~tt7®ò£Ÿ€CmQMw9gtß‘%¤¢°8TÇïC½wˆÛuëÿ?¤º`5Â…~´Q‡Zÿ;—sF÷Y6l<‚…ÂàPä”êöÄ™9oºáP¿Ë)ÕåqîþG?Ú¨C=ùÞŸ ¸“3ºû`îIEap¨ïï/è>¨~ù*—êwÛu ‹~´Q‡úÝñ}Ä›{OLÇ÷å´ôü;Ÿh±œÑÞ>…jËGùȤ’~#¯g­qí:Õo~‡)D?Ú¨C=»å½Â.ÝKýÝ=R1ñÍ}”óoBi1Ùê@a‡ê[ñݽþŸ“Ú°ú=§ïõ†{ýÑO€¶ëP÷ü礃¿™èö«¤?÷‡]c&µ°Ä¡¢°-ÏÕ³çʆÿ‘qÀ¡6é})#“ î|fþ‚ÿ~ßþûRffl¹k0ïKA?ÚºC-ºgˆIÎh‡õ»wõõl>‹Ó§ ãã è0`ÀÏkjŠÍóšÔ×WX­ƒo ½Uÿ&í ]{îP_aÊ”gCBn‘6GŒpùòßÕš3gN²XnËÍŸ–ö‚RVÞ,¨THOŸ$Ý/J—®\ùȦ«†‹æ| FGw“‰‘‘aË–MCa½™K‡«ò» ep¨xçt·' 2ô`Òk.nÌÎþ×;§ß|ëð ÿµµg‚|ʽ§8TêÞìöfùN:TgòÜsÏ;w®hlÜÝÐP™’b«gîP•·I_¼X*!…¦_åOM}>..¶¶¶D:0f̤¤ájµ‰Ÿ’ŽmÚ4·sç¾üòÓJ9&æ.µ‚¶'Ò1›ö\4{™ªïÙV'¢°^ë´¶ÅŽcp¨ËÛ7á÷E÷'dwè£äícºk̤ê‚Õ<¹CÀ¡~æ9£:TgrFkCÌ\ÇŽÁæÕíŒ|öjxøíjƒ_]Ö©ÓÍjµúú }9  ƒ:QÛ5ÝŸaLÍ^"",™™)ÕÕE(,‡J8Tj‹žCݹsE¿~÷*Mµoÿs‡êpº«Uþ ¼I {0,Û´£u®®.šaöïÏNHxT k‘-–©…Å¡âP ýðv‡Ú÷¡†‡ßž›;¿¡¡RÊòWuobÔÿu=ëòå¿7ë9TõæW{Õì9Tós¨öÍɨ±}û2µqÖ;©Î+Ýþ«*CE?‡ê'Ïò‡„Ü¢Þm9räãª{ëÕ+êÍ7“ŤÊô#ØÜ‡*žU"..Voû‚ƒƒÎžÝj2G› ii/ ÔïôéB)=úûEí9Tµ'†÷¡Ú[4'; 1¾ŠC•¦PXoæ³åyÖ錃{õȼtYñÂ×ãÓŒtôp¨­ù>Tçª3ïC-(XصëíÛÿ "²pádÕÆíߟݳçÝÊô·Þš¢}–̘!Ä´éŸå—ÈÈxE¹\noŽú â##Ãd^11weg¿á¼C•(ÏàK—´7­š/š“BTTYÒèèn\å÷r.\®;všqpÏ¡„?ú 8ÔVË)å¤Cm™œRäŽÂ¡‚ÏŸC}sŽAøz™7ýjËݹuò½?„¨_¶Ò%‡ú]Îèîƒýû½}8T8TîC%ÐOª[9£ãWò“ab:]Ë=l|«ë…úŒ¼žm…À¡ú 8ÔÖÑôïrF?j“Ú°|•Ó9£G‘”…À¡è'[:àP›9gô“¿+øñãöîIý×½§ÿ5gK·AäŒFašÃ¡úå­5ú €CõDÎèPØuÐÁßL¬>Oy™ÿw9£Ó2?óû­÷Íïú89£QX€¦;TC3êêÛŽ1¸ú Ð&êÿËýü+E?‰ÏnÿsFwy|×è‰äŒFaÛ,Gæ­•`Zý*•@?Ú¨C%PXÐs õm 
Æ¡9Ρ?žwÏ=wÞ4uꇩã´ÒÒ^èØ1Øb¹mÆteŠŠ’G£ÿ:ôèY^¾Êd^†5åÓÌÌi\MSLè'àPq¨ ‹Cm+õ‰'þýµ×¾{1ÈŒ‰ÎŸCUj46î.,\¦÷µÃ‡?¶iÓ\)”–¾+ÖS™øØcÊ\læeXS>MLvåÊGˆ~‡J °^Ê'S—œ±Œqh‡(FS •.9Tå[êiNíו¼n jÃyÖ”²ÔA1ôp¨¶šN&k‚¼ÒÞC…uú‰¥9ŒƒW9Tó‰â;/^,µùº=‡ª¯É-­ÀØ¡y¥½Ê¡~¶MLvåÊG¨ ël‹'&ãàA‡*Z$ê!FP ÉÉ£ä Wñp]»ÞQR²T&Z­ƒSSŸ×~EdçÅGHAŒccï7\Ãwî\¡4%-DEu).þ³&N|Ê^}5Ô6¥ñ”rL~ùòßW¯þS\\¬Í¥“½zE=»5+ëõ‘#7Y›jÒ²ÓW_«O²J ŸþàPÝNc­|KB=Íi“ÆZ=¥ªV°—ÆZ_SÊR Caq¨àŒC»ÿ°R^»v–’n4;û#(—/m̘!j²¨°°‰Ú(ÕÄó©éImBý('gžÚ”I}ý¥ ýâèÑq6-H'UTzhoYlª9Ó ýh‹Õ|¢øNý‘½=‡ª¯É-­(,œw¨/¿üôÂ…“•òرC—.}U ÉÉ£22^Q&ŽŸ°rå )df¦ ò‹ÚÚ’_¡TKJ¾dÉTÃ5\>RêLœøÔ[oMQ›²W_ûE¥Ž¶q)/[6MÛ¬ÒÉ™3'8³,6Õ´è'WùͪxÍóç?T¯ÝW__QSS¬è«WùEjµWùõ5q¨(,œw¨©©Ï‹ŒÈ°XÒNnÎÏÏ”‰o¾™,ú&wî\ѵë—/ÿ]&>óÌ@±‰§Nm‰‰¹+;û ™"†U©¯¸¸Xå£)SžUNŠwTÛ7 µMµíÄøø‡W¯þ“2QŒ¯zr×dYôÕ´-è'€:ÔãÇó¢£»tHI±ºô¤”~¢«rÉ^ʵµ%b=åßÈÈ0e7 >)%óJN%•‰†5q¨(,œw¨ÕÕE=zDŠŒÈÑoHÈ-Ê=B/–ÆÆÞ/RÓ«WÔþýÙJÍÂÂE2Eì>ñb¥¾½[ÞÕªª>”–ö‚¾¾´#ÇØ†_ÔVVË ¥¬|E|³ºË¿"zÊÃeÑW3é6~ø°CmÝXºôÕ>}¢Q(‡ q¨Jˆu›:uÌ3Ï tuM–/Š#ÔFhè­­»q¹½,ú €Cu'õ×¾XŠ@aÛöi¸p™qð”CÝ¿?[–NnNJ®\Í÷Ñð§e!ÐO—j;_-CaüCÍPHýª÷^å'PX_å'ô‡J(,àP ýð&‡šð¨’ÊÉLê»T‡@a‡êCÛHÑCµ·Í±k@?p¨.D‘§Nmñl}ó:QQ]ª«‹0…(,àP\?]–TÿÓCµ·&Ý6üÈÕ] ú Ð&jUÕ}ûÆt˜=û¥àà eâ?þ±¡OŸh™(kjŠo|Ÿ$º}û´k×.((P¾¢L[ Ê€IDAT‘²á&×ÐP©6uôè_z÷¾Wj¦§O²©¯oA"-H!7w~÷î={Þ½wïZµ7)•++³D×:v .*Zl²,6Õ”¾åçgÆÄÜe¯ÿ >êPß|3Ùb¹M´B„ÂF4l¶}MsCa.^,ˆ°(}ýu™4«OŒçëz¨ŸµÍ©eµ·Ú‚á^CÉw CwèÐ&›×/ú ÐFªHƒÈ¨’“Gõïÿ€¢8]»ÞQR²T&Z­ƒSSŸ×~eÓ¦¹/¾8B åå«bcï7Üävî\¡4%-ÈQ~qñŸ¥0qâSöê«¡¶)-ˆHM:æò忯^ý§¸¸X›9J'{õŠ:{vkVÖë#G>n²,6Õ¤eQÃW_k¸/!PØcïKöa<èP?ýt³ŽöµùZѰ·íjš“ £ÎNÄJMRêOzh8ku‰´eµ?ú]ƒv„ÃÂ~”ý†ôJ¦$&ÓÖ×wýh»U„#>þa¥¼ví,%ݨÈLj”‰Ë—¿¦&Ù«*â"ÇÄJ55=©M¨åäÌS›2©¯ÿ¢´_T2 j[NªïXQzhoYlª9Ó …mxcsœC0àç ª‰—µÛ»~Û7Ñ4'Fþ.:ujKïÞ÷:”5_ÔC‡³Ûj3zÚOMFxåÊ6­é»‡~´]‡úòËO«ÇýcÇ]ºôU)ÈwFÆ+ÊÄñãDG¤™™2dÈ/jkKäÀW©–”4|É’©†›œ|¤Ô™8ñ©·Þš¢6e¯¾ö‹JmãR^¶lš¶Y¥“3gNpfYlªi!PXªŸ9Ô+W>š=û¥Ç{P¹AS»½Ûlû†šæªÂˆù¥WR²ÔDÖ|WÎZm\ß[ó½†xPeç¢NÔwýh»Upå(¶±q·XÒNnÎÏÏTnäz≗‰;w®èÚõ%É3Ï m:ujKLÌ]r¤+SDz”úúˆ‹‹U>š2åYå(Y¹ñÈ^}5Ô6Õ´ãã–ãue¢½ÍA¶á²è«i[&PXªŸ9Ôß'¯ïÕ+JÉW§ ›m_¯in(Œ|=*ªKB£&ˆOë¡á¬eyÅqÊÁÀk¯ïÜù‡ÊDõSµ`¸×k®ì\zôˆ”´õõÝC?Ú®C­®.™ ¼IŽ\CBnih¨”‰/–ÆÆÞÐAT~ÿþl¥faá"™"j% "z'#õ}чúQUÕâqƒƒƒÒÒ^Ð×—v´wŒi¿¨­¬– JYùŠì„{ìAù·]»vÊÃeÑW3é6ÂâP}Ú¡Š¤È–n±Ü–ž>I™¢ 
›m_¯in(ŒDdd˜ùsè>¤‡&}Ж•GieÁ³²^×wR-èG84ôÖ7ßL–‰={ÞýÉ'ëmšÕwýh»U Ñ‚©SÇÈñ®«›–|QI­†Pënín/ ÂâPýãjK*Lyùª×^ïz¸}û²^½¢4ô õêþýÙŠŒÊÑmRÒpåj¾†?- ‹Cos¨† #ccï÷Âk2nèaqñŸÇŽª¼[@?ZÙ¡¶ƒvíÐG¶µ¸°ïHÃ…ËŒƒG*R†8£Ÿ~x•Ÿ PXà*A Ÿ8T…ð.‡ÊEýÀ¡:ˆ„„G7mšëÌD7êxábštÛí¡@a‡ê’uÉ¡?ž7hP¿  À€€ýúÝ—›;ßa#n8`íõî Ò£¢ºÞsWaá"í‹]•·â«5¹… ‡ àŸUDPyÙµÇ+71zôˆ4«‹Ã:-Ù[—–Ȥۆ93(,àP›ïªˆIzú¤úúŠÆÆÝåå«”t ?«mm̘!55Å2»üüÌ[Ôö؃Ò›žØtIñ¾¡¡·þã”)rD-ýQùRÒháPq¨~ruÀ€Ÿ'$ùd½M³úî¡°8Ô¶ìPsr扚)Š¡Z4ÉÚ'¥¢£»É&–’buéI)Ñ“~ýîkßþAA<ò3å™HåXWdD}RÊy‡ª<)%='ªž ÕÖ* "÷ïÿ€úðþÅ‹¥b²¥ò]µ8T*€ŸœCmÖç4uê˜gž¨>̤œ±P>R’G«!†¬uŦ·æ±}û2õL ‹ÂâP[Ý¡úG,]úªÐ6ÑhâPÑOªqìߟ­˜ÎNnNJ~ùòß•s~±±÷Ûœ õ†0ì­yÿyìØ¡55Å(, ‹CÅ¡z$Ò>•¯Lq¯¤ ýðU‡Úü‡ŠC%*@[?‡Ê…$…u›K‡«®ž=Ç8´ºCEÇôÀ‡*i¬ üÛ¡:|ÇSË;Ú–—ÍM›æFGwÙìÛ7¦²2K}ókÿþH䯚I ÙD?üä*i¬QXŸ8‡ê=µåesäÈÇ?ýt³–,™ª¾lõÅGLœø”òr+¥€l¢ŸÍëPIcÔ¢°àOU6dQððÛÕ×›¼ÚÉP=ÌoìÖ~ªÍá$2"ÂRUõLŸ2åYiY{$,‡ÊÊ9H›Ü¤Þ,›²DêãVê+OŸ.TÓ´"›è'€w9TÒX(,xù9ÔÊÊ,U( ÍPÇ\½Ê/¦Su~Ë–MSҋȧjË2åÓáÃÛ´i®JKßåñrÙTBÜíøñ úlUd®B?¼Ô¡’Æš@aÁ;j~~¦ø?åsU(ÜH1ê¤Cݹs…ª!QQ]Ú¤+iY梞 UÏzªófÙÌÊz½gÏ»Õ÷îáPÑO€–v¨¤±FjQXð‡zëöíËœ48Tå4ªØÇÂÂEÇ?fã mêÅ‹¥MÜÐZL6ßzkŠníËž¹Ê~´´C%5R‹Â¶g¶”J[Á8xСvì\Y™%›¼6…©¡ ™_å׊’ὡꧢi11wõëwŸz‘½«ü£GÇÕ×Wˆí³Z{³l¦§O²±§'>Å“Rè'@‹:TÒX#µ(lkñÙò¼ ëtÆÁƒuåÊbRE»´¾ÓPÐ uL-hEI6ŸŠ¬ ÔO«$Ò¦´,í«²S[["&U¾–ý†7˦a÷M!›è'@3:TÿÒX£°8TÔ¡];u¨-»­ÖÁʨ¾¥$È&ú €C%5 œ\]P>2Õ¡›ÔâE£éÌÁƒÃÂÂæÏŸßÖª¢k×β™ˆlè'çP Ö‡Ïn‹ç£ç;ÕŽíÚµËb±Èî—çP¦ ¶wýÀ¡(,ÕÀ¡^½zuüøñÁß#ùW­¼`Áñ‹íÛ·W¦L™2%$$$00pĈuuu6m^»vÍjµÊ§¡¡¡sæÌQ§KáwÞ‰ˆˆèÙ³çÇl¯c;vìß¶m›ö#ýLcccµöÌ™3aaaj|ú*?A Ÿ8TŸ¦dýöæû½PØv¨ÉÉÉqqq—¾gÈ!ò¯Zù™gžQß´iÓ¤Ú¹sç®_¿>f̘¤¤$›6SRRäëR_Ú‰×:T1—òÅo¿ývîܹ½{÷6ìX^^ž¸Xÿj8S1²ÑÑÑjÄÄD1Ä~p•ó'C?p¨¶qüxÞ Aý‚‚:ôëw_nî|‡Zæ†ÌÙä0ÌÌL û‘òfoeʆ éQQ]oЉ¹KM‹j/ó!ÊŽÂ6† —/ì;âСêïCµX,'OžTÊR U+‹5T¿þùçŸ+åo¾ù¦S§N6UÚùâ‹/”²´U=/+&5 À°cíÛ·ß²e‹Ít{3íӧϺuë¤pâÄ ñµ×®]k1‡êpÛñÈÆÕò:¶iÓÜèèn"Y}ûÆTVf™ŠŠË×]Ú½éû`ž9ÖæSýPO™ò¬ô_k£Õ¯8gÖïªÅbQÏSÚœCµ9ùÕW_™´©=‡* ºêPß"ÎTèÙ³çôéÓ###¿ýöÛfr¨²Êv¡¼vTIÌ¡µ§ÍgSÁ‡tLûÆ+õ.·“?¡cè'€×9ÔÙ³_Ǧ•3%*+³Ô‰ŠC•šbàBBnQËj…áÃÛ´i®JKßÑq~ûÉ–ãã–#~iÐPR¥œ˜8ìÊ•ô©ï¿š4Å0[ÌöÚø  @qTv$b»Õô0N*»¾Î+»áPësÒ¨_q{œQX¿q¨Êý£—.]ª««‹‹‹ÓÞ‡ª­6kÖ¬Aƒ)ôرcV«Õ¦š´_÷= n8T¡¬¬¬sçÎkÖ¬1Ÿ©°qãFù–¬$M‡*‡Íû÷g+()i¸É&l³Å©Ó}HÇÔg9~| ]&g—²:£cè'€×9T%Gó 
Möj9ÉPîÑT'jk~KIpò/íýÀ¥MôüùE­22^‰‰¹ëå—Ÿ6dž†JC­Ô÷ÄaâiîåË_S’NEGw³9!2Ý·oŒKʮÊn>ÔÚ¼ÞÚ|Œn3 ëõêÕ«IIIß“˜˜¨ÞÓ©¯,~12RV°ö111êÓôj5igôèÑ!!!sæÌ1¹[À¼c‡ ›7ožÉL…Í›7+ú›Ï¡¦¤X{÷¾wìØ¡%%K 7a{[œöT¨¯è˜zÏRÏžw_¾ü÷¦;Ô6®cè'€×9T}94ôÖíÛ—)vÍ<žâ\¼XÚÄmõÊ•ÔËUöæè0­ŸCeWïÙ²q·•]_V5×p µ¹²7}œQX°áĉb+›uñññ7nlV‡*QUõÁ’%Sû÷`øðÇô[¨Cqó!“xë­)QQ]jjŠÕ)M¹Ê߯u ýð‡Ú±cpee–ˆ‹aÆj{eÙŒW__!riµv~û2ä;w®Ù‰ed¼Òµëª„?ÿ¡Ê®^›9s‚aÍ—_~Z¹Gmùò×zö¼[™˜œ<êë¯Ë”s¨½zE5QÙe) JkÚTàú‚½¡6¿:æÞ8£° '99¹®®îŸÿü§önæ`ݺuÚN5“C8ñ©³g·Já“OÖËÆ¥WÃ-N[Á‡t,=}’=UFÀ½'¥Ð1ôÀêÊ•3Dq‚ƒƒìÝÿdX®­-ÑQžQÈÎ~Ãù' Dûõ»OÖå8û‘G~¶wïZezjêóÊ• W•]y  ƒ˜Nõl¨¶¦»‹bÊ¥š:;q«Ëm2ÇþýÐ>æïðÁUÃ>ääÌ“Öd Í•ÝÞP‹ÐKÏ££»éŸ0p8Î(¬áð}¨ÍÍ¢E‹BCCƒ‚‚¬V«ú†)±{÷îæv¨K–Lˆ°È¶#ÖM¹ÍÑFI ·8mÒ1›×&˜¼m áøžCõïXºôÕ>}¢›ø–í‰.H¡°žesç_Ö×^` °àÍçPõÛˆÍVo¸Vk¥ $ä5é‘hH§N7+ÿn>nlÝîõÐyŶ‘5_*ú 8ÔÅOü»šöC›íCyÙui黢#ÊÄàà ýû³‰IJ®L4¨Ÿ¥šÍYQ¢ÄÄaW®|¤5“ü"Úò€?WjÊßÇ{Pýtöì—dF……‹´©ü†h´ìiâãž8ñ©¢¢Åön0°×gýLÕD/öÒ¨!º?~|‚ZAöAA2b§OšLtÒ¡ê;æüNE‰ÊÊ,íwͳ¿è׼ܡšo#†kµV dã•MX¿-n>î‰t£‡†úãpY|HLp¨è'àPÿ†©Ÿ• ( êñwJеwï{ÇŽZR²T{†RkO%’¦ ÕÍ$G³Í ¥ÕšÚsêŒÌO èãüùE32^‰‰¹ëå—Ÿ6œµyŸµ3u&Y¶DVÖë={Þ}ùòßoüÏlÝbèûö1ŸèŒCÕwÌÉJ~~¦ì”ÓÞÚïšgÐÖ¯(,x¹C5ßF ×j­:´)"Â"HHAþ5Ù|Üs¨nôÐyŶ‘5_*ú 8TU½¼¥ªª–,™Ú¿ÿrlâPÍåØm‡Úô[šÄ :Ó y*Bgê[oMÑçËVCM`ho¢{´U¹—î™ìTBCoݾ}™¾šùNÅp}@aÁ›ªùDõÚfÓ‹½?;ûµkgIÁ|óiâÝœÎ÷ÐyÅv²WÞ&&8Tôp¨®òW__!Ëj¬Lœ8ñ©³g·Já“OÖwì¬~]•ßžë/ýˆZ?ÿ¡“WùÝs¨C†übçÎÒC‘ËŒŒWºv½ÃdÖÎìBÔ«l3gN0¬™ž>Éž=•nÈBõêe>Ñ=‡*‹VP°PZ3Ï -¿]ee–TKI±:y†áú€Â‚Ï9TíVo¸VÛlz¹¹ózè§99óÌ7O9T‡=4Ôg–ÅWćŠ~õÿ=)Ý-  ƒVbjkKDG”‡¢²³ßP&.Y25"Â"5Å~)7Iœ>]اO´Í“RöäXf!_—Ù©·Ï§¦>¯\ô±yRJÚTž”ªªúÀI‡jOŽEgûõ»Oz(Gó<ò³½{ךÌÚ™]ˆò¤‚t/9y”zâÓÞ;¤´M 2ÇþýÐ>= ŸèÌ3³†“¨År[ppùNeåʲ_‘j6÷Ñêóõ…Ÿs¨Ú­Þp­Öoq"zÚßím>î=)åF õÇ™eñ1Á¡¢Ÿ€Cå¥ÖM¥K_'ݬï®ò•TÚ(¬ÛœÙRv(mãà)‡ÚvB«?΄¯ˆ ýjK‡?é£,‹„öm)Ê6ŽÂ¶ªóJw œÄ8àP›¢?‡J(¬‡ÍÖ¶ØqŒ•@?ÑOÀ¡¢ ë-\Øw¤ð§#*~¢Ÿ€CEZúÎZ¿Ïø‡ÂºÍ×Ugò"ã*šéšÙ¹^ÑOÀ¡z»¦k–4¨_PP`@@‡~ýîËͯÖY¾üµîÝ#”Œy+WÎÐ~7!áQCMÑ?Vߊj«ÿŠï&éFa=åPsÃ2þäP[l³m#š©¼ð$:ºÛÂ…“iª™ž[Å¡àP÷FEuIOŸT__ÑØ¸»¼|U\\¬2}Æt‘ÚO>Y¯¼“µk×;´Y¤å_%)«óšâ Õ?N¾¢°nsãzcv‡*ši®™••YbRßzkJ ? 
Õª–[+o§[¶lšúFÒ¾}c´o9•)½{ß«¼£TÊêæ7eʳòÝ{î¹S}ï]MMqÿþ(‡ì†š:|-Ÿ4X[[¢¯ ]*.þ³ú¯”ÕL¡òÝ•+g¨y_¾§0-í…Žƒ-–ÛT½vr‘•—J§N£Ö4\dÚ&çPõ?DóÉÀ¡z›ßb<åP EI™.[wxøíùù™ê”ÌÌmºN}™3'…†Þ*[茉JY}¿~Û·Ùl ÅÁfÖ†"Œfêû#5ŤÚL·é†“²i¸DNæzµ·ªèW<óåE?‡ê@ÓEp•ÃhÙ„’’†+ êg“)Ê^ž'}îáÃSÞç_Zú®lnªŠv‡„ÜÿðĉO-Ö¾cEÛ%)Û¤Ôñ*,\äðŠ•fÏ~I¾.•Õwq;¹ÈöRpéÙ°¦‰C5ü!8‡ àªCÕ‹’••Yê&/5‡]¹ò‘yq¨" "D"JjY­`¸ík7[{´³6ÜöÑLý`ʼôi«õÝpF6 —Èy‡j¸ª˜gÒ2Ù-¢Ÿ€C5Öô”«ïŽ;´¤d©=]ÓNih¨Ôj„>ÿ²’EA=9áê óç?”í9#㕘˜»^~ùi'ÕV¤Y9Âvx>@mGí¡“‹l/)¶~‘ kš8TÇ àªCÕ‹R~~¦Øå4›öŒ©ÔQ¿h¯Ž* ötC¿ík7[{´³6ÜöÑL'ª¾N:Tý9éPÍWíŠg¾¼è'àPkzUÕK–Líßÿ9ÎóˆC½x±ÔS·(]¹ò‘:»‡ú©öŠ•¨¹öŠ•R¥X¹r†óÙMdÎ%µÕ/²«Õð‡À¡4Ý¡††Þº}û2ÅâØÛÖ±§úm߯¡šW°·í£™†WùÅ{Ê¡ê˪w4\Ô‚½UÅÜ¡šìÑOÀ¡kúĉO=»U¹¾cÇ`õjKS®òW__QSSlµvCm‡ ùÅÎ+dgd¼Òµëêš‘‘aÊå0ù+e庉ö»Êt7ÔÖÉE¶wÅJ¿È®^å7ü!D×Οÿ‡ Д«ü²AUVfÉÖ’bµ·1:¬cX6Üöµ›­a›Ynûh¦Íì**VGEuQŸ”2é†3²i¸D2n ¥©´´Lª½UÅü*¿Énýª±¦Ë{D„%  ƒlüªx>]اO´Í-ð2E¹^÷ÕÍO6Q™ÝM½7¼¶¶D¶Få®ÿìì7œ¿ë_ÚQ ¢ýúÝ's—#ÑGùÙÞ½kÕ:«Wÿ©{÷iAþJÙ°Í„„GÞS¥ÿ¢“‹,‹) +µÚd¸È†5õýQ?2ü!RSŸW.áPœt¨zQZ¹r†¸Šàà “ÃE‡u ˆ۾v³5¬`3kÃmÍÔöGæbó ¿I7´ãïð!3m9'gžÅr›¬æÕÞª¢_ñÌ—ýªcM÷†—ªde½®>¤IðÆ~·jÙèÐLgB½Ùýh[ÕS¿FEu)-}=Å¡¶A>[ž'Á8xÄ¡ú„#A3 ôp¨ÍëP …m:G¬¯7‹qðˆC%ô‡êiWÖûÏ¡VX§38TýD?‡Úú¦³¹óÚËæìFãíŒÀd£°žâ‹Í–þ:™qÀ¡úÊY/‘¾ÈGŠ~àP›±¾3ÙœlKŠÂ6“ßÚ;Žqð3‡ªÊÅñãyƒõ èЯß}¹¹óÕ:Ë—¿Ö½{„’ såÊÚï&$RãððÒÞA²´£ÄZõëwŸÌ=((ð‘G~¶wïZµÎêÕêÞ=BZ¿R6l3!áQ‡÷¡ê¿èä"ž5pþü‚É[¢ í¹g^â–sræY,·™;TÆ?«W>@?‡ŠC%PXðÿ«üÞè.+ëuíî}è߯‚~¢Ÿ€Cõ.‘² Ô …h‡ê)ýéØ18*ªKié»(~p•@a}˜Ï–çI0­ëP ýðy‡êÁËa¤Eaáè‚õ•ãf1¾ëPÑ1ýð‡ê’X7wÞ<åþÀÀ›¢£»-\8¹‰Ë¥‡ ÛtN®.(™Ê8x•CEÇô‡º·êWVfÙ¼yÊíÆ‘rÖ³|¹µb[ì8Æ‡ŠŽ¡Ÿè'àP÷šçêÐ'26ÌË|C—Ùæ¸ÜùÏö4Ô¤q'_ŒªM.¥}ÀÖž;|1êÌ™‚ƒƒBCoݰ!}ÆŒD¥œ“3ï†×ä€Fa½‹ŽÜ=ŒqhºCÕ磗BffŠÅr›òÖw{Û :†Ž¡Ÿ¾áPõ¯86Ïe“Y›’ÊÉÏ&a¯qWÏ46îÖ?®ëRÏQv™KQÑâ[Ô²Ú¾7ä€Fa½Æ«×²;<È84Ý¡êóÑËV™˜8ìÊ•œÙѱ¶¬cè'€o8T}š8ìwöò k_àâdŠg}8l¼)Êî^Ï*»ºk´—,»Õs@£°ÞɆ›ÿ£áÂeÆ¡‰UŸíS¶5ÑóíCÇÐO¯s¨öò;éPíåAÖ꣓)žõá°qW•½¼|ULÌ]Mé¹Ce¿á(Yv«ç€Fa½“󕇯^cšÃ¡ÚTÐoƒè:†~xCµ—ïØÉ«ü†yµÉ—OñløâkóÆ]RöŠŠÕQQ]Ô' Üëy•Ýr@£°àÇUŸÞfk5ÜÑ1t ýð:‡j/ß±>‘±a^fÃ<ÈÚäËΧxÖ‡ÃÆ|Â@¹eó¬{=o¢²{ChüØ¡êóÑÛl­†Û :†Ž¡Ÿ^çPy ‚ß8T‚@?üÙ¡z*?5ÂàP ýðŒCmùOlÈ à[#ÐOs¨ À9TýD?‡Ú¼¹L¹Ï…Ea]ââã»ÆÌdZÅ¡"nú à·ÕÛrX 
7EGw[¸prSogû!Öã\:\•ßm(ãà…q#ÐOª‡ëWVfÙ¼œÅíÆQm¶Yi¸pysç_Þ¸ÞÈPàP7ôýêw¯ô»çž;åˆ|êÔ1ªNÕÔ÷ïÿ@@@‡="ËËWÙ¨˜þS›CpïÎÈž\¶oÿƒððÛóó3õ;ùî@mþÑq'7ì¹ÃwΜ9!88(4ôÖ ÒgÌHTÊ99óL…=¹á¿9]Ã84Å¡ÚKã"AÚ<*’¸ù±¸¡Ÿ­æP Ó¢ þئMs¥PZú®H>¡ˆÉ§ö*8™EÊʬÈȰ¦Ÿ–Ðæ°vظaÏq™KQÑâ[Ô²Ú¾I›(,h)üéÈ ûŽ0ÍáPL’çžD n~,nè'@«9TÃÔÒJ>5‹‰l™j¯‚“9¬åè_”NI cØxSDÜaã†=w(âjŠEmYýºI›(,hÙþ«Ïl)cšÃ¡ªâ#Bd¢HîIâæÇâ†~xC½x±Ôž2šj¯‚“"zëöíËñµ—·Ð%//_s—“öÜ¡ˆ›ïMÚDaAKå¸YÇodšâPUŸd³;éPÝ“Ä@?Zî*ÿèÑqõõ55ÅVë`ýU~ý§"UçÏhRÁÉ a;WVf‰ÈjS]kwIÄ+*VGEuQ&pظaÏ›(â&m¢° ¥áÂe Æ¡)µk×; Ê6ž–ö‚WùÝ“ÄÍÅ ýh5‡züx^tt·€€Z]«­-éu‹Œ ËÎ~ÃF• ?MM}^¹âc¯‚áŒô±rå ‘Úàà ­Ökwòa媓ͳ®7ìyEܤMÀ³5'gžÅr›lã6UdGÄGAû¤”^‘Ü“ÄÍÅ ýh5‡J(,øCåmJú €C%|À¡êx'ô‡j¶Û° ¤ …ð¸CEÜô‡J °(,´u‡Jè'€'ªSÿq[ ‹ÂBë:T@?p¨Í˜ººÅv3 Ûê|™º„qpÛ¡z iL$%<üöüüL}ã.i ’GJLª“öÜá;PgΜz«È쌉J9'gžÉ`è'€çª½œR›6Í•Bié»"Cú<"&ŸÚ«àdÚs‡:{öK» EF†99ßÇ{Pæèä&&»rå#…m-N®.(þGÆÁm‡êµ‚VY™e¨Z®žïÔ¿À^ã†=wÆ¡Ê\ŠŠ‡„Ü¢–ÕöMÚ$ÐOO:TÃŒÒJ5y‰„™j¯‚“©«ÍªÒ Möm÷æko¤r‰Â¶®÷úÛCÏ1n;To´üüL±qò-{7Å¡:lܰçª*³†’kÒ&~´„C½x±ÔžJšj¯‚GªùDççëp¶U¨;v:/2Žqð¸Cm-A ½uûöeг4T-Wjyùª˜˜»œlܰçªaóá"ÐO€–»Ê?zt\}}EMM±Õ:XQLÿ©ÈÖùóšTðÈU~C¹4Ÿ¯z•æÌ N. ¶7®7fwx°ñê5†Â³Wù[KÐ:v ®¬Ì™’b5T-—4°¢buTTõI)‡ö¼‰Õ¤Mýð¤C=~¯\ý±WÁpF®>)¥ŸèÌ|ï¹çN™orò(ùëÌ(l+r¾òÕm‡ê=‚¦ÄÊ•3ÄGi¬¶q'5P¹¤nó ¿ÃÆ {ÞD‡jÒ&~xÒ¡¶XºôÕ>}¢QCüØ¡ú €Cõ™Pòbónp¨~àP  ­8TåHX(~àP p¨~àP …/v¨-–0™‡, ô ©õÓŒtÙ³!ë þäPÝ{#^Ó+cvq¨è'´Q‡JÍ(¬§Ørïo¿®:Ã8àP *€ÿ;Ô†‹u_}¸G‰ÂŒ¥ë§Í]ý‡×WMž)[Ax&–¯Ba=ÂßzîË­ŒƒUyÿq`àMS§ŽQÝ^MMqÿþtоÐÃäÓvì}ÝpF®¾à9-í…Žƒ-–Û6lHoÊ|í-`ffŠ4NnR*€W;T-eee¹¹¹kÖ¬Yàid½’µKÖ±ªª*¶C÷¨°N?¾x#ãà†Cµ—SjÓ¦¹R(-}W<œ>I’ɧö*x$IÞìÙ/56î.,\æä|Õ$yÎ,`bâ°+W>ÂâP|Æ¡îÛ·O6€Í›7‹™X à9d’õJÖ.YǪ««ÙÝã@êÛŸL]Â8¸áPƒ‚ÅóI¡¡¡R›T=7©žPtòS{ gäªCUZhâ|í-‚ÔÁÿáP|ɡʪ¿k×.Ùrss×xY£d½’µKÖ±ÚÚZ¶C÷8±4§|d*ãàA‡zñb©=§hþ©½ q¨æŸ¯ÃE p¨¾áP«««eíß·o_YYY1€ç5JÖ+Y»d»téÛ¡{Ô–îßÚg4ãàÁ«ü£GÇÕ×WÔÔ[­ƒõWùõŸŠç;þC“ ¹Êoh—Íç«^åŸ9s‚“ HàP|Æ¡Šu¨­­QUUuÀsÈ%땬]²ŽÕ×׳ºÇÕ³çrÃ2n8ÔãÇó¢£»tHI±ª­¶¶D<œ˜¿ÈȰìì7l œá§©©Ï+—ÎíU0œ‘«OJé':3ß{î¹S曜øýlåŒéÆ{rŸþæžÞ9èý?‘øPÞþ±ÀY‡eM–õYÖêK—.±u¼‘Æ«×6Üü .3&ÈŽ¼¶¶VöèUUUG[•}kó%>Ý@ý7§ë¯÷¬ÌÙÒÃm–Ï|û(€)²Ëš,볬ÕõõõlÝ€C/¥Â:ýëª3Œƒ/²µÏhåA·ü.CÖµëýåÖ Æ‡ Ðjžó^É#”ò§s³Ä¡òâ0*@«Q_{asç_ª–Tþ‡Úxõ#€ChÄ›™·V;åãÉ‹*€Áãn8TÀ¡àPÚ¯)ÙÃ8¼…Ïß/Þ1pã8Tð.®Ê È8x9uÇN]°žqÀ¡´ò"ãêk/0ÞÌŽ“Ž/ÞÈ8àPÚ 
;Ÿœ\[ºŸqðZjJöþtäë  ­ppÆ2® {-bLÅžVç•28T€6Ä©µEeÃÿÈ8x'rðPòÈN àPÚWÏž;±4‡qðBêk/äX\Øw„¡À¡xç+H}›qÀ¡‡ 8T*àPÀG9<ç½/6È8´:_WùüýbƇ ÿ}(mÅÞ—2‡V§|d*o˜À¡ÀwÔ”ì)ºãкœ¯<”cÐpá2C€C€ïò¿‹7j¼z¡hEþöÐsä÷À¡Àÿ°G_n­`Z‹“« Šî%‡ þÅÔ·Ïyqh®×]É X[º¿¹$»¢ 8TðA¾ØüaÉ#‡V¡¦dÏ®13U7©Å‹t_Ó™ƒ†……ÍŸ?Ÿßp¨ÐŒ4^½öÍéÆÁ«Œ wvl×®]‹eýúõüX€Ch»õêÕ«ãÇþ)È¿jå ˆ_lß¾½2eÊ”)!!!#FŒ¨««³ióÚµkV«U> 3gŽ:] ï¼óNDDD@@@Ïž=?þøc{Û±cGxxø¶mÛ´ég«µ°gΜ Sû€Cðy‡šœœwé{† "ÿª•ŸyæÕùM›6Mª;wîúõëcÆŒIJJ²i3%%E¾.õ¥øøx­Cs)_üöÛoçÎÛ»woÃŽåå剋µñ¯†3#­ÖILLCÌ 8T_u¨úûP-ËÉ“'•²BCCÕÊb Õ‡þùçJù›o¾éÔ©“C•v¾øâ ¥,­CUÏËŠI 0ìXûöí·lÙb3ÝÞLûôé³nÝ:)œ8qB|íµk¼Ë p¨>ëPNT¤Ítw«^ú×:QÃfíM·™¸fÍšÎ;—••93Ó¿þõ¯QQQR°Z­ ,à—*€·ÓpáòÅÇt¨‹E=OisU[-<<ü«¯¾2iS{UtÕ¡*¾3$$Dþ:œ©Ð³gÏéÓ§GFF~ûí·üâ€C÷¹^w…AhöL˜S9n–“U¹ôÒ¥KuuuqqqÚûPµÕfÍš5hРŃ;vÌjµÚT“vâããë¾'!!Á ‡*”••uîÜyÍš5æ36nÜ(ßZ±b?7àPÀ}N­-*ýu2ãÐÜœ+?cÐpᲓõêÕ«IIIß“˜˜¨ÞÓ©¯,~122²}ûö111êÓôj5igôèÑ!!!sæÌ1¹[À¼c‡ ›7ožÉL…Í›7+úp¨à>õµrÃ2ÍͶØq'–æ´nNœ8!¶²Yg¿qãF~nÀ¡@S‡jx$xŠÏß/Þrïo¯¶Î³íÉÉÉuuuÿüç?µw 4ëÖ­Ó¾p ‡ îS9nÖѤ j.n\oÌ‹Œ«)ÙÓZX´hQhhhPPÕjUß0åq#""vïÞÍ/8TðŸ¿_¬æˆ#Þt[ì8Ƈ .PwìtŽeÀë E3ÁØàPÀev œôuÕÆp¨8TÀ¡àPZ“× ÓG u8”¶¢|d*ã€C𠮞=·¹ó/ëŽf(p¨ÐTŽ/Þȵé¦SaþñäEŒ<@ùÈÔê¼RÆ¡)Ô–îÏ ˆÑÀ¡€g8±4gç““‡¦°µÏèÏ–ç18Tð uÇNoüáäèt›ê¼Ò¿=ô€CO’>ð\ùÆÁmêk/08Tð$O^t(mã8TðÎl)+üéHÆp¨à-ܸÞXS²‡q*Àðä>À»82oíþWæ38T¯àêÙsøðÅÇ *€W°kÌ̽/e08Th ¯^ãKs¾ÜZ‘ßmèõº+ Zñ^$—2Aì{áOG~þ~1C€C€bÏ„9ŸL]Â8Øãøâ[ûŒæ43ZŽ/·VÜ=Œq°GÃ…Ë< €C€–&/2î›Ó5ŒàPÀ[ØûRÆñÅÀ¡€·ðåÖŠí¿z‘q*x 7®7î™0‡q*€÷Zv‡ Т4\¸\S²‡ ìEå¸7Ê~3Å¿—±ábR8T/BvÏëÚõ&ˆ¶_}¸)*€×9ÔOç¥×”¬#ˆ¶Ÿf¤ãP‡ à¥UvÕ7nì%ˆ¶_n[‡C*• ¼Î¡f,-++Û·oßÑ£G«««/]º„2‡J­éP×O››››[\\¼k×.1©µµµ(àPp¨-ß6ì>[”µçùWŠ~ÿ~`_åA™‚îƒvž(ÓåSL[t¨«ÿðúš5k6oÞ,&uß¾}ÕÕÕ(àPp¨-'×¼ÿo »:ø›‰µÓç5f­ýïõëo¬]w>-óð3¿ßzïÐü®K|[[s¨«&Ï\±b…˜ÔÜÜܲ²²ªª*”p¨8ÔfëWv•?ù»‚?~fÊâJíÅ—ÿ5gK·Ae¿ùÔǽµ‡šš.uåÊ•ëׯ/..>zô(Ê8Tj³ÛÓZK~2¬~ÙJ{ªÄõUY;úŽ,yx&‡ €CÀ¡6W”ÇÿNì©XO‡öT‰k׉I-6‡CÀ¡àP›áÞÓ÷þ\>À™³§6gR·tÌ=©8T*ÕóOîçG °¹÷´}þÇ=©ÓÒóï|Âן†âP‡ €Cõ¢8»å½Â.mÎ:éP%¶ÆÄW¬n>¿Øö‡ŠC*Õ»bÏN:ø›‰ö.åÛ3¦j~î»ÆLòi¿ˆCÅ¡‡ê]QtÏÚéóÜv¨çÓ2 z 6ŸÅéÓ…ññt0àç55ÅêG))ÖNnîØ1833E1‹*6öñÊ•ÆO’‚ü«VXºôÕˆ‹4Þ³çÝû÷g›8ÑôôI¡¡·ÞdµÖ¶`ÞO{³((XÝMZ‹Œ [¶l‡ €CõLd·ï£¼–ß=‡zcíwöÅ|÷ÜsçÎ+w74TŠ%w¨LõÕ±bkkKêë+dºùUþääQqq±/–J ò ùW­0bÄiDÚÚ»÷½&U¾¨¶ 
Ÿ£½~Ú›EçÎ?,*Z,ñ²IIÃq¨8TªgBÍä}‡U¡CÕ†˜¼Žƒ•rXتª>pò>T‹å6µ²BCoU+¨gC¥ñ€€&õÔ©-j Ò ÉU~m?íÍ""Â’™™R]]ÄU~*ÕÇΡîܹ¢_¿û‚‚•¦Ú·ÿ‰5´çPm¦«6Ñù'«¶àd?Õ÷ïÏNHx´S§›{ôˆ,(XˆCÀ¡àP}æ>ÔððÛssç74TJYþªÏÕs¨Ú3 Ús¨Î;Tós¨öúépÛ·/S[áàPp¨>ð,HÈ-êýš#G>®:<å>T™¨½588èìÙ­z;(”»H/_þ{\\¬ö>T窴 _—0¼Õ^?íÍÂj¬8lq¨ò]*‡ÚŒïCuÞ¡:ó>Ô‚‚…]»ÞѾý"", NÖ>1šbIÕgù%22^ ¼ÉðYþ¤¤áò‘Dbâ01µn8ÔÙ³_+)-Œ3Dß‚½~Ú›EvöQQ]:DGwã*?‡Ú¼9¥œt¨¾•SŠ÷žâP‡ €Cõ™8ùÞŸ ÂÔ/[é’C½¾*kK÷Á'×¼MvS*‡êù(ÿ]ÉO†‰é4y«”Í#ü;úŽ,6Þ;G¹@ÊDì&p¨8TŸ‰ëWvíxÔ*&µaù*‡öTŒìÿiï}À«¨î„’›ûÞ'nÀÆ<‘_6 Ìó¦ˆP¤5‹QË«ˆ)²¸© ä‰¬,@Ò¸l–hiŒ`5¥MQoİi`!B6¾Ù@#„Dk­µ”nÊRÊfÓ¬ܘÆÔßï»=}ÏÎÎÌ;÷æßMòù<߇g2÷Ì™sçžs¿fîÌ=m¸s¹l…Àa¨*†:€’Úôç}ôÿ¹×ßoRÿøÛÓ¿{þØä…§þâ¯ÑS CÀPå7©¯~¿æOï©´ð§±®ý[ÛÕÃü?ßÿZGQéÏùæ7?P3éÞaôÛSC C!w÷_®«x{冺ÿ¹¸2b޼w‰š‰÷¾•½îÒÑ}ÃåÎ}C C% CÀP CÀPC% C  \ õçÛ‹%UÄh‹KŠ1TÀPÂÔP b4† *@XžCýÎóm ¯Äh‹ŸoC  L •ß¡üC C% CÀP C C% C@C3f NC`¨*ÀЪ­Œ4Tç.¡`¨ƒ}C%0T `ÀÏ¡ž?dúô½Þ¨üüz¥?×4(*Z?¾ªªX­ÑÈŸmmõéé³<žÈiÓ’ššö:ì˶¤¼ZZš'•GDŒE³ 0T€Ñe¨ ÜþÌ3«daóæ÷çPeaëÖ'z{ÏÔÖîLJJ°zmfæüêêm²ÐØø²¨§Z9þm²Ó¾lKÊ«99vuƱ 0T€Qg¨>ŸWDSzzZ‚2Tµ•„>ÍiÜÜëÒ§TuÛ}Ù–”e)ƒ`*`¨j†ê¼R¼³³³Ñ´¹?Cµ–ä'­† *Àè5Ô¯ò[WŠkvt××î³³3º»›ÛÚê³²î7]å/,\m¼Êo-‰¡*`¨£×PÏŸ?’’2Ùã‰ÌËË êN)ëÊ‚‚•ê’½,··7ˆzÊŸII ••[ŒwJɾrs—Ë¿j¥mI •ÀPCu†:´QV¶iΜü‰ÀP0TÀP‡>¼Þ( ュCÀPCýoÏ.`Z† *çP CÀP0T‚ÀP0TÀPƒ%KîVó?é—å® ¡Lø„›Ãò¡ 0TÀP0T§˜6-éÂ…cÆ5ÉÉ/]ªëc%Á– a§ºµͶ}ÉÍ¡ 0TÀPF‹¡ööž‰ˆ;fÌŸÏÛÚúºšç):Ú§^5.Ÿ;÷³gß,ÅŠ‹Ÿ”õ«º¯7ÊVœ+1¶Äß©A~aÊ”Ä3n:{v¿u§--ây11Ñuu/©5òvæÎMõx"·n}B7ÀTLµ­¦¦45uªiïºñ¦]û;DºµÆÓ±U/íØñÔµ×~!11þƒªMoÜú. 
0T€Ñ{µºzÛã?$ 'OîIOŸ¥Vêe±¨ää‰õõß“…uë–Λw«ñÕ¦¦½jm8WâºZ©A¤-?Å•+oîÛ÷파y¦Š&Μ™|ùòÏ.[v¯Ú—¨ž˜¥,äæ.W °“šÅ7mzÌ:áªn¼u×¶‡H·Çz4ô±•—¾XY¹EZ%krr4–·6ÀPC¥†ZP°R´)"b¬škTôHO:*N¦–ÚþÐC÷¨•º€uÁ6ô«¶•8„±~ã†ÙÙ¦Äùô©V¬X¤¤pñâ;Õ«û÷?§JZ‹lFÀ]ëCd{4ŽmyùfSmÖæ*`¨£ÑPKKó-º£½½áñÇ*+Û$kÖ¬ÉT =ö€Z^·né‹/nT+W­Z²kW¾±¤qkèWm+qÙPïH–åÝ»Ÿ6í47wyaájã¶ë×?¼cÇS¦wa-æÜr7»Ö•[[k{lumâ "©Æ•Öæ*`¨£ÑPyä>±® Ž¥¦N­¬Ü¢î+êê:ýÌ3«®»îOjjJeåÆª~ê7”jeFÆ<µ°xñûö}ÛŸèb¶•8„èiGƕƊøšN:¬”}õöž))Ù ÷e-f¬Ù¹ñÆ]Û"ýª^°[yIÔ\ZuòäžiÓ’¤cykó 0T€Ñh¨µµ;=žHñ0q#19‘§÷߯š3'%))¡¢âÙØØk”Eµ¶¾>iÒ ÑѾ¢¢µz¥^8zt‡,ww7ÛÊs%:d怜;ò·Ó+WÞœ?ÿ6ùs̘1jÍ¥Ku¢€^oTaájYßÓÓb[ÌÔ ‡6—m‘uÁzlãâÆ}ç;¹²rÆŒ›Þ{Zkó 0T€Ñh¨ý"Ub„Æ!Z/‘&åç¯xä‘ûÜ>qb÷Ì™ÉȆ €¡ŒC Ÿx÷ÝJ¥È×^û…5k2¯\y3à&õõß{ì±ÚÚê‘9 CJCá f‰¡† À9T‚ÀP0T • 0T 0ÔaK–Ü]]½ÍÍÊÊ*† *À03Ôä䉗.Õ¹ÌýAîcL›–tá±¾”ÌÖ*† ¢¡~ç;¹ññã#"Æz½Q½½gda̘1²¬fŠŽöÕÔ”¦¦Nõù¼úUYnm}ÝTX¢¥¥B1&&º®î%µFŠÍ›êñDnÝú„TÕÙÙ˜˜¯^úä“SR­¬±6IíW-Ÿ;÷³gß,{,.~Rþ5“ø[#5ÈÂáÃ/L™’8cÆMgÏ¶ÅLÇùÃP0T€~6Ô?<˜œ<Ñø|ø¦¦½óæÝª–OžÜ#¶iÓc&¬®Þöøã™ ‹ºÍœ™|ùòÏ.[v¯òEq;ÑDYÈÍ]žž>KÅT»ËÏ_¡'&5…ìW– ¥|}ý÷daݺ¥z_þB·Gj§”]\¹òæ¾}ßÎȘZk­ÅüCÀPúíê=÷|eÉ’»;:Ž«?EŲ²î·.ëÙD¾1V­7¨¬Ü¢“¤fï‘]¼øNõêþýÏ©’òomíÎ ŽÍž}³?ŸÐÕ:´ý¡‡îñׇ eÁ¸¡šm5„ÖZ‹¹i¡`¨}2Ô®®Ó[·>1þmêškÖd–•mR/—%JKó-º£½½áñÇRërs—®6Ö¼~ýÃú,éc= JŠüíÚ•/úØÐPæÏ'tµëÖ-}ñÅjåªUKdCg‘ U½ –wï~:´ÖZ‹™Ž ¡`¨ýo¨jFø™3“›šöÊòâÅwîÛ÷mµ>#c^MM©.öÈ#÷‰ê]¸p,5ujeåSaQIu–ÑxÂ5;;£·÷LIɆk¯ý‚ªJ6ONž¸dÉÝ>¡÷»qã£êô§¸£®Á!D UcËõÊZk-f:&† €¡ô³¡ŠŠ3&>~|qñ“jÍÑ£;bc¯Q?•…®®ÓºpmíN'RM¤M6™3Í?ÿ6ùS*Tk.]ª›6-Éë*,\-ë{zZT=II ηäëý¶¶¾>iÒ ÑѾ¢¢µ¦Æ¨ÆAkÜÐXX/‡ÐZk1k3 Cèÿs¨"vùù+yä>}3Ó3ϬÒ/‰#.nÜЪŒ©µ† €¡ŒC}÷ÝJ%×^û…5k2¯\yS’œ7ïÖ0<iÛZCÀPÛPÇ@ŸAþ0T  ¼Î¡¢h† €¡ ™¡ÚÊhP†zþü‘… Ó|>¯Ç™–vËáÃ/¬$6žÚ¬ª*NNžèõF¥¦N­­Ýi|l–zª«.É Q CçPE‹‹Ÿìînîí=ÓÔ´WÍóÔï'bµ­X±¨­­^vWSS{~€ÿôé7Š.s>Cå›0T€p4T£–O(:¬·Ãóz£òóWubU6ioo°=å©OaÊ¿¥¥yññã#"Æšö^T´6&&Z^ªª*vh‰íÞëê^’’jyöì›çS1T Cö†º`ÁíêqQ›7çeu¢ž±±×,^|çºuKEýµ!'çA}³¿qï[·>ÑÛ{Fä2))A­œ?ÿ6iƒ©%¦&)÷‹÷þûUjÇ)í÷ù¼Ó¦%©I 0T CÞ†*n'¦( ==-ÁZ]GÇñêêm%%RS§®_ÿ°íîôƒýM{W;•P§WýµÄÚ$)#v;wnª6ÔW^yFäß””É*† €¡„—¡jÛÓŠf»²_ UGW×i¯7ÊAˆÝH³KCÕbª¦LI´®ÄP1T  \ uÒ¤ŽÝ!žWT´V+šíʾ_å_´èŽ“'÷Hµ"”%%d/ú÷©ÇC0T}•¿°pµmÉÜÜåŸ|rJC93Y­\¿þáÝ»ŸVçPg̸ CÅP0T€ð2ÔC‡¶ÇÇWßkE³]i¼S*%e²Ç™——ÔRb½ii·DDŒõù¼wÝõå³g÷«õ+ERõRî UÝ)%-ÕgC%ÅAåHåéé³ôÍû"ÙÒ 
ÙV·CÅP0T€p1Ô‘ee›æÌIé£hb¨*† €¡öCx½QÆ»òÕšÐêÁä0T C% C _!0T 0T€p6ÔÊÊ-qqãFöO0ù)¡† 0œ 5!á‹|P‚Éõ‹ö9< µe´ß²‹¡`¨n¨C{çûà†J`¨€¡ CcÀôðÑÒÒ¼øøñjr©ªªâiÓ’¼Þ¨¤¤õ¬{Ó†þdN6Ÿ0áúššRµæÂ…csç¦ÊJ½;Û½K±Ù³oöx"å_YÖµ­‰‰–VI{¤zTª´9?EP'VÁ´GSƒÛÚêÓÓgI;O°Ý¯mIÓA&0T 0T§ë첓ó`W×iõgt´ïÝw+•i­Y“ÔÙÄ–– ñZµ¼pašx›š§Ôaï÷Üó5g•ü;þmúÕ­[Ÿmkkwê ü2䉯ŒÁºGcU™™ó««·ÉBcãË¢ž¦i®Œûµ-i:Ȇ €¡†ÀP{zZôú¼¼¬Ù³o~ì±Ê\^ﮩ)S§Kõ9B¯7ʨ§þö®‹Iô3JåU½­ÃIG]‰ÏçÕ•„l¨Ö=«Ró`)tÛýÚ–4dCÀPC `¨¦b­­¯ïÚ•Ÿž>+3s¾C‹wâÄnY]³ªgh†êFŽû×PWJÛ:;M›û3TkI~ÒŠ¡`¨€¡†n¨ëÖ-½|ù Yxï½11ÑÚº::Žû)ÖÒR!®–——e¼ònºÊo¬$àUþ  µ_®òÛÚ³n°ÈzvvFwws[[}VÖý¦«ü……«Wù­%1T C 5tCݵ+?11Þã‰LNž¨~O)QP°R]¼¶‚òòÍ"©ÑÑ>£ ^¼X;gNоSÊT‰ñN))¦î”jm}=4C=þHJÊd©Ä¨ÈÁÞ)e]ilp{{ƒ¨§º¬²r‹ñN)ÙonîrùW­´-‰¡b¨*`¨Ì)5¨QV¶I<›ã€¡`¨êЇ×%a|°¡`¨ƒj¨JÈŒ1˜"8øû%0T €s¨¡† €¡3íjSªrC† *†êÊŠúE›ÎŸ?²pašÏçõx"ÓÒn9|ø÷·êe‡z«êêm))“½Þ¨¹sS[Z*ÔÊŽŽãjJRù·½½Áº†Š¡`¨£ÅP““'?ÙÝÝÜÛ{¦©ioFƼp2cmË–Ýûá‡Õó³¦LIT+ü¡uë–ÊÂúõ«…~o†Š¡`¨AjUUñ´iIêQ»w?­OO"ª¹:'L¸¾¦¦ÔZ ­­^ƒôw÷º?Ç’ês–¦Sžºrù·´4/>~¼š,ÔØ¤¢¢µ11Ñò’¼ã³H¥ÚüüÎR(N¬ï¬’7~áÂ1õ¸VYJÓ­mp~¾¬éUãQUk6n|T&ïBÞ‹i“€Ç™ÀPC!†í{÷ÝJ%@kÖd:ÈYKK…8cÌÌùê1þ/‹<¹—QÏØØk/¾sݺ¥uu/ù›D 'çÁ®®ÓVÕÛºõ ÍÚÚºUz>'ã4¶ïEÌrÕª%jY?Wß´ìÆP­mpo¨¶GUσµ`Áí¦MB>Ά *À03Ô¼¼¬Ù³o~ì±Êl嬦¦T|HM¥NdZ§§×g=u—ÑÑq\¬«¤dCjêÔõë¶•¼žž[çÓ³§êú|^µR6qŠŠgg̸éÊ•7ûn¨Ö6¸4TGU·_Þ‹uÂÕ3¡† 0œ U¢µõõ]»òÓÓgefηjV\ܸ'v«+ã¶Ú'æÔÙÙØGèê:­/»;Lpê<%©C}ñÅÉÉÛÚêõš¾\å·.kw´=\zÁßQu6Ô¾g C `êºuK/_~CÞ{ï@LL´–¡ŽŽãjYV¶´Tˆ9'¸7¯ÍÎÎèîníËʺ߽4,ZtÇÉ“{¤f²’’ “&Ý`­<(CÕWù WÛ–,.~Ò¤§ê„v§”mä]=ºCÞTQÑZCµ=ª¯ò‡vœ 0T€af¨»vå'&Æ{<‘¢nêgŽ+Õ5eY./ß,:í3þ¸ÓX ½½AäIÝkUY¹ÅýRbrii·DDŒõù¼wÝõå³g÷[+ÊPÕRò^rs—ëëõ¦’ÖûÀlŸ6åæ|Û6:´=>~¼.gCµ=ª² Â*-II™l½S*àq&0TÀPFˆ¡ŽÈ(+Û4gNJŸÖd¨¶.•NŸ°pašÏçõx"ÓÒn9|ø]æ•Wž™2%QMFZ^¾Ù¸í’%wÛz˜õyCh¨ª^oTJÊä;žrSUÿ6CÀPC kCuxVÿÐjròÄââ'»»›{{Ï45íÍȘ§ÖWU‹ž¾÷Þõ@ÙI“n5z[ùSÍ(ëÞÆÄPõü«"©/¾¸qÏ¿b¨*`¨fCUÏõz£òóWX]áÂ…c³gßìñDÊ¿jâ%õêÆÊ&²¡~l§Z1v„ëkjJ D[[½z ©¾ï~ŒLí™;7UMê°_c1©\ÄKÅLLŒom}Ýè8e|TªÙK}ý÷ôŸ²,kô¶åå›çͻՇÉÊ¢¢µ11ÑññãµãZ߬íÇaû ZoÀÏZÂx¬üs—'€mß‘Ë)aýõ%k¯s~¿*`¨ÃÕPõ{v¿.³oß·§LI”ä_Y¶­sÉ’»þÕÍ›µý8ä#BV}ÎöðÚ–ÔWÆM7ò;4£ `¥ºªîæ&3ãò¡CÛããǡål¨þú’µ×9¿_ 0T€áj¨:ÊÊ6‰ˆŒÔGÙ÷öžÉʺßý/P%**ž5>€ð÷k æ”ÀPúßPE2$ü=¦gdX”ÄþýϹß$&&:9ybcãË8(³ž`¨Cvu 
Ñ¡`¨Ci¨a>/<¡`¨#ÖPƒ’ξLˆê²¼ító¡Íòê<Ó¡`¨jåÝL7ï²r”CÀPÃP­“¿ËBii^|üxõ|u3ž›fN7_t?/¼?t¨Ü5ú›n>´–ÛîËxÄ WGGûââÆUUoÞœ£–ÚîrÖxCÀPCý¯°u='çÁ®®Ónf<7Íœn| ¿ËyáÂ_åÁžï4NæZËÝªì¥®î¥ØØkô²®?à¬ñ† €¡†j3‘¦Q¼zzZŒ¬3žû›9Ýy+ÛY7­°ò¾jh-h¨úÚÎGpÖxCÀPC `¨¦ÖÏý͜KC Xy°†jœn>´–4Tç¹”ÎO`¨*`¨ÿÖY×Mf;ã¹íÌéÆéÚÝÏ oû´|çʃ2TÓtó¡µ¼†pÖxCÀPCý¯°Îºnò0ÛÏmgN7N×î~^xk¬ÜåR¶Ó͇Öò>jÀYã C • 0T 0T • 0T  ü Õë2.E`¨*`¨¡`¨ÃßPûqþO¦%0T 0Ô×Ê ¤³/:uY^ðz£RR&ïØñTß—$CÀP0ÔË·´T˜ž<rå()† €¡  ¡Ú>¿S6n|Ôëš>ýÆóç觙ʟ²2?…i«ˆˆ±&\_SSj:Ñ(¶µÕ§§Ïòx"§MKjjÚë\•­ :TîòÁ¨ÆÉ¥DRûÒò€F-,\í‹‹WUU¼ysŽZ>th»*àP'¡`¨€¡0T=ùÓ‚·»™ª¥¥"))ÁZgfæüêêm²ÐØø²hYP“K¬<Øó½½g¬ ªån UöRW÷Rlì5zY×ïP'¡`¨€¡0T5jOO‹ÏçU+eA¯Ô%kjJÅ´Ô”Tò¯í4÷úĤ.`[•5VÞC ­å UÏk\Ö›;ÔI`¨*`¨gMò$:‚¡ÆÅ;qb·us£çuv6švêÒPV¬¡65íMMÚ—–÷qúS‡: C õ1iÒ GîK+*ZÂUþ˜˜è–– ÙÆ$CÀP0ÔË·´T˜2rå()† €¡ ’¡ž?dúô½Þ¨üüZÂÚÚêÓÓgy<‘Ó¦%55í5)šõUÓùEÛÍmwäÏ#"ÆN˜p}MM©µr—Ï@5Î#%’ê²rÛ–|jaáêèh_\ܸªªâÍ›sÔò¡CÛ&¡`¨~ Õvz§ÌÌùÕÕÛd¡±ñeñ*ëÄH¯ú+àr)--II }?çÚÛ{Æúd•Û¶Ü¡Ê^êê^нF/ëúê$0T 0TCõù¼bT²ÐÓÓbœüSŸYŒˆk\ÞáUlwdššRÑ8ÙÊ_å}1Ô€•Û¶< ¡ª÷eZÖ›;ÔI`¨*`¨Ajgg£?ís~Õ_—†7îĉÝÊ,mg ÖP›šö¦¦NuY¹mËû8Ó©C† €¡†ÄUþììŒîîæ¶¶ú¬¬û­Wù­¯Š‡utw(àò*LLtKK…d^^–QòtåAjsó¾ää‰úN©€•Û¶¼†êP'¡`¨€¡Úß)•’2Ùã‰4J[{{ƒx•¨[RRBeå“rÙ¾ZP°R]ÎöWÀvGÖ(/ß,í3Ь±r—wJ©Kê¦ùVnÛò>ªC† €¡†ÊœR†Š¡† €¡† €¡ Cõz£LH*† *A`¨*@j?ÎkÊ ¢† €¡ ±¡%ýXÙ%0TÀP0T •ÀP0T€4ÔóçLŸ~£×•Ÿ¿BÛ^[[}zú,'rÚ´¤¦¦½&´¾:Æ€¿Ímwäòá¦z¡¨hmLLt|üøªªâ¾ì×ß,-͓ʙ›CÀP†ÒPýÍ)U]½M_‡³N’äðª¿.ç”r6Ô­[Ÿèí=S[»3))Áå~çÏ¿Möèò æä<ØÕuŸÃP0T€¡4TŸÏ+Î' ==-Ɖ@õ¹I}BÑå«þ Øî(XCU5Hôq¿þÞ‚”Aæ0T  L µ³³ÑŸ):¿ê¯@¿ªóJ÷û ø C2Cõw•?;;£»»¹­­>+ë~ëU~ë«â|Ç ôËU~[]vÞ¯¾Ê_X¸Úå$0T `( õüù#))“=žÈ¼¼,­híí âp"II ••[LgûjAÁJuéÜ_Û{§”u¥›ýNŸ~£ì77w¹üëæ *† 0”†:z¢¬lÓœ9)¸†Š¡† €¡}x½QÆK*† *†J*† €¡† *†J*† œ¡~XR,©z”ĹÛGÏ› *~S[ñÏ{^UoYz>† *@˜*AŒæÀPC/z:¯þÛñ·UÔ–”xzÛ¾¿yvïS…’³G^¼v÷_¾æýŠ2Âþýòks*güEåßnEoü•½*`¨aÊ©S§>üꫯîyì~ùÿYÖk¾ÛÕ9³=à‡ý7ÌÿÏCô?¾òê„ÿU¾å…QõÞ¥çKÿ—QÐÚÚÊ·`¨áÂ;ï¼S__ðàAIÕå#‰ïý`òûÿÇWôUÝrðëÿï/=_ú¿Œ‚ööv¾ C.]º$éùwÞ9uêTýˆàXáÎ?šVôíÿìÑÊ?¹CýY~ø‡¿Øð‡¨s?ôͪ‰÷k³¯}±|d¿qéóÒó¥ÿË(øøãù6  \ÄÜÞÞ.ºµµõÜð§å¯ìêí—«ÎýìÃ7ØPsëÕcçÈšsà‡¦¢29>mº.ùóggÞiü›Ôalù^å~ãÒç¥çKÿ—QÐÝÝÍ·`¨ÐÿüúàñŸ|¿»ý#õç'­¿9xÝ×®ž¿Xy›ÈÇÇêéc§—ý®Öª×÷|tåŸ_9"G•C€¡@ˆ|þY¯ñÏ–¿zîÝ /ȱÿù†ê€X©Ÿ_í;zî»~tß“ 
„Þùù¡ø{ÔùÔŸmÙ‹¡:ÐóÑ9>-ˆâ×L~@8&*ô?'ÿü©Ÿ=ÿ¿Æ1q@ŽÏgW»dáÂþº7æd›ÎF† }E«iYR.Eã×s9,üÓ¼¿ÒËÍYßúôòo9&* ,¿Úw”ƒàÀO7ïæ `¨0¨ô|t…ƒàÀo›~ÂAÀPÂ~x €¡`¨€¡À cœ úNï§¿ã÷©*ôI§O¸ï£w~Ρè/®þââÁë¾Æ“§0TfìÞËßõöêç9*ÍgW»O¸é:èÀrfC€ ywà MË 8'§0TšO/ÿöàu_û‹mŠ@ýÀ÷_ßhæP`¨à–?µ“ßJ(¿>x¼á®Õ ÜÒÝþ÷›4L €¡`¨€¡`¨€¡Œ,>ÿ¬—ƒ€¡€™¶†·[þê9ŽÃàó˲CgŸ(á8`¨`æ9Ùÿò÷õ‡Á§»ý£Ãî»ú‹‹ þ‹KGÿÏWÿ’ã0Tü|ûþæ¬oq0Tø/ên]þëƒÇ9CÅgW»^÷5N£`¨ðG.i¬ýÒ²½Yg̾ð“‚ïŸÊüÛÁþvæs ÂÓ£7=(’j´#aôUbhÌOúÓ„„„^xaÄ|Ÿ]í:’”ÑÖð¶ÃÛçsÀPF=]ùIÁ÷ý Gx}•ü߆½õÖ[ñññaŸÅo›~òéåߺA> `”X;úôÓOW­ZýdAþÔ…¿ûÝDDD¨57nŒõz½=ôÐÕ«WMuþîw¿ËÊÊ’Wãââžþy½^~ðƒ$&&z<ž3füøÇ?ö×°ýèG&Lø§ú'ãKÖΛ7ϨJ¿ùÍot{ø\ø\0T€aoB¹¹¹ÿE‹ÉŸºð#<¢ ãé§Ÿ–b¿ýío?ûì³+V¬Y³ÆTg^^žl.奞ŋMH$F6üýï¿mÛ¶Ù³gÛ6ìÈ‘#bK&O²Ý©SJJŠ.“““#âÅçÂ瀡 W²þÞ1>>þW¿ú•Z–…¸¸8]XDo;a„ù—QËÿñÿqíµ×šLHêùõ¯­–eÁhBúüŸÈÇã±mXDDıcÇLëýítΜ9¯½öš,üò—¿úÝï~ÇçÂ瀡 W ¸R›Ši½É¢ô%f£ñØVëo½i嫯¾zÝu×:uÊÍNÿñÿ199Y²²²¾ûÝïò¹ð¹`¨#Ê„âããõù0Ó¹:c± &üÛ¿ý›CÆsuRa°&¤ü&66Vþ ¸SaÆŒßúÖ·’’’~ÿûßó¹ð¹`¨aÍ…ýuÝí¹7!õ;Å?þøêÕ«Æß;‹=÷Üs .T®ó‹_ü"++ËTLêY¼xñÕ?°dÉ’LH8uêÔu×]÷ꫯ:ïTøá([íÙ³gØ}@?~jç\lãsÀPF ¢>¯ûšI€œ-äÓO?]³f÷äääèßZ ‹—$%%EDD¤¦¦ê»¶u1©';;ÛãñÄÆÆ>ÿüóW¥ö³Ÿý,!!aûöí;<¨.(;ÞÝðBÓ²> `´Ðœõ­÷òw y3~ùË_о è./^üÃþp8~FŸ]í:<ᾎ–ø\0T€‘ÏÕ_\º2T ÈÍͽzõê¿ÿû¿¯J¯½öšñÁFÃŽsß=ð£ûžäsÀPF>'ÿü)QŸ!lÀÎ;ãââ|>_VV–~’Q¿ãõzÏœ93|?©ÞO7˜§Qù\C€¡á£w~^3ùÏ?ëåP ~Yvè9Ù|^*ÀHæ½ü]?ß¾Ÿã0ŒhÎúÖ§—ËqÀPF2½Ÿ2…† *† *† 0ìøü³^W42>Ƕ†·9*ÀHàÜw4g}‹ã0ÜéùèÊ¡ø{:ržC€¡ oÔ¼D¿mú ‡bð˲Cƒ9*† 8 àóÏzÝüKG9*Àp峫]G’2>zç犃èií—–ñÃb `¸ò“‚�úyŽÃ£îÖå¿,;ÄqÀP†=]9xÝ×>iý ‡b„ÑÑòÁs²9*ÀðãóÏzÿõfŽÃHýp9*`¨*`¨*À‚»¦0T€pç'ßïùè Ça”ð˲CgŸ(á8`¨áËoŽ:zÓƒœT=t·txÂ}Wq‘C€¡„)u·.稣sß=pòÏŸâ8`¨áÈ¥#b¨‡ÑÆçŸõÖL~ ­ám† vˆžþËß×sF!ÿüÊ‘ÿóÕ¿ä×*@xqaÝs²q”Q‹|úâ©  ŒøñS;;Z>à8ŒZ:rþ“Ößp0TÀP0TÀP0Ô0£ç£+m olôt^extcÀPù†zmÌl‚ ‚;þ6Ó †:€)ðÃíÅm ¯á&>,)LCexú†z ”o«Ï??K„›ø×zm0 •áI ën *) /µ×–”:uêwÞ9wîÜ¥K—>þøc†'A7 •HC™Ú<½íðáÃõõõo½õ–d÷ööv†'A7 •8Äñûž3—ë*ÞþëoÖ}iñß{çªÛŽÞxß[ËŸõò*‡hd§ö}ó쫯¾zðàAÉîï¼óÎ¥K—†ïð¤3Ó®†Š¡^üêÕï×Lº·vjÆO¿ñdû·¶÷Vìÿÿø|ÿkE¥?{ø›oÜôõš?½GÊp FpjßûTáž={$»>|øÔ©S­­­ÃtxÒ™éÆÚCÅP#>ëz«i隣I ~ó·[$‘û‹ý»ç%-8õõ•Ržƒ6"S{eA±¤öòòòÔ×ן;wnØ O:3Ýxº1`¨ê`èé<Úpkf÷îr‡Œ®â³½?ºõ ·/#¯“ÚÃpxÒ™éÆ*†Š¡ŽhZºF2ºdë€]Åçû_ûÑÌoœÊXÉ¡#µ‡Ûð¤3Ó1T 
C!¿==:é>7'œL'ŸŽýé‚_íûÔ>ÓÎL`¨*†:Bîܯ¹qÁo6m5åì1þ1þŒ¯&ñnˆ&µ‡ÉðªÎ\Y¹%.nœÔfZóÄP0T u”ÆåºŠÚ›¾n=«ä&©K¼1õë—Ž”\Ö©ICˆá9T9!á‹|Pm]äÞìîtùÐÚ¶cCÀP1Ô‘o¯þæO³r.€Úær?{ø›o}c çu0ÔpžCÕ™vuà>6C CÀ¨›¹¤}óö“zGQéѤλ¸x±vñâ;}>¯ÇyÏ=_ik«×/ååe]{íbb¢KKóTÎÓ˜²`W×éU«–DGû$dAþÔÊÊ6%&ÆKå3fÜôjýÑ£;RR&{½QII »w?Í= u:óÆÆÆ^#ýꡇî¹råMS§5u`£Ã™ººûq!{¹îº?éìlT/õöž‘?Ûۜωڊîîæ¬¬û¥ñqqã¶n}ÂöªËvZ‡ªÃ•ªâãÇGDŒœ‰¡`¨êHˆÊÈ9êIæ¡%õÏ÷ÿg2pÞÅôé7ž<¹G2kOO‹ä?É‘jý¦MI“\+‰SÖ;_åÏÍ]ž‘1Oò´Ä¢EwÈŸº€¸‚T"õ?9{öÍj½dñºº—dAêš5™|УÁPº3¬”N(MzòŠ‹t¿òwU/Ûvu÷ãbýú‡‹ŠÖªWëë¿·paZÀ«ö¶ƒBj“±£‘ÕPƒj§i¨:ŒÐG¹OÙüà L CÅPGBÈQr¾ÍÙ9©K4TcHž‹‰‰Ö¿Økm}ÝåïPããÇë²7NÐgk¤r'R-'&Æ—–æ]ºTÇGwîl‘éð;TSWj\H¤¥Ý2þmÒŸC6T5ˆd©Aäïw¨.Ûiª.Gè L CÅPGòóP]&u7ëzëG •¼ÞóÊÞ€]rÿnýFÃíËd+©=܆'™nŒ¡`¨ꈒԦ¥kŽ&-øÍßnqÈèÿúwÏKZpêë+Éè¤ö°žtfº1† €¡b¨#í7©5“î­šñÓo<Ùþ­íêùçŸï­£¨ôgó›¾^ó§÷H©=ü‡'™nŒ¡`¨ꈺ»ÿr]ÅÛýͺ/-®Œ˜#‡QBrù[ËŸ¸ttwî“Ú‡Ñð¤3Ó1T C%R;ÓÀP0T †J*†Š¡©áIÐC =þ|{±|[á&>,)LCexú†z $"Ø4C%ˆaÝCíÃIšï<ßÖðAnâçÛŸÔs¨ Ob8wcÀPCOüÐ ø*A7ÆP0T • Hí OCÀPH*A`¨*†J¤v†'¡`¨0€)p̘1|­Dx*ÓÀP0Ô‘o¨¶Ù.` t.@%Hí OCÀPa°OÒ R;Ó †:à'iΟ?2}ú^oT~þ ½Ò_23(*Z?¾ªªX­ÑÈŸmmõéé³<žÈiÓ’ššö:ì˶¤¼ZZš'•GDŒ%Ó£ö*ÓÀP0ÔQj¨ ÜþÌ3«daóæ÷'idaëÖ'z{ÏÔÖîLJJ°&ÎÌÌùÕÕÛd¡±ñeÉmjåüù·É^Lû²-)¯æä<ØÕuš4CŒfCex*†:J ÕçóJ&“…žž– R ÚJBŸG1nîõFés6º€í¾lKʲ”!Ç£ÜPž† €¡b¨Á¥@ç•’Ø:;M›ûKÖ’üfŽÀPž† €¡r•?èˈ֕’Ì::Žë‹ƒÙÙÝÝÍmmõYY÷›.#®6^F´–$*ÓÀP0ÔÑk¨çÏII™ìñDæåeu+†ueAÁJuMP–ÛÛ$·ÉŸII ••[Œ·bȾrs—Ë¿j¥mIR ¡2< Cu†:´QV¶iΜR¡2< CÅP‡>¼Þ( ã“kCex*†Š¡þ·‡# H6Äè1T†'¡`¨œC%‚s¨Ý0T • Hí OCÀP1Ô~IK–Ü­f—é÷è{Ͷ5¸©vàÞAjgx2<1T $&'O¼t©n ¿I§MKºpáØTë\fpÞ>¡2<ž*†Š¡þgôöž‰ˆ;fÌŸÏÛÚúºZùá‡ÓÒnñx"½Þ(]@–?ÿÃ43ÑÑ>Uìý÷«æÌI‘bòo[[½zUê9|ø…)Sg̸éìÙý¶ß˜ª’;žºöÚ/$&ÆðAµj‰lkÛ¤ ªUËçÎýÃìÙ7ËVÅÅOªjoÙßëŽLo_¢¥¥BòeLLt]ÝKj4rîÜT9[·>¡`*¦ÚVSSšš:Õ´w‚ÔÎðdxb¨*†jÿ-V]½íñÇR_Ö))“UZRÑÔ´wÞ¼[ÕòÉ“{ÒÓg©b“&ÝÐÐP&I"+ëþ‚‚•êUùÒÏÏ_qåÊ›ûö};#cží¾¤XBÂ++·È¶²ÓœœM{15É}µªmRmròÄúúïɺuKMÕZCïÚvGƆɻž93ùòå7**ž]¶ì^µ/Iu’5e!7w¹>8¦bR³d¾M›³NIÚž O CÅPÍß_’½$!EDŒU“JfRßÚ:ä{\Ïs¨—¥ØCÝ£V¾òÊ3+V,R¯ê•²œaûi|©¼|³Z6îÅÔ$÷Õªò‡m7–×Õú ]ÆvGÆä]ëg먷,yzñâ;Õ«û÷?§Ž©˜›f*Óቡ`¨êFiiÞ¢Ew´·7<þøCee›dMnîòï|'×XfÍšLõ’qYŠ•”lP+W­Z"™L½ºkW¾.¹{÷ӶߘƗ$C˜¶µ6É}µªüºuK_|q£n›ÞÖ_èúmwd|ûò® 
W·]¿þá;žRË=ö€>8¦bÆJR;Óቡ`¨à”yä>ù¢¿páXjêT5ݶäI½½gt™Å‹ïÜ·ïÛj9#c^MM©,Hš\°àv)vòäžI“n¸råMã«’Æô²)䥅 ÓººN˶Ӧ%É‚±¼µI.«ÕÅ6n|T_Q¿¥óWÞØUÆvGÆ·/™Ut1žâ’}Éq!Ðû²3ÖLÚž O C§X[»Ó㉔/hùÊ–opù*ÿä“Sóçߦî?èîn–2GF-Ë‚ÊXóæÝ*ÛΜ™üª6ýªiÙò’$ ÙvÆŒ›Þ{¼µIî«U/µ¶¾.Y9:ÚWT´ÖZ^êTïź¡íŽŒo_2½ùSœK—ê$‹{½Q……«e}OO‹m1‡f¤v†'ÃCÀP1ÔÁûf”o5Á·Ž¸¸q²RòA¿W;´9@š”Ÿ¿â‘GîÃêHí O†'† €¡BX§@Û8ztÇ’%wŒ¯þwß­T9øÚk¿°fM¦º–JÚž O C…ÐSà"?R;Óቡ`¨0P'i-CextcÀP‡,Úf» RàùóG.Lóù¼OdZÚ-‡¿°’R¬ñ,HUUqròD¯7*5ujmíNãC|ÔuINmÃÚPž† €¡r5Ä%Ù¨¸øÉîîæÞÞ3MM{õÜ3ý›{Œµ­X±¨­­^vWSSªïÿ¨¨xvúô%s‰à*Ón j8ªñ{ßxÆÂa¥$I!^oT~þŠ ÎÜÈ&íí ¶çTô9ù·´4/>~|DÄXÓÞ‹ŠÖÆÄDËKUUÅ-±Ý{]ÝKRR-Ïž}³ñ„ )[Cex2<1T Cu›,¸ý™gVÉÂæÍ9A¥ Ém±±×,^|çºuK%'ùkCN΃úلƽoÝúDoïÉ^II jåüù·IL-15I%׸¸qï¿_¥Öx<‘Ò~ŸÏ;mZRSÓ^R 1’ •áI`¨ê(5TIjJ›žž–`ÓFGÇñêêm%%RS§®_ÿ°íîÔSµ­{×óè¨ó7þZbm’”‘ô9wnªN¯¼òÌ瘯<%e2)I†Êð$0T u$ªN'’'t°]Ù/)PGW×i¯7Ê!ãºÉÊ.S Î|jaÊ”DëJR ††Êðdxb¨ê(5ÔI“n8zt‡$’¢¢µ:Ø®ìûeÄE‹î8yrT+«¤dƒìEÿ®£ãx)P_F,,\m[27wù'ŸœR'ifÎLV+ׯx÷î§ÕIš3n"ak¨ O†'† €¡ŽRC=th{|üx5G¶Î¶+·b¤¤Löx"óò²‚ºCÒjZÚ-c}>ï]w}ùìÙýj}AÁJÉ‚úV ÷)PÝŠ!-‘T§O·KJ’“7"•§§ÏÒwwv6J—fȶº ¤@" •áÉðÄP0ÔQj¨##ÊÊ6Í™“ÒÇLF $FØÓ¦ž† €¡b¨CjÞmãm¿jMhõÌ •áI7 C%R;ÓÀP0T •¯l‚ÀP º1`¨*AÚž† €¡B?§@‡g"†ÿ}çÏY¸0Íçóz<‘ii·>üBÀ6„Ð<=3¤Duõ¶””É^oÔܹ©--ÆÆbÎ+ɬj8ÄàŸÏÿ0ÁUBÂ#"Æê•UUÅÉÉeL¥¦NÕÓ¢2R0T Cíç40˜yE[qñ“ÝÝͽ½gššöfd̈6k[¶ìÞ?<( »våë'õèò.†&1øÃ§¢âÙéÓoÔÏŸR±bÅ¢¶¶ziCMMilì5Œ Cù†*_ñc'L¸^¾úÕš ŽÍ›ªO`˜Îóé” ÅfϾÙ㉔eY×VT´6&&:>~|UU±Ãù…qbCI?‰‰ñ­­¯Ëúõz£Œ‰JòSzú,ÙiÂJêioophƒú³´4OÚ¬&ì1>èÑúvÔƒ¥ÚüüΓåÈ;²›C%µ»žþF“uÌÊšÂÂÕÑѾ¸¸qRróæµ|èÐöa7|äûDŸ%µF]ÝK²9#CÀPGË9Ô––Ф¤µ¼paš$=Ѷ¿«ü÷Üó5{ü;þmúÕ­[Ÿm%Çè Ò¡dMºvï~:;;C½ªçÅY°àvõjfæüêêm²ÐØø²dY÷_Ð’;cc¯Y¼øÎuë–Jzó÷¦rrìê:m}¹õíèÉrŒÓöØfJIÏ«V-Ñ$Uû|^9¼/Ö:¬$ïb¨nF“qÌ*C•’ÒÃ¥·ëe]` Ñhþ2(L2­„X´ûý÷«)*†: µ¦¦TÒ€:]ªçûöz£ŒzêÏPu±žžã™B½­®ÐÁPOžÜ£O£&'Oüàƒjc%R³$*½;}ÚÆ¡fÛèè8.鹤dCjêÔõë¶}S²/ÛÉr¬oÇå„ãÏΘqÓ•+o§|)úÞIDATWJûŸ;7Õy%yCõ×ýlǬ±¤íVÃhøˆ¡¾òÊ3ŸÿaÒ)ã5uQB”—‘‚¡`¨#ßPãâÆ8±[}õ[Õ34Cu“<Œ/¥§Ï’üW[»33s¾)±™ µ³³±_Öâ‚nšê<£C}ñÅ"Ümmõ¶ÍГ@ú[IÞÅPýu?Û1ë¯3Gëp>ú§ÛŒ C½†ÝÒR! 
Ã8‹÷‚·›®òKZêè8îò*¿›äa¬ðäÉ=©©SÓÒnÑïü]åÏÎÎèîníËʺßýô¢EwÈ.äíHF,)Ù0iÒ oÊMŠÕ—) WÛ–,.~ÒŸžJ3ä­Íœ™ì¼’¼‹¡úë~¶c6 ¡£á³~ýûw?­Î¡Î˜q“Z™›»ü“ON©s¨Œ Cù†Z^¾Y^t´Ïø›°‹kçÌI1>ꥠ`¥ºJhºSJŠ©;¥Z[_ÊPJˆž.\˜fÜP²¯Ôœ’2Yß)ÕÞÞ YV¶JJJ¨¬ÜâþV£GwHýòv|>ï]w}ùìÙýoÊMŠU·zHó$kêÓ9þž!e¬J=¦§ÏÒoÊv%yCuè~¶c6 ¡£áÓÙÙ(ÿ/•=J1½;±ÕøøñŒ C-†ÑÛ{&+ë~õ Ôá•uÊÊ6‰£ôÃwž65"£_†#CÀP1Ô ¯7JbÿþçL+ÃüK_5Ûx¯±ZÓ•“Y1Ô‘ý5|)*†Š¡ö5ƒœAÚ9‡J`¨* C%èÆ*†Š¡Ã$Üÿ’/|~óç“RZ˜Ÿ¿B—´=,¶%í‘’zjJãNÍpyØ_'tÈè¦W­]Å¡۾߰2T÷“mwµvS'7®7(ÓGæÜl?87ƒÅvŒXwí¯ÍÖì®Æ‡ÜÙÎÕwCµÝ0Ô2Ô­[ŸoíÚÚ’äævrþªõ÷•m­GGKK…q99vuv.#†*M­«{)6ö½¬ dfί®Þ& /Ë·¶µ…þ w-Ö«ÎeJX³&3„ó"’V¥y‹ß¹nÝRiž1+çë’e=³«ÚV„O>…€Wùm?²… ÓLóÙNý%Ÿ‚þDŒÓY‹mIãÁ´ÍôÖf¸9å㯺Lí¶]Å¡۾߰2T÷“mwµýŒÜx]Žt3ÉY?87áoŒ˜ví¯ÍÖê «üuW"‘«V-ÑD(eÊ»x±Öa¥KCÝ0Ô2TÉŒç/õ Ú&‹ ÖP­õÔÔ”Ê÷©:ÁcÜ£”Ñú+£›jÛl5Õ¸•Ñm w——5{öÍ=ö@CCYÈ—;:ŽKò()Ùš:uýú‡]ª¤juV2à9TÛ÷n¬Ü¸FÞVIÛ¹ÑmKÀYÔm ÕÚ —†jÛ Ý¤vç®bÛmßoXªûɶ»Ú~ ÆNnþGºmw²ýà\†í1íÚ¹ÍÆì®***ž1ã¦+WÞ4® ¡×4ü­tc¨£³†:P†ê2oé/Ay©/†7îĉÝÎõ,ã/‰vv6:ˆcÀ*Z[_ßµ+?=}Vfæü>þzOòœNç_ýê—ŒWùE)ŒWùÕ‚ì´¼|s@CuˆÐ ÕzX¦ü¦¦½âýe¨Öeçþ¦üuçÔn}¿ÃÂPm›mí®?…~4Ô¾ÿLÓ8F*tþpc¨/¾¸19yb[[½m3ôD¬þVº1ÔÑÙCTCµ^]š4醣GwÈ7fQÑZã·dGÇñ ®òÇÄD·´TH=yyYþrIÀ2¶Ë’¡³³3º»›% eeÝom¡mÓ®×­[zùòêf&iF†ºhÑ'Oî‘ÆK^))Ù ÇMÿd0))A]“•eY]¤3n«Ö‡`¨rxûr•ßzXœ¯ò77ï“d¯ï”rh†sqxG¶ýͺ௫8_µ¾ßaq•ßÚlÛîjýlÏz ŒYÀ«ü¡ª¿1b»k7_Yú*aájÛ’ÅÅOúÓSi†¼©™3“W†f¨£¡†:¨†*_”OdJÊdý ýC‡¶ÇÇŽö¿j Vª‹Mþ¾²­õ”—o–¯c©ÇácËØ.··7È·¶ºk¤²r‹µ…¶L»Þµ+?11^Ú,ùL¤›“‚út‹$¤´´[""Æú|Þ»îúòÙ³ûu™}û¾=eJ¢Ô ÿʲmK–Üðw¨Ö /^¬3'Åt§”¬Q·¶´¶¾®o(‘ÏBV¡ía±-©/)šnäwh†ñø»¹E/Ûö7ë‚¿®bíxÎï7Üî”rù1ÙvWë§à`{Öeû‘Ùv§€†êï÷7Fz‹sÿWwJIórs—ë‘èïRÖ§§¥§Ï2öëJ7?mµÝ0ÔþLƒó¸™Ñó°ñŠŠgõí„ÃD¸LÆÓ÷á9ªžl”•m“ÐgWÑC²Ø__ÁÃ嫼œ<±±ñe”é"‡—¡Ž¤*ïEÂøè%µ¦+§†:üR ú7¢C`¨an¨Ý0Ôn¨Ajgx*† ¤ÀpÁ™ÛÀP †!† €¡ŽdC êq*!ÇùóG.Lóù¼OdZÚ-‡¿°òök¼c·ºz[JÊd¯7jîÜÔ–– ýÀp5g ü«'` 8?'©C +CuÙÇÂaª©¤¾h|È@UUqròD›©©SÕ„j C CÅP‡ÆP%!?ÙÝÝÜÛ{¦©ioFƼ½ñVbÙ²{?üð z4Ï”)‰jåã?´nÝRYX¿þaµÐïÍÀPIíƒv5Ø3øÃ°¢âYãüò*V¬XÔÖV/m¨©)½†aˆ¡`¨£ÝP«ªŠ§MKRÏÒÛ½ûiÓóõ·sDÄØ ®—äa- yEƒ4Þ<ëæË]vªÏYÚ>­PýYZš?^ÍÈblRQÑÚ˜˜hyIÞ‚ñQˆRm~þ çldœ¨SÞø… ÇÔÃ#SŠ»ÑtkÜϾm:ªjÍÆJÃŒù[oð8#ÏPýu'Ï:xÃpΞ}³>Kjºº—ds†!† €¡ŽvCŽö©Éä›wÍšL‡¬ÐÒR¡ÎX 3s¾zBxcãËò­íþ{Sr^lì5‹ß¹nÝRIKþ²rN΃]]§­9fëÖ'D4%ÕéVééd¦P!)mÕª%Öù ƒÛÐÚ÷©Ñö¨:ÏÚq&Fž¡:t¼`Ïö þ0”Q&ýÜçóš$O 
q\ܸ÷߯bb¨êh7Ô¼¼¬Ù³o~ì±Êl³BMM©|«_Œé©¥MsÜëÓ-º€Ëèè8._÷%%RS§®_ÿ°mvééiùÜÿ4âŸ&¼v3%·ºÈ8cÆMW®¼ÙwCµ¶ÁejôwTgßù8#ÌP:^×£yÊ({å•gdAþ5Mo!ŠnΛÊ0ÄP0ÔÑn¨­­¯ïÚ•Ÿž>+3s¾õû=.n܉»Uò°Í7ò•ÝÙÙØÇïЮ®Óú²»Ã ŠÎº1Ô_Ühš•»/Wù­Ë:iÙ.½àï¨:§Æ¾gbxªswry‚0܆¡þ ¸éÿ„!ÿG‘aˆ¡`¨#ÐP×­[zùò²ðÞ{bb¢õ·pGÇq=ORKK…|eg7¯ÍÎÎèîníËʺßý÷æ¢Ewœ<¹Gj–LPR²aÒ¤¬••õåÅÂÂÕ¶%‹‹Ÿ4é©:¡Ý)eÛyGî7å<û¶íQ xy1´ãL _CuîN¶]Ë8|ÂsÊ@S?yå•gf̸I­ÌÍ]þÉ'§Ô9Ô™3“†*†:Ú u×®üÄÄx'RÔMý¾J¢ `¥º˜%Ëåå›å{<:ÚgüU™±@{{ƒ|k«{­*+·¸¿SJRHZÚ-c}>ï]w}ùìÙýÖʃJê y/’íôiSIë­$¶O›rsRʶ ‡m/‡Ë95ÚUYL)-II™l½E#àq&Fž¡:w'Û®e>á9 ;;ÅüdRLïNlUÞ©ìQ†¡ñ6†!† €¡ŽÞ«ü#/ÊÊ6Í™“ÒÇ‹žÆ“R¸†Êû†tcÀP1ÔCÒ˜„ñaµ&´zÈm¤v •aH7 C% • 0T C%C%èÆ€¡b¨#>˜æ”Ô>˜Ã“>C`¨*†z^<þÈÂ…i>Ÿ×ã‰LK»åðáÖBê5Þ§_UUœœ<ÑëJMjœh±´4/!á‹êIÝÖ­0TCUÕ.Yr÷`öL:?† €¡b¨CND‹‹Ÿìînîí=ÓÔ´7#cÞ@¤%cm+V,jk«—ÝÕÔ”ÆÆ^óùÿSÊ8ãö@dG •Ô>2 uÒ¤Ô¬ÈôLº1`¨ƒj¨’uŠŠÖÆÄDÇǯª*V+ÅêÔ#Bõݸ99îßÿœzµ²r‹üi}°hÀ|éõF8j}F©ú³´4O£f…1ªžµêÉ‹Rm~þ g)¬«{IJªåÙ³o6žO êÙàÖ6¸ŸàGM“8aÂõ¢ËzÍÆJûƬ7±~ Äè1Ô+WÞ”ÿSéé‹dà\{íÔŸ¶É¡7ìH-/߯)a»1Tk‚‚²¥¥Â¸­óì5ÖO=çPW­Z"ÿ#R˲ :t$‡ÞrGR5¤¤LVÿ£sc¨2:dhË0×ËÎß*þÆûÂ…iòßN5û(¡`¨£ÔPuГY«¹dze{{CjêÔ3nÒ³††pz££ã¸d©’’ RÕúõÛ&¼žžÛügmgÀÀU¨©çÎMÕ†úÊ+Ϩil$ûe¨Ö6¸4ÔššRÉÊêœq[çÀ­Ÿ1z õƒª㥇HÈ‚üéБzcÈIÕ –©†I@CÕ£Ãý·Š¿ñ.…ÑS Cí†j›ôåEï¿_?>!á‹.ëûïÒººNëgqÛ¶ÁMêui¨ZLÕ”)‰Ö•Ÿ‡:¸ÎµÒC‹wâÄnk1gCµ~ Ĩúê¼y·VVnÙ¿ÿ9}©Ý_Grè!w$]Czú¬òòÍ.û|Pß*þÆ;†Š¡`¨ªýeÁììŒîîæ¶¶ú¬¬û•P¦¤L®«{éðáfÎL–—Téè8îþ‹rÑ¢;NžÜ#‰G„¬¤däI7èl¤ë ÊPõUþÂÂÕ¶%ss—òÉ)uUš­V®_ÿðîÝO«s¨3fÜÔGC•wqôèÙ…ó à11Ñ--R,//ËøªóU~Ó§@Œ6C•áöÕ¯~IBÿšÓ_Grè!w$]ûïV&%%¸ìóî¿UÆ» ®òc¨*†j^noot"â(i©²r‹º)~×®|õªÈ¥ü) +Õ•;—7^HVKK»%"b¬Ïç½ë®/Ÿ=»_­7Ö”¡ª;¥<žH1Q}6ÔXR4>~¼Tžž>KߊÔÙÙ(ùOš!Ûê6¸¹ß¶ ¢²‹èhŸ³¡–—o·b¦ŸÌŠgHËÅþ­wJY?bÞËŸ˜¯ÊéБzcÀŽä¦Ã/Yr·Ë>ïþ[Åa¼_¼X;gN wJa¨êè5Ô‘ee›$Ÿõñ·Æ+Œd5‚9¥º1`¨jˆ!6)a¼+_­ ­’¡tcÀP1T‚ µ3< CÅPÃãK“Ÿ—¤v†'¡`¨ꀧÀ ²Z°)0„ò‚ו’2yÇŽ§úRù;Èâ¤v†'Ón *)0Äò--’_|qcß+'ç‘Ú1T†'Ý0ÔÁHÖù¯M3eû›ÔÛ49¸é†íVêáP^oT~þŠ€çE*öA9ÒÓÜQÁ¶<à³ZC˜‘œ µ3<ž*†Š¡Ú§@ëüצ™²'õ¶ÜßV Ü®Pïæ„‡¿Êƒ=¡"ïÎzŸ~P-w“ƒ‘œ µ3<ž*†Š¡Ú§@ë삦™²mgÓvžÜßV¶“”Z#`å}I¡µ<` aFr‚ÔÎðdxb¨*†D 4°Î¦í<9¸¿­\¦À€•››šö¦¦NíK˦@çÙtBžÀPž O C†jÿÚôEo;›¶íäàòEßÑqÜa+——VT lnÞ—œêõFMŸ~ãùóGôãåOY™Ÿ¿Â´UDÄØ ®¯©)5É?ÛÚêÓÓgy<‘Ó¦%55íu®Ê6Ù8TîòÉ‹ÆÙk$ ö¥åŸ¼XX¸::Ú7®ªªxóæµ|èÐvUÀ¡NCex2<1T C õì2 
ÜîfÊ™––Ф¤k™™ó««·ÉBcãËò½Ôì5+ö„ŠqðÐZî&Ê^êê^нF/ëúê$0T†'ÃCÀP1Ô)PÏÐíóyÕJÛi»kjJå«\Íy#ÿZëT“Áè)dª²FÀÊû’CkyÀ¨'¨4.ëÍê$0T†'ÃCÀP1Ô³¦ogù¾!ÆÅ;qb·usc"éìl4íÔe Xy°)°©iojêÔ¾´<` tv ‡: •áÉðÄP0T õ1iÒ Gî4PT´6„ˈ11Ñ--²¹q.où¢ïè8®/œeggtw7·µÕgeÝÔeÄ€••››÷%'OÔ·b„Öò>¦@‡: •áÉðÄP0T õqèÐöøøñÑÑ>S ”ÄàñD¦¤L6ÞŠ!ÊJcÎ(/ß,¹D67&³‚‚•êz™,··7È÷¾ü™””PY¹Å¡*k¬Üå­êšéNáÐZÞÇèP'¡2<ž*†Š¡ò¸‚–O›bx*†:J • ¸ "l •áI`¨ê(5ÔÁIº¦ ¯*ÓÀP0T •  • †f†Ú“%òC:‚ÔÎð$0T †8†Ûd߂ו’2yÇŽ§úRù;Èâ¤v†'Ón *)0Äò--¦§Ø„\99ÔŽ¡2<鯀¡R <þÈôé7z½Qùù+ô·|[[}zú,'rÚ´¤¦¦½¦`}ÕtÃvsÛùK6c'L¸¾¦¦ÔZ¹Ë‡,'ª‘,è²rÛ–|Èbaáêèh_\ܸªªâÍ›sÔò¡CÛ&Ajgx2<1T ü¦@Ûùc23çWWo“…ÆÆ—å‹Û:óŠÃ«þ ¸œ¨FGKKERRBßOê'ûX¹mËݤ@ÙK]ÝK±±×èe]¿C©áÉðÄP0T Õ&ÚÎÁ­&nÑÓ½˜¾ô_õWÀådß55¥’'d+•÷%¬Ü¶åS z_¦e½¹C©áÉðÄP0T 5ˆØÙÙè/¯8¿ê¯€Ë7îĉÝ*uÙNWl ljÚ›š:Õeå¶-˜mË8.‚ÔÎðdxb¨*}1;;£»»¹­­>+ë~ëeDë«òEßÑqÜ¡€Ëˈ11Ñ--’¢Œs‚+*67ïKNž¨oÅX¹mËû˜ê$Hí O†'† €¡b¨ö·b¤¤Löx"Y¡½½A¾¸%7$%%TVn1}§Û¾ZP°R]/óWÀvGÖ(/ß,‰*:ÚgÌ”ÆÊ]ÞŠ¡®Ù™îX¹mËû˜ê$Hí O†'† €¡b¨LZCÌ)EÐ1T C%R;ÓÀP0T.)Ðë2¹„ÀPž† €¡b¨A`¨Ý0Ô06Ô~œ8Ñýãf‚ÔÎð$0T C8pZb&ˆ‘j¨ Oº1† €¡b¨¤@‚ÔÎðdxb¨*ô!ž?dúô½Þ¨üü:´µÕ§§Ïòx"§MKjjÚkJ6ÖWÇð·¹íŽ\>=Q/­‰‰Ž_UUÜ—ýú{ƒ¥¥yR9s“áó#ÂÇPž O CÅPÿ[†èìlô—Šœ_õW _R óJ÷û ø"œ •áI`¨ê(½ÊŸÑÝÝÜÖVŸ•u¿õ2¢õUI*Ç ôËeDÛ|ì¼_}±°pµË7Ha~•ŸáI`¨ê7Ôó礤Löx"óò²thoo$!Ù%))¡²r‹)CؾZP°R]›óWÀvGÁÞŠa]éf¿Ó§ß(ûÍÍ].ÿºyƒ>wJ1<ž*†: uôDYÙ¦9sRHWÄ0}ÚÓÀP0T uD…šUÜøä‚ÀPžtcÀP1T‚ µ3< CR A`¨ÝCÀP1T‚ µ3< C…àRà‡%ÅòmE„›ñ2˜†Êð$†u7 5ôHD°1h†Jú† =WÿíøÛ*jKÊ<½mßß<»÷©BùÚ"Â)^Ù;ЩáIŒ€n j_9uêÔáÇ_}õÕ=à/2jdì´¶¶2Co²výÞÛÝ'éÌ“pTdºÕ"¯L.œÈ Cô^L0Q>´Û!~ÐÇTye Äf¢ @ë lnžBüˆiA¢Šj‘HIÍoä¾3Aš>cËf<²pyq~ùãâ|·¼„§NžÀ£Æ”¬›ƒ…5£ÌcU‰›ùÕ_ÁçŒ?Véò7PrggJ¢dú-L; ¬/8ÂHPæwJLuá HA2+‘§ƒÚ#\ÍÉ0±BîgAï&÷ëÕÿgéfÀ-“¾F*œÍ<(!Yª”¸J¯9 êÌ ußô*Š7QšÍÜáαêf³7ÅôÞƒ¯³9°åðOÁš=ÇöÕš5Óá+øÒ®À[CB_¢k;aÚñZ ûåZOUÀåiÂ1Z¤le#yÁô{¡z6Ëý›h;b#.!’¬.(Ì`$<ÏawqNÞĸÛU‚íŽØc„+-ŒÁÒÄa¹ª–¦‚àAxHç%J=í´É‰™– k W6ïS^nÊ*&aö€Æ! 
Ц ÈƒC-÷<èËãµ½c/¼;:Ö·W[TQýÖ[÷6—âyÜkorÛëÅq?î‡~WY6ïøZtÆw@ùÈDAÝ0CWf´?~ª'…¡pÐîBçø´srzôzñ ?ú—É~ë[»Õi ùû×}¯±cßýîýk×–Ê&©¡¶ö¿\ºôü¼>œkvC¡"Ù©Ûn J»ºc¡¸Ã˜d½¡Ê€üÛ¿™ó|¶ZóÍoîòûÃåúO²øÆÏD"ŸRñ°yó'ÔxZžÿvÞä7îŸþ©'x¯±Kÿú¯_PðÎù—SÆ‚vÑèМ1ºæuXî”C¨G‰©Ÿ0vcBbª¼$ $äL›,CÑùÃkaŸks1 Ëm¨¯½öÈš5·ªô3Ï´WV–ÉI¿ùæsáðÆÝ»?¯6=õÔ7å#ùoÿö¨¤/^|vûö°©ÎÒÒß”‰XæYUg]ÝfS†Gi‘ úþ¡SeOn}ä‘£èå—¿£Nî~þóŸ‘Cq¾†jWƒØ‰n÷G?:.iKCµ+î0&«ÁP‹ŠÞóƒ<šÊi$Y”ÿvôG¬•D‹|KHìÚµ;%à0¼voœ¼5>øU]ƒ¤wì¨5õÍ.šÓ»™£ •P7ŽS?aìÆ„†ª79„¢Ý‡×Â>׿(b–ÛP%âå,•®ªúð?þããÆãDþ‘Ò›ž}ö€Cò¯Ø•+/:dãáÅ¿¥×?ÿüÁuë>¨ó\¸ð´ñ¦1©m^÷¡:Ô í~ÿû‡õ&I[ª]q‡1Y †züø× ßµuë§Þ#o™ñ¬OòGû?ýSe%R*//ײ”ÃðÚ½q÷wÉ¿þzý¦M•~S‹vÑèМîØÍ©„ºÝ(1õÆ„†zþü ñEç«ü¦P´ûðZØçšÃ!@L ªŽZIè—h«ºq{ÎËd*ÑÜüGâò_Ú#´¼ñÆ3ÉL5H:ùßDç®:ŸCµ[“Ü®¥¡:˜·Ý˜¬’gù/]zþ»ß½_ÞÙ >¼×îӢ̌ÕÕUòë',³¥rÆ7N+à?ÿsL¦Úä>ÛE£Cs©¼ãs u¦~ÂØ.Œ ‰Õc¨:ÀŠŠÞ³sçþüçqS¤¥Š7ÿ¹æ|ç+1 Ëj¨Æ«ü£êÂhòKâxÎÏæüàÑ¿ú«¦ººÍ>ß»¿þõ/f‡¡:ŒÉ*ü¶©gži/.~*íëÖ}ð«_mÐáÙ µÃð:¼q?¾ï®»>. iBÒ–Å-£Ñ¡¹TÞñTŽ •0^¬0&$Vá9T»M)†âÍ®a¨F†ZSó;÷úÌ¿ñÖ(Óe,™ÖS¬óÍ7Ÿ3^hИñÒ§¤—>—ÎPM—\%=/Cu“Uh¨Æ3î¦sÉÉŸÙ:ý÷ÿ]ãÌhŠ+»áuxãä%†ñÿø¸~"ÛáeŒF‡æRyÇíŽÓNÍ+Ô™ú c»0&$0TË3†â 5•™˜„3ÔŸÿ<.ÿµËÌ(ÿÄëÿ¥žþ ßÿÇ]fpy:uDü5ù‘7ÞxFßԯ묮®úþ÷«‚úú¼¬ÎðÝïÞ/Ó±z^Aþ†BE²f UvG?© KJBó2T‡1Y †ºiÓG%NÔ—ó‰ê}þóŸÙ²å.ýôÉßýÝCvoAiéoªgØ/\xzíÚR½ÕTÊaxÞ8y}ó›»**nÿÿãO-»mÍ¥òŽÛ¦šW¨3õÆvaLH`¨Æg‘-Cqa†šÊLHLÂrªÂí~kaá»"‘OéNý’H•Ù\Ýž" ã#™âËË[þ“³ü¶)É)¾+[½^Ô¬oµNþ¶)Y#%Š_¦þ¤”C Ò–ôYú&íþ¯ÿÕ¬Ÿ K±¸Ã˜d½¡Ê¾×ÖþyOeô÷îÜù‡?ûÙiý²|N¿¦ÇXP>¿ËÊ~KEËÿüŸ¡·šJ9¯Ý§îú—^Y~+™C4:4—â;ny$ïTê¡ÎÔOÛ…1!¡êMv¡¸0CMq&$&aù •—ú—’’_eœqczãžzꛟÿügø‰KÈâ0&$Q¦)ÀP³üµ}{X}é†|ÔÄ,áp8YLM}ÀP—ƒÔŸ”J¾µ¾¾þ¡‡Òy8 k´P†B!—ËU^^ÞÙÙ©ó455‰öŸåïë뵕œÁ`PjÐv¸ÿ~Çc4c»:5±X¬¸¸XUÕÞÞ®ËÚ5!¢,v»Ý¢¶ÉÏò[öæ6œêBàË2€°‚ €à • ‚ N •  ØNÀP9À€° Ø€àÀP7zaÁ jÚý ˜V/¢ `µjºé2ƒ€¡b¨*`¨ªiu£7†JØl@p`¨é%…*a@°Á €¡b¨À;Á *G;a@°Á €¡.âvýʵÁï|Oþr´3/l@p`¨KBê7z>ûò¾»ä€‰Å9ÚW9<Á êJrýʵ³_ü«Ç îT¿ùôÓo÷ðÿ(†º2Œ>ûòß|üu}\Äñ‰÷þÞñܪ‹=§–´E Cµ@Ÿ4}ÔU©Î›>ukMWþ¦¥ÖS `•ª’δ}m«ÑP·^¿ríÇ­ÇN¼ïÓ½}ƒ²Æã·Üquê—2ªO©.óüÀ¼ @°Á €¡Zlýò—¿üÀ<÷Üs¦M—^|UyêÒ}*0/l@p`¨ó0TÅõ+×ηuN¼q‰°`^ Ø€àÀPÓÂPnð|l'`¨*`¨*†Š¡†Š¡`¨*d<Á *† ™Eœ*† ÌË@°œ€¡b¨À¼ @°Á €¡b¨À¼ Á €¡XÃó@°œ€¡b¨€¡.£¡æä¬Ø¯`Ó*†êÔ4ª j†êøøø=÷Ü Ýnw^^^$9}úô‚½Ó´)­Î¡Î·3)æ_À>"Í€¡:êæÍ›wîÜ922"é©©©®®®7b¨«ÖPy>6‚0Ô•7T·Û=33cWª¹¹¹  
ÀãñÔÕÕ% eWšdñ2m’Ä¡C‡Ô ÚŠŠŠ¾¾>‡šM G"¯×+eE£/_¾ì,Á¢×ÑhT*ôù|­­­¦«üÝb±XYY™” …BGޱÌoÙËjw-¹ÈäääŽ;rg‘„,¦ml@p`¨Ëa¨›6mÚ¾}{<OöÔ={ö„Ãá±±±éééúúúÆÆFKG´ôE½(–&5Håûöí[·nsÍFÖ¬Y£z%yvíÚ%öél¨’§¦¦f|I$߇êÐíÂÂB5,âv»™bRÙ5S‘¦¦&)¢{.‹ÌË€œ€¡®jC½zõê½÷Þ[VVæõzÀÎ;eÚ$‹CCC*=11‘ŸŸ¿CÕgÅíÜn·sÍvHÙ¼¼Ÿy‚0ÔUm¨FD6mÚ¤EʈËåZ€¡Z.ÚÕl$WUU‰7;·n' ó2Ô¾¾¾ÚÚZ±É’’’X,f™?õþ̹kΣ¤=žy‚0T ÕlH@@ßýé`T 0T»šHžžžžééiIË_]VœOßH$媦··×ï÷[æ·ëOr¶9w-ùª±ç+{•ç€` 8C]yCÝ´iSww·º?66ÖÔÔTUU¥6µ´´TWWKº¿¿_ßy™››;::jÙŠi“¡ÚÕl¤  @ߺuëV]víÚµ²"©²¾®®Îtjb–p8œlÝ–¨ëìb¨Ò®e~»þ˜²¥²k¦"úZÕó•½0Ô•7ÔÓ§O×ÖÖz½^—Ëå÷ûwìØa<(¾ …dSyyygg§Z¹ÿ~ÇcyJÒ´ÉᔪeÍFb±Xqq±dƒíííºl___EE…ZàÀã³üõõõn·[T2ùY~çnKJKK¥lYY™¾ÊoÊoןäjçÜ5Sù÷ ±±Ñ3KCCƒìG&†ºª 0T 0T Rƒç€` 8CÅP!“¢€`‚CÅPy6‚0T ˜—6 80T ˜—` 80Ô_ã‡?üáÝwßíeóæÍ²ø”“#›Ì£føîÏÑÑÑÆÆÆ@ àv»sss%óÉ“'S¬yQš€Å…ç€` 8C]yC=wîœßï?tèÐô,GŽñù|ýýýÚ£ÑèÁƒ-õQÜ1 ¶´´ŒÝ˜ýÎüX,¦usΚo¾ 5 UìPV×È¢þ­N1ÅD"±fÍm–F}ܱcǾ}ûìZœ³æ›oCÍBC-((¸zõªqÍøøx~~¾Ñãñøºuë’õ±°°ÐTv¾5ßdjªåïÔ»ÝnÓÖÝ»wïٳǴRgÓëóªùfšÀP³ÐPóóóÇÇÇk‰Dò™Î™™™uëÖ½òÊ+Æ•ÉgI[S¬ùfš€E‡ç€` 8C]yCMñnѳO>•——OLLè•Û·oommµÓÇÔïC]p°ÌQ@°Á €¡.‡¡Š9rdzzzffF²˜üĽâàÁƒ;vìÐ+‡††ü~ÿý÷ß?::zcö$èÉ“'õÖk¾™&€y6‚0Ô,4Táõ×_ß¼y³g–;ï¼Óò[K5ÕÕÕÆ•ÃÃÃõõõ>ŸÏåråååE"‘ÞÞÞyÕ|“Mó2l'`¨Yh¨ÌË@°œ€¡b¨¦ð|l'`¨*`¨*†š&†º²/¢ C5o]Ás¨*†Š¡ÂBàù ØNÀP1TȤ( Ø€àÀP1Tà‚ €à Cåhg^ Ø€àÀP1T`^‚ €àÀP9ÚÁž‚ €à 5 õú•kƒßùžüÅP0Ô6Ô+§~ðäûï– #±øŠô 0Ô_pýʵsûyª¸FýæÓO¿Ý³R}€Õn¨WNý ¾åÞǼŸìzçï<úÖ?æÝp±çÔ ö V©¡îmúÒ·jv<ö¾ß;þÎo[/käïc¹w,µžb¨ÏÁ@p†º¬†º²/¢- ¢€`‚C]dCýêο8úÑ-z6hk|ëÊÿÔ2œCæe‚ N Õb«¾uðØ3±þ׎·üBRŸºõ3H*0/Á@p†ºÂ†ªÖL¾9ö¶í9þŽªex–˜—6 80Ô¹ U3xì™î÷üî’~*d <Á jZªâú•kçÛ:'Þ¸DX`¨ia¨€¡b¨€¡b¨*† ÏÁ@p†Š¡B&EÁ'†Š¡ó2l'`¨*0/l@p`¨*0/Á@p`¨77j99„N¶Âó@°œ€¡b¨€¡þº¡ŽßsÏ=Á`ÐívçååE"‘Ó§O/…¡Î·Ôò´€¡¦¡nÞ¼yçÎ###’žššêêêÚ¸q#† €¡®˜¡ºÝî™™»RÍÍͧ®®.‘H$[ ea×®]ùùùyyymmmªˆ&¹•X,VVV&•„B¡#GŽXæ—„Tå÷û].—,G"¯×+ýɾ|ù²]+–=F£²Òçóµ¶¶ªü¢æº X{QQ‘q§0Ôå0ÔM›6mß¾='{êž={ÂáðØØØôôt}}}cc£ÉPí2ìÞ½[¬QÖ‹Šª&{­‰ÂÂBÕ+ÍäVôâ¶mÛ´/®Y³FõYš–&D7-KÙõPŠÔÔÔHmãããbºªToo¯ˆ².ÛÐÐ òºÚ ž‚ €à uå õêÕ«÷Þ{¯˜™×ë ;wî”5j“, ©ôÄÄD~~¾Éí2 šÇÚÞPƒÁ`[[›ºÓÀ.¿,ŠhZOÍË˳,e×C¿ß?<<¬Ò’Ð¥*++;::$100 ½Ã^m‡ß±Á êʪÑʆ††M›6iÛ3¢.¯-pÎ )j___mm­ècIII,³3Tãb<¯ªª«vn:ÅêÅ“'O–––J"ªû˜—6 80Ô•4T…ÛíV‰@  îï´sM» ó=‡ªéííõûý©ª4ÝÓÓ3==-iùk”fS6ËÏ¡ KUTTìÝ»7 9ܞ˼ 
@°Á €¡.¡¡nÚ´©»»{rrRÒcccMMMUUUjSKKKuuµ2¹þþþä{=í2¨ûPE ÷¡æææŽŽŽZöM *£C-((°ÌoROɦo]ݺu«Þj*e×CéU$IÌR[[k¬¼««K=ʼ @°Á €¡®Œ¡ž>}ZÍëõº\.¿ß¿cÇãIG1¼P($›ÊËËõsîFŸ³Ì ˆéŠ,êgù…ýû÷{<Ë3©R°´´Ôív—••é«ü¦ü¦‚’­¸¸XÚ ƒííízkr+–=#ß¾}»´(¦ÛÚÚªO âëêB¦3ñÆ¥o=1ùæØ¼Jñ|,œêbþ¦T–100 «#‘HWWWvìÚH,.ïûñ[î¸ô⫼Ñ*†šî455%‰«W¯†ÃaIÿr”::Œ_8•üCóyëåõÔo}æÇ­Ç®_¹Æ[€¡b¨iJ{{»Ïçóz½ÑhT݆ëñx‚Áà™3g²lOÿñþow¼åc¿kÓ³‰Ï­ŠoùïœRÀP1TH I•xô­?Yñ‡=Á?à”*†š†Ê‹—~=þî;Ubè±ççÌÌLK£Á €¡ruU3òôß>ê^¯ó±wTuûîúÑW¾¥Î¡Î9 ,ÿ”@p†Š¡ÂªÐÓÇÞ¾¡Ó³á¥ßýÂès/ÿûô¿¥9Œ! @p`¨*,úž:oj|êÔ)Æ 80T n õ}¨y>a:iJä@p†Š¡Â 0ñÆ¥·ûù…"ÒF‚CÅ3€ÈÀPñ rCÅ3€ÈÀPñ rCÅ3€È̇‡Q€àÀPñ r “€à Ïæb"‚0T<ˆ ðN ÏX0 ÿ9ó)//¿ÃŠÏæ¼ÿËöÈÖ|à·ß~û‡>ô¡; -ã¢ÌNL5€¡`¨0Dïr`)Ù¸q#S ¤ <)*†ša†º+3Q…B­ølÎû÷Ø#[?úÑ~ä#©¬¬Ü°¼ë]ïÂP0T u†*ªw53Q’*°°Èéìì|þùçÏŸ?O$ÀRpÇw`¨*†Š¡b¨v†ú¡}(Ÿ={V"íâÅ‹ãã㌠`¨*,«¡ªËîin¨Æ{WÜPU7Tº»»; º\.µÆ¸)«¦§•Þ/»Ì·c:¿1¢’ uÍš5O>ù¤DÚ+¯¼"ÁvåÊfÀP1TX1C][½C5žCíèèP+eÍ ÚRQQ‘¤‡‡‡ËäVÃRdöªiQ?Œ¢ µ´´ôᇖ?$ØÎž={ñâEfX)xR 0T C]Cý¾°(†‡Ýn·Ëå’DšS–júΛ‹d¨:ð”¡Þ~ûíGI}òÉ'ãñø… ˜y =?O0T 5ã õ/ÿò/¥ (Ý;ßùÎíÛ·ÿô§?5Y©é w–BOå“^U~“†úÚk¯ÉŽTWW+OM$ÉEºººŠ‹‹Õõw-%Æ\³fÍÀÀ€Z/}‹F£¹¹¹ç®»îÒçD¥æ†††¼¼¼äJLµ%_å·,ëÜC» “[—½®¨¨x饗,KÙíÎàà ìµ)ó† $!6&é'NHºªªÊAì,69sò ›Ú«««óÌR[[+}–“>—””èª$íõzÇÇÇmYîÅ|÷ÚÙP%¿êƒ>È}Ï€¡`¨°´†ºwï^;IÈç®Ôð¹Ï}n™Ï¡ŠžŠT©¯š²;)•¢¡Þwß}êú¾ ‰ƒ&ÉÏÏYéëëK®§§§çé§Ÿ–ÄÝwß­Ö‹²Ëâ+¯¼Òßß/‰ÊÊJãz:;]³3»²)öд˜|'ÃÈȈIËRv»#N¯v_ÐE”•Š#Jº¾¾^Ò/¼ð‚ƒ¡Zv;•A6í‹è©1Û–-[dåÎ;%­f醤e©z/TAÓ€;ìµ)?† *†Š¡®¼¡š®ìßrË-Ëi¨ZO€Ä|#Çd¨ûØÇD’¦g‘„åwùý~ÇS\\|Ï=÷èU=3³HÂëõªõ’ÓxŽÍår×KæªeÙ{h'¬±X¬¬¬Ìív›úiÊf·;²¿ÆÝ×E¤RçÔÔ”tiݺuvãïÐíäÌɃlÙIMÕùNéíæÍ›%-%-kLeíöb¾{¡† €¡b¨+l¨O?ýôúõëßþö·ë‹³–VºD†ú“Ÿüä#ùˆúŸ7ß|s‘c4TuÞÔ„¾¤«9{öl$Q2WZZšŠ¡NNNš*YRCuè¡¡Êâ¹sçL²e)É»cçjr0Jº±±Qþž8qÂÙP-»½X†*ÔÖÖªó¸úÌî¼ 5õ½¾aÿ¤† éOJ†Š¡f³¡ú|>)%ž*é±±±å4Ô .ÜvÛmJOçüÖžT õÿø%FÕ¦mÛ¶Éâ¾}û, ª«á²ûÆzzzzb±XòUþææfSñ%½ÊoÙC¥Y2n–õçççËâÐÐбcÇ’åL—²Ûp8œ|•_˜˜˜P÷Ë–••9Œ¿C·“3'²©}•_eSWù…x<.‹~¿_]²O.«¯Ú«‚¦OÞk»ü–û…¡`¨êòê{Þó)õýï_ôtÇŽvVúŽw¼C¢[ij§úâ~*D§b¨·Þz«¾UQ„,J+–E\.WII‰¨¹q¥BlL?Ä“H$ LÓ¨õÊ -­ÔÁx,ËÎÙÃýû÷«s~–_°*»¯ÔÍÔOS)»Ý”½¶|fH@ñÓP-»œ9yM-Jxè'¥$!‘©7­]»Vr®_¿Þ²v{±€½ÆP0T u% µ§§çöÛo«SÜ·oŸ¡¶¶¶ÞrË-‹u&UôT™ó½§©DŽòwÎ[Ün÷ÍêýV&gΜ‘q(**R‹ÓÓÓ•••·ÝvÛâÌe™0È*†Š¡®Œ¡.?úìé?øÁõôÆÒÿêi²‘¬6Ôx½Þ 6œ>}Z¯ 
…Bñx|›ÈÐñÁP0T 5k õG?úQ0Tz:¯_Œ\jC°„'¥ ýƒCÅP3ÀP•¤¦'ú>W‡ï=ÅP!=§, 2ëóCÅPÓÎPÓœè)† *† *†š‘”——+ÿûrz³°Ÿ2ÇPCÀPCÅP3õ jù+J«!r0ÀPCÀP1T C…ì‡'¥ ýƒCÅP1T Võñ…¡`¨€¡b¨€¡`¨*†Š¡Îy@®îŸžJÏÝ_†^a¨*†Š¡b¨™d¨sŽÉR† €¡b¨jÆêRˆKêu.¬õ47àåìžC[¦MK×+ž”‚´…'¥CÅP1T C]¥†Ê·MA†ÎŠ*†Š¡f˜¡NNNîØ±#wIÈ¢Z?<<‰D¼^¯ÛíÞ¼yóåË—Õú©©©h4êñx|>_kk«e±X¬¬¬Lò„B¡#Gލ¦5º3mmm~¿ßårÙ5—\Jhnn.((ÊëêꉄC¯äÝ—qÐGFFŠŠŠtçÝ”: e}EEE___ê»oÜGËJæÜa×®]ùùùyyy2JvC¡kK[ËvšÃPCÀP1T 5 µ©©)ÏRSS#‹j½ÈG<Ÿ™™™žž[-Óæ$Ùt~Ë: UЊð566Z¶.‹Û¶mÓ’dל©Ôž={¤·ccc’­¾¾^W®z%µI¯Ä8U©ÞÞ^e]¶¡¡AœÒÔU‡vÅá¤!Ù´oß¾uëÖ¥¾ûFS´¬Dg°ÛÝ»w‹.ËzbiÑáí³[ËvíšÃPCÀP1T 5½ Õï÷ª´$|>_r¼¼<•.**Òù-ë ƒmmm###Î%ªdÙcs¦R@@·>11‘ŸŸ¯÷bxxX¥%¡KUVVvttHb``@z%Âç0n¦võédYïv»Sß}£¡ZV¢3ØíŽ´¢ßç·Ïnl-ÛµkC CÅP1Ôô2TÓJm3ñx¼ªªÊëõªËÇêZüÔnyìëë«­­*))‰Åb©L½9#söêäÉ“¥¥¥’ˆF£êr¹‰ùîf*»?gfc"•ÝY€¡ÚµkÙ\êð¤¤-<)*†Š¡f•¡úý~ãIA}5ôôôLOOKZþ격œDÔôööJý©X”]sÉÙô­¢¦½ÐçP¥{ÆR{÷î …B333ÉSlw^»Ÿº¡ÚíÎÍŸCµ\´knÁdž €¡†º$†ªo¬L$ápX߇ZPP ï%ݺu«.«ïøTù-ëŒF£J°ÄP¥µ277wttÔ®3vÍ™Jµ´´TWW+íïï7Þ‰DT¯jkk•wuuÉ¢ˆ”åXÙµk'y©ì~ê†j·;ê>Té’ñ>TÓPqÛ9›ÃP0T C]IC5qcöYþÆÆFÏ, úNÍX,V\\ìr¹‚Á`{{»¶ÉP__ïv»Åíìf—ÖKKK%OYY™¾Ê¿ÿ~iÂÎÞìš3•Rš …$gyy¹~T_öbûöíºWú^¡»»[]è·Ä®];ÉKe÷S7T»Ý¹1û›x§~–ßr(4Îc›Js*†Š¡b¨+c¨«„‘0½‰DºººÒª‡ ¸4=/ C Cu¢©©)‘H\½zÕx¯BGG‡ñ §ÒD õ½¹OJAú'†Š¡b¨jÑÞÞîóù¼^o4Ußµäñx‚Áà™3gÒªŸÒÃfzàa¨Y³"†Š¡b¨*d¿`¨€¡`¨*†º„†š&?|Ÿ&ݸ™¦ÿ.`¨€¡`¨*†Š¡b¨*† *† êrê¢ÛXúû†zƒ'¥ ‚CÅP1T C]]†j:¾0T 0ÔE0Ô]»våçç¿~ίyÅbeee' 9r䯝ó¿ÎßÜÜ\PP Ùêêꉄ®DòûýÉß:55F%¿Ïç3~õýððp$ñz½n·[ý®’e‹–Ù’•±½½= Jq©Ó§OKÿ‹‹‹e±¢¢âõ×_WÙ&''wìØ‘;‹$Ô÷8ôÐag1TfÀP1TÀPçg¨ê·4ÇÆÆŒ¿¥9§¡êßmll´Ì¼gÏžp8,5OOO××׳mÛ¶M;œÉ•Õ¯­ ’ЊôÄãñ™™©Jòè_æ4µh—Í´ [¶l•lr‰hׯ_¯²555IçuOôw©ÚõÐag1TfÀP1TÀPçg¨EEEƒƒƒÉçl¨Á`°­­mddÄr«" ©ôÄÄD~~¾Î&&gÙIéŒ."½²Ô;Qɼ¼¼9ýϘÍÔI-Ç’Ç´¨Õï÷ëa‘„ˆ¬svCeæ C u~†º°‘ïëë«­­+))‰Åbv™èkúÒf×\<¯ªªòz½ÎUÙeK¥ ç׿ê¾;›5ð¤¤p`¨*†ša†ê|ULkffF¥‰D²oõööêŸëL>‡jw'¨]'ÎPöôôLOOKZþÚy¤]¶ªì”±'©œCïÎf_àa¨Y³"†Š¡b¨ék¨ê>T±+ËûP×®]+Á&’*êêêôúh4ª¼V µ  @­ÌÍÍÕ5·´´TWWKº¿¿ßîæQ#ê.ÏÄ,ápXç”&ôm¯[·nÕëM-Úe[€¡êûMUOL÷¡&÷p;‹¡`¨€¡b¨€¡Ú>Ë/ú%ªgù,___EE…Ëå ƒÐ륪ÒÒR·Û]VV¦¯òïß¿ßãñL¼- Iñòòr)2§´‰%×××KµâšÆ'奉ââbÕööv½ÞÔ¢]¶êäädcc£g–††é˜s°³*† *† ê:Œ0TÀP0T CÅP!ûáI)HÿàÀP1T C…U}|a¨*`¨*`¨*†Š¡b¨*†Š¡b¨‹9+û"ü0T C Õ¼uCÍbxR Ò?80T CÅP1ÔÕß6:+`¨*†Š¡ò9¡0󆊡†Š¡† 
'†Š¡b¨*Ÿ*† *† *† + OJAú'†Š¡b¨*†ºª/ C uÑ õú•kƒßùžüÅPCÀP1T u… uè‘çžxÏ]’a$_‘¾† €¡b¨*†ú &ß{9º÷ø-Õo>ýôÛ=+Õ7ÀP0T C]í†:ôÈsÏ”Õu¼åc²ò©âšã·Üq±çÔ ö 2ž”‚ôN CÅPÓØPÿ÷·Ÿ¸kGç;ª:Þ²N7}ëÊÿÔRë)†šÝðmS¡³"†Š¡b¨i9+û"ü0T 0T 0TóÖÎÃ>õ'{/úôñwn|ô­ë±æmë˽cΡ† €¡†Š¡†je¨¿2†K/¾ßro§ç]ïüGßúq$0TÀP0T C]aCUk®_¹vnÿ#=Á?X†gù!‹áI)HÿàÀP1T 5c UséÅWŸ|ÿÝKú}¨°zŽ/ C u Uqýʵómo\"NCÀP1T 5- CÀP1T C CÅPùý½çž{0T€ÅB?Œ"G† éœ*†š†*ÈÊ¢ü]=i‰œÚÚZI766®¶}'½¤éÏæ¼_¥ÕJ 2åóCÅPÓËP÷ìÙc  ›+¾t‘£:–|uÅ;œÜîîî`0èr¹ÔšùöPç_”7æx*`¨*†ºø†j\ó /lذÁ;‹$dC]C-**’ôðððÂzhÊ¡b¨@p2€¡b¨Ùc¨2¶²¸iÓ¦±±±«W¯JBç”ÔŒ3Ôt<’õ46†Š¡ÁÉ †Š¡f¡VUUÉâ™3gÔ¢$dqÆ ÆÌŠüüü‡zÈT‰ä”ÄÃ?,é'NHZ*´Ó)·Û]QQñÒK/%g”ûä ÖâÍuuužYjkkÅ¡ulXÖ™Ê9Ô®®®ââbuyÝÒê,3û&]Pë¥KÑh477Wzx×]wés¢‰D¢¡¡!///¹SmÉWùíê´% uÙУ`¨¶Á €¡b¨Ù`¨bxÆÅ™™Yôz½ÆÌ" ýýý’ðù|¦J”•Š;Jº¾¾ÞùüëÈȈd…BÉ›ª««eSÏ,ÆŠžªõO?ý´$¶lÙbŠS©ª¨¶È___Ÿ]?-3¨tOî¾ûnµ~ûöí²øÊ+¯¨!ª¬¬4®ß»w¯Ýà;(¦]z”T0ÔCÀP1Ô%7TSCU[í쪸¸X*™ššòûýëÖY¼¡±X¬¬¬L5$¸\®ä<Ò¢jHu@WîñxŒëeQ­ÿRN‰e©ªôSê‘nßsÏ=¢€É±Ì ‡Â4Dª‡ÝÝóª]v£„¡b¨*†šU†šÊU~ç´¼Aê{äï‰'’{RXX(›Î;—ìU 6Ô¼œ·Z֙Сž={6‰(»---MîŒegCœœ4Uró†š\'†Š¡`¨êª0Ô^xA=)uuÓ“R©êÄÄ„ºÛ²¬¬Ì²'ùùù²uhhèØ±cv"‡¯òÇb1ãUþwä¸,ëLýY~u{€¾o!S}•_õÄt•¿¹¹ÙTüæ¯ò'ש¯ò«>`¨*†Š¡f§¡*I­ªªrÏ" ãh§b¨‚:*²hÙùðöûýÎ_Û988(‚›œG¤Y?)%‰±±1µþÿËù-Ë:S1T}鼤¤äé§Ÿ¶8Ƭ2Û’®ê'¥‰„ì~AA©'j½²óTÆ0¹lrv£„¡.<)éœ*†š‘†ê1°( MOOWVVÞvÛm‹R›ºÍ ¨¨è&#gÑ!mpÑßPH%ð0TȬÏS CMwC] C …Bñx|QLKýjÀéÓ§ÓÍPQ@ÀPCÀP1ÔŒ1ÔôŒŒ0TÀP0T CÅPC%ÞC C C…å…'¥ ýƒCÅP1T V)*†Š¡b¨*`¨*†Š¡b¨*†Š¡b¨*`¨*†Š¡b¨öG?ÑIð¤¤p`¨*†Š¡Âê‚o›‚ 0T C]Õ†:ßRËÓ `¨€¡`¨*†Š¡b¨*† *† j †‹ÅÊÊÊ<O(:räˆ^ßÜÜ\PP ëëêê‰D²ZfvíÚ•ŸŸŸ——×ÖÖ¦Šh’{•Üzr~IHU~¿ßårÉâððp$ñz½n·{óæÍ—/_¶kŲ‡SSSÑhTVú|¾ÖÖV•ãÆ28ºàÈÈHQQ‘q§C CÅP1Ôå3ÔÂÂBK¢zj¥ Q8›žž®¯¯×ëµÿÙeؽ{·X£¬UMöZ–­›òËâ¶mÛ´/ŠÅãñ™™iZšÝ´,e×C)RSS#µ‹éªR½½½"ʺlCCƒÈ+ÇKŠð¤¤p`¨*†ša† ÛÚÚFFFŒ+ÀÐÐJOLLäçç›,Ð.CQQÑàà ùø´7TËÖ“ UDÓ²¸xj^^že)»úýþááa•–„.UYYÙÑÑ!‰é•6ÇË|ÁP0T C]Cíëë«­­+))‰ÅbÚöŒ¨ËëF œ3CІj׺Cñx<^UUåõz›N±‡zñäÉ“¥¥¥’ˆF£êþÀP0T C]CÕôööúý~•êþN;×´Ë0ßs¨–­;ª4ÝÓÓ3==-iùk”fS6ËÏ¡ KUTTìÝ»7 ÍÌÌp°`¨*†Š¡®˜¡F£Qå”âˆjeKKKuuµ2¹þþþä{=í2¨ûPE ÷¡æææŽŽŽZöʲuS~“zJ6}ëêÖ­[õVS)»J¯"‘Hb–ÚÚZcå]]]²(vÅ‘‚¡`¨*†º’†*+KKKÝnwYY™¾Î® / ¹\®òòrýœ»Ñç,3MMM"‹úY~aÿþýÇòLªeë¦ü¦‚’­¸¸XÚ ƒííízkr+–=œœœÜ¾}»´(¦ÛÚÚ* ¿»»[]è‡yÁ“RþÁ 
€¡b¨j†êjf``@V/F"‘®®.†eÁ‡¡BfÍŠ*†Š¡b¨iDSSS"‘¸zõj8–ô/ǧ£Ãø…S€¡† €¡b¨*†º|´··û|>¯×F'''eÇã ƒgΜáÁPCÀP1T C C CÅP1TŒž”‚ôN CÅPÓ7rVöEøe=*†Š¡b¨óŽœ4 CÀP1TÀP1TÀP0T CÅP1T CÀPyã1T Vž”‚ôN CÅP1T uuÁ·MA†ÎŠ*†Š¡b¨|N`¨Ì<€¡b¨€¡b¨€¡Á €¡b¨jFêõ+׿ó=ùËç`¨€¡`¨*†ºÂ†zþ›Ý…¿+Fbq>'`¾ð¤¤p`¨*†š1†ú³sƒ§þàÏ;=Ôo>ýôÛ=+Õ7È0T CÅPn¨ç¿ÙùÔoþ?où˜¬|¦¬îø-w\ì9µ‚} CÅP1ÔÕj¨üõã룾íꤩ¼^úÝÆ®w}j©õCÅP0T 0Të­+û"ü0T C Õ¼µóðƒOýÉÞî`õãï¾³+“ÇÇÞQµ çP!‹áI)HÿàÀP1T 5 õWÆpéÅW_Þö•ÇÞ¾áÄû6#©°X‡¡BfÍŠ*†Š¡¦—¡ª5ׯ\;·ÿ‘§nýÌ2<Ë*† *† ê܆ª¹ôâ«O~I¿0T 0T 0Ôyªâú•kçÛ:'Þ¸Dœ† *†Š¡b¨ia¨ ƒ'¥ ýƒCÅP1T V)*†Š¡b¨*`¨*†Š¡b¨*†Š¡b¨*`¨*†Š¡b¨ó€'¥ ýƒCÅP1T V|Ûd謀¡b¨êê2TäU=¥åî/]¯0TÀP0T CÅP3ÒPç“¥4 0T CÅP3ÆP—B\R¯sa­§¹/g÷Ú2mÂPCÀP1T CÅPW‹¡ò¤¤-<)*†Š¡f•¡NNNîØ±#wIÈ¢Z?<<‰D¼^¯ÛíÞ¼yóåË—Õú©©©h4êñx|>_kk«e±X¬¬¬Lò„B¡#Gލ¦5º3mmm~¿ßårÙ5—\Jhnn.((ÊëêꉄC¯6nÜ(ã  ŽŒŒé"λ)5:t( ÊúŠŠŠ¾¾¾Ôw߸–•̹;®]»òóóóòòd”ì†B×–<¶–í:47_0T CÅP—ÐP›ššÂáðø,555²¨Ö‹|Äãñ™™™ééi±%Ñ2mN’Mç·¬³°°P­_cc£e벸mÛ6-IvÍ™JÉ*½“lõõõºrÕ+©Mz%Æ©JõööŠ(ë² â”¦®:´+' ɦ}ûö­[·.õÝ7š¢e%:ƒÝîìÞ½[tYÖ‹K‹oŸÝØZ¶k׆ €¡b¨jzªßïTiIø|¾ä<":yyy*]TT444¤ó[Ö ÛÚÚFFFœ-JTɲÿÆæL¥€n}bb"??_ïÅðð°JKB—ª¬¬ìèèÄÀÀ€ôJ„ÏaÜLíêÓɲÞív§¾ûFCµ¬Dg°ÛiE¿)ÎoŸÝØZ¶k׆ €¡b¨jzªi¥¶™x<^UUåõzÕåcu-þFj·<öõõÕÖÖŠ•””Äb±T ¦Þœ‘9{uòäÉÒÒRID£Qu¹ÜÄ|w3•ÝŸ3³1‘Êî,ÀPíÚµlCÀP1T 5½ Õï÷O ês¨@ §§gzzZÒòW—Må$¢¦··WêOÅ¢ìšKΦo5í…>‡*Ý3–ª¨¨Ø»wo(š™™I.˜b»óÚýÔ Õnwnþªå¢]s©Ã“R¶ð¤`¨*†šU†ªo¬L$ápX߇ZPP ï%ݺu«.«ïøTù-ëŒF£J°ÄP¥µ277wttÔ®3vÍ™Jµ´´TWW+íïï7Þ‰DT¯jkk•wuuÉ¢ˆ”åXÙµk'y©ì~ê†j·;ê>Té’ñ>TÓPqÛ9›[@àa¨Y³"†Š¡b¨im¨&nÌ>ËߨØè™¥¡¡Aß©‹ÅŠ‹‹].W0loo×–#êëëÝn·¸ÝÃìÒzii©ä)++ÓWù÷ïß/MØÙ›]s¦RJ³B¡ä,//×êË^lß¾]÷Jß« tww« ý–صk'y©ì~ê†j·;7fŸ`ïÔÏò[…ÆylSiC CÅP1Ô•1ÔUÂÀÀ€H˜^ŒD"]]]iÕÛ¹4MC CÅP1T unššš‰ÄÕ«W÷*ttt¿p*MZß›‹¡`¨€¡b¨*†šÍ´··û|>¯×FÕw-y<ž`0xæÌ™´ê§ôðàÁƒ:È<)éœ*†Š¡b¨°JÁP0T C]BCM“¾O“nÜLÓ0T CÅP1T CÀP1TÀP—ËPÝÆÒßÿ0T CÅP1T CM;xR Ò?80T CÍãWßG"¯×ëv»Õï*Y¶h™-YÛÛÛƒÁ ä—:}ú´ô¿¸¸X+**^ýu•mrrrÇŽ¹³HB}€Cv6ë+¾m 2tVÀP1T 5} Uý–æØØ˜ñ·4ç4ÔÂÂBý» –™eœÃá°Ô<==]__o̶mÛ6íp&WV¿¶*HBW(ÒÇgff¤*É£™ÓÔ¢]6Ó.lÙ²ettT²Éq$¢i\\¿~½ÊÖÔÔ$×=Ñߥj×C‡ÅP™yC C u~†ZTT488˜¬qΆ ÛÚÚFFF,·*ÀÐÐJOLLäççëlbr–”Îè"Ò+K½•ÌËË›ÓÿŒÙLÔr,yL‹ú—Qý~¿IˆÈ:÷Ðag1TfÀPCÅPCŸ¡.ìGäûúújkkÅÃJJJb±˜]f#úš¾ƒ´Ù5Ç«ªª¼^¯sUvÙRiÂyǵ¹:äŸïÎb¨*`¨*`¨ 
9‡*¦533£Ò‰D"Ù·z{{õÏu&ŸCµ»Ô®“g({zz¦§§%-í<Ò.Û UvÊØ“TΡÎwg³ž”‚ôN CÅP3ÌPÕ}¨bW–÷¡®]»V‚M$U2ÔÕÕéõÑhTy­jAAZ™››;::ªknii©®®–t¿ÝÍ£FÔ]ž‰YÂá°Î)MèÛ^·nݪכZ´Ë¶CÕ÷›ªž˜îCMîáv6ûÀP0T C]C½1ûT¨žå³ü}}}.—+ 8p@¯—ªJKKÝnwYY™¾Ê¿ÿ~Çct2ñ¶P($ÅËË˥ȜÒ&–\__/ÕŠkŸ”—&Š‹‹U7ÚÛÛõzS‹vÙ`¨“““žY¤cÎ=\ÀÎb¨*† ê:Œ0T CÅP1TÀP‰7ÀP1TÀP1TX^xR Ò?80T CÅPauÁ·MA†ÎŠ*†Š¡b¨€¡`¨€¡b¨€¡&m]Ùᇡ`¨€¡b¨€¡š·® 1ð9¡0󆊡†Š¡Â2Á“RþÁ €¡b¨*†Š¡®R0T CÅP1TÀP0T CÅP1TÀP0T CÅPCÀP1T CÅPað¤¤p`¨*†š©†zýʵÁï|Oþb¨°°7C…Ìš0T CMkC}öå'|wI†‘XœÏ ÀPCÀP1T uÅ õú•kg¿øWÜ©~óé§ßîás0TÀP0T C]C}öå¿ùø=êú¸¬|⽿w<·êbÏ)>'C CÅP1Ôe7ÔÞøìËÿG]•ê¼éS·ÖtåoZj=ås"»áI)HÿàÀP1T 5}#ge_„_Öƒ¡`¨*†:ïÈé<ü`ÏïézÏï>öö ÊßrÇñ¼Ëp0T C ÕÊPe —^|õT¸©Ó½¾Óó‰Î·I CÅP1Ô6Tµæú•k?n=vâ}Ÿ^†gùCÀP1TÀPç6TÍ¥_Užºt߇ Y OJAú'†Š¡b¨™g¨ŠëW®oëœxãq < 2kVÀP1T 5 C CÅPWÌPåï—³‘Ïæ¼ßykmmí¶mÛ¿ °§Ž/ 0T b¨°t`¨€¡`¨0?dÜ6þŠøÃeeeøÀnÏ>›óþ=öÈVÉS\\|ë­·Þ°xTëêêÄP~øá'Ÿ|2_¸p™V ž” CÍ<Ξ=ûüóÏwwwËGéƒYÁœ‘ó…/|áÏþìϾò•¯<°”È1%G–_r”]¼x‘Ù0T Råüùó¯¼òŠ|ˆ>ùä“YÁœ‘ó'ò'ú§úÕ¯~µ`)‘cJŽ,9¾ä(»rå ³ `¨*¤ÊÅ‹åãóìÙ³ñxüù¬ •s¨;wîüÆ7¾ñ<ÀR"Ç”Yr|ÉQ6>>Îl*† ©"œW®\‘OÐ .œÏ 挜?ÿó?—àyðÁÏ,%rLÉ‘%Ç—eׯ_g¶ C…Õ ‘+£Á €¡â@ä@&Á *žÌÅD '`¨x9@àœ*žD '`¨x9Öð0 œjÆ{FNNΊԳXí9€¡b¨*‘€¡f£gäü ¯×»iÓ¦×_}¥|.­ u||üž{î ƒn·;///‰œ>}úfú¹²Œ¡`¨f¨*1==ÝÒÒR^^Ž¡ ›7oÞ¹sçÈȈ¤§¦¦ººº6n܈¡†º¬†ªp»Ý:ÝÜÜ\PPàñxêêê‰Ä à W]ÊTܸ¾­­Íï÷»\.µxèÐ!uJ²¢¢¢¯¯Ï¡'“““;vìÈE²¨ÖG"¯×+•ˆA^¾|Y­ƒŒF£ÒOŸÏ×ÚÚjìRò.8ç7ŽÃÌÌŒe'“GÀ¸§–L.5¯¾‰wvvê‚âÍEEEº† i £Á €¡.Â9Ô¯}ík¢jqÏž=ápxllLÖ×××766:+i²¡nÛ¶M[”,ЇIm¢}ûöí[·nCOššš¤éñYjjjdQ­_³fM<—¤K»ví“Së%-Ùt~]Ý.Øå7²iÓ¦íÛ·«æœÞ´§v4•rî›Ô&}ÓU¥z{{ËÊÊtÙ††‘×€› <‚0T ÕÉP5Á`phhH­:=11‘ŸŸ?_Cý2®×§BÅÞŒgj“ËúýþÁÁA•–„ÏçKÎ,•äåå©tQQ‘îªä×õØí‚]~#W¯^½÷Þ{Å ½^¯Ô³sçNYc·ËÆ=µë¤©”]ßd߇‡‡UZºTeeeGG‡$ämšššÂP 8CÍfCU‰ÑÑÑ»îºëĉÉæ*¨KØó2ÔT²¥RVël<¯ªªeL±K Û…dÄb6mÚ”JñÔ;9¯¾uŠ Å„‡Q€àÀP±uoÓ—ŽlüÜ£yw<êªTçMŸºµ¦+zŠ¡:o•l:;;_{í5Æ –-ðNÀPW‹gðâµ°×C=ôÜsÏ;wŽ) 80ÔE6Ô½M_úVø¿u¾ûS½}ƒ2ã·ÜqQ[É ¥mfû÷ï÷xöŽOÆ·üwN©`¨*Ü,<‹"©¿<™úŸÃ'Þ÷i»Sª0`¨7Ô/^¼x-ó«ómëUbè±ççÌÌǤÿç)†ºÈ†Ê9T`^†¥f¸ûÅG]ÿ¥›¾u}×»>u¢èÓ¯ßwDCsšb™C`^†%ÑÓÇ î|ÌûÉÞßÿâès/ÿûô¿¥>M1†ÀL€¡0/âñÓÃ'Ô©SãIÓùNS÷ÝwßáÇO:Åx3!`¨*À/áùXêûPó|ÂtÒ”i ˜ 0T¦~X&Þ¸ôãÖc?¿0Â4€¡2õÓ†ÊÔÀ4€¡2õÓ†ÊÔ«ž¦)fBÀP™ú!“¢€i T •©˜—C`& •©˜—˜¦€@ÀP™úy0¦)`&ÀP¬áù`š`& 
•©0T¦©ù±wïÞœœœššµ‡eQV22*S?ÓÔ"}Í’bæééiŸÏçr¹úûûÕIHñÂÂBÙDŒ`¨LýLSËm¨?ü°d¾ûî»+«««eå±cLj1 •©`9¦)%pG-))ñx<@`ÿþý¦­ííí~¿ßår©•’á¶Ûn“ÌEEE---svÌ2ÿç?ÿy©ùàÁƒjñÀ²(+çlBºZQQáv»¥«GޱÔP½˜óë8wIزe‹dOýµ¡ë蕲‰ÀP1TÈBx>ÒÖPkkk‰ÄÄÄ„R4-©jkCCƒ¾ÆÝÚÚ*kêëëgffÄí$½oß>‡víòK…ëÖ­G|e1NYT­84ÑÖÖ&‹ápx|||rrr×®]ΆzÃêªCýÁ`P‡††Œù‡‡‡e¥l"Ƙ CÅPaÕ À êÈȈZ•ÅÛn»Í¸UÌUç…B:¿¥¤‹‹‹ÚuÈ/"èóù€ÈŸ$tŠHBEÍŸ799"»)ªCý"Ê–·˜êfBÀP1T`^XrC5­QsØjB_ý·þ$pÌï½÷ª•’H¥ˆÝM¥ó2T‡ú1TfBÀP1T`^HCu>‡j̯N@ާØ+‡üçÎóz½kf‘Ä믿>guÕt^õS[f"‘HåªeýWù1ÆL*† ÌËËg¨[¶l±›œœ¬««“EÉf§w²IÝÄ955533ó /TWW;´k—_Ú1ÍËËOíïï—„,ÊJç&Ô}¨555W¯^5Þ‡ªÌUvMò«g°t·sss%-æÊ.ð¤3!`¨*¬:x>ÒÖP=ZZZêr¹ŠŠŠ´žÞ°¹ª~äÈ‘òòrÉìñxÄíÄðœ;f™_QjùSyÔ×<ÉÊ9›x衇***d“ßï×ÏòŸ>}ZWVŠªÊ¾»ÝÞÞ.úkÚ»ú;f÷mSÒ.1ÆL*† °|†Êè)Ô7öË€èoì?wîߨ€¡b¨êJrÿý÷{<ý«§’ÅT¾ö0T `q¦)Ï,7ᧀ ÞÀP1TÀP™¦0T¦~˜<™5M-ÊÕÿ›¬¤··7ÌY 7*0`¨*,I`¨É¥*++_zé¥åé*0†ÊÔÌËé2M;w®¦¦&77×ív‹vww¯¸¡jìnfMþ™¨Eþ»9±fBÀP1T`^Xø4500 :455%‹gÏž­««KCµ+Ž¡2`¨*0/CÖNSÑh´½½ÝηÄ\Å_ÝnwEEE__ŸÞÔÜÜ\PPàñxDg‰„^¿k×®üüü¼¼¼¶¶6“´‰ûT㢸W(’üåååÆ'ЛššÄÒ,Ÿå™“Ì_ûÚ×$-EJKKÝnwYYÙ®òïß¿ßãñØeK¥«ËÎ8×/ÙŠ‹‹¥N‘ûööv½ÕTÊ®õÉÉÉíÛ·K‹bºbíÆ[eðÕ…~fB C…´ˆ"¦©UÈÀÀ€(¬^”ººº˜ 0T¦~`^¦)Xnššš‰ÄÕ«WÃá°¤ùut¿pŠ™Ceêæe`š‚壽½Ýçóy½Þh4ª¾'ËãñƒAãwr1`¨LýÀ¼ LSÌ„€¡2õÃêƒç€i ~ù9×£`Ì„*S?0Me†Ï-`+† *† LSi*²Ùj¨8.†Š¡Ó†Š¡`¨LýK9M‰ßìÛ·Ïçóy<ý\¹ZßÖÖæ÷ûõ¯É777H¶ºººD"agK©”ššš’¶d¥´ÛÚÚ:çïëD,+++“‚¡PèÈ‘#Æ­9t –­[VbdxxXý„ÛíV¿’µà1q¨ÊÁ/íz˜Üœå^†Š¡Â<àùHOC­©©ŸEÆ_+ݶm›¶®={ö„Ãá±±±éééúúúÆÆF;CM¥”´bl4uC-,,Ô?7ªk³+n׺e%FÖ¬YÇgff¤ tUÿZéÆÄ¡*Cµì¡]sç¦Ì„€¡b¨IQ°R†:44¤Òƒƒƒ~¿_¯ÒÙ€Î611‘ŸŸog¨©”***26šº¡ƒÁ¶¶¶‘‘çlέ[Vb‡Èe^^ÞÍŒ‰]U~iÙC»æ2ÎP™ CÅPy°¹ Õ¸¨#>YèËܦKÌ©—JE1“×÷õõÕÖÖŠœ•””Äb±9‹[¶nY‰‘x<^UUåõzçì¶eýóªÊÒ/ív3•Ád&ÀP1T`^†l0T»s¨Æl@@ßCé4ï§VÊîªX×ÌÌŒJ' ëííMîê|ûl¬ÄT°§§gzzZÒòwÁõ§R•³_{h׆ €¡b¨À¼ Yh¨555‰YL÷¡³µ´´TWWKº¿¿_ßOélKv¥Ô}¨ªÑp8¬K­]»Vz(’**VWW—¬qRƒ­R·‚‚ÓÖÜÜÜÑÑÑ9[·¬Äˆ¬Ô·nݺÕÎ&S“9«²ôKËÚ5gÚëôwVfBÀP1TH/x>ÒÓP[[[ÕâõõõSSSv–#† …\.WyùÿÏÞÛ·q݇¾LQAX–aÊ¢(‹ÁE˜«ÇË*>T¯ £:ŒÂ|8ªÃ¢ /çêñ©*Gåä^ËÉËU8l8™Uj_v^6áåèú™bÛMdÚ´Óì¸þ(¥â&­üú*.Í*aiš¡$ŽTe)–Eõþá‰O×Àîü@þ~ƒáœ=Ø={öìÿýq±‹­ÉÄP­–’µÈºœN§¬×x/, …B2³ßï?}útªÆI •••²`0Lý–¿¿¿_¶ÂØÓµ›6bD*ËËËU7ml2혤mÊÔ&­zhººÔ­ÎqC%†Š¡†šÞP³´à—’CÅPCÅPCÅPr4M¹\®¬÷<ú*† @šÀPIýp¦È„€¡’ú!Ÿ¢`«ÓTv_ì †Š¡y°JS? €¡b¨@^ C2!†Š¡yHS? 
€¡’ú!ïàþÀPÈ„€¡b¨€¡b¨*† ¤) 0TR?† €¡b¨°ÓÒÔ­ë7§ü¶üÅPCÅPaÁý›iêõ¯Œœ/ý„Ì0‰f¥o@&ÀP1TÈÑ(Øæ4õ㉩ ¿ýù×ê™O?øêÁdBÀP1T /d'M½þ•‘'Þû;çÞñ©|*ØôèÏøÍ± ? CÅP¼ °Ýiêž?øü×Þÿé‡ÔISy½ð‰öÑwt«õ”à‚0T ÈË@€™¿›Ý;È„€¡b¨‹pd7MêøÂýõ­”zì=-®UâøÈ»j¶á*™0T 0T»4uõù¿wôyçßú•CH*`¨*@®¤©[×oNôã‰÷}zîåÀP1TÀPצ®>ÿâã¾Oméï¡`¨*`¨kNS·®ß|}`dñ«Œ$`¨*ì"¸?HSdBÀPIýOQ@šC%õy0T2!`¨¤~ /¦€@ÀPIý@^Œ4dB ÀîÒ™0TR?`¨¤) •Ô¤) •Ô°‰iª`•]2h§N’­¯¯W“ápX&¥’pÀP1T u“#mÞwß}jòôéÓ2)•6‹(Ξ= …ÔÚ¥o¦ª' ÞŽ}—„ÇËlâ©o¨sç¤RÞ"¢È„€¡b¨@^ÈNš!kllŒ¯"£Þ‰’J¹««KÊ'Nœ²½¤öööÊ<---‰DB\PÊ}}}·W¿Oß¿¿8â_­"Æ)“ê›t«EtßÂáðÂÂÂÒÒRgg§½¡Þ6;‡jÓ¾˜·LNOO矙™‘Jy‹ˆ"†Š¡y ;iª¼¼\„lvvVMJÁ(y>ŸOÊ7nܲh¢”¥Æf-@@·&*ei_½%"èñxdq‘?)è5Ú,¢ú&ʘ|t)(ÙÍÐPmÚQ6½$ ©} †Š¡y`[ÓTªÒÙŸ”Åêìò~ ‡C¿«Î RÈd«‹J×d¨6íc¨dBÀP1Tî€\LSöçPËÊʤ¼°°p{-çPÕüILLL¸ÝHáòåËiQ}Kú^¤¶Ìx<žÉ9TÓöm¾å·ßL †Š¡lašT7-...--%]‡zòäI}ʳ««+íu¨Ò¾ºèsyy9‘H<÷ÜsuuuR/-‹˜‰§NNNJA&¥Òf‘Ûo]‡Z__ãÆ ãu¨Ê\eCd~u–îpaa¡”çææÒvé6wJ`¨*@Φ)©,++s8•••<ð@ÒiHÑSu&5Ã{ù‡‡‡«ªª¤5—Ë%.(F(•"ˆÒ‚ÈŸšGýÌ“TÚ,¢þ„B!yËëõê{ù/^¼(‚+•¢ªgÏž5vX„[ô7i¬Ú衇¬~mJÖKD`¨*ij»Q¿Ø/>ª±bb‚_ìÀPIý¤©lrï½÷º\.ýÔS)ÈdÒo²†Jê‡ ÷ÀŽIS.3Ø@& C…E¤) P0TR?—CÝ›‚‚¬,»AÆÇÇÕr¶‡dBÀPIý€@ì–4µéÊ•CÝøVTWW¿ð ¹ìÐdBÀPIý€@d-MMLLÔ××:NѦóçÏçˆzf¸¢<=‡juåî&n™CÅP!=Ü9˜¦®\¹â÷û‡††–——eòå—_njjÂP·+v¡’ CÅPCM“¦š››MYZZjmm-\E êPÊ™ÄhÅkNg(ŠÅb·ßzܨñ§ògffÜn·ÌvèСk×®%)—i;B$ ƒ.—+è_é7[ÒŠ¬Ú¹½ú¬’’iJ´;Ûè ÕÆŠ¸ËI §··×¸RÝHgggqqqQQÑÀÀ@†Ã˜º[±u€¡b¨—iJÔÊô™õBGGG8^X¥¾¾^&µ3‰ÍÏÏ'‰¾¾¾ýû÷§J›°wïÞh4*󬬬ˆÃ‰ç¥ªi;¥¥¥ª“"µííí6ZißNww·ô_ê¥---öMYm¬ô\&Åÿ¤^„;ÕPOž<)þ-k—ÕÏeÍpÍ›·u€¡b¨—iÊétZ-âõz§¦¦TY G;“>Å(Ú¤[°ù>Zf+**J5TÓvü~ÿÀÀÀìì¬åÑ%ÅáLÛñù|ÓÓÓª¼¸¸(.nÓ”ÕÆJýÌÌŒ*K!ÕPËÊÊô‚kÆ­Þ:ÀP1TÈË4es5I•¬LÔô‹o!ÖÔÔ¸ÝnõµÃá°Ÿ_OÆb±ÆÆFéXEEE$ÉÄá¬úcDw “Îg¾±ký¹¨´B¿‰[*† éáþÈÁ4ÕÜÜlzõäíÕ“ú,ÍÉ?+Wóù|cccê÷ò7­Û¥Ûøø¸ôaÝ'п¦ÕG«5žC•Öw5“aÜŠ­#`¨*l4в’¦Ô½ü÷ÝwŸRÉW_}õÈ‘#ê-u æÂÂB<‡ÃVPêɹ¹9]_RR¢/'•637T‘få|b¨ÒHê¶$­Èªžžžºº:å—“““úBXÓ™­6Vêâ«466Z]‡*Ûhuj†Ã¸[G&ÀP1T /C¾¦©‰‰ uÓ½ÃáØ·oŸþ=Ô¥¥¥ööv×*mmmêç¨lœ©¿¿_æÔ“‘H¤¼¼\ÚÌÜPGFF*++Ng04ý–?iE6'Eã€ô¡ªªJšµ1T«•úcÇŽIgÄ•{{{M¿£é­´¹—?“aÜŠ­#`¨*—4µÃ¹råŠ!ã@& •ÔäeÒT–éèèˆÇã7nÜ0~GdBÀPIý°Càþ Må#ƒƒƒÇív777ë~2!`¨¤~Ò†JêÒ†Jê M`¨¤~ 
M™¥ï‚õ$ðõ-€¡b¨M¸?0T2!`¨*äSd%ME"‘`0èr¹Àðððí·?ð]«çÀÀ€×ëUŸ™™Q¿ðït:Õã”L—ºººJJJ¤ñ¦¦¦x<®*———›››¥ÒãñôööªùŸozzZ•‹‹‹UÙëõª§Éß^=«—ª®®>wîÜíÕG7I¯ôÓAL€¡b¨@^†Ý•¦b±Xcc£ècEEE$±2Tãd4­©©q»Ýê;}õÕ¿éRFlfS…§Ÿ~º²²R ÍÍͦ¸2!†Š¡ÂæÃýËij||Üëõfb¨>ŸollleeEÊò×x¹jÒlêÕ$ŒçP§§§K…B¡S§ND"Á%`¨*ìÒ4ÕÜÜ<55¥ µ¤¤DUÎÍÍYªÌ¦/]=räˆ~7i©žžžºº:%£“““úrÕÎÎΆ††ø*ÆÆGGGeòìÙ³ìMÀP1Tؽijdd¤²²ÒétƒAý-¿Ëå²:9*³•——;¿ß?88¨ßMZJIj 9«ªªô­úKKKÇŽ“5ŠéöööJAÏþüyõE?†Š¡i*;\¹rEVO644ŒŽŽ²+CÅP€4µÝtttÄãñ7n„Ãa)ÿ´‡çÎp CÅPa;àþ M)=Ûínnn^ZZ’—Ëå÷û/]ºÄ~$`¨*äP¦€@ÀPIý@^ €L*©ÈËÙKSÙ}±ƒ€L*† äe Àr(Mü@0†Š¡BŽÂý€¡ CÅPCÅP0T HS*`¨¤~ CÅP€4…¡†Jê‡ü‡ûC †Š¡B>E@vÓÔ­ë7§ü¶ü%øL*† äe€,§©ë^ù¦÷“2Ãl$Jð™0T ÈËYKS·®ß|õîû¿ùËŸTÏ|úÁWÇ~ †Š¡y ;iêú…W^øxûÃÎ;ªG‹kyWÍ›c~ †Š¡Ân„û »iêTdž>Ù2RR+nªÎ›~ó—?ùè/|d«õ)2!`¨*`¨æïf÷Å C 5ùÝŸžC-þÈÃ?ûÁm>‡ €¡b¨€¡Ú¥©«Ï¿øüGÛ~â©ïøÀè»k‘TÀP1T€œHSê^þó¥ŸØ†{ù0T rî€ÜLSWŸQýæÔÖý*™0T ò2⛦n]¿ùúÀÈâWI †Š¡y€4*†Jêò2`¨dBÀPIýäe M C%õCÎÁý@š †Jê •4€¡’ú€4€¡’úHS*©HS*©òîÒ™0TR?äS¦r™S§NÔ×׫Ép8,“RÉÈ C%õy ×ÓTÁ*ypˆZK?WVV<ÃᘜœT5RÅKKKå-bL*† äe u»ûùõ¯]f¾ë®»Œ•uuuRùÐC{dBÀP1T /lfšR¢vöìÙŠŠ —Ëåóùúûû3Ü{ï½~¿ßétÊ[RÖõãããp»Ý²TmmísÏ=§[ÓØwXV´gÏY¼¬¬¬§§GU~æ3Ÿ‘ï»ï>5yúôi™”J›E² ¡PHõsxxØTCõ¤U?­Ú?|ø°Ì&žú¶!=wN*å-bL*† y ÷@Îjccc<_\\T*¦%U”T&»ºº¤|âÄ )kI‡“É^xAÊßýîwL¥ÐŠÞÞ^™­¥¥%‘Hˆ J¹¯¯ïöê÷éû÷ïGü«UÄ8eR}“nµˆ000 “ápxaaaii©³³ÓÞPMûiÓ¾8ºLNOO矙™‘Jy‹Ø#†Š¡l¾¡ÎÎΪɹ¹9™Ü³gšôù|2yãÆ )‹üIYjÔ[………¢/¿ü²ø\jƒi{ôzE@¥\^^®Þôx<²"‘?)è¾Ù,"™eL>ˆìfh¨6íË–šnTRû€¡b¨›f¨I5"d¦ïß9“‡ÃqàÀõ-æ†Z‚´£ßUçk)d²ˆÕJ×d¨6íc¨*† °Ý†juU}•¿°°p;åªH$¢Ñ¨ú†½´´tM†ªNXª–“˜˜˜p»Ý{W‘ÂåË—Ó.¢Î¡&} ¯:£-3grÕ´}›où*† °i†zøða¸¥¥¥¦¦&™”ÙÔ»'OžÔ'2»ººŒ×¡Êœ¯¾úªJã嘅……2)¦kß[Y…ºèsyyYL÷¹çž«««“z郈iQQ‘4;99)™”J›En¿uj}}ý7Œ×¡*s•M–ùÕ=XÚJSûiÓ>wJ`¨*ìX¸?rÖPÏž=[YYép8ÊÊÊ´ž*DOÕ™Ô¤{ùŸ~úéÚÚZ×*555ßýîwUýàà he&gR‡‡‡«ªªd¥Ò‚¸ ºN@QùSó¨Ÿy’J›E<ð@(’·¼^¯¾—ÿâÅ‹"¸R)ª*Ûhì•i?­Ú衇¬~mJÖKì‘ CÅPaÇF@ •ѳGýb¿ ”þÅ~uæ˜_ì'†Š¡yCÍ÷Þ{¯ËåÒO=•‚L&ý&+ CÅP¼ ° iJ}M¿u½r™ÁÎ"P0T ÈË@€‘¦€L€¡˜Áý°›ÓÔV\K0>>®ž)TÎéC㮿¦‚L*† êN6ÔêêjõØÕ¤òvZàZ×eüAV‚CÅPCM“¦>÷¹Ïùý~§ÓYTTÔÐÐpñâÅuûYê›6}“—´æËå­¼CÅP`w¥©C‡?~\=_jyyyttôàÁƒ¹l¨IOdÅPCÅP`§¥)§Ó™H$¬Þíêê*))q¹\MMMñxüöÛdŸ*RIoIahhH  …B±X̦嵮=iuÆþtvv ¨š™™™††·Û-=)¿víšÕÌ"è###ú]q÷²²²¤¦Ý@qýææfé¹Çãéíí5ý–?ÃN¦ŽêÒÒRkkká*RPßRsJS^¯W=ô5‰ƒAéC 
Ð3ÀPIý?û ÇÓTmmí±cÇ¢Ñhª§vww‡Ãáùùù•••–––öööTÍJ•Ô¤I‘KiAïëëÛ¿¿}Ëk]»iùäÉ“¢w² h¢~êÞ½{Õ6JkR)úh5óøø¸ˆn¶­­MÓj3­6Pšª¯¯_XE ©†º¦N&jGG‡ŒŒn\&õlGÕ2]ZZªö»˜®é“ CÅPÈÑ4uãÆ'Nˆ“¹ÝnŸÏwüøq©QoÉäôô´*/..¯ÃPõ>/§Óiß²‘LÖnZ.++›šš²éIQQ‘ÍÌÕÕÕê¬W®\ñûý"‘6†jºÒ¬î¼´Ÿj¨kêdÒ¨z½^½¬<žM”WÏ&=P×o 0Tò2äkšÝikk«­­ÕÆcD}w|{סEʹ夙ӮݴlÚ½h4ZSS#žÉ¶<ýôÓ•••RhnnÖ_Á[mÈúê7ÒɤI­ÅIõ±X¬±±Q̾¢¢"‰ C%õ—!Ó”ñL§ñzM+CZ‡¡Zµl$“µg~UZY‘²üM{.3 :u*˜^¤›ÖD×wÕª“©çPÏ¡šŽäøø¸,B& •Ô@^†¼ISµµµçÏŸW_UÏÏÏwttÔÔÔ¨·zzzêêêfff¤<99©/‹,,,œ››3m-é-+³jÙˆÕ<^‡*vk¼Ä³¤¤D_”yäÈû™…ÑÑQ™çìÙ³ö"nµê:Ôø*ápØê:Ô ;™4ªú"WÕ¸ñ:TcgdÄ”‹¡JËdBÀPIýÿ÷@ާ©‹/666ºÝn‡Ãáõz[[[g.E€¼UUU¥ïpïïïw¹\¦gì’Þ²9¥jÚrª¤¦Î“ÖPo¯ÞK$Vg¼M>‰”——KS~¿ppÐ~fA¬]}Ñ¿>Cïliiq:¢†V÷ògÞɤQ•'ÚÛÛ]«´µµéËd“:##&› }ƒYÿ–ŸL*† *ij£444ŒŽŽ2*©€4•£zîœñ§C%õ¦²‰Ëåòûý—.]b(0TR?i C%õöÀýÝ4•Ý;È„€¡b¨Q°ƒÓÁ`¨*—ÃPL€¡b¨@^ÒÁ`¨¤~ /`¨@&ÀP1TØÜ*™0T 0T CÅP€4…¡†JêØxšºuýæÔƒß–¿*`¨*@–ÓÔõ ¯<þ«wÉ ³‘hVú€¡b¨5¸?r*Mݺ~s¢ÿO”׫g>ýà«cÙê 0T r4ж-M]¿ðJôð‰GÜý…<ü³|Ä}Ç›c~ †Š¡y`»ÓÔ©Ž/Ü_ßúȯ|òÑ_88òs¤Fþ>Røá­ÖS‚ÀP1T /fþnv_ì †Š¡y°äw¿xüÏþÆá‡]whk|æ7~o´ø£Ûp€L*† 9 ÷@ޤ©©‡žŠ¼ÿÿ>÷ŽŸHêïû4’ dBÀP1T€œHSK?šÿîÑîGßU³ ÷ò`¨*`¨kHSS=uþ—>±¥¿‡ €¡b¨€¡®9Mݺ~óõ‘Å7®2’€¡b¨¤) •ÔY‚û€4@& •ÔùE¤) P0TR?—C †Jêò2i T •Ôäe ÀHS@&ÀPÌáþ M C%õ†JšÀPIý@šÊÖQ§ `ûÛ\ëJ×4ÿøø¸ÏçS‹˹@®õCÅP€4…¡n‡¡VWW¿ð ©å\ðBc0TR?À&§©‰‰‰úúúÂÂB§Ó)Úqþüù|ñˤEvž¡º\.Ór.kýÀP1TX'Ü9˜¦®\¹â÷û‡††–——eòå—_njjÂPsÄP3çÚ÷éëî™0T ò)в’¦š››MYZZjmm-\E 2©ÕD¯u:{÷î½xñâððpyy¹L†B¡Ë—/ëÙúúú<Ëå’µOú­®®®’’Y\¤9§RÒ"RÕV“žÄb± ›DÐ¥“2ƒt¸··×ØfÚÑ0ÝÌ™™™††·Û-9tèеk×ìÅ.µ‡$5ÒÙÙY\\\TT400`¿êÔ™<822¢ß-++KÓ 7Ý_dBÀP1TÀP6'M‰¯,,,˜.ÒÑчV©¯¯—IíX‡ž››K$Ò È™qòÀz6YJ/.ndc¨©õÝÝݲöùùù•••–––ööv“ÃLJS¢w²ˆôD¬qÿþý™7%Ý“NŠœIoEïR{e3¦›)îF¥'²R©yµ1T«ZCÕå“'OŠƒÊ‚bØiW:óøøx0ÔͶµµ‰gdBÀP1TÀP¶$M9N«E¼^ïÔÔ”*KALT«‰>Í&”4©”úééi½¸´¶&Cõù|zñÅÅE1éL UŸÂ4ö$“¦¤{333ª,…Ô^ÙŒ†éf‘ÎÙˆUÓjYY™î•)ÆU›Î\]]}îܹÛo]ï¡.öÈ0 È„€¡b¨€¡lIš²9‡š¤ Fõ´×Äõ-žZoÄápdb¨V-¯»©µnŽ®F£555n·;i¥¦bgÕô†jÚÚšVýôÓOWVVÞ^½ÞC_'°Ž0 †Š¡B¾ÂýƒiÊÊKn¯ž<3ž´:yfãv¦'E˜‰„*Çãq›s¨Æ (3qh«ÉLš2žC•n›žCµ ÓÍ”•Ž­¬¬HYþÚ;¥U×wÕjÕV'\C¡Ð©S§€Þ/ë2!`¨*À¦¥)õÝî}÷ݧ„æÕW_=räˆzK]š¹°° ‡­.@´1Tue§`¼@sß¾}Ò 
‘!q²¦¦&=aaáÜÜœn§§§§®®NYãä䤾˜ÒHÒ"V=ɤ)é^CCƒêmcccªPÚŒ†éf–””¨¡–Í”!µ7T«fxª¬ÂxªÕªMgFGGež³gÏšFN†a€¡b¨›™¦&&&ԭ߇CôQÿêÒÒR{{»k•¶¶6}…bæ†ÚÛÛ«îOoiiÑ‹Çb±P($ë3>}ú´ž¿¿¿_æ4¶&ÞdΪª*ã-皤El:–¶)ÙØcÇŽ9Né°t;õ»l›Ñ0ÝÌH$R^^®6sppÐÞP­z˜ÖPo¯ÞÉ$¦n¼—ßjÕ¦3 ²ÇÕý¦d*† i* æÊ•+"‹»'räÿ“ÑÑQ>A€¡’ú0Ôœ£££#߸qÃø]öΛsçŒ?8€¡’úa×Áý°«ÒTÞ=sppÐãñ¸ÝnãïïldùýþK—.‘ C%õ@šC%õy0ÔÌÒÔ¶}5Ÿ•k¶s¥[½®ü½Ì—L*† äe Àvˆ¡nJÇ0T2!† @^†}ufZÆÇÇõéI)Ï,¦žC…J$ªÇ39‡jß ½ä™žC•­Y‘²üM]Ö¸EVC‘á9Ôt~Ó÷™0T €ßX\LS………sssi½§§§§®®nffFÊ“““úJM#R©¾ÂŸ+))Q•úêL±OãóîuËûöí“^‰¤ŠW555éz«Ž¥mÐ^òdÙø*ÆëP¥·úzÓ#GŽèeM·Èj(TÇTãÒ1Ól¤ó›¸§È„€¡b¨äeÈé4Õßßïr¹Òž¹TêGUUÕÈÈHê*¤²²²ÒétƒAýøÒÒR{{»k•¶¶¶åå外c±X(’fý~ÿéÓ§u½UÇÒ6ho¨½½½ê>÷––½¬ô¶¼¼\õappP/kºEVC!­I›2³´os/ÿº;¿‰{ŠL*† @^Ò™0TR?—#M™C%õC&p¦È„€¡’úC%M`¨¤~ MYÖø(ù5Í?>>îóùÔ"ÆrN& ˆ[ C u;Ò”•xm©¡VWW¿ð ©åí´Àuo ª €¡b¨€¡f騰•†êr¹LË9}˜Ü˜˜âµ*† ›÷†ºuógÎuÛ†J& C…|Š"€¬¤)±œ¾¾>Çãr¹š››—––týÀÀ€×ëUv—úÖÖÖÂU¤ “ñx¼´´taaA7%5ÒŽÔhsZ^^–6¥e©Oúû®®.õËùMMM²`Úù¤.[`A’Æuvvɦ©š™™™††·Ûít::d|^hÒÌ4þúýììlYY™îy’2JahhÈï÷K³¡P(‹Ùoà::™´™¦ûÈtWF"‘`0(}ÃÃÃdBÀP1T@ ÈË‹†ªžÃ)*õGÕÖÑчõlêqŸýìgE³tSR>~üøm³'”ª¥t}ww·´6??¿²²ÒÒÒÒÞÞn?¿«e­Î¡êòÉ“'EïdAÑD½™{÷îF£‰DBZ“JýŒÐÔ™ÇÇÇEìt³mmmÆmO5T±gY\Z–öïßo¿ëëdÒø˜î£Ô])ÿWèG¼êÑ#†Š¡[†:==­ÊSSS^¯W׋'éÙ¤^=¡^Íæñxn¯>öÝï÷ëyÊËËÕãàµ9•••×õ>ŸO×/..ÛÏoÄjÙ´†*ëM0E°¨¨ÈfæêêêsçÎIáÊ•+²áú™¥¦†ªOaJ³N§Ó~××ɤñ1ÝG©»Rz>000;;K& C /CîªqR»”ý“âõlwÝu×ùóç¥ ;–*jVgD}û|;³ÇÓg²¬iÙ´µh4ZSSãv»í{¢xúé§+++¥ÐÜܬ¿‚·ÚÀõÕo¤“îÊX,ÖØØ(f_QQ‰DÈ„€¡b¨°ÛáþÈMCµ:‡jœMê³éósãããwÜq‡äïåË—“–µ9‡j¼âS“á9TÓe×wUZY‘²üM{.3 :u*$‰uêúΡZu2Ã}du9¯ì;½»É„€¡b¨¹e¨õõõñU’®C5Φ/ ”ÙÂá°¾ÆQ¨ªªºóÎ;S…L-¥—¥t}OOO]]º$`rrR_Xi5¿«e3¼UìÖx‰gII‰¾(óÈ‘#ö3 £££2ÏÙ³gÍ{é Õj××ɹ¹¹´û(©32bJ‚ÅP¥e>8€¡b¨¹h¨½½½êÖø––}me’Ö,--µ··»Vikk3^‚)z*•¢;©B&*~Þ6ÔIDAT³I›N§SÚOº7_D38\}¼ÍüI’šºlZC½½z/‘Xñ6ùH$R^^.MùýþÁÁAû™o¯^Ì ¾è_Ÿ¡Zmàú:Ùßß/#o¼—ßt%uFFL6Aú ³ò-?†Š¡†šÞPºÌihhe0T CÍ>wÎøƒS€¡b¨°àþÈÁ4•/Í:2P~¿ÿÒ¥K ™0T v‘@¦€@ÀPIý@^ €L*©ÈËÙKSÙ}±ƒ€L*† äe Àr(Mü@0†Š¡BŽÂý€¡ CÅPCÅP0T HS*`¨¤~ CÅP€4…¡†Jê‡ü‡ûC †Š¡B>E@vÓÔ­ë7§ü¶ü%øL*† äe€,§©×¿2r¾ô2Ãl$Jð™0T ÈËYKS?ž˜ºðÛŸqÝ¡žùôƒ¯Žü@& Cò2@vÒÔë_y⽿sîʧ‚Mþü‡ß»@ð™0T v#ÜÙMS÷üÁç¿öþO?ì< NšÊë…O´¾û£[­§H CÅPC57»/v`¨*`¨ÉïžêøÂýõ­”zì=-®UâøÈ»j¶á*†Š¡†j—¦®>ÿâ÷ŽþÑ#ï¼ã[¿rI CÈ•4uëú͉þo<ñ¾Ooýü*† 9 
÷@n¦©«Ï¿ø¸ïS[ú{¨dBÀP1TÈË(Ènšºuýæë#‹o\e$L*† äeÒ¨*©ÈË€¡ C%õ—4@& •Ô9÷i €L*©0TÒ†JêÒ†Jê M`¨;<õì°`¨*†Š¡nÉŠòÝh¹?HSdBÀPs=õ¼…Ûí®­­½|ùr¶ü,©åÜ1Ôv’•ßX €Ljª*¬¬¬ôôôTUUa¨*† *†š†ªp:ºÜÕÕURRâr¹šššâñømà W½”•VJa``Àëõ:5944ä÷û¥ýP(‹ÅR»‘Ú²Õ"©K"‰ƒA™! «Ê¥¥¥ÖÖÖÂU¤ “©}NÝ›M¶iÐ~cÉË€¡b¨@& •ÔŸ‘¡®¬¬üñÿ±•šìîî‡ÃóóóRßÒÒÒÞÞn¯¤©¶wôèQ­2)6)­%‰¾¾¾ýû÷§ue«E¬:f¤´´T µk×ô ²àÂ*õõõ2™‰¡ÚÔÛ4˜vcÉË€¡2V@& •Ôog¨¿ß?==­ê}>Ÿ./..¯ÕPEÑŒõú£x›ñL­MS¦‹XülÈÀÀÀì쬱ÒëõNMM©²<Ï Õ¦Á´›E¸?0T2!`¨y`¨ª077wçw~ë[ßJ5WA}Y¿&CÍd¶Ìç1¶lÚ1#±X¬±±Qäµ¢¢"‰˜6¨Åq݆š¶A›ÀP+ÀPIýé U˜-++Sçÿ|>ßµk×Òz¤b"‘Påx<¾ †jÕ1SÆÇǽ^¯*KAŸ|5=‡ºÖmIÛ † *† €¡nÂRG޹ï¾û¤ÐÓÓSWW733#åÉÉÉææf5CaaáÜÜœžß¾}²F;±Æ¦¦¦jRËV‹XüTªïßÅPKJJTeggg}}ý‚Øg8N½Õj[¬:–¶A 0T CÝC}î¹çª««µ ‡ÃQUU522¢*ûûû].—^*‹…B!™Çï÷Ÿ>}z#†šÔ²Í"¦3"••••N§3 êoù—––ÚÛÛ]«´µµ-//'µlµ-VKÛ † *† €¡¦Iý·®ßœzðÛò—°ØÍp`¨dBÀPs"õ¿þ•‘ó¥Ÿf#Qb`C•A 5k©ÿÇS~ûó#®;ä-yýà«c—C †šÔÿúWFžxïïœ{Ǥò©`Ó£?ÿá7Ç. @^†¬§©{î¹çÌ™3.‘€LêîIýðù¯½ÿÓ;¨“¦òzáí£ïþ(z äeÈ‘4%ÿEŒŒ¼ôÒKŒ CÝ-©Ÿ/^¼rüõÀ<óÌ3Ÿ ‹p§`¨Ûj¨§:¾p}ë#e‡{ÏÇF‹kÕñà‘wÕp¶™3gÎ|ùíHÍÈȈèéK/½¤~ÒC݆ª¯C½úü‹ß;úG¼óŽoýÊ!$¶Ÿ .<óv¤FÜtbbBôtaa! uתª¹uýæDÿ7žxß§¹—CÍ CÕ\}þÅÇ}Ÿâ÷P0Ô\1TÅ­ë7_Y|ã*a±›áþ ØNÀPsÈPnó+@°œ€¡b¨@^‚ €àÀP1T /Á@p†Š¡y€`‚CÅPa}pl'`¨*`¨*†š†ZP°ùƒ³mfwE9ÞÀP1Ô|5Ô¼ÞXÀPw𡼅Ûí>|øðµk×6Q°’ÉCÝ%ÍK^ÜÁ jNª*,,,;v¬¡¡CÝ͆Êo¬Á@p†šC†*,..º\®¤ú™™ÑV·Ûít::d<É:44ä÷û¥> Åb±TåÒ¤]¤«««¤¤DÖÞÔÔÇSû‰D‚Á Ì†‡‡UåÒÒRkkká*RɤΛZ£iÇÒ6¸Yk:ž©‹k***^{íµŸîÄsçTAjöìÙc³wL‡‹¼ HÁ ê1Ô½{÷F£ÑD"±²²ÒÙÙÙÜܬg›œŸŸ—·úúúöï7Y{ª š.ÒÝ݇¥^VÑÒÒÒÞÞžÚTii©ê¹H˜ž¡££C\X¥¾¾^&31T›z›7kcmÆÓtÇ}ö³ŸUŠ9;;+&º¼¼,eq_5V­™y‚0Ô<3ÔL¾å***Ò3èSŒRït:3‘6ÓE|>ßôô´¶äâââÔ¦ü~ÿÀÀ€Xš±ÒëõNMM©²<Ï Õ¦ÁÍÚX›ñ4ÝqcccbºRèïï///W¶zøða©·iÍt¸ÈË€œ€¡æ¡*\.—éRÑh´¦¦Æív«Ù‡½äÙK›•/Ñ«0‹ÅE^+**"‘ˆiƒÚ×m¨iÜøÆf8žš••1x)TUUɾ»ãŽ;”ÓK½Mk¦Ã• ÜÛÁ'†šÑ·ü¦õ"CcccʇäoZÉ[‡´É*´§e||Üëõª²ôÉWÓs¨bl‰DB•ãñx&çPíÜ”Íd<ÜyçO>ùä¤,Å8ïºë.ûÖL‡ 0Ôb¨%%%úŠÆ#GެÉP çææÒJ[OOO]]ÝÌÌŒ”'''õÅ”F¤R}ÿ.Ê%]R•õõõ bŸáp8õ:Ô}ûöÉ&‹¤J盚št½UÇÒ6¸ñµϤÅôõõíÙ³çôéÓR–¿åååýýýö­™`¨;ÄP#‘ˆ(‘Ãáðûýƒƒƒk2T)—Ë•É""©@@ÖRUU522’Ú”TVVV:Î`0¨¿¶^ZZjoow­ÒÖÖ¦î"2¶‹ÅB¡ê¼¸®·êXÚ7¾±Vã™´¸‘Ë—/ˆ߸qCÊòW–•ûÖL‡ 0Ôü0TÀP1TÈ]¸?6‚0T ò)Š6 80T ÈË@°œ€¡b¨y´ó2øÉ§Ímœ¼ @°Á €¡b¨*y6‚0T 5g\s› 5“ߓʑmÜR¸?6‚0T CÍ-C 5' 
µ³³³¸¸¸¨¨h``@Õ,--µ¶¶®"™ÔÂ488è÷ûNçÞ½{/^¼8<<\^^.“¡PHÿŒ¼ÌÖ××çñx\.Wss³qñTý*0 ßêêê*))‘Å›ššâñ¸ª\^^–Ö¤RZîííµ2ÔÔU<77—H$¤)ñ!ã¤zp¼šM=;T-®›µ:O™TßÝÝ-k—.­¬¬´´´´··k“66ke¨©«eÔó´µµ‰Æ¥.˜4) (}MåÝ¿¿}ߌ”––ê'‘êRÛ?zô¨öEÑýh4*ë’f¥Ïú¹¯kiM¶ZLW-•ÉV†š‹†ZVV¦ànÄëõêJ)ˆ‰jaÒR%:•4ét:õlÓÓÓzqimM†êóùôâ‹‹‹ÅÅź«Æf­ ÕtÕÕÕÕçΓ•+Wü~¿~œ©¡êS¿ÆM³ê›i```vvÖ¾}MÓ=%«+**ZÓÈÈfÎĮ̀²ôRi·0Ô\4ÔL®æ4ª§½u­oñÔz#ê{ðÛ™]*jµê§Ÿ~º²²R ÍÍÍúb†L¶%µ«¦}3‹ÅE+**"‘H&íG£Ñšš·Ûv“×42i·:î€mƒ`‚C]ó9Tã™Hã9Ô 5ÎôD¦U"‘Påxðm9Êë¼çÎËÿík·®ßdLCÝ¥† ;Ì>ù—;üädê;ïqø‹ßú×tJCÅP¶PRÕëÑwÕpJ0Ô\1T^¼xñâÅ‹—z=öž©Âô#Ϧa •s¨M¸?6Ø‘|ÿÞ¯ž{ÇäPøðÏ~ð©ªÿg,ð;O¼ïÓßÿogÕ9Ô´P0T r7Š6È_=}¬äcßÙwt´¸ö/ÿãÎ=ó½]ù—Ì Œ!`¨* @°lÓuZ}So÷9¿ßït:‹ŠŠ.^¼¸Õê“Ô2†º¦òè$+† €¡®çzèСãÇÏÎÎJyyyyttôàÁƒ*†Š¡b¨Y;€:ÎD"aªAU³´´ÔÚÚZ¸ŠdRÏ900àõz‡ªéêê*))q¹\MMMñxYDú º¹ÿþ55%ѽêììLZÅÞ½{ÕÖI#ò®Ø¤ýJ×ÚiÓØSC---UÃ.æ§´OCµ©·iÐt‹ŒX’iÏ1T `k 7nÜ8qâD0t»Ý>ŸïøñãRcjB^¯wjjJ•¥àñxôlb?z6idzzZ•‹‹‹31T}ÂO$Éét®©)=ôJ:iº ÝxQQ‘ýJ×Ú™²²2cL×ë÷ûÔ¥™Œçú Õ¦AÓ-²Â8J¦=ÇP1TX'ܼ`¨ë8€ŠÖ´µµÕÖÖڛ¨tI³Ñ_ýÛª•Š­µ©Ô^E£Ñššñï¤FÖêVÉäšÚX,ÖØØ(R[QQ‰D2ìù:z˜áJí¡Õ(™öCÅPaK¢€` Õ+³ñz½Æ“…©çü>ŸO_Åh).™j†MÙŸC•FÆÆÆVVV¤,×íVÉäªf||\÷0íxŠ&êK/âñx&çP3ÙA¦=´%Óžc¨* @°lí´¶¶öüùóê[àùùùŽŽŽššõVaaáÜÜœžS_p)¶‡S¯›TôôôÔÕÕÍÌÌHyrrR_Ñh$©e+…ʤ)uj|ÓëPKJJô•”GŽY·¡ZuF ‹ê€ ‹©ÿÉÌêûwñ<éO†ã¹oß>Ù_"©Òó¦¦&]o5zî ÓZ’iÏ1T  ضözñâÅÆÆF·Ûíp8¼^okk«>SØßßïr¹Œ·Š···»Vikk[^^¶2‘¹@ VUUŒŒ¤v&©e…JÛ”ÌÜÛÛ«n±oiiIíU$)//—ü~ÿààຠժ3²FY¯Óé”>XÝË/3WVVÊ<Á`PWžvw÷0#ܼ`¨*`¨|À8€f—ËÅ®ÇPù€pÀPù€p]“eé«ÿ\¸ä`‹ú°¾f1T 5Ÿ [*s[mŠVíg²Þlõ CÅP!ûpó l€¡æÚt;Ï/nÛº2ù!§\SÉuº&†Š¡ÂVGÁ*†Š¡b¨* @°äètff¦¡¡Áív;ÎC‡éŸë#òûýR …b±Xj›VËÞ^}¾QqqqQQÑÀÀÀí·?Ô>Éx–––Z[[ W‘‚zºU†yúúú<Ëåjnn6.»Á­«¨¨xíµ×~:tçΩ‚ÔìÙ³G·oºQ™ôÙtÙH$ eCÀðð°é‚2˜^¯×áp¨š®®.õ¨‚¦¦¦x<žÉ¶§®×ªååeR©”áµz†Š¡ÒÀ–@÷îÝF‰ÄÊÊŠh¥~˜§‰øÊüü¼¼%¸¿IãVËž“Øg{{ûmÛPeÒgÓeKKKõsGÕZR`F¸y6ÀP1TÀPù€A>EÁêÎÀår±ß1T>`€4lÀCåHl@×*Fkùö||Üçó©EŒå³*0@€`ØEМõ¤5u¬ººú…^H-oçÖ­u]iTCÅP!áæ ØCÝŠhÞœº[K?³æË…­ÜÛ¶1T C]O?3ïÈ ÄP1T€<;€ÎÌÌ444¸Ýn§Ó©¥ßêìì,..Ö¿ºo|z{’è,--µ¶¶®"™Ô3 ùý~i< Åb1U‰D‚Á Ëå ÃÃé½µê•UƒËËËÍÍÍÒ Çã±ùyù®®.õóþMMMñxüvÊSéhl$i(Ö4nÔ?ƒ/ÌÎΖ••©µ§*ãZ7pLÝ6»Ošòz½‡#“½†¡b¨›sÝ»wo4M$+++¢8ú¡—êÉ¥óóóÆ'—Z=3©££#/¬R__o|r©¸ 4"í÷õõíßÿÓ–––êr¶··§öÖªWV 
êG§ª˜jww·tR–•6[ZZôz­Î¡&=Ä5i(2·ññq;Ýl[[›(f²ÉŒsM¸¾N&Íî;zô¨–é´{ CÅP6ÿ**STT¤Êeeeê1ñ¦"•4éõzõÌRðxw\¹"Û®ŸÎjj¨kÚÀõu2i|lvŸ(¯žmM{ C0›W€` 5Ãh4­©©q»Ýêk_õ}îítÏ”Oõ*c½ö*«ùc±Xcc£8bEE…é3è3ì•U¬:oÄ´MÓ²ikk·§Ÿ~º²²R ÍÍÍú+ø5mHÚút2ÃÝ—v¯a¨*l(Š6ÀP5>ŸollleeEÊò×þœœÍ9Tã>ãI8{q—eS{kÕ+«3<‡j¼ZÔÞJo§;=¹¦qB¡Ð©S§@"‘X‡¡®ïj†Ã˜áîK»×0T  Ø6çZRR¢/.ÑÐÐ0::ºÛ¶Cxܼ`¨@shל;güÁ) •†Ê4›¸\.¿ßéÒ% •†Ê0T>`¹}Íî‹„¡b¨j@1T •7¯Á*† *† yE`¨|RCÅPT† *0 Ml@4b¨€¡b¨€4ÁÀ”O `¨æpó l€¡b¨€¡b¨ù}½uýæÔƒß–¿*`¨*@– ×/¼ò¸ïS2Ãl$𕾆С`¨?áÖõ›ýßøö¯ý'õ̧|u,[} CØí†zýÂ+ßk>õhÑGžü÷Ÿyç‡F~îÀ›c²Ø7ÀP1TØpó l€¡¦¾{ªã ÷×·>òï>uÞsçãþ:uêT u«õCÅP1TR!l@4š¿›Ý;CÅP4M*‚ ˆÆäwÿä÷þàÁ÷Þõð;> ­ñ»¿{÷è»?º çPCÅP Ø€h´<€~çñ'_éüçK?¡$õÙšÿŒ¤†Š¡Ò@N@¯>éÏ´<ü3ÕÛp/?`¨*7¯Áj¦Еø?­žRýø–þ*`¨*†ºæè­ë7_Y|ã*# *† À0T>`*†Ê €(`¨|À`WÃÍ+@°†Ê0T>`OQ@°† €¡ò¤6 €¡òÒ4ÁD#PÀPù€ÒP Àn^‚ 0T €¡òà €¡®åV°ŠÃáp»ÝUUUsssÄ pÀP³i¨ªH$._¾|âÄ Ç3==MèP 5ˆªéééinnV奥¥ÖÖÖÂU¤ “z©¡¡!¿ßït:C¡P,Ó‹wuu•””¸\®¦¦¦x%3ÌF¢Ä`¨*`¨Yû€Ýº~s¢ÿßþµÿ$oÉë_# CÅPCÍÎìú…W¾×|êÑ¢<ùï?=òÎüÜ7Ç. €¡b¨€¡n÷ìTÇî¯o}äß}ê¼çÎÇýuêÔ©*z ªÍ»÷ÜsÏ™3g.\àp ê|À~úú¹ßü·rÁþ¿þ/=¯ÞýÿIAݵ@y——Õ‹q ¼ åg?ü_Ê9R6M__þò—GFF^zé%Ì 0ÔÍ7Ô?ù½?xð½w=üŽèÜw÷îÑw”s¨al°³¹çž{î¾ûî“¿öñ»ßBjDOxàgžyfbb‚! 
uó U]FóÇŸ|¥óœ/ý„’Ôgkþ3’ Hl™3gä@ùÅ÷ü䤩BjFFFDO_zé%õ{Þê–ª¾Ðûêó—þü@ËÃ?Sͽü€4Á \¸pAŽ’_vþú3o!5⦢§ `¨[n¨Š•ø?­žRý8¿‡ Hl'`¨9a¨š[×o¾>0²øÆUÂb7Ãc~€` 8CÍ!C C CÀP1TÀP1TX'ÜÁ *† ùEœ*† äe ØNÀP1T /l@p`¨*—` 80Ts¸?6‚0T 0T CÅPCÝVC-(Øò‘܆UäàªCÅP1T»UïUåþ ØNÀP³o¨¦ÖµVK;ÿ:Ü.i‘œ:‡ºéã³Y£´ÕQ@°Á €¡b¨*† HÁ *†úöJ)ôõõy<—ËÕÜܼ´´¤ê———eR*å­ÞÞ^=ÿÌÌLCCƒÛív:‡ºvíšjD£WÑÕÕURR"-455ÅãñÔ$-"…¡¡!¿ß/-‡B¡X,–aSV½²‘`«­SÓÍÑD"‘`0(ËááaÓù·n”dµ¶¶®"½¿ÈË€œ€¡î(C­¯¯_XE ª^ Æz=ÿÞ½{£Ñh"‘XYY‘yÄóL×ÒÝ݇çççe¶–––ööv›>èI±4YDiÞ¿æMeØ+=iµu™\ZZZªFX¼Sw&iþ­¥ŽŽYD÷\&ÉË€œ€¡î@CžžVå©©)¯×«ÊeeeÆzÓFÄÀŠŠŠL×âóùôâ‹‹‹ÅÅÅ™ª>#(-;Î̛ʰWzÒjë21T¿ß?000;;›v·b”dI‡uÏ=ÏZ£ˆû`Û Ø€àÀP×f¨‡Ãô]í…VnFkjjÜn·úêÙª‚·£g³7T+‡NÛTæ½Ê°ÞÆ8c±Xcc£ØdEEE$1ÛFIï/ÀPóÉPÀk¯½f¬‘I¿ß¯gMçP}>ßØØØÊÊŠ”寕ÒÉlúbPË}“™¡fÒ”U¯Äù‰„*ÇãñM9‡ª×Õºù[4J²FcÏ×q0ÔìjOOOuuõ¥K—ÔäË/¿|àÀ©ÔT___%õ:TU‡µ'•””è«09¢ë çææŒ+­««›™™‘òä䤾ÓHÒ"V†šISV½Ú·oŸ ˆHªÔ755%]‡šºuV›cD: ¾gC•õšÎ¿u£¤¯ U=_Çu¨€¡fßP…`0è\E 2iÁÞÞ^u;yKKËòò²ª—‚LÊüò–ñn÷H$R^^îp8ü~ÿàà ®ïïï—Œ–)údΪªª‘‘‘Ô'-bsJ5mSV½ŠÅb¡PHÕŸ>}Úx/¿éÖÙlŽF:PYY©FRËŸ4ÿÖÒÒÒR{{»k•¶¶6½¿CÍ3Cµ ø¹Ëàþ ØNÀP1Tȧ( Ø€àÀP³o¨.—‹(!/l@p`¨9d¨@^ Ø€àÀP1T /Á@p†Š¡¼÷Á@p†š}C]XXøÜç>ç÷ûNgQQQCCÃÅ‹::¶D&&&êëë eÙêêêóçϧ}« …Lê‡Ûí®ªªêèèHú‘Ôu,€¡æ´¡:tèøñãê9òËËË£££ÌÄP¯\¹"^;44¤~tóå—_njjJû–Õ¤­O$—/_>qâ„ÇãÑMZßRjNªÓéÔOþÌÄuessóàà é‚6o­ÛP5===úéJë[ CÍiC­­­=vìX4MõT{C-..^XX0]—Í[7Ti¹¨¨h#K`¨9m¨7nÜ8qâD0t»Ý>ŸïøñãR“‰¡:N«uÙ¼µŽëPmÚ_ßR`÷Á@p†š}C5255ÕÖÖV[[kãy‡C²uu~~^ÚßÈR°î( Ø€àÀP·ÛPú\c xíµ×ŒoɤßïWåæææÓlÞÚ¸¡Þ{ï½ë¸Õ¸—` 8CÍiC­­­=þüÒÒÒíÕ555ê­žžžêêêK—.©É—_~ùÀR©&Õ û÷ÝwßÊÊŠL¾úê«GŽIûÖFîå?>qâ„×ë]Ó½ü©Ky6‚0Ôœ6Ô‹/666ºÝn‡Ã!×ÚÚzíÚ5ýîÀÀ@0t®"…¤3£ jÙ}ûö%ýªé[ëû=TÁår…B¡ÎÎNc÷Ö·—` 8CÍiC0Âý@°œ€¡b¨€¡b¨*† *† €¡æ¼¡f÷E´åÜÁ ê&ªýëË_þòÈÈÈK/½”kö ùòÁ'†ºNî¹çž»ï¾ûä¯}üî·ÑÓxà™gž™˜˜àÓì) ØNÀP·•3gΈ~ñ=?9iªš‘‘ÑÓ—^ziff†O;°§€` 8CÝV.\¸ 2úeç¯?óR#n:11!zº°°À§ØS@°œ€¡fœºÐ›O;a@°Á €¡òÿ(† *†ºnn]¿9õà·å/† €¡f™¹ï|;Eg#Ñm[)† €¡&_è}ëúÍ—ÿëŸ>Vò1õëý?øêØvvCÍÓ° Ø€àÀP·D ç¾ó½?ÿàï?ìø Ô|ó—?ùhaÍ›c²ÕÈ—° Ø€àÀP·ä¦Nš>ì¨VçMŸx_ýhqíöë)Ÿvò2Á'†z{6UVš;/¢¼ @°Á °« U}À^ë}è[¿ò[¼óåˆþü‡-:˜•s¨@^ Ø€àÀPÿíBï«Ï¿x!Ü1â<0âúÍ‘w~I…L€`‚CÝrn]¿©N©få^~ÀP-¹úü‹ÊS·ó÷PC Cµ ½°‚ €à 5·àÇ2€°‚ €à •6œ*0 l€` 8a7ê?ßüñÕç_Ì÷—|Àò´çÿüñü £0ù6ù?»6Œ·-Ø B4ׂ“˜„uªŠN^Ùz]{áÅü 
#†øÙìà0&$Qbv”¡þmÿ—®>Ž×v¾þö¿) •€!~vÀᇅ1!Aˆ“° U"é_ÿõ%^Ûùš{î\þ*Cüì€Ãÿ cB‚%&CåµiݧþûýÑhôå—_~ýõ×ß|óÍ……ò&¯?„1!»ÍP‰IÀPóò£;ÒÝ÷øã?ûì³õW%ŸÞëׯ“7yíàø!Œ Øm†JL†š—Ý¿pï׿þõóçÏ˧WþÅ”ÿ/É›¼vpüÆ„ì6C%&CÍËîxÏÙ³gåÓ+ÿbF£Ñþð‡äM^;8~cBv›¡“€¡æåG÷ỿ$ݯ}ík###òÿå믿NÞ䵃ã‡0&$`·*1 *]ò&ñCêÇP B”˜ uí¯‚‚‚ ÎÅ׿ö CÍëWÖ•ÔO„hŽ@‰IØ*C-x —ëç<ž÷44|ôÁÿ8 µÀ uKóæ­[ß;q¢¥¼üWΟ-**”˜¹pax#£—³ÿ±l¤c›²Q¤~˜Ø!ªgÇÏx½¿xüøïþÓ?}C…]g¨ºüã_|ì±¾šš_ÿØÇ> 雳\ªéF=ú©ææßþû¿¨Ãü£~éàÁÿ CÅP cR l¢¡êòßþíùºº W v¯¡êW}ýGþèZõä7¿Ù_Uõ¨“¬]]¿oœSÞ …þƒÓù³~¿÷Ì™î¤6åsu×]r»]2ƒÌöðÃ’ºÒûï?©NcÈßÿù?O;ögvï¾}•ò–´ÐØøñ«WŸ]ÓQÁ¾Yo P&µg_Ö«Û1l·“o¨2 ÿò/—ÒžÏV5_ùJ§×û‹ÇÏÈäo<ÕÐðQ‡ý¦OÓóß6ÛºãþîïÆ|¾_6véŸÿù¯KJ~áþá‚qA«h´Y1ºÖô)0Ý(›P7Ž©Ÿ0¶ cBbª¼$ $ä’Þ2 Eûƒ×úŽk69Š˜„í6Ô—^úÆÞ½ïSå§ž¬®J”ô£gÂáƒ'O~F½õÄ_‘Cò_þåY)¿ùæwŽ 'µYYù^IÄ’gU›MM‡’føÆ7z$AÿÍߌ¨äÈ­¯1yäSô½ï=¨Nî~æ3Ÿ–âZ Õª±½Þÿý¿•²©¡Z-n3&»ÁPËÊ~é•WÎä4’LÊ;ú+A%Ñ"Ç` ‰ÎÎf«S6Ãkµãd×|ík_Ô-H¹µµ1©oVÑh³:ݱ| 2 uã(‘ú c›0&$0Tý–M(Z¼Öw\³ÉQÄ$l·¡JÄË?Xª\SóëßÿþcÆÏ‰ü#¥ßúÎwNÛ´)ÿŠ]¿þ¼Í òyxþùûuý³ÏÞ·ÿûõ"©6µÏVÑh³ºLöxÚOÁ:BÔO[…1!±{ UXYÙ/?þ»ÿøÑ¤HË07~\³¿ò•˜„m5Tã·ü£ê‹ÑÔ—ÄqÚcó+¯<ü§ÚÑÔtÈãyÏ—¾ô_w†¡ÚŒÉ.üµ©§ž,/ÿÕLíû÷¿ÿ‹_lÓ7áY µÍðÚì¸Çë»óÎJAV!eÓÅM£Ñfu™ìñL>*a¼YaLHìÂs¨VoeŠ?®a¨C†Z_ÿ‰{}æßxiTÒ×X’Ö3lóG?zÆøEƒþ€¿ú”²ñ«Ï­3Ô¤¯\¥¼&Cµ“]h¨Æ3îIç’SÙºü×ýgÆÌ˜WVÃk³ãä%†ñýï?¦ïȶy£Ñfu™ìq«OAÒF­)ÔIý„±UªiÈCq}†šI&$&!k†úÿ•ÿÚ%3Ê?ñú©gŸ½ÏëýÅGý’dpy]¸0,þšzÈo<¥/êÿÿÛ;Ð&Â0/ÖÒE,í‹ôÌB)B'‡Š8¡¥ÐÅ¥C&qé`urIë Bé"¢4….RA„‚I¡ Y%ËMjS—âä>B’;¯Oïõyx mÓËŸ÷{îû~WrW÷˜33·^J}÷wY÷ [[+šŽƒót›É\ÕOH¨z;îLÝz^æL 5¢'ÿCBž¾%O‚‹ó)ê ³ ÷ÜÙ'‡‡å°!Èf¯ç°ûþûÉɬ»·k«ˆöF œªTz<1qsuõQß—fcÄÓÅñ°½ ëMIu¦~4Ó%H¨ç"÷Uñ| 5ÎLˆ“tB ¸02rynîŽ[8]ÉTÍæÁÇSôEç_Œ4Åçóã:’ë{µ)ý¦ò®îÔ#»Z÷^mJ?Ñ­¾Ž“/ãŸ)ñz.½f½6=ïÆÆSwfXÌÍ#zb>¡ê½ÏÏßÕ˜ª{££W––œœ|v')kî¼LOç†Z¿s¹-ëëËîÞ®­¢Û6pÁ§þõªú^•,ÂÆˆ§‹9â}÷‚Þ7_u¦~4Ó%H¨î®0Ï—Pc΄8 É%T*¸à‹çe¸”qê„鸽½R¡0Ë¿¸Ã£Š2M Õx-.Þ.º¡åaj*W,>d×M…0aw|¼ŸÏ7›I¨`Xc”@Q¦) ¡¯ííçž—¼86víÅSjb§o‡‡/…}Ö‚©ŸåߌÆ(¢LS@B¥H¨ƒ?,ÿ((Š“@B%a0oRLýhŒ(Š“@B¥ØuXþQP'„J ¡RLýhŒ(JB*ƒ]—„Š?,ÿ((Š“@B¥H¨S?£P~“P¿®½IT’¥ž§7¡" þXþiŒ(Š“`0¡R«RšP)ü1°ü£ (N¿›P~ÿñíÓ— >¬½ÚyöòÝ“•òrQJQIÔ›rºv]„ÁXÖ%P'ÁFBí¤V«íîîV*•·,ê¹:¯þû¾Ÿ"¥ `Rc”@QœS µ^¯ëø¦Z­Ê¤MH u[=WçÕÿf³™"¥ `Oc”@Qœk µÑhÉ!èì@R¨Ûê¹:¯þ·Z­)…0øc{£Šâ$XK¨:²‘=:Ä©Õjûê¶z®Î«ÿív;EJ! 
þÀžÆ(¢8 Öª¼ÑÁò}¿I¡n«çê¼úzzš"¥ `Oc”@Qœk €„ $T*@/¿Õ‡7^Ca«PIEND®B`‚python-watcher-4.0.0/doc/source/images/sequence_overview_watcher_usage.png0000664000175000017500000013262213656752270027213 0ustar zuulzuul00000000000000‰PNG  IHDR:pÌX»5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gUEzTXtplantumlxœ­T]oÚ@|·ÄØG‚mB)*U(A4„¨@ûR©Z΋9Õ¾³îÃ)ýõ½3¦Ô$TÊpÜÌÎìŒ}­ *cÓ¤ÔdF*D)\…î—?î÷a¨ –)ÁÀFÜ€&c¸ˆ5ôûþΚá¶!=`;¬ Çº 4KüQ‡žòõ –˜\€f2£ ðQ ölGÄs:¥ä"4¸BM§¢>4›ð n¹¼½)½NÐ ¶¬(~Ñc²Ãr fC0ËH¸ ³ŸNØZ¡ÃZf¬"P¤¥UŒôwñÈŸ–)ʹ´:Ù–‹N4žãggã¡1™BX¹tó"ïì~<»½?“ifÝúÐU#±†KF² '—Ì Oùo,Î}ª¯+m¾G£›Ñ>¸qIàvk“¿œ™ Ÿ>`…Ò‡Åó“9ã)‰È‡tŠH;ÓE|´^s†lû¹Ç=ÈIñõ¶TàÆ‚Ûu¤ Î1±¾«þÖaJŒ®ó¹Œcg\óœ„ûØ»ßÇrÜíÓ9Uçv½Øí¹ «Ø †‘(‰(z¢[Fñ8vßvXçHT'M*çîñ9¯Q•éÿSù×§>ªìµ[~ñæ <Ü,§ßíùº­·ïês+`Š Zh_õÚÝÞå{Žð¦Õî4‚úøa»—D~_I7‚;̱¾˜6`>‚/V¸¢ŒDΕ.oSüŸ¥™gÒ÷:—ÍOî™»:•_§A;솭WíæªÝ &\Ø_Dþl)>|€IDATxÚìÝ\T÷ïn'“ÉÄPKjg¹þ¸ó`i 1†°Ô^JÐb¶M›PÂBèÏk®Ëv뺬»M©-ñ&®ks)K å²&ÆC `, T©¡(ÑdÚB]cÅ-‚Aaµ”ºb õ÷‘ozr2sΙÃÿ^ÏgΜóï÷œÃ÷¼ùÎ93a—ÂÂØÄ5׈ko ï¿N7øŽUzäIùݾ½Iæ°ÛqmòõwŸ—ˆöÂu)Ua‰ú™óö·JåYv ®Mš3{߬‹JSùl×Mõæ?|ûÐ#ëÞþÖC?ý‹¬ê«’dfíœ/nð±ÿqmr²šT“pvî­—þøÇ·ô?¿}gWcÒÿ+ÏV_u[gý~v! 
®M¨þîó/FÞ%iì­úÎàúe5íGž•ev\Gçá¶ÞÞÞþþ~ö% ®M„_®ùw5®6ð_̲šúyù³Ù²äË÷?ÔÖÖÖÙÙIhĵq7Ð÷_/\·DBXà{ ?¿}gWõUIÕîÅÍ;ÚÒÒrâĉîîn ®£Îúý’ÕnÉ šÕôl/<ÖÔÔ¤[oo/;×ÆË‘¢J‰_oþ÷mƵ·¿õ,ÿ£ÿï›;vìP‰­³³“6@\/‡yRâסGÖÙŒk²¤,ÿ\Æß?÷Üsµµµ¯¼òJ[[l€¸6^F<ºöÌ3ÏlÛ¶m×®]---ÝÝÝìW@\#»vmÛª‘¸¦Ø|>_gg'ûׯÅî ­ºfQÕÓ[$®mݺuÇŽ¯¼òʉ'د€¸6^´Ï]³øŒÜ†Ö>7ôÝé_]]½eË–gŸ}¶¦¦¦©©©­­ý ˆkãeè[ R¯\Á¶ê»‰íà?^ùVƒªð%Õ7×qmB }gè•·DwÝz¯áw†þô~õJVs$U¯~L²ZUUo†âÚ„'¶ˆ/^Éda‰õ7eì¿÷MK–½š½b×_dU_üÁ¸ÚPVÛ¶mãV@\›h§oüÛ©v/V¡íßk]¹^mè=P5´&Aòĵ‰ÖÛÛÛÖÖÖüÓÆm»å¾+ØqË}ÛVýKÕÓ[´ &Me5>&×&ZgggKKKSSSÍWÿY}{Á3C¶ yæO$«ñ%T€¸6 z{{Oœ8!!¬îþïJ\{>ë%™mܸñ߇ÈÄ“O>¹mÛ¶ÚÚZ¾â×&Aww·„°Æ¿»òÝ µýmIfÏ<ó̦!2ñì³ÏîÚµë•W^QYMfh ×&:±õööú¾Y,qMB›Ïç{á…ª†ÈÄ‹/¾(A­­­­³³S#«âÚäx{M…Äµ× ~ ±ì•W^i"?ÿùÏ»»» j€¸6^Îì}óÐ#OýÑ>ÅC¦÷®|Tÿ¸°”É~ĵ±¡bcõ#e²_qml0ºÒqÍ&•ØÔ°Ù;Å.ĵ ÒÛÛ{öìÙS§N?~Ü/“í]ù¨Ä5ù-Ó?ù¨wLH!R”È€¸F\ ®×q¸@\#®L͸f;Cq¸@\#®׈kÄ5âq €¸@\#®׈kÄ5âš±×s×I\{í¾ï×q-äüqà}Éjêg ï¿ˆk€¸Z|KÒâÚ+w­zçÐ×q-T¨«Öv|â‹íÛ›jç|Y¦÷üí:â€Pö‡ó¿;³÷Íiùó‡ßö±âÚGüºb‡ä³í×,îùù!yøÞkooŸ•"s^.(ò yã7úûûÙ‹BŠÄí=iös¶ùMö/@\ûÐÉçvU_u›ô¿yþey(±¬··÷?ž®­r$É̆Ââ×^{MâšÌ$±Á¸ö«’ïŸÙ[5m~~õoß'®ĵ8ßrô…ë–H×pø_ŸÖ²ÚÙ³gO:õòwŠe~µ{ñÞ§«$®ÉL€ŒkqþøÇ·¦ÍOמ*â@\ûÐ…Sg^ŒL•~áío•ª9ZV;~ü¸Ïç«ÏY}åMÒO|Á÷ã]2S%6ö%âÚxǵŸþÛSÒ ·´´´µµuvvÒ÷34®½ñRcÒruèÞ¿<4´¦eµwÞyçµ!/¥ü,óãOßsìÐ;*±1À€¸6Þq­fmq]]]SSÓ$±uww³»×$Ÿí»û›Wþû‹e8ÿ»ËzTËjâÈÃoÿò'·ý/YR~{çW²o‰ ®w\{î»nÛ¶­¶¶V[KKKgg'»˜qq­õ;åÒÔEÝ}ñ½žËºKÖ´¬&’Ì:†œüÕ±GÞu%Û¥ÿÓñ¶c\Ä€¸6Þqmëw¾·eËIluuu>Ÿïĉìn`fŵwÛrå‚«n“žîòGo/Ðg5ËÔSGv6׸]ù0¶o¬×ž"± ®S\«~äûמ}öÙššš¦¦¦¶¶6v70ƒâšö±2á—ÕÔК>«õQ ÚöõѯÿŸÍ$6Ä5â@\ÿùÆ;êc;Ž–¿Ñ‘ÔZ½‹Ä€¸F\ˆkc©ïØ)õíRoþC‘ý¬æ—ØöÿÝú+·,…ßþ Í$6¡×bc£ßyçÇÚÃ^ø¾äªÿ¸X›ó«_ÕΛç}#®ĵ±ô‡ó¿{ù³¨íxÿâ¥Ëýˆ5uÉšuüÒÛžì+Æöâ¼{|›c‚qmÕªœü @{˜Ÿ¿4>þFù­Íyꩇeâ€ÐŠkò¾'ÿ?[ü·*«ù}ÄšZ :T¦Û™ÓïíZô7Ü(ú§9Ø„N\Ûµë‡éé)ÚÄ„›$ŸÉomÎÒ¥wÊ22qêÔO³²¾èv»œÎ«RS9Ó¤B˜žZåÅK¤YÌë|úéµZ\ûÑ]¸p¾Ì—B²³¿¤JÐV‘˜èr]íñ\_Xø7ú÷Ä«##?ép|Œ¸×>ô‡ó¿{=wÝ…Sg.}ÄšJ]vÞÖTëž<ü«Æ{ü徟k]˜˜¸ö‡?¼!áéý÷Êôïï›=û:™Ÿõ»ß½ªü¤ž]°àÓ¯½¶E¦e•Õ«sssÿÊpØì'?yBRš,)Ó//_ž¡-&Yíõן“i)üë_¿WŸzê§?-OJŠ{ë­çeú½÷32nøá¯kkefþåo»ŸÑ5€¸fÊð#Öìld&,®ÉOzzŠ&5Ä•ý%™ ¥._{ûíjýØ›ö#¡M"a\KIùÌË/W¾zâÄNíaÿë.×ÕÚ*úëç$œEF~R[ë׿®çÍP€¸f+®)#¸þLÝ›B\Rqí‰'V««Ó|ðk2­æ¨Ë×~ðƒ‚þð;j1 [ÝÜn—zßS{wÒ/®9W©Ñ¸ 
×®is$·i?²ºEáÄ5€¸f¶”$-ý]¥#.Æ/®ýêWµqq7ÈDRRÜÛoWËÄ[o=¯._ËÌüKyV-–˜xË¿þëÊþþ×ý‚”_¢’È5ܸ&áìxc7(×âÚ‡qM3²˜¥%¶Q–ã×ä'&æÿ9qb§ºpMý„‡ÏúÏÿlŽÖæè/öãéÖߛ¡ê­UûqMVyöÙ%®ĵ‘Ç5½g,•ØÆ¤(ó¸öõ¯ß›™ù—êÂ5õ“•õÅ‚‚V®üª6gþü?ÿ÷_£Þ]¸p¾¤æÎýÔϾÕðVƒS§~ºbEvиÖÔ´12ò“/¼ðý÷ß?(?û÷o–Ê×âÚ0b–ÞE\:qíÇ?.–`ôÿo¡6ç?(˜=ûº_,ÑæH&‹‹»Ááø˜¤±þð;Zª®~\›þƒ<¤´øøÕ’úò°9“ˆö…/üOuùšLhãsÄ5€¸Ä5¾3À¤Æ5ù·lÌ—n™@\#®31®•••EDDÈïŒk#Ës¤@Ä5â0­âZbbbUUÕÂ… C0×׈kÀLk‡¾ýöÛUh;tèþ©Í›7GGG»\®yóæmܸQË@2Q^^îõzNç‚ ^}õÕÊÊJYF&$$?~Ü/3ÉĶmÛ$Ên·;;;ûìÙ³~Ë=z4--MžU…H7¡žÕSsÊÊÊ"##‡<ìèèÈÊÊRk¥¦¦ªb×›6mЉ‰‘Åä·´Eìôq¸„\\+((PÙHBŒLkówìØ! L8ù-)G¿î¼óN‰JƒƒƒëÖ­óx<•´‡_øÂãšdµÈt__ß׿þuÉX~ËÌŸ?_ÕÀÀ€L·´´ää䎓 }á]¦öE}>Ÿ¼®¬¸zè[÷ תªªÒÚ"…K[$_Ä5âZqM‚N||¼üVA***JM‹¤¤¤ææfmÉ}ûöéãW{{»V‚<<}ú´öÐívƵ“'OjE]ºtÉårù-#szzz j×´Ñ»À¶„‡‡®å×–={ö$&&-ˆkÄ5`òãZCCÃÃ?¬=ÌÍÍݹs§šv:ZtÓb™Y„2|h¶¼áS………‘‘‘_ÿú׫ªª:::‚®H LOO—€ø§/Ýs.Ø–ÀL Ä5âŠq-;;ÛïJ¯ÌÌÌI‰k¢µµµ´´4''ÇãñÛy­ÄÄÄõë×_ºtÉú‰kˆkÄ5`JƵsçÎ…‡‡kYG È™/Ó‹/¶x3t<⚦««K{·Ôï¿åõÏŸ¯®®nÛ¶m[¦iŽ4JšvâÄ v1@\€©­¥¥¥©©©¶¶V"γӂ4Dš#’¦uvv²‹âLmmmmpSWWW3-HC¤9Ò(iZww7» ®}èÐ#O²·L9kZZZ|>_Ó¸yæï¾Û4Q¤!Òi”4/}ˆkQ–ÈÞ0åH éîî–dsâĉ¶qSêüLÛD‘†Hs¤QÒ´þþ~v1@\#®=$@\£3ââÐC˜æq[ € ®€¸âq Ä5.¤zH€¸Ò¸Mè!â××茀q ˆkˆk i€ ®€¸âq À¨ìÝ»÷ T577óG ׆ i1ÍÈ1 Uk×®å ® ·©cZƵŋ¯BÉ¢E‹ˆkq¸|×äìx%rL×âq ®!ÔãÚ<àóùZZZÚÚÚ:;;{{{ù›ˆkÄ5×€ŠkÙÙÙuuuMMMÄÖÝÝÍß,@\ Ž[ @\&,®effnÛ¶­¶¶V[KKKgg'³q ®aÒ¨{!§C¡>“×î¹çž-[¶Hb«««óù|'Nœào ®ĵ½÷Þ{=ôÐ-·ÜrÍ5×8ÎO|âéééÓ)Œ¡¢¢"Õ™ó¸6[)°L;s&åÍP‰kÏ>ûlMMMSSS[[³q ®}àÝwß½õÖ[¯½öÚ²²²³gÏöôô466×ÌÜxãŽ!7ß|ó˜oÉŠk!ríq ®Ä5㸶råJ™ÿØcYœì‹ŠŠ<d5ó{ßûÞ§?ýi—Ë)Åê—üñÇeyYòãÿøòåË% j…h´…-ÊÑÛ¹sgbbâ5×\#K¦¤¤¼ôÒKÚSÿûÿ﨨(§Ó9wî\™ö«öO<áõze­›nºiûöíßúÖ·>õ©OÉÃ[n¹e÷îÝ톪‰{ï½÷Þwß}2Q__o–[*õ”—¸á†$ލ™m%››Â°e¾Jàtyyytt´Út Ú«ÈqbÖ‹º×€)×øo`âÆ5Éa2ÿ7¿ùE\ËËË;{ö¬š³nÝ:™óµ¯}­§§GEÀõë×kË˳o½õ–LÈyW}:ƒÙˆŽu9z’?´l¤ù“|&ó¿ùÍoÊ´D1™Ö›¾Ú’ÌJn5„5Y@ê  ÑAZÄ5a322N ‘‰¿j¶),Z1Ü7CÕ´DmÙVµµµúm¥Þ¾ûrÀÈ«ëW4«›Í¸& ¸   --íæ›o¾þúë%‚ó× Œ+»qˉË)0®©ó¢õ[iÚ¯×+sŽ9"ÓrFןÎ×½îºëÌ"‚ýr®½öZ©ä+¯¼"YD?îܹ²–À“ô Ó2GÿrZ |¨Ú¯ÆáÇe­[o½U=” yxèÐ!­L—Ëe€¤@í%„LŒ8®™m ‹VŒ,®n+ÕÙÚѯhV7û£kO?ýôSO=%A¿®®îàÁƒüÍãçâÅ‹Ä5`*Å5;£ksô´ÓùÎ;“““¯¹æ™cñv[Ðrü”••IR $&&jï²hI\°óÖd`ÅìTãÛßþ¶zƒU=,))‘‡23h\³lÆ5ëMaØŠ‘ŵ4ĬnÄ5 ÉŸo†SéÍP;×®äÆ;•ü$´ÉtOOõ™Þ¢œ@RÚîÝ»Õ»ŠŸüä'ÕLõî›*ÁptÍN\³Y 
©À§>õ©Àí)3Õ`’>!:ujœF×Ì6…E+Æ0®ù5äСC~ ÖÍf\ãÍP Dß UÔU¥ò¿TMˆyøæ/ÕÓHvv¶a\“sü­·ÞzÍ5×Þxj—`§.“êêê’…_z饻îºK=¥z;lÅŠ~ï”ÉôÑ£Gí”ãGN䯽öšL¼ñƲJTT”~¸ë[ßú–Ló›ß ¼vÍN\³Y7ÊbK–,ÑÏLIIQWåkQ¦¶¶V yàô/¡.ùº÷Þ{;;;ß{ï=‹k×·’ÍMaÑŠÀ2çØŒkZC¤á™ÕÍþèÚO<±~ýúGyD~ËÑÈß,0~$w ûÎÐÐŒk?üûÕìNÌ„¸vîOŸ»vóÍ7;‡|üãÿÊW¾b1Þ#§Ò[n¹Åáp¸\.‰ÚÛ^õõõ7Ýt“Ì—Suqq±ß™þºë®ó+ͬ?;vì`ä’œœ,iR{J²šc3¼3ÔæC;ÕHLL”Užyæý̧Ÿ~Zf~ö³Ÿ•醆ÕvÉmàü^Bâ”ÔSž½ñÆ+**Ìâ‘áV²¹)ÌZXfà›qM5D6µ¾!Ú˜¢E݈kÀtˆk--->Ÿ¯ ÀxºÿþûÃøVŒÖÖV9¢n¸á†1¹ªRâšÄ\‰’?þxiiésÏ=Çß,0~$w ;®uvvž8q¢ ÀxÊÏÏ'®a”î»ï¾Ÿÿüçê ¼»îºKŽ¨ŠŠŠ±ŠkO>ù¤$6)°²²²±±‘¿Y`üýž7>&­ÉìûÑ~ô™Ï|Æår]sÍ5K–,‘‡cuÏ2“ ð­ˆkQÄ5€¸6r‡y’½â@\ˆk¡«*,‘½â@\ˆkÄ5`BãÚ¢E‹V¡DŽIâ@\#®Æ5 4×âq ¸ÜÜܼöOxà9;fffÞsÏ=ÙÀÜwߨ–'ÿN×âÚ°q«¦1ŸÏWWW·mÛ¶-Àˆlü?%ãQ¬“rdÊñôC¡×€i®¥¥¥©©©¶¶VÎŽÏ¡AŽF9&åȔ㳳³“¿S€¸ÌhmmmͯëÅLþC9&åȔ㳻»›¿S€¸ÌhrFäëzj_b(Ǥ™r|öööòw ×€MÎ…ÝÝÝ|]/BíK 嘔#SŽÏþþ~þNâZpÜjôq-¤ñA@ ×茀¸€¸Fgôˆk@\@\›h\H ôq Ä5×ð‘}6£*¶~ýúììlvmèòÑGe· ®!ø™/ÔNãQŸK—.Í™3çäÉ“!ÕäÉÚS7ôL³¸vüøqÇ#'Ý€i×&øBÚP;[»Ýnù=&m1k]˜‘IÜÈ#xõ­[·.]ºtêî‚Q–¬-V\\,•‘Õ¥b/^ô{•1Ü R™Ó§O=ë-gwACCC\\œ¬-ÛÓ¢Î~嫟0<8ÍêiV‡ ‰Ê¬@Ùé+V¬˜5D&´cÀp×Û‰kf5 ì Ì^Úì°ÁáaGWW—l’×~øaéè{zzäT!µÖßÿýZW»víÚŒŒ Yf`` ///??_Í—áóùe¾¬+½¶áÙÂlu¿³—t”&§ÄÄD‹uW­Z¥Î1’-ä\¥.j‘ªaÉééé[·n•‰ÊÊÊ´´´ YA’™™Ù;D& OÞÃÛ0k¾,¶téR9I“KKKå̧¨òZL_ý>ó½ gý ‘ÂN9“¸ æÌ™£¡d­n»ÀÎ`›ÙÁ©ê)&RO‰MÚò†u°×Ì ,((úkDk×ÛÜJ†]ÙK›–#8µA´ ÈÌv½áŽ º• »³—6;,‡uxX×V?=Þ䀸6H,'*¿;Cý–‘^8::ÚápÄÇÇk0 ’“d¦×ë-//×Ö’äär¹ô…®nsœÃpÝÇ;ÎsçÎÉ´ü–gµ[)5ÉÉÉÍÍÍ~3åô–””tyèý „„Uy |úÛóòò¤p9Þ–hغ gbÃ&ØkR u«TLû°Pý}ˆcµ<ÏñãÇ­Ïfå6|âwÔgþüù²n\\œþHÃ:û•ã÷0h\»xñâòå˵zjošÕ!h\3+Pæçç细¬\¹R;Ìv½6qàÀ””û[)°+0{i³ÃÒlS›ÕÄN\ãcr×òÙÄ~†–œky×i$RHF µï¸ãŸÏ ‡åhj"¤DìxáÔ™ãO½xñ½ŽO€¸65p!-q ã¡   ¯¯ïܹsú7 CªÀ~XžnðU…%nwþÌÞ7é!âZ¨ã6õ©k\?¿£T^^îñxÜn·ßGņN–¿yþeéå§vΗ­}ª¿û<=$@\#®@héj|}ûµ)Õû\µã¶jçm{¿¸Òo° ®× 4›#I´mw-Ö¶ÑCĵP‰küðÃ?üþ´oo"®ĵÀ…´føèZÍÕ‹T>«q-úñõwÔýôwÛ¢F×è!â $²Zíœ/o¿6eúƒ2çï³eâ`òiw†ê‡Ó×!áƒÏ]s-b8 ®BÎ…SgŽUþþÄi6@\›¸è!âZHã6u ‡ˆktF@\@\£3zHÄ5 ® ®M4.¤zH€¸âˆk€)v6 £bYŸÝà ®B=®M×x4Ve׈k! 
i Ä{Èââb·Û-¿Ç$=˜å‰0#“×&8÷ŒU}ˆkĵqÁmêâ=dllìÆçÍ›7®qm‚q Ä5âŒoÙÐÐçr¹¢££7oÞ¬fvttdee¹Ýn§Ó™ššzöìÙ¾¾¾9sæôööj+ÊÇ£æFDDH!9992ßð…öìÙ“œœ,IIIf±@{xéÒ¥ÜÜ\)S^¥¨¨ÈïÍP;#gOÖS+//÷z½ÒØ ¼úꫲbbbäaBBÂáǵŊ‹‹¥2²ºTìâÅ‹~¯¸ÑÌêtsé»iÓ&U7©LkkkàÂfÊf} ³h¯Ùò†’ÍcÄ5€U)!L…'9ïæçç«™’Z|>ßàààÀÀÀêÕ«ål-3W­Z%i@[Q¦|ðA™X»vmFFFOO,œ——§â'==}ëÖ­2QYY™––4®Éëfffö‘‰Àk׆;ŒdVOYléÒ¥]]]ÒÞÒÒR (ú‡*bªÅôõ‘êù½ŠáF³_ ³¸&)G–b%<%&ìG³ e³>‹¶×lyÃÉæ±âÀª‡ôz½eee§OŸ6[@NÌááá2qìØ1YX›ÓÑÑ!QQQíííjæ… fÏžXˆ, ‹IQªÀÈÈÈ“'OZǵ¹sçjÅÊ£kfõ”Å´Q©›ßC§Ó©-¦¯4Á¢ÚF³_ ³¸¦ ké+£g¶¡lÖÇb1Ãöš-ox Ùi,fV\ãVAÙÚÚš-çÑØØØ††5Óç󥤤¸Ýnõ®™ÃáPóÓÒÒjkkeB~/_¾\;¯ëi ë­Y³Æo±ÂÂBë¸tþpãšY=Í^Èúåô1Îz£Ù¬Æ°6‚ Û¬ÍÅ´öš-ox Ùi,fV\ŒÆ¾}û´”¨¨¨úúú™–ßÚ™[–Y¼x±LÈoí¢.YX»€É” %k£,—‡Û<l“S¸š¸ò¹]ò›þ ®× ´zÈîýo×EÝ- œnðÑ?Ä5â„JÙß}þhÉó»æU}ëÀ¯Ÿ¬¥ˆkÄ5‰²{ÿÛ¯ç®{áºÛë£3j®^TíH:µc/ý3@\›Üj=¤6œV;çË/þÙ]jP­Ú™<ÞY¸×Ánðñïq Òú»Ïÿò¡ŠÚë¿TíHRªæêäí³–tÖïgãÄ5@éúÙ{–|C…¶ê«n#±Ä5@(ÒÛÔHÛ»ÏÔ³MâÚ¸ãVAÙõ³7j?ù¥qýÜ5ĵp)+Œ¸‡ìï>ßVVsáÔ¶@\#®=$†®Ñq qmx¸Õè!âˆk ®×@\ iÃÂBt+…lÅÌœqíC“x›º–ŠB-×Lz €¸6fQqq±Ûí–ßc’ŠÌrR˜‘IŒk#xuÃU¬k೪á‡C¶y|||AAAWWlq qÍJllìÆçÍ›7®qm49išÅ55188xøðá5kÖx<žöövþÞââš±={ö$''ËDRRRcc£Y4Ñ^ºt)77×årIÈ(**òK!vFΟ*,,Œˆˆ2srrúúú´ÅÊË˽^¯Óé\°`Á«¯¾ºyóæ˜˜y˜ AG[¬¸¸X*#«KÅ.^¼è÷*YYYn·[VLMM={ö¬Y= «1~qM³aé9oq ÀtŽk£¹6==}ëÖ­2QYY™––4®­^½:33³wˆL؉#Ö¡gíÚµ===yyyùùùÚbK—.íêê,--•@¦¨"¦ZL_©žß«HÔóù|²–”/ÏjÁÈf5& ®IÍÃÃÃù{B°‡@\›|íííQQQe.½7yòäIë¸6wî\í;YxôqM* xáÂ…Ù³gk‹iC\R7¿‡N§S[L_i‚E5dE-Ù¬ÆÄ5¡5×>bÍš5~×þZǵ ó‡×ü*àp8¬_Èúåô1NMø|¾””·Û´|Ãjkfš=¸XOOa:3=® DFFê/r—iÇ£Û$g¨ Ñ××7®£kêz² ©Î,®Y®IùõõõÒXÕd³zšUC/::úÈ‘#ú9òÐëõÚyÖbû<úè£\»qÍ@eeeVV–ßÌÔÔÔ-[¶ÈÄÂ… KKK%±IˆÉÉÉñ»v­oHFFF` ™5k–õ'Søå• 6¤§§wttÈô±cÇÌ®-³ˆkZ} ¯]‹ˆˆP·PHC–-[fVO³jøU5))éàÁƒêaKKKrr²Ì´ó¬á¡’çÖ¬YãšÀ4ŒkA/¤íï>ò¹]ò[?SÂDss³ß’’l$sÈDkkkBB‚Ãáðz½ú;CóòòœN§Ä À;CEII‰ËåÖ¡’i¢££åµâããkjj†פêŽN©˜TÏo†††˜˜Õòòr‹zVÃOYYY\\œsˆLÈC›Ïú}îšW—-,ù2言qí!×&‚ÅmêÝûß®‹º[8Ýà›ž;‰o/0Òqm2;£þîóGKžß5ÿ«ò”üüúÉÚi»“ˆkˆkqmjuFÝûß~=wÝ ×Ý^Qsõ¢jGÒ©{§ñNr¹\©ˆkqm tFÚpZíœ/¿øgw©AµjgòôÎj@\ˆkSƒï¯Vù¬æêEjBýøÛG=ò¤L¨+m™fši¦gÚôen5ˆk¡£¿ûü/ª¨½þKÕŽ¤?E·äí³–tÖïgGâZéúÙ{–|C…¶ê«n#±âZ(ÒÛÔHÛ»ÏÔ³;q-uýìÚO~iî ®Mv¾Õ ­¬æÂ©3ìT3 ·ĵÀmê@ ×茀¸€¸Fgôˆk@\@\›h\H ôq Ä5׈k ®ép!-ÐCĵÆmê@ ×茀)oïÞ½`&inn¦‡ˆkÄ5`*‘ówf’µk×ÒCÄ5â0õâÚâÅ‹Wcº[´hq 
®….¤ìÇ59—ŸÃt'{Y‹kôq q !×xàŸÏ×ÒÒÒÖÖÖÙÙÙÛÛË_@\@\Cŵìì캺º¦¦¦Hbëîîæ¯ ® ®!„âZffæ¶mÛjkk%±µ´´tvvòW××Bqíž{îÙ²e‹$¶ºº:ŸÏwâÄ þ âÚ¤áBZ`JÇ5õ©d,ývå6ÑÞ •¸öÄ·©©©ijjjkk㯠®MnSF×&=-Mƒ¸VTT¤Z!c×F°}ôqm}Ú׈kq¸×fz\»ñÆCn¾ùæ1ßÄ5Ä5€¸fšüqyJRÈÇ?þñåË—¿û«•——GGG»\®›nº©¡¡A[Ræßzë­N§sîܹeeevV±Y%ÃF™•ù½ï}ïÓŸþ´ÌŒŒ”Í¢fÞpà ²Êþýûeúé§Ÿ–égŸ}V¦_yå™–åͪ´sçNYàÞ{ï½ï¾ûd¢¾¾ÞlÛú=”jDEEI5ä¥ekލù}Qq €i\ã»\€icXqmݺuo½õ–LHpQŸãe3®I:{ölmm­LK`Ò'­¯|å+¿ùÍoÞ{ï½U«V]Å~• eX¦” ¿öµ¯õôô¨»~ýz™ÿo|C¦KJJdZ²—J`2]\\,Óò¬Y•Ô»wï–‰ôôt;qMmŒŒŒSCdbÌß •øXPP––vóÍ7_ýõ’’9þ©Èn\ã``ÚÜ-8â7Ce™ë®»N›v¹\qM™öÐáp¨i LòððáÆ0\Å~•ì—éõzåá‘#GdZœ–ä¶oß®"ŽÌ¼æškÔ°œL« %Ͼ4GJ¾õÖ[ÕC™‡‡ º¡ÔÖPÕ21×®=ýôÓO=õ”DÛºººƒòWL9/^$®Ä5Ó4°sçÎääd .’?üÞŒ ×ìõ©OÉüžžÃúö·¿-‹=ñÄêaII‰<”™ÃÝPçÆçVâ0ÕÉ_.o†¼jš<Ì—„$ÓVü„6XuêÔ)›qM'i#O#ˆkU²_¦]ÓÞôîºë.y*%%%11Q& ‘92ßðU¤æ7²ÌTñÎbCMÀèo†3ëÍPE]¬*ÿ¢Õ˜‚²³³‡×Tyå•W$y¬X±"0jÔÖÖÊS<ð€Í¸¦®Öºûî»ß}÷ÝÀk×ìÄ5‹*Ùk=ö˜ºv­««KÊy饗´4¦j¨]Á¦†ÊDqq±á«lܸQž]²d‰~¦¤½°¡»¬7”úì{ï½·³³S¶†Åµk×^{­L=zt£kO<ñÄúõëyäù]VVÆ_0åHîö¡Ä5`ºÆ5?2¿¾¾þ¦›nr8QQQêr{-@444¨§$ŽH.±×DEE…ººËãñøÝj'®YTiX#vòÒ·Ür‹”ãr¹$«IbSóßxã YÌétª±·S§N©)³·åÙgžyF?SÝUúÙÏ~ÖzC©à)ÏÞxã²eÌ6£»ë®»nXclÄ5`Fǵ––ŸÏ×bžù»ï6æþûïãK¨fÒuŠ×$#þË?~ëñÇ/--}î¹çø+¦É]ÃŽk'Nœh 1¥ÎÏ´&??Ÿ¸6ÓâÚ“O>¹&}YEEEeeecc#À”ôÛ~ù˜\`F|L.¦q\ãcrô1¹Ä5€¸ââq ®¸`šÆµC<ÉÞlƵE‹­Æt'{Y‹kO|ûâ@\0•âfמ}öYâ@\05477¯ý“x@Îå™™™÷ÜsO6¦) èÄ5€¸`ªòù|uuuÛ¶mÛ‚éNö²ìkÙãA?q @iiiijjª­­•sù³˜¾dÿÊ^–}-{¼³³“# ®Mn5†«­­íÀ|­ÜLøúšgþ¯ewwwsäĵIÃyÃÕÙÙ)çïÐüZ9Œí××]så{_d÷ööräÄ5â0eÈ™»»»;4¿Vcûõ5ÿþ±ÏȾ–=ÞßßÏ‘׈k@ €¸Fgô¦\\ãV ‡ˆk ®€¸@\qM‡ i€ ®…4nSzH€¸6U;£°°‰hÅļJ(W~ß¾}QQQA‹šÒj²6¯Ím ×âqmúǵѴ"))©¹¹yzo¨q:6‚.`sÛÄ5€¸F\ éR\\ìv»å÷˜T~Íq¹\v å†:tèPZZš{Hjjª< ‘È5®Ç†Ù¶Ä£”¡>ââÚðÏâæÒθ»qãÆyóæMV\3[e ãÚÑ£G###7mÚ40dóæÍçØ±cÓ>®ÕÑE\›±¸Õ ®…|þt^éèèÈÊÊr»ÝN§355õìÙ³ÖyB&$x½^Y>!!¡µµU[fõêÕ³gÏ/++ º°¾d³e #""\.WNNN__ŸŠ_GŽùà?ãª*5!s Ùž={’““/½kÖØØ4*]ºt)77W^NâNQQ‘ß›¡a:¯uñâÅ+VÌ"òÐb•ÀùÃÚ~¤Î¥¥¥ú9òPfj%K‹¤™©*fQ²YMâââdáèèhI„†­0;–ÌŽ 5ÑÒÒ2wî\IÕf‡¨a•ü^]~KÉ[‡EMì´Â¯(‹`í×(âÌZ¤ ;{Ùæ* 4;8‡U½ÀÝ7¬ qm´qmÁ‚>Ÿoppp``@:eýiÞ¬+—Þ¹§§GV‘˜øÁ» ?ü°œe¾„)Çza¿’ —Y»vmFF†Ì—ŠååååççËÌU«V©sÀéÓ§åd,¯%Ór.WÏúIOOߺu«LTVV¦¥¥kRíÌÌÌÞ!2xÒµ;)((ÚjëÊÃᎮÙß~ä”yîÜ9ý©ƒœ\µ’õÒvYÉf5™3gŽŠ¼}ô ë_×ìX2;6ä÷îÝ»%«É9Þb™UIÿê2}ÿý÷k¡Á¬&vZáW”ÙÑbѨ 
GšßKØÙË6W1l ÍƒÓ¬z†-V€¸6Ú¸¦'§7ù:hW®ýw.Ë;N5-'Ý“'O¾ŠáÂv–‰ŠŠjooWÓ.\Pù£¾¾^ÎÜ2QRR£¢ÛÒ¥Ke¾_±²®” ªb###µê™5Mš ½¢,<¬¸¦/_&<Ïpãšý`gWj%ȳúFI=­K6«‰×ë-++“”ô¥%³cCr¶v옖?Š‹‹çÍ›WQQ!Óò;&&¦¤¤Ä¯ÌÊÊʬ¬,¿™R½-[¶X4M]»Ö7$###°É³fÍêêê2ÜžÚuojÝ ×®ù5Ü wôèQ9¡nÞ¼Yr‰4J&ä¡vg¨ºvM5JíšYÉf5‘Ô9[NÃr̶ÂìX²86dæ‚ ŠŠŠÆ6®™ÕÄN+üŠ2;Z åW”Í?";{Ùæ*† 4;8mVϰ¥ÃªÀTì!×B+®544Hâ‘ÿ³½^oyy¹þͯ„„5_R‘7Jä4 §+ûÿF×Ô)!::Zê_SS£f>|Øétª‹ëå·<+süÊLNNüU9'%%Y4MÎFyyyR¸œfï ½  ‡ˆktF ®Ä5ĵâBZŒÕ75Ó=$@\qm‚›=cÞV›Š-§:‡ì¦àM^ÀŒŽk6?*lŸ¼‰kņÚ6!®ft\3;#2ºʯn±Vqq±Ûí–ßcR¬Eš4‰[‰<ĵ©a”ÒצG\‹Ý¸qã¼yóÆ5®MðÖ#®aÒ{Hĵ±av›ºœ¿9òÁ2UUjBæ¨3º:ÏŽ‘¨ïäöz½N§3!!¡µµ5°äŽŽŽ¬¬,·Û-˨OB7<{<—Ë•››«}{·Ì/++‹ŒŒÔ¾°°°P}¾kNNŽú\þ9sæè¿SæH92G«¤”¶bÅŠYCdB_¸á¹¼¡¡!..N^"::Z}a¼öìí·ß®ÿÀÒÓ§OÏ;W^ѯ9«W¯ž={¶ßÀZ7Äb[M®~éÒ%Ùz2S¶€þC}ýìÙ³G}aWRR’ú ë bV¬ÙQa''Ö_+//W‡Ó‚ ^}õUÙþ111êèÒ>úØâh“͈™Œòˆk!Ý­ZµJE‰ ržSŸr.Q,??ÿ²å§ÉËy®§§gppPΠ‰‰…Ëy×çóÉ’c̾NG}%ŽÐ-’Ì¿ÿþûµóèÚµk322ä夨¼¼›””¤F?.Kê¿ÁI1ûžï  ±ØV_æm¸ºÙÒëɲºúoù­ß8fd4ßs?¬úËbZ¢•ºù=ÔŽ.‹£eL6#ˆkˆk!Ú ÈÙK&âãã/^¬Îg2ÿ²ïj´8_ú|¾””·Û­Þ‡ÒÞ ´XQbö[LO+*--­¶¶V&ä÷òå˭ÄEáj¢µµ5;;[NÞ±±±ÚWjÏîÞ½{þüù—‡¾¸Zÿm‰ÖÁfC̶•ÍÕíìŽ5kÖø­^XXh½zÐùÃk#«¿ý:ÊÍââÚ$³¸öÎ;ïܹs§zËI~KRÑÞ)M\“ÌW__¯bŸü6K3vÆK¤(ÃKßöíÛ§ò¥üÖ_á¤&¤4}áÚèšœžÕ Óå¡+Þ+&ÅÖ$!!aݺuÑÑÑÚêzf£kvb¶­l®ttMÊÔoËCƒm²ATCÌ6ÈxŒ®™]Âh3®Y-£ÜŒ˜É¸Õ ®…ºâââyóæUTTÈ´üމ‰)))ñ;ÏÍš5«««kXq-""B»lÙ²efq-33SÝ:`x5’²aÆôôtõ^ç±cÇô—ÁÅÇÇoÚ´Ig`e´K¯¤ðŒŒ íÚµ… –––J@‘ŠåäähËK±*oI\“ÊÖdÇŽòpË–-†›Q]»&eú]»f§!fÛÊo³›­®Zª6£´4pSWVVfeeù͔ڪ¶˜m³bͪ4‡™Õß~\³>ZF¹ĵÐuøða§ÓyîÜ9™–߇#p¤JœËå²9ئ444Hò“Ò¼^oyy¹Y\+**R÷èåååi„.,§Øèèh)Mò™þ&MÉj²®¬ÀÊ\¼x1??ß5dåÊ•Zá­­­ ªbOµå¥Øùóç˦ˆ‹‹ |3ôòÐ[®êýP3’%øÝj§!fÛÊo³›­.M“­'5—-ixghrrrss³ßLI6IIIĬX‹êYÇ5³úÛkÖGË(7#¦Ÿ §ÎêÅ‹ïõ°)âF±ù¦ÔbeeeíØ±ƒ½ÆÑ‚)ätƒ¯*,ñ…ë–œÙû&[ ®ašŸ€«ªªôŸåŽL¿~òEIlòó“î=RTÙß}žm×BTh^Hër¹¦ÄÖ“zz½ÞƒrÄs´`*:YÙPõ±¤=wþì¶¿yaÖßÒ‡üÛ¸Õ ®…nS@b»òÆè¬”ÆÄÿU}~° ®…J\ã‡~øáGûyé¿§ª‰öíMÄ5€¸ÆèL²®Æ×_øø_¾è¹S‹k5®EûþêÁ¾_Ÿ¢‡ˆkÄ5‰¬¶cö®µÿö¹ú茶'ªÿ8ð>=$@\ -\H `fúÍó/§ÑCÄ5@HPŸ»VsÕm~Ãiˆk€ÉwáÔ™#E•¿?qšM×@\ÀôŽk\H ôq-¤q›:ÐCÄ5:# ® ®Ñ=$â××&Ò=$@\ƒÉ6 c+âÚTˆkä6@\/ÅÅÅn·[~&®Y̱~Ê:ð>6ÄápHããã ººº8X 
®Mg±±±7nœ7oÞT‰kjbppððáÃkÖ¬ñx<ííí¯×B×h.¤Ý³gOrr²L$%%566šE+íá¥K—rss].—„¤¢¢"¿¦3ÞqM³aéÇ+€1ï!ׯÌhnSOOOߺu«LTVV¦¥¥k«W¯ÎÌÌì"vâÔxÇ5©Ixx8Ç+€1ï!×&¿3jooŠŠ¼<ôÞbddäÉ“'­ãÚܹsµweáPˆkÂétr¼ ®ĵiØ­Y³&ì£ ­ãZÐùÃk‡cXÏ.ÖÓÓ3{ölŽWÄ5€¸6Ý:£ÈÈHýEú2íñxÔ`›ä$5!úúúÆdt-::úÈ‘#ú9òÐëõÚyÖ¢üG}”k××âZHÙ…´•••YYY~3SSS·lÙ" .,--•ÄvöìÙœœ¿k×ú†ddd¦¨Y³f™}²Æ† ’’’<¨¶´´$''ËL;ÏÞ*ynÍš5~¡FßC ®… CÍÍÍ~3%3ÉDkkkBB‚Ãáðz½ú;CóòòœNgDDDà¡¢¤¤Äår™±•••ÅÅÅ9‡È„<´ù¬ßç® y©¡ÄG ”¬צ¶þîó'ŸÛ%¿Ù©€¸ZÎ6¿U;çKUa‰§|ìQ@\ ýÝçùPEí§î” &?ï>SÏîĵI£¿öló[{n_Q}Urõû\õU·mw/î¬ßϾ0cq«@\ Ua‰ÚpZµ#I¨Õ\¼}Ö²€Žòˆk¡Ò©Ÿê«nÓ¦åçÀß>*ÿVÊ„úç’i¦™fz¦M×âZh®-y~缬Ú9_®õÜõAzs&ŸÚ±— €Ñ5ĵêŒÎì}óõÜuÛÝŸ¯ÿó{ª¯ºÄ€¸ÆFˆk“/ðBZm°M ³ýúÉZv'€™‰[ âZ¨;³÷ͺ¨»ùÜ5@\ iýÝçÛÊj.œ:ÃNÄ5×0¥o5ÐCĵÂmê@ ×茀¸€¸Fgôˆk@\@\›h“r!mXØdnŸÉ}uS·ĵ¼uB#®‘Û ®ÂŒŸ}N*..v»Ýò{Lê9ÜúØ\ž8qmæÆµØØØ7Λ7¸fn\»téRnn®Ëåòxz¦ß<ÿ²$6ù©óåCkŸêï>Ï6ˆk€ÐÒÕøúökSª?ö¹jÇmÕÎÛö~q%ƒmq-q!-[µ#I´mw-Ö¶ÑCĵ z(~øá‡~ü~Ú·7ñAq-Tâ{ ÀL]«¹z‘Êg5®E?¾þŽºÿ‘þÎc[Ôè=$@\#®@HdµÚ9_Þ~mÊþôeÎÞ§‡ˆkÄ5˜|Ú¡úá4zH€¸ЏÀ ôÁ箹ù §ÑCÄ5Àä»pêÌ‘¢Êߟ8ͦˆk ®€¸@\›4\H ôq-¤q›:ÐCÄ5:# ® ®Ñ=$â××&Ò=$@\qmRþï¬-V\\ìñx\.Wnn®~õÀ3S˜ŽöTaaaDD„¬ž““Ó××XçŽŽŽ¬¬,·Û-/ššzöìÙÀeâââ¤èèh©gЦ «’Ú„E›6mRÛJ6Nkkkà‰Ù¯L³¢l6m³i#‹kjC)úc×ÌŠ²ÓpÃýhVíu 7õ±cÇdÓi…ÄÄÄtttØÜ/[c¸‡„Í#M¿º¤ó„„©›õ¶])몭¤F΂–i³nÓ0®q›:Œ ‡4<=øÍ4;;ÚL6AWœ¯çp8kèóùRRRÜn·Å2­­­ÙÙÙ’*bcclÖm¸qmdÛÊÎ2†±À¬áÃ*ßì=D­´´´´ÚÚZ™ßË—/·¿_lnA;zô¨ìh-3ÙßVãº+ÍÊÖ~$®ÀLkf£kú±À!¨ §(á9' ªé¾¾>‹Ñ5ÃkÑü–©¯¯iùm}’Û·oŸV‡ M³YIýðɶ•ÙèšaQvn6ºf!Ì6µl±Å‹Ë„üÖ.³³_,¶Æh\ƒ.&›?þ/~ñ ;Ûjò¹]ò›¸ס¥{ÿÛuQwK¢:Ýà§— ®Ä5À°õwŸ?Zòü®ù_Uß:ðë'kÇ﵈kqíÜjvzÈîýo¿ž»î…ën¯Î¨¹zQµ#éÔŽ½ãúêÄ5€¸FwÁ{Hm8­vΗ_ü³»Ô Zµ3y¼³ý3@\£;[=$_ñ׈kÒ=d÷ù_>TQ{ý—ªI*BÕ\¼}Ö’Îúýl€¸F\€ê!»~öÆž%ßP¡­úªÛHlqmâp«Øï!µÁ65Òöî3õl%€¸E]?{£ö“_×Ï]@\ŒV÷ù¶²š §Î°)âˆkĵÄ­@ ×BäôqΈkˆktF@ €¸Ä5ĵ‰Æ…´@ ×°uÂÂfì«âqÍî«“Û ®ÂŒŸ}N*..v»Ýò{Lê9ÜúØ\ž8qmæÆµØØØ7Λ7¸¦y\³¸öÒ¥K¹¹¹.—ËãñéÃGaaaDD„<•““Ó××§¢‰&0µø=%›6mòz½N§3!!¡µµÕ¢ä@{öìINN–‰¤¤¤ÆÆÆ ¡Ð¬!j¢梡¡!..NÖŽŽÞ¼y³áòYYYn·[š“ššzöìY³bí´À”è!×&ŽÅmê«W¯ÎÌÌ”TÑÛÛ+qD‹k×®ÍÈÈèééÈËËËÏÏ7LK†ÉI{(yEJ,..NLL´.ÙOzzúÖ­[e¢²²2---h\S é"×®YÔ|Μ9*J3ké‚ |>Ÿ´Eª-¯%ÑÐp1›­0%zHĵèŒ"##;::Ô´Lhá#**ª½½]M_¸paöìÙ#ˆk/^TÓ’rœN§uÉz²€,&k©u¥’'Ož´ŽksçÎÕŠ•…‡×¼^oYYÙéÓ§-š£'U 
7\ÌNë×׆Ñ™¥Ÿ°r8#ˆkÃ*YoÍš5~‹-Öz¾EÍ[[[³³³%ZÅÆÆ644.ïóùRRRÜn·õ±Ó:Ä5ĵatFúѵöövý蚺<Ë:“ ®™•¬ZicTªbG ¶IR¢¯¯oLF×4ûöí“—6\^ª]__/uS54+6hë×× X\H»zõꬬ¬¾!ÙÙÙZøØ°aCzzºJrÇŽÓ.Õš5kVWW—aQ~O™Å5³’5•••R%¿™©©©[¶l‘‰… –––Jb“T”““ãwíšjHFFF`œ²¨¹ÔA½Ù*q-""Âpy™¯]ß¶lÙ2³bƒ¶Àê!×BÂÅ‹—/_ît:%ŽiW˜©äíp8âããkjjÔÌ’’—Ëe8Rå÷”Å`›aÉšäääææf¿™•’’’.½q™ ëz½ÞŠŠ ý¡yyyZC«aQs©Ãüùóeݸ¸8íÍP¿åe~LLŒzÝòòr‹b­[`b\8uæøS/^|¯‡Mצ•ãÇKÎ`¿˜N7øªÂ_¸nÉ™½o²5âÚÔVPPÐ××wîܹŒŒ ™f¿˜6NlÝ%‰M~êÿüž#E•ýÝçÙ&qmJ*//÷xµ\›Ðn?)ú›“O½ Ú™¶L3Í4Ósmú:—ĵ!ÓÕ¾Ë'JŸÛó¥]Ÿø(«í½õ¾½1˺šŽ±#Ìe|‘@\‹¸Î¨sÿ›?ZòÈ®¾ 3nþ"‰ qFˆk‘Øýû`ÛÈHÛ/7±;××"±3êÜÿæ÷ãþ˜ï]@\@\›Nv~ÕàLMÕ Ù©æ.5ˆk ®€¸@\ÀœŒkœH ôq-¢q™:ÐCÄ5:# ® ®Ñ=$â×צ'Ò=$@\q Ä5`f9räÈSÀtkiiáÍד#e0ÝÖ­[Ç› ® 'ÒbÖǵ{ï½·˜K—.%®ĵ Àeê˜õqMŽš—€é ¯=â@\#®Ä5Dz\{ôÑGý~kkë™3gºººïM€¸F\ˆkˆ ¸VPPpàÀC‡½ýöÛ’ØúúúxoÄ5â@\Cŵ¼¼¼;w666Jbkmmíêêâ½ ×ÂÃ¥ ®M6íòÀXÉ;v$&&:›FZûèë3κiqíÁܶm›$¶øýþóçÏóÞˆkBǵ>ø ¬¬,))Éétº\.Ç“““3!Ùbô3.®³Âñññ²ú©S§¦¾}ªªª´Òdb›b õT†J\{å•W:tæÌÞ›q @ˆ¸&ÇK¯×û©O}jÇŽý#^{íµ¯~õ«Äµ ©°õê“Ú>·ß~»cÄwÞ9áõ'®Ä5S×JJJdþÆ-´ßùÎw>ûÙϺ\®øøx)GÀVÌŽèA hÓµµµIIIRàwÜáóù¬ŸÈ¬äªª*Ç#qÄzÝ×_=--íæ›o–‡233%ŒÈr5ßþö·NgBB‚L[·À´¶ÕRÎC=ôõ¯]&šššÌ’VÐ]y ÙyŠÏ}îs555ãiâ0Sãßè L¯Ñqí¶Ûn“ù]]]fGÙõë×Ë?üp¿–ùž~úéqŽ®÷öö666Ê´ä’OdX¬M)$äºÚÇ‘Z^innVŸóÚ‰kv¶Qò™,ðÄOÈô“O>)Ó£Û· & Èöó™ÏŒ¸ºpá˜s‰öa«öÔ5º6žö‘Õ>à"3µñE‹ Ÿ‚Ñ5> fÀ‡¡í Sù¿ª!¬½ó+ À,UPP`×äð÷ÝwËQS›6Æt~öÙgµS¦zzz´ëFxàõ9£øñ,?ýéOe™ÄÄD}4ill”å}ôQ³,rµ¡¬'Ÿ|R¦Ÿx≰Î]›ŒöÙ²e‹,vß}÷égfffjW0Xo¸öÝ=ôPWWׇ~hqîZÈf±]{þùçŸ~úé§žzJþÖÔÔðÞ¦†ä®°¯ %®ׄ˜¿õ­oÝyçÎr$þüç?_XX¨cê]wÝåp8\.—Dõy¢Ýo½õV‹D2zëËÍžÈΨžÙºûöí“°â‘‘‘ÑÜܬÍ÷ù|wÜq‡,/ñErŒY%Cn㥑+ ´1¶ +CCƵÉhŸ´´4Yåå—_ÖÏ|饗d¦ìVë ×B¡l‹õ{ Ä5€¸×"×®¨4öfw\[ºti0äµG\ˆkÄ5 t\¦q ®×S---ë>öè£ÊQ3//ïÁ,lúú×Ç_†üÛ@\ˆkÄ5 4¿ßàÀ;wnlÛT¾v¢Š’מ¼åuò‹ ׂq©æˆÖÖÖC‡566ÊQóÀžç¿ñÔ„”#¯:yíÉ+P^‡]]]¼âgΜyûí·#ó§{1~¸P^{ò ”×a__ïG€¸À@WW—)#ó§{1~¸P^{ò ”×a àý×cd__?Ý‹éúáByíÉ+P^‡W¯^åý×0‹â—=$@\‹h|‘ÐCÄ5:# ® ®Ñ=$â×צ'Ò=$@\q cÞUQì,ˆkˆàDE\€¸F¨úHuuµÛí–¿Ó¡¢>&uX±bEoo/q âÚ 0'Ò†Ì= ,زeËüùó§+®i@ ¸¸8??Ÿ¸`ÊzHĵ±³¸LýÚµkEEE.—ËãñTUUécMeee\\œœ‘‘!éééÍÍÍú‡***bcccbbjjj ‹Rƒƒƒ¥¥¥Ñ#dBîªêêê¼^¯ÓéLMMmkk³Ž€W®\‘ÊÍïìì” çv»¥¬¬,ýð[ÈÂÌJ|‘@\‹ôÎHRT^^ž¤±@ 9FÅšuëÖåææö÷÷ •”””••ÎC£åäälß¾]&êëë³³³Õüµk×J6’Ò$Ê3¥î–——ËSFHÝä®Z@‚£2<<\]]––6†¸¶hÑ"¿ß/%ÈvIM$ªÚ/q 
qm:£øøøÎÎNmZ&T¬ILLìèèP¹'666d\“åe-‰;2-¥äööví¡„„5m­ôwõ+Ê„ÇãQ ¨‘6)ßétZÄ5;†J!111ö @\@\›†ÎÈ,3Eý>‡Ã2®­Y³&h­ÊÊJ‹µ,žZ?_%'³åƒfj\.—á¥~¿?33Óív[oçºÄ5ĵ)eq"­~t­££C?º¦²Ž344$E©9­4Ç£ ¶…;º¦Ê ]³׬S©lWSS“ÔV«³YF$®s—ĵHWQQ‘ŸŸ?0¢  @Å” 6ääähIîìÙ³ê¯èèèžžžÑåÔ×׫O•¬¬¬mÛ¶]ÿøÜ5ÉúsׂŠRO­N¤J¹¹¹úsׯ×âââ´k ¤2+W®$®@\‹tƒƒƒÅÅÅN§SrLUU•þœ-IlIII‡cñâÅ ÚÌ7º\®Ñi&##£¥¥%h¦£ôôtmZR—ä3ueèè¢ôW†–••¹F¬ZµJÞÆ5ŸÏ—œœ,åõzkkk‰k×f’sçÎI>c¿˜®\¸xîÅýƒöÓqm6(//¸té’þ“G˜éº}þ]Qi ®¥ ‡h €¸é¬O¤­­­õx›÷³g·1º×"'Ò˜ãYm;Ów×_ìùÒ¿þÍžæ·~;ôzH€¸˜~¼ôš6¨¦N@\Dí{×öÜ|oÐpâ`ú]¹pñýªú_ï¦)âˆkãÆ‰´@ ×"—©=$@\£3ââÐC ®q qmªq"-ÐCÄ5ÌÌ]5©Ëϸ œ­u˜®Í9zôhbb¢¶Š~@\ÃLŠk“züž•qmz7*¬gOOOoii=Mhâ¦!®UWW»Ýnù;]™O#uX±bEooïlŠk“´c+6¬µ\.—á4€¸†iˆk ,زeËüùó§+®i@ ¸¸8??Ÿ¸ qM¿0#j@\³2æiåSSSïp8´9•••qqq.—«°°p``@-YQQ#ËksKKK£GÈ„ÜUeÖÖÖz½^§Ó¹hÑ¢ãÇoݺ599Y¦ž:u*¬Å̪$«×ÕÕi«ËòmmmÚük׮ɧªªJ5,Çby½Ã‡gdd\ùÀ«¹¹YÿPPËDéÅ-ZÌp[ÌbÁ•+WÔ@ŽšßÙÙ)ÎívK!YYYúá·…‡»®ÍF3lpŸÏ—’’"3“’’d[·˜Í‰aýG;Î×Àèu£Lí¯Ñï³½ð²eËÔ£ÝÝÝ ú7&&µ‡@\›Hc¾L]*<òˆêý×­[—››Ûßß?44TRRRVV¦Í_»v­Td¾Øäp¢Í,//—…#òòòä®*sÅŠ===ÃÃÛ6m’C þ®zì/fV%Y]Žš2_–¯®®NKKSG;©Œª•:jš•c¶|œœœíÛ·ËD}}}vv¶šoØ2A…¨»-f¸-aÅ5I3~¿_J ”šHþ°_x¸ëÚi4³Ÿ7ož–w%£è÷¦áÆÚ|‘XÔßN•Ƴ9f£kjÚðbVáÑ =zTÒ­*vÕªU(éš§¬‡@\‹”¸&Çu711±££Ceù/_›–èÛÛÛƒÖW3eB¥ªL•ÿä€t×ét†µ˜Y•dy5:¥_^ªª–—Z©£¦Å¦.¯' Èêò,Úsé7ܰėE‹n‹a!v> •BbbbìîºvͬÁ½^oMMMww·³ù"±¨¿*gsBÆ5ÃWˆY… NOOßµk—Lœ;wNZO’]3q Àœ‹kAwõÔ'¤†° ™úfóHls1;U Yl¸åè­Y³&hõÊÊJû-cV7›-T—Ëex©ßïÏÌÌt»ÝcØÀp× «ÂAe¶µµHÜY°`Ï盉ýúù5`g]Ãéñ4¸æàÁƒ .”‰¢¢"õq*ˆkæn\KLLÔŸF£˜®éÇ$ôcE×̪d¶¼ÅèšÙ¦Y¬ é·Tl“ÕÛÂ]C‹YÌ j¨¦¦&©­Vç°¢U¸ëÚŽ2lpåèÑ£Ò õ"±Sÿ1¿l¾í®™UØl(.55uýúõIIIÚ«Ä532®çRýÝ 6ääätvvÊôÙ³gƒN©‘ÕþüuÆÏÀÀ@nn®þL¬ ŒkfU2[^«ÕÀ©UÈrÌ–WêëëÕ'Š´Æ¶mÛÌZ&::º§§Ç¬nᶘ͸§Î [¹reXq-ÜuC6šEƒË„–H$®ÉóZ·˜Í‰YýƒŠókÀæëÐâܵ WˆY… ûöí“e´—¦²‡@\‹Œ-ud’Ã’üïp8/^¬¿$M²…ü‚® -++sXµj•:¥fbãšY•Ì–—j”””8N9"]ågXŽÅòšŒŒ õݧŠkÓÓÓÍZfãÆÒ&†W†Ž¡ÅlÆ5ŸÏ—œœ,[çõzkkkÊk᮲Ñ,\&.\(릤¤¨CÍZÌæ‹Ä¬þAÅŽù5`óuh6=úbVaÃ…Ecc£öy(`6ǵ«}—Ûw¼!Ù©ÀŒ“ŸŸ¿oß>Úfm\ëmy·qÞWvE¥uûüìQ`ÆÙµk—þ»<³'®]í»ü/ßÚÜxÛýÔäöÁËMìN`Æq¹\^¯÷ĉ4Ìø¸¦?‘¶·åÝÃËJwߘ±û_Ø}ã={Ü÷v5c_˜³¸Ô ®E„]Qij8m·#]Qk¸)cOô}d5s_ä×"¥3Òn»o¼GMËííÿçù·R&´.™fši¦çÚ4q ®EÖèÚ鯾>?¿qÞW=|”ÞœöaG`t q-‚:£‹GÞy«hý÷›þèÁÝ7ÞCb@\£âÚô}"­lÓ†Ùþõ…Fv'€¹‰K âZ¤»xä‰_ã{×q-¢]í»|¦¦áÊ…‹ìT@\q 3úR=$@\‹ 
\¦ôqΈkˆktF@ €¸Ä5ĵ©6Ù'Ò=z4111*j, 2¶µæÊ+,ÌÆ¡1ì!×""L¤§§·´´LM"!®Mec²wĵYÂår9"*®±;Ä5Rq¸qÍžyóæýǣͩ¬¬Œ‹‹s¹\………2_Ëkjjâãã‡:´Gé,[¶¬¡¡AØÝÝ VZXÍ©««óz½N§355µ­­M-lX‡ l¡¯Å*±±±111²¼6gpp°´´4z„LÈ]Ufmm­VŸE‹?~|ëÖ­ÉÉÉZõN:Öb-i¸Õ×®]+**’…eGTUUéÓa9Ë+ùùùn·[ž+++«··×¬Y ÷NÈæ2ÜŸÏ—’’"KJJ’–¡³×B0;‘öñÇ—c¼º+Ó«W¯–‰uëÖåææö÷÷ •”””••©có#<¢ÏAG•ƒ´*pÕªUúò Çoä®äy®áááêêê´´.Ñ2«Cкúú˜­²víZ‰)2_Âmfyy¹,‘——'wU™+V¬èéé‘úlÚ´IbþnFFFX‹Y´¤áVKõ¤2ªVª­ÌÊ1[^OÒ¤ßï—'’uey‰wÍb6öiÑ\†"ÿ 477Ë„¤CÃ}D~ €¸6¥Ì.S?{ö¬×ëUw“““;;;e"11±££C›yåÊ•ØØXul–óèc¹þŸžž¾k×.™8wîœ.Q d\SC5rÈw:Ú´Y‚ÖÕ×Çl•„„„ööö uãããÕL™¼¥ÊTùOêtWUÏæb-i¸ÕRUµ¼ÔJµ•Ŧ.oFž+&&Æ¢YÌâšEsnˆìúšššîînº ÌÜq-R:£ìììÆÆF™¿ÅÅÅ꬧>j4;–ëçy´F—iXëuíTÛp¦>‡Ù¬Þx¶"ÜbÃ-GÏï÷gffºÝî14‹YÅB6W[[[AAÄÊ ø|>: ×ׯÞ=zôÞ{ï• ù«Î¸JLLÔŸáî±<55uýúõIIIÃÃÃcŽkfu°X×l³Ñ5ý¸”~¸hãZ¸-i1ºf¶i!G×dݦ¦¦¡¡!™–¿úç ktm Í¥½Àd]: ×ׯÕ-^¼¸®®îþûïWs6lØ““£}0zöìYu¶“Íì²oß>™³mÛ6狎Žîéé ™Ìê`ÌVÑNÒ’¸£?IKõ500››«?kãZ¸-©Õj`„Ô*d9fËëÅÅÅ©ÓÈV®\©–1l³½nsI µ,(qM*@gââZÖ'ÒJVs¹\rX Š>III‡CœºØÓfvillÔ>5´qãFy:;¹Ç°Öc9f«H¼ tehYY™kĪU«Ô9v×ÂmI©FII‰Ó锈t¥§a9Ë+>Ÿ/99YVôz½µµµúeF7‹ÙÞ ·¹¤†òŠ¥¤¤ða(ft €¸6;åççïÛ·vĵH´k×.ýwy×"ˆËåòz½'Nœ )q ³%®q"-ÐCĵˆ6y—©OÞ/‚OãoÏÍŸ9çÇÝç £G&&&†ÜõSöÚ˜®!_ä׈kĵ™˜&äÙçr曉۞žžÞÒÒ9›îMTňkq¸6Õ%aE;?å4ƒZolÅ2º6¹\®™õŽ˜¤š×âq¸F\C¤va&;¸€¸6 ,N¤íììÌÏÏw»ÝN§SûI"Õ-ÖÕÕy½^™ŸššÚÖ֦ͿvíZQQ‘üSîñx̾F_TVVÆÅÅÉb………ÚO$Í›7/¨dŽ” s,*`}ä0\Qÿ;èf•}ZÅlÛCe±ŒÏçKII‘™III[·n5{^5Q[[«U`Ñ¢EÇ—U’““µú¨Ÿvg ØÙ•fËØÙkfm¨í}³×Ã膲ùŠ5«ªÙ|™¨©©‰W¿yosǙ͔-¶Ë¬üp_löë£788XZZ=B&ä®ÙKeÂ߆u3¬OÈw½Å[Æ¢@‹×¡q-"HðûýÃÃÃCCCúµ”þ·¿¿_ª®®NKûè¿OõÛ‘B& ñëÖ­ËÍÍ•u¥Ì’’’²²2™ùøãËÁR-#Ó«W¯¶®€u\ ¹¢EeBŽ%n»¢Ì–‘£¸úáN5Óâ®V¬XÑÓÓ#Ø´i“ïõw322&¤ììJõ›¤²Œ„$ûoÖ†ŠÙëÁ°¡ì¼bͪj6_&yä•0ÂÚq{Ól»ÌÊ÷Åf¿>zåååRšÚ×f?ù:ïú…¬O¸?þfQ õëqmæ‘-&&Fusê?T™/ÿ›jÓ Út{{»awŸ˜˜¨–¹råJllìõ‘&—ÿqÕ2ÉÉÉÚ–[TÀºƒ¹¢EeBœ ·ÝNQfËȶ×ÔÔtww[?¯~œFU 讪Ï8[ÀήŒW»I&ì7¾Y*f¯ƲóŠ5«ªÙ|™£øØvœÅÞ4Û.³òÃ}±Ù¯OÐ~”]¬öµün\ó;°n!ën\³(Ðúu€¸63øýþÌÌL·Û­}Ä >ÛO›«™zªÌìììÆÆÆë#¿_\\l³fÏh¿æ†•±›F×ÄNQ†Ë´µµÈñlÁ‚ê'ÏÇùûñãl›»rlOm§pÃ׃aCMÆ+Öf+ÖÇzonW¸{aüõ±h•ZÆvîZXï;¯üÑõçÞ4Û@NЈk3•üÜÔÔ444$Óò7d·hstMQ¤wôèÑ{ï½W&ä¯:+dä ÿkÓj¾Íš›Uf ';E…\F!>>~BâÚ8[ ÜÑ5YØþSÛ9L¾ ÊÎ+Ö¬ªfóÃ}ÖÇpoš½Î Ëó‹-d}‚ö£~_atmÌïú…¬Ù»ÞbtÍÎ×âZD³8‘6..NY²råÊÝ¢:Häææv6lÈÉÉÑŽ‘gÏžUg‰Å‹×ÕÕÝÿýö+°dÉ’M›6Iß- 
ªùf+FGG÷ôôØ©Œ´ŠÙ¶Û)Êl™Ð>¬‘ƒ–ÔÜúym,ÇÙvv¥,“ŸŸ¯-SPPò©ÃŠk†¯ƲóŠ5«ªÙü *…µã gê ½]få‡ûb³_Ÿ ý¨§¨íëç®Mà;°n!ëcö®7«˜Í 4Û^.5ˆkÁâ2uŸÏ—œœ,ÿËz½ÞÚÚÚ‡Ûk×®•””8Néy-® •~<))IŠ•ãVCCƒš/Ç0—Ë%·ý ´µµ¥¦¦j lÞ¼YÍ7[qãÆòúŠ™UF ZÅ¢‹Y”Ù22±páBi·””õ‘ÙóÚŒkãl;»rpp°¸¸X-£>c ¹×l&G¿ ÊÎ+Ö¬ªfóGWÉþŽ3œ©/pôv™•î‹Í~}‚öcYY™kĪU«d×[ï— |GÖ-d}ÌÞõf³¹fÛËyĵHk€MçΓóŒ®ê ÚÐC ®v•—— \ºtIÿÓ̪ê ÚÐC ®a«­­õxûÀ¿*m̆Ê2·Þz«šv¹\qMŽÄꮯ’£šÜ=uê”a W±× ×õz½r÷ý÷ß—i9²†<Šwuu©9ëšmEBB‚Ìׯ`¤>2-sÌ*iVgûKZÔ0ä.ÐÖ2¡oÒ[m¶biyÆ»ï¾[»+r÷äÉ“v^fsÂj^;¯ÙÝö? zñÛ|:‹Ñµ—^zéÅ_” xàÀ'NÐ;aŽ$®×^ýõŒŒŒ›o¾YŽF£?¥²Žkv2«€õQÓúܵÑO§n 4\×þV8N;h³õìo]X»`leZï¦o|ãÚGºÚÝ7Ê]™V†l›Ík¶[eÓ$ž>ñÄ===£W´~ñÛ|:âD^í| Lð‡¡GæËqK¦ûûûƒŽXê@~á›3mXE²Lv\ÓÆŠÔ(ˆõÀaP±ëšmE||¼ZÅpøgbãšE CîëÑ5‹­¶3º&/•Ûn»mô OfÊCö_AsÆÜ¼ö’õZ¿øÇ×ø0|j÷RíOù·¦!¬½ó+ ÀäÿéÊkÚA÷Í7ß”ÃUiiéè£~cc£<ôè£Ú<˜i'-}ík_ûàƒ,NZ²8þÝrË-òÐéÓ§íD“gŸ}V;«§§GêùÚk¯=ðÀ6âëšm…6¤ôä“OÊôO<1ú䪉k5´XKûr‡z¨««K*t šE™Ö+êmÙ²Eæßwß}ú™™™™Úéù h½sÇܼc‹k/þ ]{þùçŸ~úé§žzJþÖÔÔÐ;aŽÜö•¡Ä5×Ì>Ó?MMMwÜq‡ÃáHLL¬®®Ö–|>Ÿöä69Û?˜mÞ¼Y;“Éãñ]jçø'¡áÖ[oµÿtòwÝu—<Ëå’ä!ùÃþQÜb]í¸4r ¼6dxéâÄÆ5‹Z¯%™L*)kÝ~ûí²!ö[ÌzE%--Mæ¿üòËú™/½ô’Ìüüç?oÑ€!wîØšwlqÍâÅO\¦4®µ¶¶úýþCæo?v˜<òH?BL÷ù£×䟉§Ï=÷ܦM›vìØAï„9BrWØq­««ëüùóg"Ìë?s˜eeeÄ5 âÚ /¼ ‰móæÍõ‰Ñ22»IDATõõÍÍÍôN˜#BþB._“ ðï@¤Ä5¾& ®Ä5€¸×ââ@\&#®-]º´Àtwq ˜ qíäS/°·0©q Àô"®3>®íŠJcoa’´´´¬ûØ£>*ÇŒ¼¼¼|°˜)¾þõY°òq ®¡ùýþìܹs0sl*_;k¶EÞ}ò”wbȯ6ˆkÄ5ÌQ­­­òo}cc£3^fˆ§³ž"ï;y÷É{PÞ‰]]]ôHq 0pæÌ™·ß~;2Š ˜õ?Ó'ï;y÷É{PÞ‰}}}ôHÀ ‹k\j€©!ÿÐËq"2Š 0óòýëYóS<òî“÷ ¼=0Ãâ05ä!ÿÓGæO±sá§xäÝ'ïAy'^½z• ®×@\ ®M?.5zH€¸Ñø" ‡ˆktF@\@\£3zHÄ5 ® ®M5N¤zH€¸6c·!**2Ÿej*6[×fs\›Ô§&®@\›©q-h™ñW¬ººÚívËßii„¨IV¬XÑÛÛK\€¸F\û= ,زeËüùó§+®i@ ¸¸8??Ÿ¸q͔ʼn´*=tvvJ¤p»ÝN§3++Ël4HÝ•‰ºº:¯×+˧¦¦¶µµ©e***bcccbbjjjB.¬PB®RYYçr¹  ·ëðáÃ2‘žžÞÜܬ(¨z†O­M –––F ¹kss‚ÚíÊ•+R[û ²pSÓC ®M‹ËÔUzX´h‘ßï’@STT2®IZêïï—Uª««ÓÒ>zеk×Jøù×®]“r¬6Ë7«¬[·.77WæK=KJJÊÊÊ ·+''gûöí2Q__Ÿ­æ›Uϰ&åååò\yyyrwl›c×,ËT6@ `ǵ¨ˆkÄ5ˆk!TWW»Ýnù;-Çæ¨IV¬XÑÛÛK\#®@\û= ,زeËüùó§+®i@ ¸¸8??Ÿ¸F\€¸öï>œ‘‘!éééÍÍÍú‡***bcccbbjjj®ë†ÁÔZM –––F ¹«¨««óz½N§355µ­­ÍúxåÊ—Ë4¿³³S2œÛí–B²²²ôÃov 7\ƬLŸÏ—’’"uHJJÚºu«aÕÕÕG–)**R[**++ãââd~aaáÀÀ€õü°ZÆ¢ —·(h¦Æår^jà÷û333Ýn·¶˜Ãá«pûfe¶µµH4Y°`Ïç Y 
~Kõô•4›o¿òá¶€Íù!sÍââÚÄtFCCCñññj¤äúÈ`›ÇãÑÛÂ]Ó¸èG×ÂMT£ç'&&655Imµ:‡ %v*lV¦rôèQ5rT‚Ùèš šzfóê¼Ym%Bi;K „l™°ö©YÍ]q qmê:£úúzõÉ£’••µmÛ¶ëŸç$lýyNÑÑÑ===£íê|& ¹¹¹úsׯ×âââÔùd+W®œ¸fVfQQ‘h$®É2†Ê–ŒÐŸ»¶aÆœœœÎÎN™>{ö¬:½Ìl~X•7«í’%K6mÚ$‰Mæ†lÃ}j¶°Y͵}­µ€ìkâˆkˆk¶„<‘öjßåöoÈ_ýÌŒŒŒ––– %%¤§§kÓ’º$Ÿé¯"ܸq£Ëå2¼2´¬¬Ì5bÕªU’&0®ù|¾ääd‡Ãáõzkkk'$®™•ÙÐаpáB§Ó™’’böahUU•v½dII‰ÚR-ß$%%I™‹/–r¬ç‡Uy³Ú¶µµ¥¦¦jó7oÞl§eFïS‹… k.›,.M$À•¡˜=$âÚ4ë;öÞįÉ?—Ý>?{×"Åվ˧7¾ê»ë/$¨Éílív' ®E„¾cï½U´~_ìò}þÑײwÝð…s/½Æ¾ĵi¦†ÓüÇœºç¿ì]þ»qµÒÉj€¸6ýü¹VûÐóµ?üš6¡Ýþù/מ|ê™Ðδeši¦™žkÓ×¹Ô ®Eé˜~Õþ¡ÿ/Öì¹ù‹*«íÿƒö~òK]MÇØ‘æ2¾È ®E\gtîÅ×^_P°ë¿Klû>µœÄ€¸F#ĵH쌂Û>x¹‰Ý €¸€¸‰Ñ¹_Û7rÙß»€¸€¸6mìüªÁ™š†+.²SÌ5\j×@\q €¸€9×8‘è!âZDã2u ‡ˆktF@\@\£3zHÄ5 ®¦Çþ(.4$®MN¤€ ï!gÐa›„1£“Ý7Wâ`&«««Ýn·ü5ø“'Ofgg»GdeeÉÝÈß³„'‸fjÁ‚[¶l™?þ숧OŸŽ¯««±uëVÇsöìYâˆk»GeEÍ©©©‘„áp8ô‡ík×®¹\.IUUU!ç‹ÊÊʸ¸8y¨°°p``À°ü ‡ÎÈȉôôôææfýC±±±111²ºY͵‰ÁÁÁÒÒÒè2!wÕ›¼^¯ÓéLMMmkkÓæû|¾””©gRR’ĩѵ ·@=iœM›6éçÈ]™©J¨®®–¦“g—™ªd‹Ö³¹ £Û§³³3??ßív˺YYY½½½fm«oÌÖÖÖ„„ ÐAÛeÖh!wú¼yó€Z^–‘Í—9úWÎèú˜•rß׳$±Ý}ä‘GôZuÍËË“ùrd•£~ÈùëÖ­ËÍÍíïï*)))++3,?HNNÎöíÛe¢¾¾>;;[Í_»v­$ )MÒ¡<£Y͵‰òòryêÀ©›ÜU È‘^ –”–öÑå ´h( FÕS/Üõ$a\ºtI?G ‘,¢JUÉjÓ,ZÏþ&µÏ¢E‹ü~¿¬(eÊ©ÈhѶ”¬&‘hôv>£þøãK¬WåÈôêÕ«õµ5¬YÉ!÷q-âp©Œ¡‡zä 8úÑøøøÎÎN5Nr~bbbGG‡6}åÊ}@Ñ—¯'ËËZ’'dZþJÉíííÚCÔ´EÍU•ÔÂ2áñxÔjøJÊw:Ú´×ë­©©éîî6k¢p ´¨¤F-)ªV’’å‰B¶žýM°ø@SÖ‰‰±nÛºº:yèĉ†%>£~öìYYW­’œœ¬½~Tm ëcVrÈ}G\‹8\¦cè!ÍBOÐÝ1Ì×S}Zdˆ5kÖ­UYYi±–Í*鳑áòmmm’,X`8’nzR¬þ³¿ë#ÿésŒYÉvZÏz‚öûý™™™n·ÛΑ™RÔúõëÍv–Ù3Ú©vvvvcc£LÈßââbëÝg]rÈ}G\#®ÀŠkúQ´ŽŽŽóõgG…ò’¢ÔŠVšÇãÑÛÂ]ÓYéìkrôèQ5¾¥7æ¯Û8wÍltÍNëYoBУRfSS“´³ÖÚÖ£Yò¨T@2PPåGÓ?£ÍjË*÷Þ{¯LÈßS§N-c6ºfXrÈ}G\#®ÀlˆkÑÑÑ===!AEEE~~þÀˆ‚‚‚ó7lØ““£%¹³gÏêŠa5êë륜 ™YYYÛ¶m»þñùLrÀÖŸÏdVsítº@ UÊÍÍÕŸjf¸iR7-È!?..ntÝÂ-PïôéÓ’?¶nÝ* I¢§LÈ]ue¨vîšÖzús×l¶žõ&µÌW§y­\¹2è\± ¶Õ•™‹-ÒŸj¦¡£ŸÑþN_¼xq]]Ýý÷ß?z[ ëcVrÈ}G\#®Àlˆk7nt¹\!?Ü,..v:rP”ã·úØÎl¾vˆMJJr8rlnhh°Žk---A3%^¤§§kÓ’$è¯4«¹T©¬¬Ì5bÕªUrÔ·Þ4©ÛÂ… ¥æ)))†¨…[`S§NIþÐVÿò—¿¬ÿÞ5YEM»à±¤¤D•l³õ¬7!¨}d~rr²èõzkkkõEn[õ¨Ä&©ÀÓO?´Qffs§KV“ºIÆ2l½Ñõ1+9ä¾#®E.5€©é!Ï;'NûóazˆåëÍ0×â`R•—— \ºtIÿ Å|×@\L©ÚÚZÇãv»ƒ¾ÐÕl>Br¹\4ˆkÄ5×ìàR0³+*mzo쀸öQgÄÞ€ì!éŸâÝ×â×èŸâÝ×Ìæ¸Æ¥™=$q ®"q ®ˆkq 0«]í»Ü¾ã 
ùK\ˆk€ÈÒÛòn㼯H¢êöù'é)ˆkqí#\jö{È«}—ÿå[›o»_ûÕ^nš¼g'®Ä5º£‡ìmy÷ð²ÒÝ7fìþÄvßxÏ÷½]MÇèŸâq ¦¹‡TÃi»éÚˆZÃM{¢ï›ì¬Fÿ ×èÀVÉO¼Ä5âDtyµïò鯾>?¿qÞW=h)j·3ã¾#´@\›"\jvzÈ‹GÞy«hý÷›þèÁÝ7ÞCbˆk€H¤Û´a¶}¡‘6ˆk€HtñÈ;¿6©ß»€¸¯«}—ÏÔ4\¹p‘¦ˆk˜«qK € ®E4¾Èè!â××茀q ˆkˆkSi€ ®aTëDE±]€¸F¬™ê§&®@\›©!,è¡ñÇšêêj·Û-§%BE}Lê°bÅŠÞÞÞÉŽk6K&/@\‹”¸¶`Á‚-[¶ÌŸ?ºâš6Š‹‹óóó‰k×&‹Å‰´×®]+**r¹\§ªªJ*++ãââä¡ÂÂÂ뺧щaôC2QWWçõzNgjjj[[›EÉ£>|8##C&ÒÓÓ›››õUTTÄÆÆÆÄÄÔÔÔ˜=µ6188XZZ=B&änȺ¦¢+W®HmƒæwvvJ†s»ÝRHVV–~ø-dá>Ÿ/%%EÊLJJÚºu«áV–o¸ ì´'€1ôˆkSÇâ2u‰>yyyrŒTX·n]nnnÿÐÐPIIIYYÙèc‘o´»’¤„áááêêê´´4ë’ƒääälß¾]&êëë³³³Õüµk×Jv‘Õ%hJåÍžZ›(//—ç ŒÍ”»Öu +®-Z´Èï÷K ²!RI½ö Ÿ7ož–A%„™µ­EùúÅl¶'€1ôˆkÑÅÇÇwvvjÓ2¡¢@bbbGG‡ +±±±cˆkj4K2‡Óé´.YOÅd-m]©d{{»öPBB‚š¶xjµuja™ðx<Öu3,Ä·¡RHLLŒý½^oMMMww·ÅVX”¯ÈN{ ®ĵÜ™¨ßçp8Æ×Â*YoÍš5A‹UVVZTÀâ¹ôóUr²sšzj—Ëex©ßïÏÌÌt»ÝÖMdXx[[[AAD« ø|>Ã%í—²=×âÚ îŒô£kúÑ5PB†°âšYÉÊÐÐÔJióx<Ú`[¸£kªœ Ñ5;qÍz3eCššš¤¶ZÍ2¢õ•G•J.i³üí €¸×f‹i+**òóóF¨(°aÆœœ-É={V8ÝÓÓcXTÐCf©Å¬d¥¾¾^}ò¨deemÛ¶íúÇç®I@ÑŸ»föÔÚ™y@@¶.77WîÚøãZ\\œ:ÿlåÊ•aÅ5Ùj-tJ\“r ·Â¬ü ÅB¶'€1÷ˆkapp°¸¸ØétJ8¨ªªÒŸh%9 ))Éáp,^¼¸¡¡A›¹qãF—ËeA‚²H-†%+---A3%¸¤§§kÓ’º$²¨+C-žZ¶®¬¬Ì5bÕªU’ð&0®ù|¾äädÙ ¯×[[[V\“­^¸p¡´vJJŠú04h+Ìʽ ¬ÛâÚìqîÜ99ê³_ÌW.\<÷âþÁûi €¸6”—— \ºtIÿq!ÌtÝ>ÿ®¨´=7ßûáÁ·h €¸6³ÕÖÖz<·Û]TT¤¾f_¼ú#IlrÛ÷©å­å5Wû.Ó&q-Bq"-€9«§ù­=Ñ÷5ܘ.¡m·#½ù ÿwÐ`=$@\‹\¦€ÄÖpS†6Ò¶Ûq~° ®EJ\ãÆ7nê¶û†/h{×â£k0ͺ_ÿñnçGCkó¾ºÿ²šþèÁŸ=»Ñ5€¸F\€HÉj»oHoJÊÝ{ë²çWô4¿õÛ¡ßÐCĵȉ´æ¦óÛßÐÕôÃiôq Ô÷® § ®¦ß• ߯ªÿÕùn𠮀¸€Ù×8‘è!âZDã2u ‡ˆktF@\@\£3zHÄ5 ® ®M5N¤zH€¸âÚ´üßyòdvv¶{DVV–Üý÷튒‡‚%êß›¥§§§¬¬,11ÑétFGGË´Yò„< ®Ír§OŸŽ¯««±uëVÇsöìY›ŠŠŠ¶lÙb˜¥$Hy½Þ 6ô÷÷ËÝk×®ù|>•½B–<þ§˜ýqM¢Ò¦M›ôsä®ÌT±i```Ñ¢E*fé³Tiiiuuõ˜KÿSÌþK âââ.]º¤Ÿbccõ±Éï÷§¥¥ÎRóæÍ Z7Ü’Çù0©=$âÚDóeêú³Ä§ÓôèÚµk×­[4S-¦ækÂ*y ºýô±g¥Ÿ’ ­·bši¦™žkÓ×ù"€¸9qíjßåÓ_ýÁåíûÔò=Ñ÷iq­á¦¥ûßdG˜Ëˆkq-â:£‹GÞùÉÃë\Koû*‰ ˆkq-B;#5ئ ³{é5v'ââÚ´±8‘öâ‘wüa6ß»`ÎâR€¸63\í»|¦¦áÊ…‹ìT@\q Ä5âÚÄàDZ ‡ˆkËÔ€ ®Ñq qÎè!×€¸€¸6Õ¦åDÚ¨¨ÙyåìlÝ.`ÎâR€¸6‡[gúbͤ>5q âÚL aA?ÖTWW»Ýnù;-*êcR‡+VôööNv\³Y2yâZ¤Äµ lÙ²eþüùÓ×´‰@ P\\œŸŸO\€¸6 ®]»VTTär¹<OUU•> TVVÆÅÅÉC………×uN£Ãè‡d¢®®Îëõ:ÎÔÔÔ¶¶6‹’G;|øpFF†L¤§§777모¨ˆ‰‰©©©1{jmbpp°´´4z„LÈÝu3LEW®\‘ÚÍïìì” 
çv»¥¬¬,ýð[ÈÂ}>_JJŠ”™””´uëVí0,ßpØiOˆkÍâDZ‰>yyyrŒTX·n]nnnÿÐÐPIIIYYÙèc‘o´»’¤„áááêêê´´4ë’ƒääälß¾]&êëë³³³Õüµk×Jv‘Õ%hJåÍžZ›(//—ç ŒÍ”»Öu +®-Z´Èï÷K ²!RI½ö Ÿ7ož–A%„™µ­EùúÅl¶'€1ôˆkSÇâ2õøøøÎÎNmZ&THLLìèèPa%66v qMfIæp:Ö%ëɲ˜¬¥­+•loo×JHHPÓO­¶N-,Ǻn†…Øù0T ‰‰‰±_¸×ë­©©éîî¶Ø ‹òõÙiOcë!×"¢32 :Q¿ÏápŒ!®…U²Þš5k‚«¬¬´¨€Åséç«ädç4;õÔ.—ËðR¿ßŸ™™év»­›È°ð¶¶¶‚‚‰V ,ðù|†KÚ/?d{ ®ĵÜéG×:::ô£k* „ aÅ5³’•¡¡!©•1Ò*æñx´Á¶pG×T9A£kvâšõfʆ455Imµ:›eDë+Ž=*•4\Òfù!Ûq ®ÍìΨ¢¢"??`DAAŠ6lÈÉÉÑ’ÜÙ³gÕ‰SÑÑÑ===†E=d–ZÌJVêëëÕ'JVVÖ¶mÛ®|îšý¹kfO­™dërssõç®?®ÅÅÅ©óÏV®\V\“­ÖB§Ä5)Çp+ÌÊZ,d{ ®ĵÀâDÚÁÁÁââb§Ó)á ªªJ¢•䀤¤$‡Ã±xñↆmæÆ].—a zÈ"µ–¬ddd´´´Í”à’žž®MKê’È¢® µxjÙº²²2׈U«VI›À¸æóù’““e+¼^ommmXqM¶záÂ…ÒÚ)))êÃР­0+ô.°nO`nºráâ¹÷~Ø?žq-âœ;wNŽúìW³C·Ï¿+*motæÅ#ïÐqmf+//¸té’þãB˜~ñê$±ÉmÿÈúÙsÛ¯ö]¦MâÚŒT[[ëñxÜnwQQ‘ú ˜zšßj¸ù‹#Ãl÷5Ü”qìOW3Ø×šØ´ÛÞ[—1Ø×"Žê¤¸qãÆ›þÖ±ç—ĵH‰kì-Œ®éG×þ¿¿}I]£‡ˆkÄ5ˆˆ¬ö»s×nþâ›<.s~;ôzH€¸F\€é§¿2T §ÑCÄ5âDí{×ö¸– §ÑCĵHĉ´æš+.¾_Uÿ«óÝôq Ä5׈k ®áDZ ‡ˆkËÔ€ ®Ñq qÎè!×€¸€¸6Õ8‘è!âÚìþê¯þÊëõ:Θ˜˜üüüãÇ´ñQ›¯Ÿyúôé¼¼¼èèhY7==½±±1äCQ£Ø™ïp8Ün÷âÅ‹ËËË{zzô•ÃZ€¸6“dee­^½º»ûw?ºwíÚµ}ûö-[¶ÌN\;w¼ºº:YKî¶¶¶†|ȰL;󇇇O:µfÍÇÓÑÑ1žµqm&q:’iìG(5³¨¨¨¶¶ÖpE‹‡Æ×” 6HùãY ×f’åË—ûýþÑ¡Í:®ÅÆÆÃ2-\“’cbbƳ ®Mƒ1ŸH{éÒ¥5kÖ¤¤¤¸ÝîÄÄÄÕ«WË;qÍétš•iñÐÎ]³(lk˜k¸Ô ®E„ ¹L½½½}ÕªUË—/·=‡C›˜®Ñµþþ~)4ž+C%,®Y³&>>>¬+CG¯€¸€¸‰ÑÕ¾Ëí;Þ¿Aó?^PPàv»‡dšÒÒÒÞÞ^õhMMMJJŠs„L™>}:??_[wÉ’%Aß»føÐؾwM¸\®ÔÔÔŠŠ }õƶââÚ4°8‘¶ïØ{þ0[z«nŸŸ= `âR€¸¡®ö]>½ñÕ|._‚šÜνô»×"Bß±÷~òðº=·dî÷<°û†ôÝÎ{:÷¿É¾ĵi¦†Óö}jùÞ˜eÚ ZÃMKÉj€¸6ýº}~-Ÿíº!ý£‰‘ÛO{öäS/È„vêÓL3Íô\›@\‹Ò+]í»üþ†íûÿ «ÁµTŵ=·dv5cG˜ËÈmq-"è/S¿xä£ÙµÛyÏîï!±_ä×"´3RƒmÚ0Û/7±;××"±3ºxäýžûùÞ5Ä5ĵˆîŒ®ö]>SÓpåÂEv*ââÚ4àDZ ‡ˆk ®€¸@\qM‡i€ ®E4.SzH€¸FgÄ5Ä5:# ‡@\ââÚTãDZ3ů/ÿÛÅ#ïLåí'þÍÔ<ѯ9Àþˆk0ãI¬Ù•6+o½-ï°âÌ’¸öóß½xd׬¹ýüï¾K\ˆk0«âšDœßþöÝYsë9¼‹¸Ìø¸5áˇ[&×&5®ýðï^ôûý­­­gΜéêê ìn "âZMMM\\œüÀ¸6¶þ3Ç Ä5`fÄ5ÀÄÆµ_ÿú§ž~ó›2ý«_ùcco•‰˜˜èû·ãÚ’Ÿ´G-úì¼M¦e•‘¯!úSÃa³üàyIi²¤Lwuý¨¸8W-&Yí­·vÈ´þØcIøÓúákÓÓSÞ}÷U™þðÃæÜÜek×>¦ÖÊËûÒ/yŒÑ5€¸s4®É-''S“6ÄUPð™ ¥¾öÞ{»õcoê&¡M"a\ËÌü¿~ô£Í††ž?ÿºº{õê[.×MjýùsÎâã?£Öú×mâÃP€¸s:®=ÿ|…vvÚêÕË´6G;}í{ß+ÿû¿ÿ¦¶˜„-‰nn·ëãï!ºÁ0®97j£q!Ï]Ss$·©›¬nQ8q ®À\Œk?ÿycJÊçd"==å½÷vËĻᆰ¾–—÷%yT[,-í®¿ýÛUW¯¾¤‚•D®pãš„³_ÿú§c¸@¸×L üŸnŸÿýªú“O½ 
;ö’9ì634®É-9ùÏŸ];qM»ÅÄDÿïÿݲ`A’š£?Ùÿ§?ýG}Ø ú0TûhÕ~\“U^yåo‰kqmb\í»,mï­™ÒAèo2ç½'7É£ì<31®=öØCyy_ÒN\Ónùù\^þèªU¦æ,\øGÿðk´OE—,Y¨‚TBÂmÿüÏÛ /5¸pᇥ¥!ãÚ¡C[âã?³wïwó›r;vl«T†¸ׯØÙHÌÖòÙwüé;ÿã'ŸZÿÞ“ßúáÊß}cºÌlœ÷•nŸŸý`ÆÅµï¿Z‚Ñÿú_•jÎ÷¾W{ëþýÕÉd))Ÿs8n4ö÷ÿM¤vï~N›þ‹<¤´Å‹o×–Ô‘‡ÅÈ™D´åËÿ³vúšL¨ñ9â@\ ¯§ÓÕ$œ]z÷µ .ã—?{£9ýÏ×kÜxOWÓ1v!€™×øÍP3>®]í»¼?þéÞý¿9üëf‡<*Ëìûô—»N W¯^e_ ®×âÚTø—5ÿ « ýŸ·­ûŽ}¾@–üÑ#ß’þ¢««‹Ð€¸F\ˆk“nhàÿì½õ>éF:úöËŸ½±ûÆôÝî{[^ÿakkëùóçûúúHlˆkÄ5€¸6‰ºšŽIwà»+Ïf÷¡ °í-Vz -±v*âq ®M–÷«ê¥;xç|Ãf÷ñÞ“ß’åÿñ/žØ·oŸ–غºº`@\#®ĵÉrò©¤;8ùÔz›Ý‡,)ËïÈýï;vìhll|óÍ7¥ï`€ q¸×&˘G×^~ùå;w¾ñÆ­­­}}}ìWÄ5â@\›c;wmçã#qM`óûý]]]ìWÄ5â@\›c¸2t×ÍKw½´MâÚöíÛ÷íÛ÷æ›ož?žý `ÚãÚÏÿî»qfÍM6‡¸×>¢¾wÍâ;r?ZûÂÈoäü÷Ý»wÓ}ˆ´¸6+oÄ5€¸ö;#¿jõ»3Øý«»bîÛ½e+q @Dùõ/z[ÞÑnGj·ï¦æÕoÿÏíßüÎ,¸½²ùéowîÜyàÀ¿ßϧÀk×?úÍÐß}$úÆÝþfèÿóŸý.«9ÒwW<+Ym×®]| 2µ¶¶Ê¿‘q^™dCdsÔw'±‹9×>Jlq¬½7Ý‘{ì¡ÿvè¾•Ç JßøOù»oÊøh\m$«‰ÿ¿½» ­²Ž8~aPQA]tÑ]t×E](ŠEAHYDEÙ‹Y–eDö‰½\„9²in¾®e+¶tÎØÓ¤Ùô䶆։dÓG1ç ®ß9OΖéœg󤟇ñìÙ³<|Ïÿ<Ïÿ‰s‡[ €òÔÙÙ¹eË–ˆ›ÚÚÚ•„88œ8¨84wâÃEkáŽî†™oT_>ièÅ—MÌ]¯–ÿ4Z‹P3‘PžâddMœšâÃä† BHNáaÍÞb¸¨s-Îq:ØômÚy –ß8=7aÇÓ—Ïz£jÉÒB¨E¢%­fš\ lOeñ2Êf÷îÝ„88œ8¨84O‘‹=×â,g„䲕÷=Ÿ<½ 2oi^åIÑjBȵóó©4>ÆE„Õ>üJäÚŠ{ž2[¼xñ'y±PQQQ|Ñ«G¼rmLõ÷÷ïÛ·/"¬áÉܳA×=øR”Yeeå§y±°lÙ²ººº¦¦¦¤ÕbcCk€\ëbËf³©Ù #×"ÚR©ÔêÕ««òb¡¦¦¦øŠW­ȵó£íµE‘k­s>Œ,kjjJîNŠ…––—»rmõ4nk½âŒ¯Â,±Üøô;ůoûô¾r­4ŠS¬T¯Ø§÷k¥at  ¬sm˜’bK†ÍÒƒy ¹6F²Ùlooo&“éîîÒdO¿¹?cyý`éÿ;‰]Å݈È5¹ ×ä ×ä€\“kÿÏ\; w†rM®È5¹ ×äš\kr @®È5¹ ×äš\k§Ö:c~äZóô¹r keçıãÑjÉëØÁ¿ä ×ÊKêþy…\kš2+ݾC®r­\$W­­¹úößVmXwͱ¼qæ|¹ȵ²ðË¢5Ñg«.›´¿¥=~ÝÛܶêŠÉ±¦~΂TÞÖ­[ûûû½‹€\;ö|QW}É„ˆ³_WÔǯ‘eÙlvÇ’uUãÆÇÊo^]ØÜܹ+ ׯZß«¯¼%²lç›K ­ÖÛÛ›Édêç.ŒõÕ—Oj\R¹+ ×ÆÔ¡LO͵wF“µ½øQ²¦ÐjÝÝÝ©Tê«^Î}Izõm©µu±2)6ï% ׯÂñÃGÆ?–ÜzâØñüÐZ¡ÕÒétsÞ—“mÖ^?­«=›6@®ºè³ÍSgG‡}{ÓCGû œü´Ðjáç¼m?­Ÿðhl?»Ò»b_‰rmÔmŸûqXíuSïÝ?PtÉZ¡Õb!Êì÷¼=»ºÖ^;%×vw=×ÝÙå"6@®®ô»Ks÷\2¡§qÛÀàÛ Š[-ɲäO?½iå¥s“±=õVáOŠ k¥W˜¶#†´Z2´VÜjýyÉíË×'S{´¾÷™bäÚ¨øsk:™¶£ãƒÃiµä¿ ›m{ÿóÜ#ªÆß^]§Ø¹Vb»2ÉÓ¥¶=³`ø­6¤Ø¾ò­ØÃÊ«nÝñÍ&Åȵ’9Úw þæG’i;Ž>20xе䒵ÓçW¡Ø6Þ››Œ­æ†»;~h3 ×JcËoGc}7ifÒjC¦XK†ÖÎ8T–[Ï{ë&>þÏ¢'ä °ríœí;Ð:cþ¡LÏÀ©¦XKªk8_k&ÿ»g箆i/ü´¹å¬þ@® Ë)§XþÙÈFæäÚYçZbן_÷–k€\+q®eNAißU:âȵÓåZÁÈ2«Pl縹vŠ\+6âÆJŠ­$»kƒ2«X 
w%×¹€\k”•¿Õÿ@§ò¦IEND®B`‚python-watcher-4.0.0/doc/source/images/sequence_trigger_audit_in_decision_engine.png0000664000175000017500000021720113656752270031162 0ustar zuulzuul00000000000000‰PNG  IHDRªZ]´ ÷5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gUzTXtplantumlxœ­UQoÚ0~Gê8±*5¶–u¨›ÚRÔu*-[Ú½LÓd’#XsìÈvhé¯ßÙ ¶[µ¼$àïûîî;Ÿ}b,Ó6OÅNm§f~s™1ÍRHÙÃa †ü¡Ýj¹õúéàëÎrS‡àÔÏ1â†+ }™p‰u€.XÍ“5°<æÖqXdù”Y܆{Å- /Ì,1C¿ºg±c{½&%Kßáæúâæòú¢”Ññ7#‘ãÏ "±.HeùxïçR°-õb* ÚÙ›„µF¢˜Ø#9MzÉlšÍæn•Úq”å^/T¹ßƒ#J1Ë´Ê4wõ©Ìò”?2뀅04¦œ „Å?¡§*Má"ŽY,=eL±î*ÁŒrâ˜%gÕõ%n§ô¬ñœJOä†jç TŒzJÌs),˜ØaR‡YH=OÊY‚Û‘Êf Æ¾v.ƒS¥gOJep?á¥Ê“ L¸±Žá‘c¥½ÐªjX<›E"Ê÷N‡—EY#7a¤FKšG¦’ïwͶK aâ2e\éó–iF Û7>ŒIÆuLI³ªJ‰|¾kÐ6eÑ„ã }pûô…0å8óˆE32=¦òp;Ð? –2L„1± ²ªÀ #–\*£^9±W™5“ñf<š™+7ür˜êCÁ¤D]jšŠuJI(¶ê†‰&ç5—˜Êñ'f6Ð;§¿þ"T¡^„[ŸÛUÆÞÍUðu‰¹‡*ôŽÈK.°ªXu¿4Ì~¦8šgÛ´–¤?²½²/°löK¾®Ÿ·†MqSZë‡Ùa^}…w½^¿Þ?ÿ÷ÑšX¹ÔªësB2tÈO©éUA.cvBÇ‚¿âkÎ/{7¸¢xðQëÝûF˜K0 ­´»í£îÁèõoám«ÝÙ­5.†W4(¹Žb:5ù™Ù­}aSÖ¸ìB؇o¹¤«)ú”k%S”Ö¯ÃgeÃLYëgTkˆÚ¥ü}Pk7š­_‡í`ÔîÔ®¸Ìþ>¸Á.>ÑŸñ€IDATxÚì½tcW}ïë"!ü\WપÑÓ¦àz¹^¾fÊtH§î0¤ÍuSW5ªïõuݹZS­´^®—7×OÔ+k0“dp¯3¸Ä¸CšÄL&!q”88  YsžA@&wŒ¢8ï€Q]Ç1ï‡Oº{¢–ÿÌXÒù|––×>[ûüÑ>ßsôÑñÖQÙÃPF€Ñõ÷g?þéŧ¾Éãg?‰o¥séFvŸÑ:Íp\ó(äÓ|rz„Íë¯ìæñ²Ý<¾üÍ­t.ÝÈî3Z§Žk…|x’ON°UýýÁðŸ7æã¿c»üÉÈÝÈî3Z§G9®yæáI>9=ÂVõWvù/~ñœ1±ÿ=¾]þdänd÷­ÓŒ£¿×< óð$ŸœýÝjÖŸøøÝápø[ßúÖ‹/¾øòË//--ñ6Éî£ÓÐ_Žk{x’ON€þn5ëÁcŸÿüçÏœ9óôÓOKÜ/]ºÄÛ$»NC9®yìáI>9=ú»Õ¬ßÿ÷G|ðÁS§NIÜåŸ|Úãm’ÝG§¡¿×< öð$ŸœýÝjÖO~øö'NHÜå_8¾páo“ì>: ý叿Q°‡'ùäôèïV³þðGî¬ßwß}Á`P>í½øâ‹¼M²ûè4ô—ãšGÁžä“Ó# ¿ø»ÝÇùýåþòàôèo‰úSYYÙìàc{·­ØõwGöç÷«ŒB> ÿ‘Oïmo«¥ÂŽÛYý-¨®@a'õ÷Î;6Û¯Êß”#dïÞÿ”Þ¸¹ùÝúcæ'?9{èÐ_Ùío1›ßèp¼UÊR£_ˆ†ÉôíÙû·¯g<Úrn¾ù/ž)Ro±\']áõ¾ÿþûo+"ý-Ë„qôwÓ»/Ï—‰þ¯þÊùê#ù›ÚZ·œ—¬VË7¾÷ñÇïÜúnÝ–H\íÜnâíàÚèoŽrµo»ŽåíÒßìŠy;Èæ*Ù4#Ûª9=¢¿¯=vïþ‡ܵ«.%RõõïøÆ7î×W>óÌg~KEJB&sõôü——^zB&åoooGSÓog ß~pªµµÙçû³u—_þ’$xÏž†"ò'UþéOÏ=öØ1yW¸á†ß{õÕo”Ì•˜¾x¿é݇þ–¶þJ®¿¾QÎi?üá¤LþìgÏ|éKÇ=ž}…£¿W5·};¸6ú›{§J7Ý×øÔ´kÉæ*Ù4ýEséï÷¾÷è¾}¿«ë;ß ê#õè£w¤K2ùðÃÿ "%Ÿ;»ºþ$e’¼[oíɾŸüä¬|BÍóð°X®Ëö¬ª‘¬ßtÓÈ2峯h·lÛŽû“z´µ½ï£íV“Ÿýìpcã»´ë4ýý­o)OÉÆËKp¹÷Þ;çkÔ¯ôî»×Ô¼MÈßO}êý†}æ3Gåd¡]hoÿ£Œ×Ôsœžr/AÖëvWË‹zç;]²Þô7˜Ü³çè“‚Ú}ò¹Îë}¿¶ ZZ~_{ éË36ÓZJGIÉ+•îºï¾[ÕZ²Í’#Ø×²Ó 
®¿·ßþw~Û&t*÷Á›ñß,Ùv«4¸ó΀ÃñV“é ×8·)ëZ÷í ljHrfËx®ÈýÚ7ºSrŸ Ó»¡ÓrÆ—cuÙvÜvéïº]±nÇæù&òÉO~X:Až’@gÏŽÝÿm²µHkæcuÛ•öÜ®’M3Ð_ô7—þöõýwí¤,‰—rJ¤äÃýóÏ?¦ÕHA&õ‘’Éçž{(eßþöÃ’ì­è¯œŽú«ýûß³î»K]ÝÛå •O½R–-éìl)’í‘“…V~â‰Ñ={´¾zå•'åmãðᛵ§üN9³|õ«'´ËÞzò|ª|–“‘v:ròRÿ•6¢žÚ5›ŸþôÜÍ7@Þç6ª¿Ù– ±Që•“”3êo¶ÙsôI¡í>)ÈúùÏŸ•½ø²]ZÈÑLu”ü•òcË=K¶~;Íàú+ç7ý»ì&ôw݃wÝÝ*-Åhõ#Ê®YnÓוûí ljHÔYÈSj®Ü¯}£;%÷É0¥°•Ór>«Ë¶ã¶KswE>›ç›È7¾WÞ”%!òÉJ„U>#©Iõ6ÿ~Ü\Ú×uôýÝØù],ª*5;q:C+«ÜÈû´ú|)î"g1}¤äó_Æef ß‹/~N‚žíÜšBuõ¯ÿèG¡uß]äãã¥KOÔÏõý ú§¹ùÝêmC;>å®zêK_:žc™Ù^£j §’§žº[ÕŸ9s—|8Vm.\ø‚þŸeêšzžcs,AÖû•¯Ü«ž’rFýÍ6{Ž>)´Ý—R_QQ¾îdž”fú$•q¥~–l;ýwšÁõ7c6¤¿ë¼ëîVi©¿Àv-s›¾®Üo9ND)OéϹ_ûFwJî“aJa+§å|V—mÇm—þæîŠ|:6Ï7õ^,ñIù<þ^Ÿÿ~Ü\Ú×u•Œšþ¢¿YŸ/~ñ“ú«Gšô/vHæ$ýòH¿¦+‡ŸŠ >¦úq jLºí¡Cõ¯ÿÎçêoggK¶Üúšþþ¿Ö¾''^µñÇåOª¤ ÒiZ‡äèÃ|^c¶½íãGŽMÍ}õ7[Múz3êo3ÈÖ'…¶ûÄà[[›¥WU˜3Θ£Y¶”m–l;ýwú»Eý]÷à]w·nèØÜÞÜf\Wþoúœç8WlôµçÞ)ùœ óYõº§åMœ{¯±þæÓ±›xÉ6™ÿ~Ü\Ús»J6Í@Ñ߬ÇO{û¥\ókk{_Jnä¿äIê?¶¹?HM]ÝÛ·øÝ W_ýFy¹5‡ûö·þÇì]¶ÛßrÇÿoaþ÷\Kí ORëžg3¾Æb×ß}Rh»o÷îß¹õÖõ¢l¯.G³l;(Û,Ùvú5î4?lqðúﺻu£ú»¹Í¸®o›Óß¾öÜ;eCú»ÅÓòŽëoî®È§c·Qóß›K{>®’ÿ9=]ÿÏÿùrEE¹þkÂJ©‘ú”ÜÈùQý+Dÿ”œ4øÓ”ÅJÍ-·ø·¨¿òéMéT¿‘?úQ(ãb_yåÉŒÿÙßÁïN©¯67¿[ÿ…§”ÿ=ñÄhžËÔ¿FÕ@vþÿVRNÿßÕÐß”ÿ—IyCú›£O m÷鯇=óÌgôgü”ÓzÆf9?d›%ÛN¿ÆfpýÍó«o›8A¥$'ÇnÝè[û6æ6Ûº²½ä8Iàs ~ØÐkϽSò9æ³êl§å”®Ûй÷õ-ŸŽÝÄ›H¶É«ÛzÚówôýÍëø¹ó΀` R£ÝT/ŸHIå ééù/Ú` ùÛÛÛár9´PnZåý£³³E‰µ|쓃ü§?=§wíªSí[[›åL*Uå!G”ºì±ƒþ$âþØcÇöíûÝo|¯úŒ{æÌ]Ç[}ômSÏžSŸ\õß±xé¥'º»ÛS–™í5ªŸùÌÑšš·}ûÛk—šÜîj©¹ú›òu–ÚZ÷†ô7GŸÚî««{»öUè ¾ _uõ¯ík'Õ¼Ùši_}ÓvÖQêÊY¶Y²íôkÜi×ß”KI‡g¼ñÙ&NP)Éɱ[ó<6¯Fn7zŠÈq"ÒÎê)ýWß6úÚsï”|N†ù¬:Ûi9¥ë6tî½Ú7>KéŠ|:vo"Ù&s¬nëiߺ«pzDS?(뿺¤>—k7ÕË3RrÒ|’où„§ÝR‹ßþ¦Ü¹úCúK5|çâÅ37Z,×ÉSMM¿ýÐCƒú¯îÊ_ê­V‹×ûþl£æ¯Ùï&˜Ío¬ªú5ÙuîP9È÷ï6àI úK ò¾ÕØø.yo|–í5¦ß³Fj䯔ó‘×ü¿ú–c ².ÙfÙ6Yï?ýS¿‹–çì9ú¤ vŸœ»~KÛAŸüä‡ÕKxøáä«îÊÖL ¢ÒEZGé¯|d›%G°¯e§\µ[›ô£Ý"ˆ’ éð––ßÿâ?™’äMœ R’“c·æ>_ÕÜnâr¶‘<>ýéªC@ÎùœrüìE¶’ÏÉ0ÏSPÆÓrúŽËÿÜ{•~ö"GW¬Û±›xÉ1™mu[Oû¶¸ §Gô÷*>^yåI1Wýç<ƒüníÞgµµnv_)eýåÁcgOòÉéŠ@µÿkÈgG5øý-íÇÁƒí¾fâ¾{ö4Ü~ûß±û8¿£¿åœ:uJvù}ÆC^µ¼vé釗_~yÓmðnd÷°ÓŒÇ5òáI>9=Âæõ÷Å_|úé§egËÇ ñW-¯]z@úáÒ¥K›îhƒw#»Ï€f8®¡OòÉé6¯¿òùFv³|Ð ‡Ãg®9Ÿþ›¿?³£È«–×.= 
ý°´´´éŽÞÙn,ê]`ØÝ·S=¿]fŠ÷¸æ¬b„sZ±ä³ˆòÀéÑ@ú+;X>âÈž¾pá‹לO˜ßýâŽ"¯Z^»ô€ô믾ºéŽÞÙn,ê]`ØÝ·S=¿]fŠ÷¸æ¬b„sZ±ä³ˆòÀéÑ@ú»³Œ—íf'± èy² äýv=@¶< ¿À. çÈ6€âÕßï~äŸÙIìz€ly0Šþ ¿è/ú Púˈxv=@¶<H¹ »€ž Û@Ð_`ÐódÈú ìz€ly@]@Ïm Å¡¿%?"¾¬Œ]ôù¤Ä›»ººTË]»v}⟖úÎÎÎôaÇ‚ÇãI·Þ”—Ɖ‰·² äÀ@ú›mDüðð°ÅbYwDÁÊÊÊÁƒÍf³HêÐÐr×kìv»M&Sccc0\WC¡PMM´w¹\£££ªåôôtSS“Vüøqýü~¿ZuúKHyi» €ž Û@ý-hfggÅkó¯/1–_º8{÷gW^Y$ô€þ–2}}}ñxüòåËGÊëÖ—0ó¡ðxÙîGÞüŸú&Ñô·4µÛíV«Õç󭬬¬[_Ú\8ùE1`yœ²ßxþc÷½zéÇ€þB)3ÿ…¯>l¾þ——ß´7h¾þ+ò?¸ èo¡Àˆø«jÀÚãÑ77ç¸Ì. üdÈ ¿×¥h<®êã±·Ü "œY·1‡Í5 ?dÈ€õ—´í<ôÓã¿ò{Ò·¿ñ½O4þå¤ûÏÇžÿØ íêoî>g~² äÐ_ŽŠâsßÇl7|i׉Êý_ý‹ÇžüÆ/?ϳÏÙ#„€lyô—£¢høNÿqmƒþrï†ú\ž½ýöÛï½÷Þ³gÏÒŸ„€lyôw›aDü6òÚ}ß´7årïFõ÷#ùÈ'>ñ í§žðm €þB!²üÒÅï=ð¯æ·øAýôJ ôÐ_@Ñ_@ý@·FÄ£¿†…ðÙò`Dýå~(è/;€ly@ýå-€ly@ýå-€ly@¡ðõ·¬Œoa~ Û@Ð_FÄ£¿†ðÙò`Dýô?ôÐ_ôvNSôTMJáØ±cv»Ýb±ø|¾••­~uuU&¥RžRí£Ñ¨×ëµZ­f³¹¥¥eaaA[ˆB­¢¿¿ßf³É:;;ãñ8;Ð_(ýmkk[ZC @@«—‚¾^µ¯¯¯‡ÃÉd2‘HHQäŒËðx<‹‹‹ÒÌï÷÷öö²ã ô—ñ% ¿‘HD+ÏÍÍ9­\]]­¯Ï8¶A$¸¢¢"ãòN§š}yy¹²²²ôvá‡R…lyôwó*E¡¿úz³Ùœ»}8nnn¶Z­ÚP“É”­½ÕÌ8;€ly@a'õW4™Ljåx<¾é«¿N§srr2‘HHYþfÓhi¦ æ-€ly@aôw×®]R),VÚÙÙ™2ö7¾FúØ_­Þãñ¨ö6›M[²,§««KÕ———Çb1µºÁÁÁÖÖÖh4*å™™5D˜·² äý…k¡¿ÓÓÓMMM&“Éår?~\¯¿CCCÚ-ü~ÿêêªV/™4›Íò”þΡP¨¦¦F[Îè訪–%诋»ÝniÙØØ yK Û@JA_,ú›5aܯ—ðm €þú €þB ê¯Åb¡Ký£è/ú è/ú› FÄD"Løly@×W1(=ýŃ ?m è/HsÔlt [iFøÈ6@9*8¡¿„€lyô—£ý]#z½^«Õj6›[ZZ2ꦚ\]]õù|‹Ån·ëõM+”éH߆P(ÔÐÐ óºÝî±±±Œí3nOÆÅö÷÷k?J×ÙÙÇ ?ÙòèïNˆøbÑßúúúp8œL&‰D µÍ­¿Ò¦­­mi )¤ýÍq™¶ªªJ[»HmoooÆöynÏÀÀ€ÇãY\\”f~¿_--e›3BøÈ6@Á¸ú«G¤³¢¢"·þVWWG"­<77·!ýu¹\###óó󗼡íq:j3–——+++ ú èïúú‡›››­V«veÔd2åÖßuësèìôôt{{»¨jmmm(ÊØ>ÿíÑ£šú èo.ýu:“““‰DBÊòWY¦e2™ÔÊñx|[®þ*¦¦¦GÆöÙ¶'½™¦œõ Ù¡Á€þBáê¯ÍfSãq»ºº”îÚµK‹K}gggÊØßø']OËËËc±XІjŸÏ'Ƭ鯬7cûlÛ“Òlpp°µµ5JyffF ôwg`D|±èo(ª©©1™L.—kttTéæôôtSS“Vüøqýü~¿ÙlOM¿óƒ0<}:Ï%oË*ýôwúû /8Ž{î¹'±ÆØØ˜ÝnŸ™™Qêóùîºë®Œn*bêr¹¯¬Ý8 )—]wÉ[_ ¿€þnLE=¥R_#“êGÔDCãñx}}½ÒV½›vww;v,Û×]òÖWèo¡ÀˆøbÑ_›Ívùòe}ÍÒÒRee¥^CÃáðîÝ»ÓÝ´ªª*eÞ.y‹« üdÈ ¿Å¡bP8ú«e«0›Í)Ï>|x`` ¥R5SõZòVVAøÈ6@9*8mL+++—––ô5ñx<ým2™Ü½{÷ÓO?­¯L¿¾«6Ï%oe„€lyô—£‚3ÑÆô7ϺWÖ¾ÊÖØØ¸¼¼¬*<844”ÍMóû»éU~² äÐ_Ž ÎDÓ_1Îêêê±±±D"‘L&¥ “é÷gи뮻º»»Ue$q8GÅbWÖ.ßž>}Z=›ç’·² 
Â@¶<ú[(0"¾XôW8þ|KK‹en¸!ãÝy­­­úÊh4ê÷ûív»Édª¨¨ðz½SSSZòWqÍX~éâìÝŸ]ye‘ðƒa!Û@ý…Ñ_È“ùPXºñÑòæ‹O}“Þ@ý-}þåÞÏIOÊãsΛ¾?ôÀ«—~LŸ ¿€þ–2ÑSO=lz¯ôçc¶‚–ß?×ÖÇÅ`ôÐ_C°<‚o¼þ±Êý\ 0ºþ2"¾õ—ÇÕ{¯»^+D9Cø¡T!Û@ýݼŠÁŽè/W·‹~jbüW~OóÝÏÿß­Ÿ{ÛM“oÿóç?vB»úKøÁ˜' è/Gú[Êî¼î÷¿PÛþhÅû¾úŽ=ù_$~NøÝò€þú[j|ð>í¢¯þr/áN2@Ð_Ž ô·ÑîûûÈ›ö¦\î%üÀIÈúˈxô·ÔX~éâ÷‡ø× ó„ Ùòè/ ¿è/ ¿è/ ¿è/ ¿Å¯¿ŒˆG á² äÀˆúËýPÐ_vÙò€þúË[Ùò€þúË[Ùò€þnš©©)§ÓYVVÄ_ÜÄÆ£¿„€lyôwÇØ‘ñJ÷ìÙóå/¹¸wð¿¿–ü=ý5røÈ6@Ñ YÔÐb±”ÞK[׃Ñ_@ ª¿E=ìý0ŠþŠáÝsÏ=.—Ël6755MOOkõ+++ÝÝÝåkHA&µÆ ½ êë÷íÛ Õòççç«««ãñx>+úûûm6›ÅbéììÔæª­­ýþ÷¿ÿšPŽk©yç;ß™òZ¢Ñ¨×ëµZ­²Ø–––………Œ«&WWW}>Ÿ¬Ën· åxQè/ ¿¥£¿"š‹‹‹Édòرc»w¿æs}}}gi¶¶6™Ì­’ª055ÕÐРôôôˆYæ¹ÒY©Ô' ¿ßßÛÛ+•üàÇÆÆ4“µg•²Ø³ö¬žúúúp8,˔ـ¨mîm–6òÒÔkL-\ýô·pÙ܈x1<íÊ® âh6›µ²Ãᘛ›ÓÊR°Ûíyê¯Ái—iggg].—&¬ù¬ÔétF"­¼¼¼\YY)…ÉÉIqe) ×ÔÔh*ÜÑÑ!õ9^—,¶¢¢"÷6WWW«ÕÉkD‹¾dÈ€õws÷CÉGge¨ù´?}út]]|>ßÈÈȆVªÇd2Ie"‘-–Bcc£xäÞ½{5Q–ú”ņÃáææf«ÕªŸ=ÿ׈þ/Ü È6ôw«úëp8ôWF7tõWhjj:räˆÛíN&“ù¯T¤V ØÕsã7~á _¸þúë¥,C¡ÐM7Ý”ÞLfŸœœÔ´XþªÅŠ«ÍˆÇã\ýå-€ly@S'Õ¸XñEÇ£Æþ–——Çb±uõwbbBjNœ8±¡•¶¶¶F£Q)ÏĮ̀Á»ÇŽ{ç;ßyüøq)ËßšššáááôÅÚl6M4Å¡»ººÔbwíÚ%*,õ)¯1¾†¼Æô×’òbÑ_ÞÈ6@KVWVVz{{-kôôô¨ñ»âR³î…ÒS§Niã6´RÍ€Ýn·ÉdjllTw8þ¼Ùl¾|ù²”å¯<+5é‹ …BbÆò¬ËåU‹žžnjjÒêÅžõw~ðûý²dñæô;?¤¿Xô—·² äÐߢ FÄ{½Þ‰‰ ƒŸ‰Ð_c†€lyô×xOÇÇõ÷>CÑ_@K‹Åâr¹ž}öYôýôÐ_ôÐ_ôwC0"ý5,„È6#êï6ÞeÝŸ{ØP3ôý-¢ðm è/ú‹þò–@¶<ú[*GE(jhh°X,n·{llL“Z…Ö&z½^«Õj6›[ZZ´ß%No&ô÷÷Ûl6YZggg<GÑ_ÞÈ6@ 먨ªªR?ÜÛÛûÚK}ýeÝúúúp8œL&‰D P¿EœÒl``Àãñ,..J3¿ß¯–öºNÌú ¼%m €þnŒÍˆw¹\###óóó)’š­½HpEEEÆfN§3‰hååååÊÊJƒŸ‰Ðß?ÙòPÜú»9¦§§ÛÛÛEUkkkC¡PF¯ ‡ÃÍÍÍV«U»Rk2™26K¹ «š¡¿è/ ¿ÇÔÔ”ÃáÈèµN§srr2‘HHYþªgÓ›iÂsu"ƒÐßÄçóÍÍÍiúk³Ù´ÊòòòX,¦ÚH½ÜÕÕ¥l5¥Ùàà`kkk4•òÌÌŒ"\ ¿€þÁ`°®®Îl6744¨ÁÃÃËEi®Ô×ÔÔ˜L&—Ë5::ªêSšiìv»¥ecc£,ýEý½Z0"ý5,„È6#ê/÷CAÙdÈú è/o dÈú è/o dÈú è/o dÈ@qè/#âÑ_ÃBøly0¢þnþpúîwoºé&ë---2ù¯¹¬LžJíÝÎb±Xoo¯Óé4›ÍåååÒøôéÓy.y[Vþ ¿à…^p8÷ÜsOb±±1»Ý>33£4ÔçóÝu×]ÝTÄÔår ...Êäêêj(R.»î’·¾ ôýÝ¢žâgú™T?Ø&Çëëë•¶êÝ´»»ûرc›^òÖWþ ¿Ãf³]¾|Y_³´´TYY©×Ðp8¼{÷ît7­ªªJ™w£KÞâ*Ð_ãêïæFÄëGÙ*Ìfsʳ‡H©TÍT½Æ†–¼•U ¿°•ðm Å­¿›»JeeåÒÒ’¾&§_£M&“»wï~úé§õ•é×wõÏæ¹ä­¬ý…­„€ly0¢þæ9B÷ÊÚWÙ———UåÁƒ‡††²¹iþc7½ ôxK² äýÝbœÕÕÕccc‰D"™LJA&ÓïÏ 
q×]wuww«ÊH$âp8Ž=‹Å®¬]¾=}ú´z6Ï%oeè/ð–dÈú›•K—®Üÿ/ÿ¦pþüù––Ë7ÜpCÆ»ó*Z[[õ•ÑhÔï÷Ûív“ÉTQQáõz§¦¦6´ä-®ýÞ€ly0¨þæöì§SDóJ(Ä~DK¾dÈ€õ7K—® _y×»~)¾òøô§Ù‰è/@)êïÙ³W¸R^~å7óÊÞpÅb¹29ÉDJKÕåÞ·¼åŠÍöÚEß7¿÷EJNC¡×|÷ ox­Àãj?Ð_@wŒï~äŸ_xáÊŸþé•ë®û?ûã?¾òk¿ÆÕß«ú[8á§€ly0œþêUìŽ;®¸Ý¯pc#ŒþzGm ¥¯¿)ƒ¹óúË[ÙòPÊú«¸ãŽ+UUÜ÷ýå-€ly0†þj\ºtedäÊK/±+Ñ_ÞÈ6€â×_FÄ£¿†…ðÙò`Dýôýôýôýô 8ô—ñè¯a!ü@¶<Q¹ úËŽ Û@Ð_@yK Û@Ð_@%ÓeeFØdÈú èïëô·Ø=˜ðºäÀˆúˈøRÕß«ç¦éK.R&üPªm €þú‹þú E¥¿«««>ŸÏb±Øíö¡¡!½qö÷÷Ûl6yª³³3k>ªH_E4õz½V«Õl6·´´,,,d´X5™mÕZ!÷ºýôw3úÚÚÚÄn—––D^•k x<žÅÅÅD"á÷û{{{3º¬žúúúp8œL&eY¬¨mnýÕV½´†ÒÇþ"¾€þÂ6ë¯ÃáˆF£ZY Ê8Ng$ÑÊËËË•••RR‘àŠŠŠÜú[]]­V177‡þ@ë/#â‹E³¹iÙë1™Lë*i8nnn¶Z­¹gÉf·%£¿„J² äÐßÍ«Žþê¯þF"ýÕ_5x7‡.ë‘Y&''‰„”å¯j)œL&µr</ù«¿„Œy’ò€þrT‡þ¯×_£½½]çàà`kk«fÆ333j oyyy,˸ ›Í¦-\¼¹««K-j×®]²^1`©ïììLû«­Úãñ¤[oŽu~² äÐ_Ž ÎD›Ñß•••ƒšÍf‘ס¡!)¨§Ä€Ýn·Édjll ƒZåðð°ÅbÉx]6 ÕÔÔH{—Ë5::ªÚLOO755iõÇ×ßùÁï÷«U§ëoŽu~² äÐ_Ž ÎD[½ïïìì¬ø.=IøÈ6@7 #â‹HûúúâñøåË—=”éIÂ@¶<ú ¥¬¿£££v»Ýjµú|¾••zÐ_(eý@ý@ý@ÓaD<ú»¥|óÏ~(UÈ6@7¯b€þ–°þ~0æIÈúËQQú+¢yìØ1»Ýn±Xôw~ˆF£^¯×jµšÍæ––õÈÒ~ddÄáp˜L¦ÜÍFGG].—Ô××ן;wnll¬¦¦F&›ššÎŸ?¯6 ¿¿ßf³ÉÚ;;;ãñ¸6¯"G³ô …B ÒÆívËê?Ùòè/Gú›AÛÚÚ–ÖB ÐêÅYÃáp2™L$R©~ôXÚ8p@ hŽf±XLž’•Š^ë'¯¿þz­ÙÀÀ€ÇãY\\”Ùý~oo¯š]¿‘9šé7¦ªªJýê²jCøÈ6@9*Ðß×éo$ÑÊsss‡#}^qÖŠŠ Õ^44ã*Rš)+•ú”IõÓÊN§S­}yy¹²²2£þæh¦ß—Ë5222??OøÈ6@¯5Œˆ/"ýÕO*1 ‡ÃÍÍÍV«U„ .HoŸg³l“e¯'Çìù4›žžnoo9®­­ …B„€lyôÐß|¯þ:ÎÉÉÉD"!eù«Výìy6Ë6)³«áÂ9ÚçÙL155•ñ26 ¿€þþrìo| ýØ_›Í¦ÆÑvuueóÚ<›e›lmmF£Rž™™QC‡ËËËc±˜jŸ­YÊb¥^ ^Ó_Ù0"€þú›A‡††´›*øýþÕÕU­> ÕÔÔ˜L&—Ë5::šÍkól–cRÔÖív˃Á V9<<,Û³n³”ÅJ}]]ÙlnhhØÁÁ€þBAë/]èïVaD<úkX?m FÔ_î‡R,úk±Xè:Â@¶<úËQaýÂ@¶<úËQþá Û@ýå¨(Qý½í¶ÛÚÛÛ·9 RLøly0¢þ2"¾XôwuuµªªJ»].úKøÈ6@¡Äõ÷äÉ“ÛÐMéﵜ Ð_0¢þŠûNLL ¿€þ‚!ô×årÍÏÏ«ÉÕÕUŸÏg±XìvûÐÐ2Ë•••îîîò5¤ “Z}4õz½V«Õl6·´´,,,䩤¡P¨¡¡AVäv»ÇÆÆ´Yj!###‡Ãd2e[Wú\B¿ö;vñx<ÇKÛ·oŸú 9Aº¢ººZÍè/”šþŠJê'@[[›øßÒÒ’¸¦rʾ¾>dz´†4I­¾¾¾>'“ÉD"!óŠ_æ©¿UUUÚ–ˆÅöööfœK&8 d4Ïu Ȧ...J3¿ß¯žñ¥MMM‰…«y{zzÄŒÉú»a_¤úëp8¢Ñ¨V–‚2K©W_“‚ÝnO_…ˆiEEEžúër¹FFFôž3ê¯XlÆÙs¬ËétF"­¼¼¼\YY™û¥íÙ³g||\ ³³³²U«««„€lyôw›U GÝn·ÞAÓ4c½’æp8ÜÜÜlµZµáÚ(…|ôwzzº½½]Ü´¶¶6 å^û†ÖUözr4Ó §OŸ®««“‚Ïç#'üdÈ ¿¥¬¿úÁ¯úK¤‘HDõW]RÕ_ýu:“““‰DBÊò7›.ç`jjJžþæ¹.i¦† 
ëÉöÒ„¦¦¦#GŽÈ'd2IøÈ6@9*JY|ðÁ¶¶65¼^o|öövåˆÚÀÙ¥¥%©÷xŸOý¼EiþÒ,‹ËåzöÙgÉú Õ_ô ­¿íííG¥‡ýÍ #âKIgggívûÖÍ ~ Û@Œ¨¿Ü¥”ô÷ÊÚ½xà:™ðÙ €þrTBƒÁ`Æ›£á² äýå¨(AýÅb.—‹N&ü@¶È ¿†Ðß+k÷£“ ?mòèofþÂdÈ€õJLçççüè/EÇÇÇùê ¿P ú[VV¶n™Ÿú FÑ_~öBX~éâìÝŸ]ye‘¤ ¿`D|éïºttt ÑÃó¡°tã#oþƒ‹O}“ðƒ!Û@ýݼŠAqé/(.œü¢ô¤l¾þ——ß´7h¾þ+ò?R.~@w€< ¿€þ–¦kGßܬ¿LøÝò€þBAè/«÷xì-7h…È#g? ;@Œ¨¿Œˆ/@ýåêïvñüÑOÿÊïI—>üÆ÷>Ñø—“î?üxþc'´«¿„J² äÐ_@ë¾ÙnøÒ®•û¿úŽ=ù_$~NÏ ¿€þ–ßé?® rÐ_îôÐßäµûþ¾i/—{Ð_@Kœå—.~è½0OW ¿`D<úkX?m FÔ_þ²#È6ôÐ_ÞÈ6ôÐ_ÞÈ6ôÐ_ÞÈ6€âÐ_FÄ£¿†…ðÙò`Dýôýôý…Õß²2‚ è/Oñ`(qýeD<ú›ÑzàÁ„È6#ê/÷CA «¿„È6ôŠU£Ñ¨×ëµZ­f³¹¥¥eaa!£ÅªÉÕÕUŸÏg±XìvûÐÐPÊà‡2¼%m è/œþÖ×ׇÃád2™H$€¨mný•6mmmKkH!}ì/WÈ6ô WõˆWTTäÖßêêêH$¢•çææÐ_² äÀ(úˈøÒÐßp8ÜÜÜlµZµ &“)·þ®[ÏWßÈ6€ÒÔ_( ýu:“““‰DBÊòWÉ«xp2™ÔÊñxœ«¿€þB)è¯ÍfÓÚ/,,tuu)yݵk—,J Xê;;;SÆþÆ×ðx<éÖ[^^‹ÅØY€þB!êo(ª©©1™L.—kttTYìôôtSS“Vüøqýü~¿ÙloN¿óƒ0<z½^«Õj6›[ZZTû‘‘‡Ãa2™r7u¹\R___îܹ±±±šš™ljj:þ¼Ú€þþ~›Í&kïììŒÇãÚ¼ŠÍÒ7& 544H·Û-«#0è/ ¿ô·­­mi )­^œ5'“ÉD"!•bƪý”€æhÖÑÑ‹Åä)Ù$Ñkýäõ×_¯5ðx<‹‹‹2»ßïïííU³ë72G3ýÆTUU©pVmýô÷uú‰D´òÜÜœÃáHo#ÎZQQ¡Ú‹†f\TJ3e¥RŸ2i6›µ²ÓéTk_^^®¬¬Ì¨¿9šé7ÆårŒŒÌÏÏôwûaD|Éè¯~R‰i8nnn¶Z­Ú mtAzû<›e›,{=9fϧÙôôt{{»Èqmmm("üdÈ ¿×NÅ ˆô7ãÕ_§Ó999™H$¤,õªŸ=ÏfÙ&ev5\8Gû<›)¦¦¦2^Æ&üdÈ ¿èï/ÇþÆ×ÐýµÙljmWWW6¯Í³Y¶ÉÁÁÁÖÖÖh4*å™™5t¸¼¼<‹©öÙš¥,VêÅà5ý• #üdÈ ¿èoýÒnªà÷ûWWWµúP(TSSc2™\.×èèh6¯Í³YŽIQ[·Û-Khll ƒZåðð°lϺÍR+õuuuf³¹¡¡áª~ ü€îy@¡ˆõ—Ž%üdÈ ¿ëÈxô×°~ Û@Œ¨¿Púk±XèX@Á(ú €þ‚±ô÷¶Ûnkooßæø2¦Ð_(@ý]]]­ªªÒî†þú›FÄ—†þžõ#‚tEuuµš…ð' è/GE©é¯¨¤~2h¿¼´´$®©œ²¯¯Ïãñ,­! 
dR«¯¯¯‡ÃÉd2‘HȼÙ~Ž8ªª*õkɽ½½ç’É(Ís]²©‹‹‹ÒÌï÷«…g|iSSSbájÞžž1cœdÈ ¿FÑ_‡ÃFµ²”YJ½úzœìv{ú¢DL+**òÔ_—Ë522¢¿ðœQÅb3Ξc]N§3‰hååååÊÊÊÜ/mÏž=ãããR˜•­R?ûLø“ :Ðß×ÁˆøÒÐ_·Û­wÐtÍX¯¤9777[­Vmø6J!ýžžnoo7­­­ …B¹×¾¡u•½žÍ´ÂéÓ§ëêê¤àóùÄÈ ?² äÐ_(}ýíèèÐ~Õ_"D"ú«¿ê’ªþê¯Ó霜œL$R–¿Ùt9SSS²ð|ô7ÏuI35YO¶—&4559rD> $“Ib€þB)ëïƒ>ØÖÖ¦&€×ë¯ÑÞÞ®Q8»´´$õGýµÙljoWWWF%ͨÂ>ŸOM!ú+ Ñ*ËËËc±X¶³­+e®ÁÁÁÖÖVÍtgffÔál/M˜˜˜É'Näo@¡ô7‘HØíöÙÙYmreeåàÁƒf³Y\shhH rúÞÞ^Ë===j€l(ª©©1™L.—ktt4]Ÿ~úéæææôõƒÁºº:Y~CCƒü0<<,ËÏvY7ÛºRæÒ ØívKËÆÆFum;ÛKN:¥PdÛl@¡¸õW“ÅŒ¿|!N,¹Åm¾á†Âáp¡õdÊKóz½)7?.ÌÍ@wFÄ—Œþ¦Ð××Ç/_¾¬äPd|iãããú{Ÿ~02dÈ ¿›W1(^ýµÛíV«Õçó©Ÿ·( Ò_šÅbq¹\Ï>û,á Û@ýå¨0¨þá² @ýå¨0´þ¶··=z”þ'ü@¶< ¿`ýµÛíùü;€ly(YýeD¼qô÷ÊÚ½xàvá² äÀ¸ú †Òß`0˜ñæhè/” þÆb1—ËÅ.ô ¡¿WÖîÆ.ôÐ_è/#â ¥¿óóó ~ ü@¶<Z¹Š¡ôw||œ¯¾~ Û@Ð_(ý-++[·ÌÏ?m è/GE‘éï«—~bÝûòäY:Ð_()ý=Ò÷÷w·u?ò›ÿùÑ_ݼîz©‘¿”ÿ!î è/” þf|<ó·ƒßýÈ?KAû7å­—ý…‚Ðß[}øÄ{:¶ìUîûä{þûDåû¹ú èïVá2Xê¯û;÷À¡ßùoã¿òK~üÀ€ ?ÙòèïÕU1ØYýÕjV^YüúGßÜÌ?Ùòè/GEéë¯bî'Nýúoú¾¿@øly@9*ŠI5^½ôãG‚Ë/]¤' ?Ùòè/GEéë/~² äÐßÍÈxô×°~ Û@Œ¨¿€þ ¿€þ ¿€þ ¿€þ‡þ2"ý5,„È6#ê/÷CAÙdÈú Å­¿eeŒº!ü@¶È ¿è/~ Û@Ð_vúkLi&ü@¶<QþV ?m FÔ_(ý …B ‹Åív©úþþ~›Í&õñx<]d36@eeeEEÅÈȈ6‹‚}è/ì°þVUUi5 ½½½ZåÀÀ€ÇãY\\L$~¿_Õ+…ÍÖàðáÃ---R¿ºº*œ.Íè/ì¤þº\®‘‘‘ùùy}¥ÓéŒD"Zyyy¹²²2Ed³5¨®®ž››K +ú è/ˆþNOO···‹¿ÖÖÖ†B!%¬zL&SŠÈ®Ûýcé/#â‹ESSS‡C+;Î………"›­W ?m Õ_î‡R,úëóù4aýµÙlZåàà`kkk4•òÌÌŒ´IÙl ´±¿bÆú±¿ååå±XŒ@¶< ¿°óú ëêêÌfsCCƒü  ®Ûí6™LÒ&E³5úúúÄwÕ„ááa‹ÅbœkÀ„È6ô Wðm €þrT ¿@øÈ6@7#âÑ_ÃBøly0¢þú €þú €þú›’o~)Ð_0ŽþâÇPdúˈxô×°úKø¡T!Û@ýݼŠAáèo4õz½V«Õl6k?ئüòرcv»Ýb±ø|¾•••uëGFF‡Éd’I©ïîî._C ]ÙmÆÍ+Ó¡^H¿Íf“ÅvvvÆãqÂ@¶<úËQþfÐßúúúp8œL&‰D Ðÿ¾q[[ÛÒRP¿`œ£þÀÊ;ûúú<j&“š]ymŽÍÓ¿ŠYÝââ¢4óûý½½½„€lyô—£ý]gðƒXfEE…òËH$¢•çææǺõbŸjQR/Ϫfv»}C³gÛ²yú§œN§Zìòòree%á Û@ýå¨@3èo8nnn¶Z­ÚXmìAº_šÍæ Õo×ìyn^ÙëQÍ?Ùòèï5‚ñÅ¢¿N§srr2‘HHYþ*­ÜÄÕ_ýb¥^ß,Ÿ«¿õ7Çæ¥¼ 5j™ðm €þú›Um6›V#úØÕÕ¥÷˶¶¶øéc3Öë+õÚ_iæñxRÆþ®;»šÌ¶yååå±XLµlmmF£Rž™™QC„ýô÷uú …jjjL&“ËåÕëïÐÐv/¿ß¿ºººn½~±+++½½½–5zzz6:»šÌ¶yÃÃòý\bÀn·[Z666ƒAò€þú»ûþf»±îo¸ËÏUú è/@‘ë/#â‹]-ˆêód‹³„È6#ê/÷C)výÂ@¶<úËQþá² @ýå¨@ðÙò€þ²“Œ ¿ý*Û¶|õ­À¿?GøÝò`DýeD|©êgqÝ/‚ðC©B¶<ú è/ú è/•þ®¬¬tww—¯!™TÆyìØ1»Ýn±X|>ŸV_¦#EL¥0::êr¹Ìfs}}ý¹sçÆÆÆjjjd²©©éüùóéíÓ—F½^¯Õj•¹ZZZr¯4ÇÆßsÏ=ÚÆÈÚ§§§‰ú 
èï/éëëóx}@BþÍ^xá…ÚÚÚh4ªjÂápss³ÕjÕ†:˜L¦ ­tÝ$üdÈ ¿;£bP8úëp8ôWy·rõwCú»°°PWW÷õ¯]ßÌétNNN& )ËßužcãwP ?ó$äýå¨(ý Úßx<îñxRÆþÆ×Ðý-//Åb[×ß½{÷ާl¤ÍfÓ6O丫«K5ζÒþm €þrT ¿™ïüÐÛÛkY£§§guuU)ãÐÐø¨Ôûý~U?<<,5ïü°!ýÍxç‡P(TSSc2™\.×è訪϶Òþm €þrT ¿¸ï/·Ú%üdÈ ¿€ñè¯a!ü@¶<Q¡Øõ×b±Ð¥€þ‚Qôýôw‡¹í¶ÛÚÛÛÙ×è/@éëïêêjUU•ú™·ÌC–ÐßñE­¿›“È«­ž'Ožìèè(üAø¡T!Û@ýݼŠú» Ä}'&&Š}G”êIÈúËQQú …,‹Ûí»òú¤P^;22âp8L&“LF£Q¯×kµZÍfsKKËÂÂBƹ„þþ~í‡3:;;ãñ¸V¹ººêóù¤Òn· ií÷íÛ ÕŒóóóÕÕÕj —Ë%õRúªªª¥¥%õ”ÔÈÒ¤F­=ãF~² äÐ_Ž £ë¯x¤úáÞÞ^廯K[YÙ”ŒÖ×ׇÃád2™H$€¸lƹ<Ïââ¢4óûýjáÚ/ËÒÄVÅPµ¹¦¦¦ÄÂÕ¼===bÆ)/ADV•?øÁêHùСCúmȶ‘„€lyô—£ÂÐúër¹FFF´«ªzßM™‹Í¸XñËŠŠŠŒs9ÎH$¢•———+++µ²ÃáˆF£ZY j®={öŒKavvV¶Jý‚qFý™™‘6j²¦¦F[fÆú$üdÈ ¿WFÄ‹þNOO···‹›ÖÖÖ†B¡lú«Ÿ ‡ÃÍÍÍV«Uê ˆÈ8—žÍ´ÂéÓ§ëêê¤àóùÄÈÓ_‚ÛíÖkúM7ÝtêÔ))È߃¦,-ÛF~² äÐ_0´þ*¦¦¦G>úët:'''‰„”å¯~ˆpJ³Œ#nõW#‘ˆ~®¦¦¦#GŽˆæ&“Éô;::ôãƒeƒ÷îÝ+ù{þüù”mȶ‘€þ‚¡õ×çóiwÒ›´ÙlZeyyy,˦¿ÒL îêêRϦÌ588ØÚÚª™îÌÌŒ}¼^o|öövýÂ'&&dòĉ_ƒ>ØÖÖ¦¯ill¼çž{n¼ñÆôMͶ‘€þ‚¡õ7 ÖÕÕ™Íæ††5øaxxØb±d»¬+ÍjjjL&“ËåUϦ̥°Ûí––â©êÂíÊÊÊÁƒeb¨CCCú½§NÒÆ?d$‘HØíöÙÙYU#î+kqO×ßl è/ZwqYñc5éõzsßÙW|º(~ù §¿ŒˆGsÐ××Ç/_¾ìñx¤üÚŽëï}V¼~(UÈ6@7¯b`pýµÛíV«Õç󭬬HÅbq¹\Ï>ûlÉï² äýÃé/o dÈú ¡¿|EŒðm €þrT ¿@øÈ6@sˆøbÑßÜ¿p±!–––>ô¡¹\.³Ù\QQáõzÏ;·•Å©‹~(UÈ6@ý}---‡Ò~—xuuubbbß¾}Ô_@¡@õ·L‡ª¹çž{´+¸MMMÓÓÓªq¿Íf³X,ñx<}2KÆ+θ–‘‘‡Ãa2™d2z½^«Õ*K‡Ö~*9}®lÛ ªíóù¤Òn· iíżõ¿,R^]]q³ý£èï•LWÅ,Ed;¶{÷kËðxk±ÙlÚV‰Ývuu©gSæÊ¶ @ÀëõÆ×hoo×/|bbB&Oœ8AøÈ6@9*Ðß_2<Ÿ6øØb±¸\®gŸ}–` ¿€þ ¿€þæ¾²k¿ööö£G²sýCèïìì¬ÝnçÛ€¯¾ÁöèïÒÒÒ‡>ô!—Ëe6›+**¼^ï¹s箆þnZše“xà"Ú„J² äÐßÍ«Žþ¶´´:th~~^Ê«««ûöí+(ý ƒ%³#Jõ$äýå¨(ý5›Í9~¸¿¿ßf³Y,–ÎÎÎx<ž.²\Yû%¶ÊÊÊŠŠŠ‘‘m…Ö`ee¥»»»| )h·kÐZÊ,‡Ãd2i5±XÌårñ–@¶<úËQÛ ¿û÷ï?xð`8N—àdz¸¸˜H$ü~oooŠþfkpøðá––©_]]N—æ+kwê•y—ÖhkkS7ë•fЛô•µ»˜ñ–@¶<úËQÛ ¿—/_¾å–[¬V«Óé|äÈIx0|î¹çÈ!'v €þ–ŠúkØþ^ ýý§¿ùÿn½õV1àÛo¿ýÞ{ï={ö,9äÄäÐ_Ž ô·Pô·¬¬ÌÈ;ý½úûðá;þñÿñ£ý(Ÿñ8±yô—£‚3úKøÑ_àÄäÐ_Ž ôWG¿Íf³X,ñx\jzzzxàÕ@ÊR“­±F ¨¬¬¬¨¨ɨ¹Úd™ŽÀ[ú‹þ’m E¦¿Œˆ/ýðx<‹‹‹‰DÂï÷÷ööJe2™Ü·oßç>÷9)Ÿ:uª¹¹9GcáðáÃ---R¿ºº*œCÓë³-³x)Ư¾½úê7n¹Å_Só6³ùå^ïûÏžÓž’ýµ +ÝÜ\è/ÙòPÄú Å¢¿N§3‰hååååÊÊJ­¼°°PSSsäÈù+åÜ«««çææR3šŸþf[&\Ký=pàO}¾?ûÑBš ?úèûöý.ú è/” þ–½“ɤž:zô¨ÔÈ,ë6Î8œ7OýͱpÍô×l~ãÏþlF‹Õ£ÕÜygÀáx«Éô™|é¥'¼Þ÷[­YBKËï_¼x&ã\òøìg‡ße±\g·¿¥¿ÿ¯õk¹ûîÃnwµ<õÎwº>õ©[d–þpÒéü 
ý&ýìgÏØl¿ú“ŸœEÐ_€-é¯ÓéTwõœ?Þår>}Zþª+»Ùg¼ú+"›L&µr<Ïqõ7ã2áZêouõ¯ûÛçsW&ÛÚÞ§4´¾þ_ýê ñTÑÓ@ÀçóýYƹžxbtÏž†çž{Hʯ¼ò¤Ç³ïðᛵ§~øÄz¿ó ”¿÷½G¥¬Í+V}ß}·ª%H¹»»«¿è/ÀVõwpp°µµUû‘Ø™™ñ)¬¬¬444„Ãa)‹755­®®fk|åßÇþŠÅêÇþîÚµKV',õÊzËËËc±Xî €k¬¿>zGUÕ¯uuýɽ÷œ9s—þ²kºþþð‡“EY檨(Ï8Wsó»Ÿþ15)öìp¼U+‹å+÷ª§¤¬Íûµ¯ll|—ªß¿ÿ="Çè/ú[¸0"¾XôWP·Ûm2™ƒÁ Ôøýþ“'OªÇ—šl5úúúÄkõw~˜žžo––.—K– ôwxxØb±è¯g[f‘R¤¿úvñâ™Ï|æèÍ7`ïÞÿätþF¶¯¾¥L^¸ð…ÖÖf«ÕòïcWÞ±™Årz˜ÍoÔ·Lw!e5¯˜ñSOÝ-…ý($ÍØ_² äÐß"V1((ýƒ„?ÿŸ=ñÄhMÍÛòÑßÝ»çÖ[{^}õ)Ϧ4ÙýÙϞɸ®úûØcÇn¼ñ½RUHý%Û@ýå¨ô—ð_ýµX®SæšCõÏ>óÌgÔ³)s57¿[?WÿP—xµ‡”õ« þùÇÔm(Ð_² äÐ_Ž @ ÿ6èïþýïyì±cÿöo_×¾švóÍèè¸Q}+îk_;™MëêÞþ©OÝ¢‚ص«N=›2×™3w9o}ôÑ;D¬åqöìX[ÛûÔ°cõÕ7ù[[ëÖ¯âÎ;MM¿ý±ý?è/Ùò€þúKø·MÅGÛÛÿÈjµ˜Lop:ãС¿úéOÏ©;3ˆËêo|¦ŸQ·¡á·d.—ËñÉO~X=›2—¶ ‘lmø¯žxbTã3™]RSó¶ú§~³ùúßã­Òþ’m €þ4ŒˆG K‘~õ­@ßûÞ£µµn5ùøãwÞ|óøÙ ² äà 7>ôJFô\¸ðÍ}÷ìi¸ýö¿S7£hl|×Ë/ ýô®®þfü·«4 ¿=4X[ë¶X®«©y›r_™´Ù~U?Fý@Ð_(ýÝÊý@ýݘþ†B¡††‹Åâv»ÇÆÆ4‹U(¯q8&“I&£Ñ¨×ëµZ­f³Yû±·Œs ýýý6›MÞÙÙǵÊÕÕUŸÏ'•v»}hhHk¿oß>ý^ÌÏÏWWW«YýE€¯¾ÁöèoUU•V#ÛÛÛ«|÷ui++;pà€’Ñúúúp8œL&‰D P?Sœ2×ÀÀ€ÇãY\\”f~¿_-\fikk“¥---‰FksMMM‰…«y{zzÄŒ‹tGðÕ7ô·TáÄäÐßÍ«Žþº\®‘‘‘ùùùßM™‹Í¸X‘àŠŠŠŒs9ÎH$¢•———+++µ²ÃáˆF£ZY j®={öŒKavvV¶juuµ$wú‹þ–êIÈúËQQú;==ÝÞÞ.nZ[[ …²é¯~2777[­Vm¨ƒ6""ã\zr4Ó §OŸ®««“‚Ïç#ç-ýEÉ6ô¶_SSS‡#ýu:“““‰DBÊòW?D8¥™6,8ýÕßH$¢Ÿ«©©éÈ‘#n·;™Lò–€þ¢¿dÈú Û¯¿>ŸonnNÓ_›Í¦U–——Çb±lú+ÍÔpá®®.õlÊ\ƒƒƒ­­­šéÎĮ̀!Â@ÀëõÆ×hoo×/|bbB&Oœ8Á[ÂÕÓß|üQÆ’yÈËAÉ6@ FÄ‹þƒÁºº:³ÙÜÐР? 
[,–l—u¥YMMÉdr¹\£££êÙ”¹4v»ÝÒ²±±QÝØaeeåàÁƒ²FÑè¡¡!)¨ö§NÒÆ?5þÕ·’| ¿œØ<ú èo¾ú»³ÌÎΊ«I¯×;11Á®Ü(Ë/]œ½û³+¯,®Ó,{ñýÿì½p#×}çIn#0—c‹Aé ŒšËBÑÈTF£±BM¼q9¡aææP“)Üns ƒxc›fÂJhfŬåM¸.oâóˆ–—=¢DSc£t.EZ΄µñJ–p—‰jÄ‹¦ šÃaIÅBX‚è~áÓvÚøÓ9üôçS,ÖÃãë×Ýè/^¦çuCýüŸ½}ÿñ7ÎÿÿÓoÿ_¿ý9qÇšÿùê×Ð_ôÐߣ«¿ÙlöÍ7ß ‡ÃR~g ¯\1?û vÄrj^ñã®ûo=÷b5í~øá‡zhxxø _øÂÚ7¾tá‹ÿé ‘Ý‘’]»zõ*‘@ý=Bú;11áõz].W,ËårRãt:u]á…8Ž»æ‡ßüÔL€oýë_ýÿè/îÜ~Ë¢ñÔÔ”„áÂ… ¢Œ²oü郿õ'…ìˆìŽì”ìÚ÷¿ÿ}ò€þú{œÿ®{V¾û·SïýÈ?Ï…}÷½½ç¾gøíJƒ%_ûÚ×$=ôÐç÷áó±Ï²#²;²S²k7nÜ èïAÃŒxô×¶nø V?SÚɲƒ¿ÿýïK¦¦¦~øá/í_ø×Ç¿tPÈŽÈîÈNÉ®OÖv €þƒCÑ_~ìùóW?V…¥ÇŸ5ò ‚xãÆ 1Å«W¯~wßø’ö‹ß=(dGdwd§d×Ö×ר<ú˧ýåê¯-ÂÿòÀ—®4Ü»5øcÏýJß·ï‰üÕ?ñ÷_|Äz*0‡€È ¿|*Ð_¨½ð+÷}ZÿwýÀÿ>íþ·óÉ?XùîßþSþ9(ÀÀäÐ_>è/Ô[ø_H¾3Ûáð.÷Ùò`_ýå?=Ñ_Ûr(á繿?q?—{È ¿€þ¢¿uÎÆë·^ûú?¼¶Ì[è/ ¿è/\ôÐ_ “è/ ¿€þÔ´þòŸžè¯m!üè/Ùò`Gýå´‡þr ì¹ïÖ?ăly@ýå”`£² äýô—Sú dÈú è/§„z áC=ôðÃ_½z•œm õ¦¿ÌˆGm‹ÃdCØÐÀÓrÈ6@ó@ýÌàè‡p§:»;ýEšÐ_À<Ð_@ý¸»nnnÆb1§ÓéõzÇÆÆÌÞ944äñxäOÑh4›Í*+5(»¢ÁÁA·ÛÝÔÔ”L&‹D¶hãe*• ƒ²–@ 099Yi-¥£ZÊŠ|>ŸÃá öÕ_fÄ£¿¶…[ßvBÖžžÊõõõH$bçÈÈH8^[[Ëçóñx<‘H”µX3ÃÃçOŸ–ED©¥Û*õ·¹¹YmÕêêj¥µXl̹sç &ÛÀX`Sýåy(è/‚}¯>„>Ÿ/“ɨ² ïôûýKKKª¼±±áv»·Õß–––ÅÅÅâs;ýÕu=™L.//—ýë¶#NÌñ>ïè/Ÿ ô—Á¾WÂJVÚðã³ ,ô·ìŸ¶Õßt:ÝÛÛ+FÛÖÖ–J¥*5ÞéÆm è/ ¿œá6W—––ÌWWWW«t\…õÕ_qÖB¡ ÊÙl¶´Ÿ¹¹9Ù˜²kÙÅÆm è/ ¿œaù¹¿‘H$»Eoo¯!”£££ÝÝÝÊŒb±˜ªoll\YY)»5÷W<µìÜßcǎɈKƒh4jÔKÏJšE=OÙµTÚôø¼ ¿ÿ 3âÑ_Û­o»a.—;þ¼¦i¢žcccR0Kg p8SSSªr||ÜétVòÎ1ײO~H§Ó¡PHzÓuýÒ¥KF½ôÜÞÞ.ë ƒÆä‡Òµ”Ýûè/;@ýØûÞ¼yS“7Ð_@¡žC800Ífß|óÍp8,eÞL@ý…záÄÄ„×ëu¹\±X,—Ëñfú è/B@*̈ǘ~BH¶<ØW™yØn}#„dÈ€õ0 „„Ð_À<€B@óB€þæ„ 6ô—ñ˜‡máÖ·: áÜÜœßïohØfÔݶÙòPçúËóP0û^!² äÐ_>˜þzaÙï(–J]×_xáòI¶<ú˧óÂOlyô÷®aF<æa[¸õ’m vÔ_À<€îU-îfô0ô! 
¿€y`@ýÌàè‡ÐüÕ—/_Öu]Ó´P(”N§+µŸ˜˜PÍ:::®]»699ÙÚÚª–º~ýºÑrhhH}AF4Íf³;Z<—Ëõõõ5n!yi,žL&}>ŸÃá8uê”ù{.–——[ZZŒ@]é/3âÑ_Û­oûª¿â©kkk…BáâŋǯÔþÌ™3+++ÒLVçõzÍ/Ož<©šŒŒ„Ãaé-ŸÏÇãñD"±£Ådñõ-zzzÌ_’|îÜ9å¸sssæ‡ ÷÷÷‘m`¬¨Oýåy(è/‚}ßý5.²ŠŒjšV©½q…Uš½4–òûýKKKª¼±±áv»w´¸Ïç[\\Te)ˆ%‹‹Rsâĉ+W®HáæÍ›º®›¿„™lcú è/§B¸þVªWX7«´”Âápìtqs½¡ÅEõÏ<óL{{»b±X2™$ÛÀX€þúË)Þ­þVjoýÒï÷¯®®îzqŸÏg\<.ºú[Ôa(ºpáB ( dëÐ_@9%ÂÃÑßÑÑÑîîîL&#å………X,¶£Å{zzÖ×׳Ùl86Ïý-Úžééi©|ä‘GÈ60ÖÔ³þ2#ýµ-ÜúV+ú« 88ŽÎÎNã U.žË剄s‹þþ~cRoéVÍĮ̀ùdëêYýBH‘Hdzzšx ¿€y!´Á{råŠùÙg€þæ„°nq:º®¿ð dýÌ!@é/3â1Û­oÂJ÷½ôXÜÐ@¶±ýÝþ,è¯m!DÉ6ôÐ_N „ð€¼sÏmuÛëÉØ<ú˧ýÂþ¢¿ÀçýåSþr Ø÷½áàà ÛínjjJ&“EZYék)R©T0t:@`rrRýÉÀh?44äñx¤Y4Íf³F'²"ŸÏçp8жdss3‹I{¯×;66ft•Éd"‘ˆËåÒ4íôéÓêë”K×X¶ÙÆ:€zÐ_fÄ£¿¶…[ßö<„ÃÃÃbŠkkk¢žâÁUêoss³Z‘(f"‘(Ûxdd$KÏù|>››;wΰá"Wßu,HÁè°££c~~¾P(HWÒ¦ÒW(WjF¶± æõÐ_ „{–––ÅÅÅâAp;ýÕu=™L.//—ý«Âï÷/--©òÆÆ†Ûí6š‰WÚcÙª²sÄn›ššÊ®±R3@ýBø¶…An«¿étº··WŒ¶­­-•JUjlƘê`ᬕV7??ßÕÕår¹¬»ªÔ Ð_@¾ƒõÕ_1ÈB¡ ÊÙl¶Ô\çææ|>_YõûýegßZèo¥«¿ÒÕììl>Ÿ—²ü®d畚ú è/ÂwPsÅSËÎý=vì˜ô), ¢Ñ¨Q‹Å”4‹þz<UÙØØ¸²²bô<::ÚÝÝÉd¤¼°°Pi®5÷7»E86ZÊ*Œ©ÆgÏž5ê‹ÖX©Ô¼þ2#ýµ-Üú¶!,ûä‡t: …‡®ë—.]2ꧦ¦ÚÛÛ5M ƒÆä‡ññq§Ói–N1à@ ‹wvvÊ"Ûê¯(x<—nEdÍO~U´¶¶ªÍ˜˜˜0ê‹ÖX©ÙÆ:€š×_ž‡‚þr ØwBH¶< ¿€ypJ „„ly@óà”@ly@óà”@ly¨ ýeF<æa[¸õ툇pÛ»ÍxÙòè/`è/ ¿€y ¿€þæ„ýô0 „» ¡øèÄÄ„®ëš¦utt\»vmrr²µµU^†B¡ëׯ«f¹\®¯¯¯q )ÈKU¿¹¹‹ÅœN§×ë5Q…044äñxäOÑh4›Í¢¿èïîaF<úk[¸õm?ô÷Ì™3+++…BA‹5¿ŸÏ|•·š«¿«««Û®È6@ùT ¿@ø¢þs|³Ùl8.šû›ÝBêö£££ÝÝÝ™LFÊ ±X ý%Û@ýÝ=̈Gm ·¾í:„wn¿µø—ߑ߻Óß\.—H$œ[ô÷÷onnªz)ÄãqMÓ<OѓĀ€Ãáèì윚šBÉ6@ý8ˆÞ¾úòÓxP,§æyýôê3„wn¿ucü›õÁˆüI~n>üï! 
¿€þB†ðöÕ—ÿËo<þ¾®'½¿öØ»O<¦Ý—yò?óú è/ÔU/ üáWzúÿÀéé÷ô‰¦Sê¢ïÔ{?‚û@Íë/3âÑ_Û­o}çç]÷þK¹áøý÷_”7M ê­£|4Ë ìÀXèï]ýµí°y/ üáåîßžòüÛ)çG ý}ü}]oÌ^%9dÈú è/§„º á­ç^œ{ð÷Óî{ì=÷aÀdÈú è/§[„ðÎí·^ýÚ“?uZ]þÁWgÉÙò€þúË)¡þCx빟ô~œçþ’m 5¬¿ÌˆGm ·¾í:„wn¿õjrjãõ[¤ˆly¨=ýô!!ô0 „„Ð_À<€ ¿€y!¨ ýeF<æa[¸õ’m vÔ_ž‡‚yp ØwBH¶< ¿€ypJ „„ly@óà”@ly@óà”@ly¨ ýeF<æa[1üýýý?{¨ü›†¦*#ýÉŸüÉææfŸÏw÷ëêìì|–Ó?äM€:΃œ"9…¡¿€þB!RØ»âÔ©SœGÐ_@¡Võw°®QûNìïÿûÑ_ôÐ_¨aýA|³®Q,yæˆÀžðÀ ¿è/ ¿`ýUWRë^Íÿ¿Y}{ëúê{;ä³EÛYÍþåíß333º®;޽ê¼Vò°»ýª×½+ÒßøÃóóó/½ôÒ«¯¾úÆo¬¯¯sZAw·¾¡¿¶åÃþ ¿eëK³}oÂÝl¿---²H&“Ù« Ûó·bwîÓ)Ûm=è•þvtt<ýôÓÏ>ûì÷¾÷=1àÛ·osFC÷RÅýµí@kZØüŽš>–f»võ÷î·ý­§½Òßöööo|ã333bÀ/½ôÒo¼Á ýEÑ_¨ýýã?þc¿ß¯iZKK‹”Í-+ýIõ011ñÁ~ÐétÊ_zè¡}Õß¾¾¾»Ô_¯×ëñx¤0>>.õò[Ên·»¹¹ÙÜ^!õ_ûÚ×Jû1—e«b±Xcc£¼ÿøÇË^S¬Ô¦ÒºÖÖ֢Ѩs‹ÞÞ^YÜz;::Jÿ«ºRÿ¥ïõþNOO{œj¶€ª,ºk~ÛmVÜÿýò[$CêŸzê))wuumO¥}1º’†B¡çŸ¾ìf”mVi«ßrë*½‡ÕdCÞÛÖÖVë÷¶š},]ªÒÚ˾ÇÆ&úû¡}è‘G‘wàé§ŸžŸŸíµ×8£¡¿è/ú u®¿"µRþô§?-åÏ~ö³R64×âOª‡p8üú믿ñÆŸøÄ'äåþðË/¿¬Öx7úÛÓÓ#/777ƒÁ äw.—“B$1·—³à‚D—­ÕóüùóRþÞ÷¾§ÚŸ8q¢t*µ©´.±1y9;;ûío[ gΜ1Ú‹'•nCww·j/”ngiÿ¥ïõþŠÒ9ÞN§-ÞX‹m¦¦¦ ¯Å”r<—ò_ÿõ_m±/ªŸRñZ^^VOÿ°¾äYÔì.·¼š«¿¥ïa5Ù÷V Õú½­´%ÖoE¥µ—M‹Å¡±‰þÊ? 
D}ôQyŸŸ}öÙW_}•3ú‹þ¢¿Pçú«fCþà?òøC)KͶR=¼òÊ+êå7äåÏÿüÏï‡ûþÝßýÚG¿ß¿£ 3E'ï?ú£?’—###ò»¿¿_~ÿéŸþ©ü–zsûB¡`¡¼æ²Óé4_H3ôÔL¥6•Ö¥Ú¶‚¼4Ú›ËF{—Ëen_´¥ý—¾?Öûëóù´†wµ¶¶þþïÿ¾¸TÙ7Öb›ÍͤMÓäŸÒçñãe>/•ö%•JÉ¿Udٲx¥f‘¨rË·ÕÓJÇÑ:ò>H3ë÷¶Ê},»_¥k¯ôW:4è/ ¿UÁ­oè¯m©Ý[ߊ&B¨ÿWÝ韊þºîvúŸ’E'ïééiyÙÜܬ®™É)_ÊR333c­¹Öú›Ëå,¶¡Rë>Ë ™¼½R(kýµ–¹jÚ¿ôÒKkÿEå[íííw#‘2ÉËD"!¿Ÿzê©êõW&ù'–Å>Z4;ýÝ]6ä½D"Öïm•ûXv¿J×^é®thìsëú‹þú 5Æ]ê¯Ïç“òøÃÒK¼:˜«¿/¿ü²á¾Æå±]ëïâ⢪•—cccêåÒÒÒîôWýóÐÐÅ6TjS©Oã¿ãS©”ù¿ã½^¯¼”]øÆ7¾an‡-&?ܽþ*Ôµÿ³¯ìʘ)[i›‹úÙØØhjjRÓNʾWÆÍ«~Ì0Ôaúú׿^ª˜ÆfTjf‰*·¼ôß3ÛÎÉ®&Õ¼·E[Rå[Qií•ÒRéÐØäÁgè/ú è/ØN?÷¹ÏIù³Ÿý¬”?ýéO›'øZüIõð‰O|âõ×_ÿÑ~ôÉO~R^~ñ‹_Ü[÷mmmݵû–=y»\.u‰ZÊëëëR–šé ¹œÍf‰„Çã±xBj¥6·Ó7cIammMÕ‹î˜;1ßú¦¦2WÙÿNõ×øßó¶¶¶oûÛªr||\]G4n´*»Í¥ëU×e_ʯJû"R¢þ%VT_´•šYD¢ú-7(ZéÝd£š÷¶hÙ*ߊJk¯ôW:4è/ ¿€þB}꯺­MSKŸüPéOÆ“~á~AÎßÒfŸÜw§ó}‹.ÔÔ} ^xáu˜Žìæóù'NÜsÏ=|fZZJM>;è/ú è/ØHïž}}rð®ïu³'ÊQ\.×ý÷ßíÚµ£¼@`~~žCvÔÒbÛCƒþ¢¿{·¾¡¿¶¥¶n};Êú{7÷º;ôý=Z*è¯mDý鯺†´Ï÷½›{Ý€Èú‹þò©@Ñ_»è¯2àšF}¯îK¶< ¿€þúË)¡*ý­˜ïK¶< ¿€þúË)ÁŠÎÎNuÅôó‡Ä'>`ý×ÞÞÞsçÎ%‰jzÃ}É6ôjI¹Cýµ-‡~5‚Ÿ:uêȆ“ Ùò€þBÝê/ ¿€þ¢¿€þú è/ ¿œTýôÐ_@Ð_@ýôý…ÚÐ_î@m ·¾€þÊn’4v è/ú[K*è¯mú‹þ’m è/ ¿€þrJ°»þnnnÆb1§ÓéõzÇÆÆÌv;44äñxäOÑh4›Í*÷5(kÆ—/_Öu]Ó´P(”N§-ºR ºÝ¦d2iÝ8•JƒA© “““•™L&‰¸\.ٌӧO¯®®›'kñù|‡CŽ”¼!Æ———[ZZÌF¶±ýôÐ_N õ©¿" ===b~ëëëb†×ŽŒŒ„Ãáµµµ|>lj„!‘džVY¤P(\¼xñøñãÖ] ‹žJ½(¸l†uãææfõ µ®”óèüü¼lƒô ÝŠÜ›wîÜ9å¸sssâÍÆ–÷÷÷‹ú“m`¬Cýô—SBýë¯ÏçËd2ª,Ãný~ÿÒÒ’*oll¸Ýîjô7—Ë©²Ø§¦iÖ]µ´´,..uR©±®ëÉdryyÙܸl¥ÙŒ¦¦&cóĪ?8qâÊ•+R¸yó¦ô# N¶±ý…ZÕ_î@m ·¾íB‹tÖxÙðã8Žjô÷Ô8N÷ööŠ ·µµ¥R)‹Êùùù®®.—Ëe½ºgžy¦½½] ±XÌ<õ‚lcú µ§¿€þú[½þš¯þ.--™¯þ³f-·ý­ÔU¥«¿eÌÍÍÉ6[TJ³³³ù|^ÊòÛláEK…B¡ .B¡@†Ð_@ý[èïàà`$ÉnÑÛÛk8âèèhww·2ã……cmccãÊÊÊŽô·RWjî¯È®yîo¥ÆRP®,¦ëñx,*¥`L>{ö¬…þNOOK¥œt 0ú è/ ¿`ýÍårçÏŸ×4M¬qll̘°«L48ŽÎÎNã9 ãããN§³Ò¼…J/Ëv% ˆO=ù¡lc)´··ËæƒAcžCÙJ)´¶¶Ê⺮OLLXèïÌÌŒšÿè/ ¿€þ‚]ô×ÌÍ›7Å;ísÔ"‘Èôô4é@¡æõ—;$Ð_Û­o»Óßl6ûæ›o†Ãa)Ûåc{åŠùÙgdëÐ_¨aýåù8è/ýÝ‘þNLLx½^—ˋŌ'—Õ7N§S×õ^xlcú è/ ¿œl§¿@¶< ¿€þúË)ý² äýôÐ_Âþ;ôý=jp‡úk[¸õí¨é¯Åwd;ôýôj~·¡þ"¸€þ¢¿€þ¢¿è/ú €þ¢¿€þ¢¿èï‘Ô_Ù쉉 ]×5M“3еk×&''[[[åe(º~ýºÑrhhÈãñ8Îh4šÍfÕ²FoÉdÒçó9Ž"9t»Ýæ/xK¥RÁ`P: ²RR€þ¢¿è/ 
¿€þ„þž9sfee¥P(HJ½^¯ùåÉ“'U³‘‘‘p8¼¶¶–Ïçãñx"‘0/êíܹsJŽÍ>}ú´,¾¹¹)¬*›››Õ‡buuÕèÐ_ôýÝ1Ü!þÚn}Ûþ¶*Ê[ôRÓ4UöûýKKKª¼±±áv»+é¯8®ù¥*´´´,..­Z×õd2¹¼¼LtØÁ¶y@Ñ߃8 úkÛþVÒßj^6ü8jnÃŽ/ݪt:ÝÛÛ+&ÝÖÖ–J¥0;Ø0è/ú˧ýô÷ˆê¯ßï_]]-3 V·xÙ«¿sss>Ÿ3°ú‹þ¢¿|*Ð_@ŠþŽŽŽvwwg2)/,,Äb1UßØØ¸²²²íâjyî¯t¢œXô×ãñ`v@Ñ_ô—Oú èïQÑ_eÀ@ÀáptvvJoªr||ÜétVšä`~900 ®l~òƒtÒÞÞ®iZ0dò;l¼~ëæWžÌýh ýEÑßÃè¯máÖ·]è/m8:,§æåÓúDc×­ç^´IÐ_ôÐ_¨íý€»ä?%XùyÊÿà+c_¿sû-; žè/ú è/ ¿è/€}ÉÌ<÷˜ã>ùØ~Ëó«SÎ\ë°¾Œþú è/ ¿è/@ð?xßsò[îÖëÅ`ôýôÐßý !?üðSÓ?Sï=© K?‹þú[ wH ¿¶…[ß,þÊI…lC ñßþbúÊ»îU¾ûôÏt?õgî7þþ‹¨«¿ÜúèïÎ΂€þÚö@ ¿œTÈ6ÔûN½÷#ßnë}¢éWþæ“°òÝ¿ý§ü?ÖeÐ_ô—Qýôýv[óÊè£ê¢¯ùroçýE%Ñ_@Ñ_``·/깿ÿÄýE—{Ñ_@%Ñ_@Ñ_² õÆÆë·^ûú?¼¶l«< ¿èïÞÀè¯máÖ7ô—ly@ÁŽú è/ ¿è/ ¿€þú è/'@ýôÐ_ôÐ_@ý@¡6ô—;$Ð_Û­oè/Ùò€þ‚õ—çã ¿ôý%Û@Ð_@ý唀þrR!Û@Ð_@ý唀þÚmoh ÛÀX‡þú è/§ôý%Û@Ð_¨Mýå ô×¶pë›Ýôww"[‹úËÀõšôýôj{GÑ_@Ñ_@Ñ_ô·†õ7“ÉD"—Ë¥iÚéÓ§WWWUýææf,s:^¯wllÌÊJõRH&“>ŸÏáp¨š¡¡!Ç#-£Ñh6›µ^¼ìf4˜06xGÝú è/ ¿€þþrNšŸŸ/ ù|~ppP$RÕK¹§§Gür}}]ÄÔÊJõR8wîœá£###ápxmmMºÇã‰DÂzñJ›Q$²ÖÝ®o!ôýôÐ_@·G쳩©I•}>_&“Qe)BY©^ b¥FW~¿iiI•766Ün·õâ•6£¨A¥n[ZZŒúÅÅEôý…zÓ_î@m ·¾í¹þÎÏÏwuu¹\.5ÇÀ˜ºPdfÍ­²Þ̶ÝV¿;ê–lcú u¢¿<ýå@ ¿{¥¿~¿vv6ŸÏKY~—½Ê»´´´m}éeZc±™J‹WÚŒ*»­ƒ«¿ ìP¯y@Ñ_>è/ ¿GK=Ê­håÙ³gÍs|#‘Hv‹ÞÞÞm닌stt´»»[™î‚yJqÙÅ+mFccãÊÊJ5ݪ)ÅB8F±ýôÐ_N èoyR©Tkk«ÃáÐu}bbÂÇ\.wþüyMÓDLÇÆÆ¤`]_jœ¢ª@@zîìì”m³^¼ÒfŒ;Nsçe»ÝÜÜŒÇãF·è/0Ö¡¿€þúË)ýÝ=7oÞ㬾þ.»%Ûè/ ¿Åp‡úk[¸õí õw`` ›Í¾ùæ›ápXÊÛÖße·d›7ê2è/ú è/Ôön+ý˜˜ðz½.—+‹år¹mëï²[¨ãÁýEýô·ôýEýEÑ_ôýEýEÑ_ô·ì`Ê·¯0x¢¿èïžÀè¯máÖ·£¦¿.;ôý=ü³  ¿¶=è/úK¶< ¿€þúË)ýÝÆ_'&&t]×4MÎO×®]›œœlmm•—¡PèúõëFË¡¡!Çãt:£Ñh6›U˽%“IŸÏçp8ŠäxppÐív755IU“J¥‚Á td¥d›8 ¿€þ2J¢¿€þ„þž9sfee¥P(H†½^¯ùåÉ“'U³‘‘‘p8¼¶¶–Ïçãñx"‘0/êíܹsJŽÍ>}ú´,¾¹¹)¬*›››/:6:$Ûè/ ¿Œ’è/ ¿û«¿†­Šò½4¾”Øï÷/--©òÆÆ†Ûí®¤¿â¸æ—ªÐÒÒ²¸¸X´j]דÉäòò2Áf`ôÐßmà ô×¶pëÛ~èo5/~5·aG‹—®:N÷ööŠI·µµ¥R)²Íê2è/ú è/Ôön[ýõûý«««»^¼ìÕ_ƒ¹¹9ŸÏGÆêxðDÑ_@ý­1ýíîîÎd2R^XXˆÅbª¾±±qeeeÛÅÕÜ_hóÜ_éD9±è¯Çã!cè/ ¿€þú»gú{çö[‹ùù½;ýUGgg§¬KUŽ;ÎJ“Ì/Ä•ÍO~NÚÛÛ5M ƒL~@ýôÐß½ÑßÛW_~úJƒåÔ<ÇÐ_¨Iýå ô×¶pë[õú{çö[7Æ¿ùWŒÈŸäçæÃO‘² äý…ZÕ_žƒþr öO©€IDATÐ_ ý½}õåÿò[#¿¯ëIï¯=öîi÷ežüÏ„‡ly@ýô—SB}éïÃþÕï\˜ùÙ7ýþ>ÑtJ]ôzïGp_² äýôÐ_N u¨¿ïü¼ëÞ)7ÿ¯ÿþ‹ÿßçÿo)¨ÿK¥L™òÑ/£¿€þ¢¿è/ ¿U…pêáGgÿ·ÏO7lÊùC_׳WIÙò€þBmë/wH 
¿¶…[߬ô÷¿ŸTn=÷â܃¿ÿ˜vßcï¹&Û@Ð_¨ýôÐ_ ýU5wn¿õÊèמü©Óê2ð¾:ËAôÐ_@¡nõ×àÖs/>éý8ÏýôÐ_@Áú«¸sû­W“S¯ßâPú è/ ¿Pÿú €þB ë/wH ¿¶åÐo}{ÿûßÿÀ!ñɆ|¾2ò×_ú¥_:vìØ½÷ÞûÔ §6È›u™6Ñ_ôwßU Ð_ÛˆÐ_Øè/ú‹þ¢¿P{á—ã{ê¿óáX†òööö}èCÅ'>0Rù«´ù…-: QG ^ó‹ÅÐ_ôýE¡†Ã???ÿôÓOãßxäÙ6„ŸùÌg>÷¹ÏýÙŸýÙ#PƒÈäM€ºÏƒ ›2xÊúÚk¯qFCÑ_ôj)ü/½ôÒ³Ï>;33#Cù£Ŷ!üÝßýÝO}êSò'ò(Ô ry ¾ó ¦ ›2xÊúÆopFCw·¾¡¿¶åˆ„ÿÕW_ýÞ÷¾'ƒøÓO?=uPlÂßùßù½ßû½/|á SPƒ<Öûx ¾ó ¦ ›2xÊzûömÎhè/ ¿PK¼ñÆ2|¿ôÒKóóóÏÕ\ýýÌg>óçþçÏ=dÀ”aSOB×××9• ¿€þB-!÷íÛ·eíµ×^=(¶ ág?ûYÉá£>ú*ÀÑCL6eð”!ôÎ;œJÐ_@! ¿€yB@¹õ ó°-v?!$Û@ì«¿<ø óà@°ï„ly@óà”@ !Ùò€þæÁ)Ùò€þæÁ)ÙòPúËŒxÌöpë!$Û@쨿€y!¬­64T;®Þ¸qãž{îÙ¿þýôáÒß/ùË}}}è/ú ˜Â݇pppÐív755%“IU“ËåÄ2·‚¼4}úôÚÚÚææ¦x°ª‡Ãë[ôôôÈKCXÏœ9³²²R(¤7\óË“'OÍd)cq£Û²ú[Z?22"k—MÊçóñx<‘H¨zY‹è¸’éææfµ/«««FƒJ "¾u®¿ÌˆGm ·¾í"„---‹‹‹E•>ŸÏ¨”‚h®á‘ƵX‘Ñ¢—š¦Í–––ŒÅ¥·é¯ßï7ߨØp»Ýª<77×Õեʺ®'“Éåååj¬ýe`òèïîÏ‚€þÚö@²!,«†E•f¯µhVIg·]¼´ÞŒÃáPõ###ÃÃêœN§{{{EpÛÚÚR©”õ‚u ¿ ì@ýåSþáß›Vºúk¾|k¾ú[¥þ–½ú+>Z(T9›ÍZ\ý]]]-Ý‹®®®¹¹¹¢J©1ú¯´ ú Œuè/ ¿œá;¨¹¿b湿RP“wERÃá°yîo•ú+‹g·0Ïý=vì˜lƒ°¬.íWVVŒ~FGG»»»3™Œ”b±ØÛ[“¤™aÏR©¬]ô×ãñX,XÚ?ÙÆ:ôÐ_N ¶¡Ø­bÑ“‰„s‹þþ~1ãêïØØ˜zC<7O§Ó¡PÈápèº~éÒ%£ýøø¸´4÷&"¤eggçÔÔ”ÔÌÌÌD"£T¶··kš Ée,Û?ÙÆ:€ºÒ_fÄ£¿¶…[ߎH÷C4ÅÂŘÉ6yô0[èoGGÇ+¯¼Â@ó8r!t:ô0ô! 
¿€yÔcw:ÿ/f@w3â1Û­oèo½ú4;@wô×¶‚Ö·þÖ÷õcv €þò©À<€ð£¿è/ðy@ùT`ö}‡!k¼|ù²®ëš¦…B¡t:müihhH}uE4Íf³Öõ›››±XL*½^ïØØ˜YFwÚÞ “ÉD"—Ë%Û¦¾šÎøÓàà Ûí6¾ª£ÁD‘ çr¹¾¾¾Æ-¤ /­w<•JƒAÙ°@ 099I¶±ýô—ð×›þŠ•®­­ …‹/?þN?###ápXêóù|<O$ÖõÆ÷$ R0ìs§íÍtttÌÏψɲÒÞøcõEÍÒ§ù‹š+}ÝÀÀ€l€±"ó8—Ýñææfõ.‰m¶±Ðß# 3âÑ_Û­o»Ó_エˆ ¦iªì÷û—––TyccÃív[×·´´õ‹‹‹†}î´}%dÛšššŒee‘Ò)ûÒçó¥àõz­w\×õd2¹¼¼L¶± fôÐ_ „;Òß²/~‡Ã±mýNû±Ø Åüü|WW—Ëå²^vÛ 0ך[©}:îííMokkK¥RD ýôl¡¿~¿ß<×Ö R½ÅÕßµ/Z×ììl>Ÿ—²ü6Úìôê¯yEæ«¿Öþ=77'Ë-ôÐ_°…þŽŽŽvwwg2)/,,ón+Õ«¹¼Ù-Âáð¶ýTjoÆãñÓpÏž=k´Qs¥Ò<÷·±±qee¥tGŒIÆjE湿ew\¶P¹µè¯lÑ@ý[è¯2×@ àp8:;;§¦¦¬ëÅDãñ¸¦i¢ŒEOrØi{ƒT*ÕÚÚ* êº>11an#+¾k<ùAw:eŸüH$œ[ô÷÷Ëz­w\¶°½½]6, 2ù 6ô—ñè¯máÖ7BH¶<ØQy æÁ`ß !Ùò€þæÁ)B² äý̃S!² äý̃S‚MB877ç÷û·ýÖ‰jÇdž!lEdÈÀÛÜúè/ᯧž8qâù矯9+ÝéŠj]—Ø<ú è/ÀÞ„ÐétýµÛåaôÐ_ „U…ð.5ý@ó@¡fBØ`âí­oˆèëëkÜB òÒh–L&}>ŸÃá°X\Õ\¾|Y×uMÓB¡P:6 y<§ÓF³ÙléF¦R©`0( Àä䤪´Ø$ ÿ.»aÛvXiËýôê'„E_¥‡×·èéé1?ð¹sçÊ:k©}ŠÝ®­­ …‹/?þÎVŒŒHÏRŸÏçãñx"‘(íª¹¹ÙøŠc£Å&Yè¯E½E‡e·ÞæÖ7@ ½ê¯Ïç[\\Te)x½^£xaù±Ä2ë©â‘𦩲ßï_ZZRå ·Û]Ú•®ëÉdryyÙ\i±I»Ó_‹Ën9ÙÆ:€·yð ¿„¿^õ·H ´˜Gk=÷×,£fŠ&Q(Òétoo¯˜q[[[*•ªr“vª¿UîãQ›:ÌÀäÐ_>è/þ½×_ŸÏg\£-{©u×úë÷ûWWW«Ü—¹¹9Ù’*7ILºP(¨r6›­æêo5ûˆþcú è/§„ú×ßÁÁÁžžžõõuñÈp8\:Ѷ”ÆÆÆ•••mõwtt´»»;“ÉHyaa!‹•v%•jZ‚è¯Çã©r“Ž;&û%,zFúJVå>¢¿ÀX€þúË)¡þõ7—Ë% çýýý›››Ûºàøø¸4Þöš«2à@ àp8:;;§¦¦J»’ÊöövMÓ‚Á 1ùaÛMJ§Ó¡PHºÕuýÒ¥KF}¥ «rÑ_`¬¨%ýeF<úk[¸õ’m vÔ_À<€B@óBHýÌ!ú ˜ƒ[]Ðß]ÁŒxÌöpëÛA†pÏmµR‡h1;@ïê,è¯m!ܧ¢¹dÈ ¿|*0 üè/0°yô—Oæ„ïB˜J¥‚Á Óé “““ª2—Ëõõõ5n!yYÉVL”¶Éd2‘HÄårišvúôió÷ºÝ¦d2itxùòe]×¥q(J§Ó¥Z<44äñxdk£Ñhv‹æææõõu£Ôx½^USÔØbÉ60Ö ¿€þrJ°KEU¥¸i"‘P•ápx}‹žžã›Kõ÷mË«¶óóó…B!ŸÏ‹ïßu<<<,6¼¶¶¶¹¹)õF?â©R)í/^¼xüøñ¢þGFFd«¤ôÇÕÖ~êSŸ3Ö(åÏ|æ3•WÚ_² Œuµ­¿ÌˆGm ·¾í"„º®'“Éååes¥Ïç[\\Te)x½ÞÝ鯑ڦ¦&Unii1ú7wh\f–Æš¦õï÷û—––TyccÃívKaaaAvÁ褵µ5“ÉTj\iÉ6€ÚÖ_@VÂt:ÝÛÛ+vØÖÖ–J¥Ê­a¢;Õßùùù®®.—Ë¥fG8‹EŠ*KûoøqŒÞ|ðÁ™™)ÈïóçÏ[7.»¿€þú ¶ áÜÜœÏçSe)—Nïæê¯ßïŸÍçóR–ßFËJW­û—Þ̳‡Í[~ÿý÷KA~_¿~ݺqÙýôÐ_°Kc±˜2QÑAÇ£*{zzÖ×׳Ùl8¶žûÛØØ¸²²Rv½Ò¡1ÑöìÙ³Æ"jî¯TÍýµÖßÑÑÑîîn5·aaaÁ˜I,tvv^¾|ùãÿ¸QS©qÙýôÐ_°K§¦¦ÚÛÛ5M ƒÆd€\.—H$œ[ô÷÷‹¤Zèïøø¸4+{ X:lmmu8º®OLL˜ÛˆR‹7=ùÁZ•ÔéP|W¶Ü¨÷•m£5÷P¶qÙý€ÚÖ_fÄ£¿¶…[ß!Ùò`Gýåy(˜‚}'„dÈú ˜§BHÉ6ô0N „È6ô0N „ð€†Ôî'&Û@ý½;˜yØn}ÛEÝ>Ñ_² äÐ_@!„‡¥¡è/ú è/ú è/ ¿ûBeŸ &T}*• 
ƒN§3LNN–ö¹¹¹‹Å¤×ë3Ìår}}}[HA^ªúL&‰D\.—¦iê+ßÐ_ôÐ_€ÃÑßR mnn6¾¯8‘H”ö©¾9›Í®¯¯‹×‹ „Ãáõ-¤ñ…Éóóó…B!ŸÏ˲Ʒ£¿èïÝÂŒxô×¶pëÛ꯮ëÉdryy¹RŸ>Ÿ/“ɨ²ŒÅ¥~qqQ•¥àõzK— njjBÉ6@â,è¯m!Ü‘þ¦ÓéÞÞ^·ÛÝÖÖ–J¥Ê …?Þ¾R?š¦©Âüü|WW—ËåRS,úK¶<ú˧ýÂTô×`nnÎçó•Ö›¯þ.--™¯þÊKU6_ýõûý³³³ù|^Êò{ÛõÙòè/Ÿ ôÿ>êoccãÊÊŠQ‹ÅÔÑ_ÇSÚçàà`$ÉnÑÛÛkô£æ¯¯¯K}86æþJ'Ædâ³gÏ¢¿dÈ ¿|*Ð_ ü‡©¿ãããN§Óx955ÕÞÞ®iZ0,;ù!—Ë?^ˆ×Ž“¤>‘H8·èïïßÜÜTõÒIkk«ÃáÐu}bbý%Û@ýÝ3˜þÚn};¬Þ¼y3@² äÐ_À<Ð_¨ç d³Ù7ß|Ó<ÉÐ_À<Ð_¨ÏNLLx½^—ˋŌ¯·ô0ô! ¿€yB@ïfÄc¶…[ß,þz¸?„“lyô÷Ð΂€þÚö@Âà !ƒï!@ùT`@øÑ_à=òè/Ÿ Ì?ú ¼‡@ýåSyáGÉ6o@+ÂŒxÌöpëúK¶<ØQóBˆþú è/BôÐ_@¾ýöÛo-þåwä7ú è/ ¿PÏ!¼}õå§~ú×¥ÁrjþP¶ Ðß»…ñè¯máÖ·êCxçö[7Æ¿9«oeûÁWgkÛ€lyô—3 ú „CxûêËÓ;4õ¿üÄ¿z@ê§Þ{òÙ«² äýô—SB]…ðÂÀ~¥§oê§>þDãSï¹O]ô Þo÷ePâ=òè/Ÿ ôÿ!„ðp'Ùòè/Ÿ ôÿ†ð«¿?ù±'¸òîyõÈ6@÷fÄ£¿¶…[ߪ á­ç^ü繿ï=ùøû~&Û@êAýB¸mÕ“žöwÀ“ýô਄ðÖs/î÷sýÌàh…ðÎí·^MNm¼~‹wÐ_@ú{„aF<æa[¸õ’m vÔ_ž‡‚yp ØwBH¶< ¿€ypJ „„ly@óà”@ly@óà”@ly¨ ýeF<æa[¸õ’m vÔ_À<€B@óBHýÌ!ú ˜ƒUªWoܸqÏ=÷ì_ÿP“úËŒxÌöpë[Ýëï—¿üå¾¾>ê/;@wÌö‚V áàà ÛínjjJ&“ª&—ˉe6n!yixäÄÄ„®ëš¦utt\»vmrr²µµU^†B¡ëׯÍ.^¼èõzNg,3/^j¥ &Œ? y<Y<f³Y£>‰ÌÌÌH!•JƒAid,,Û?ÙÆ:ôÐ_N 6 áðððéÓ§×ÖÖ677ŃUåÀÀ@8^ߢ§§G^ÂzæÌ™•••B¡ ½‰àš_žŸÇã‰DBÕËZDÇ•L777«}Y]]5TZ°®þ2°yô—Oú „oBØÒÒ²¸¸XTéóùŒJ)ˆæi\‹-z©išÑliiÉX\zÛ‘þúý~cñ ·Û­Êsss]]]ª¬ëz2™\^^®fAôëÐ_@9%·-Ô°¨ÒìµÍ*éì¶‹—Ö›q8ª~dddxxX•Óétoo¯n[[[*•²^ýÆ:€:×_fÄ£¿¶…[ßvÂJWÍ—oÍW«Ôß²WÅG …‚*g³Y‹«¿«««¥{ÑÕÕ577WT)5Fÿ•äÖ7`¬¨sýôaõ!TsÅÍs¥ &†ÃaóÜß*õWÏnažû{ìØ1Ù1`Y]45Ú766®¬¬ýŒŽŽvwwg2)/,,Äb±··&3H3Þ¥RY»è¯Çã±X°´@ý[‡PìV±èɉD¹E¿˜ñNõwllL=!‹§ÓéP(äp8t]¿té’Ñ~||\Zš{‘ Ò²³³sjjJjfff"‘ˆÑ@*ÛÛÛ5M ƒÆä‡² –íÐ_@îÝ(¹¢).ÆÌ@ó°…þvtt¼òÊ+hô· ̈Gm ·¾‘:NÒH¶<ú{T΂€þÚö@BBH¶< ¿€ypJ „å™››óûýjƒ¹ dÈ ¿|*ý%üuÂ'N<ÿüó¥e$˜lyô—O ¿„¿Chž¿Ë\^² äÐß# 3âÑ_Û­o{BóU^®ø’m €þ ¿P?!R_QF³Ù¬òݲ©ðàà Ûí6_F&“‰D".—KÓ4õer•Ÿ:uÊøZ ayy¹¥¥E­Ð_@ö+„###ápxmm-ŸÏÇãñD"ñÎHWáê¯QVß–, š¿-¹££c~~¾P(HoRi|ípiã¹¹¹`0htÛßß?66Æq@ýØßúýþ¥¥%UÞØØp»ÝUêoKKËââ¢ÅJE‚›šš,Ÿ8qâÊ•+R¸yó¦®ëÆw#ú è/À~…°hzƒÃá¨RËNžŸŸïêêr¹\½<óÌ3íííRˆÅbÆô @w 3âÑ_Û­o»¡ßï7ÏеVÞ··»ú+½ÍÎÎæóy)Ëïm/‡B¡ .B¡@€É6@÷å,è¯m!,ÂÑÑÑîîîL&#å……c¶n•sEÍs=Z…ÔŸ={Öº±0==-myäÒK¶<ú˧ýÂ@!‡£³³Óxöú+ 466šŸüJ¥Z[[¥+]×'&&¬ 333jþm €þò©@ð×#‘Èôô4Ñ%Û@ýåSyá¯ÿ^¹rÅüì3 
Û@ýÝ%̈ǟ—²ü®´ŸÏgîÜ|¥¶ìŽÌÍÍÉ"Öú»Ó>É6€š×_@î¡þª¹¿Ù-Âáp¥¹¿«««æ¹¿ǘ­{öìY£qccãÊÊŠ±cb±êÜ11a4w:æ'?$ çýýý²®²355ÕÞÞ.Û ·üPeŸ€þú „ýÌ! ¿GfÄc¶…[ß!Ùò`Gýåy(˜‚}'„dÈú ˜§B¸}ìþ0nD#Û@ýåSyáGÑe² äÐ_>è/þý ¡õ×°¢þry˜lyô·Z˜þÚn}CÉ6;ê/ ¿@« aƒ £æòå˺®kš …Òé´ÑxhhÈãñ8Îh4šÍfK×’J¥‚Á 4“““ª2—Ëõõõ5n!yYd·eý»ì†mÛa¥-ôÐ_ „oW²O±ÛµµµB¡pñâÅãÇßévdd$K}>ŸÇã‰D¢t-ÍÍÍÆ dÁõ-zzzJ¿‚¸ÒåçJõ–Ýr@ýBh¥¿ÆõTñHMÓTÙï÷/--©òÆÆ†Ûí.]‹®ëÉdryyÙ\éóùUY ^¯÷.õעò[è/ ¿@­ô·’Œšq8¥kI§Ó½½½bÆmmm©Tªl‡†•îZ·í°ìK¨gýeF<úk[¸õmÿô×ï÷¯®®V¹1sss>ŸO•¥`\6.{õWLºP(¨r6›­æê¯u‡u©¿ ì@ýÝýYÐ_ÛBX6„+++Ûêïèèhwww&“‘òÂÂB,+]‹Tªi ¢¿GUöôô¬¯¯‹Ú†ÃáÒ¹¿ÇŽ“ ½ŽF£F}¥ ۶úÔ_v €þò©@ðïMÇÇÇNç¶×\•‡ÃÑÙÙ955Uº©loo×4- “r¹\"‘pnÑßß¿¹¹YÔs:…BÒ­®ë—.]2ê+mض¢¿ÀX€þúË „„ly@óà”@ly¨ýeF<æa[¸õ’m vÔ_À<€B@óBHýÌìÂÃýá ¿€þØ%„è/ú»¿0#ó°-Üú†þ’m vÔ_Î4˜‚}GÉ6ôÐ_N „ý%Û@Ð_@9%BŽ Ùò€þúË)r\È6€ÚÐ_fÄc¶…[ßÐ_² äÀŽú ˜†ðÎí·ÿò;òýôÐ_¨ç¾69û-÷G¥ÁrjþP¶ Ð_@ö=„ÿ°ø£ùÿù_—úV¶|uö°¶ Ð_@ö1„¯MÎ~§ý7¯¼ë^©œvT øÙ«‡¸m€þÞ-̈Gm ·¾YüõOÿÃçùÅß|Ìùu¹W~žü©_{âø•ýv_ô—lyô÷Ï‚€þÚö@Ø<„‡ûC8É6@ùT ¿@ø4„þð+=}ÿ̃OÿL÷wïû_ŸhÜšõûî7~Šäm è/ ¿œê6„·ž{ñocžh|à»÷þ/Oþô¯cÀdÈú è/§„úáÛoÝÿæw>ô›jr«ÉÇÈÙòP«úËŒxô×¶pëÛ.Bx빟öÿûúÜ_ Û@ýÌàh…ðÎí·^MNm¼~‹wÐ_@ú €y!ô0Ì! ¿ 3â1Û­o„ly°£þò<̃Á¾B² äý̃S!$„dÈú ˜§BdÈú ˜§BdÈ@mè/3â1Û­o„ly°£þæ„°¶BØÐPí¸zãÆ{î¹gÿúôÐ_ „GH¿üå/÷õõ¡¿è/`@wÂÁÁA·ÛÝÔÔ”L&UM.—ËlÜB òÒðȉ‰ ]×5Mëèè¸víÚäädkk«¼ …Bׯ_7š]¼xÑëõ:ÎX,f^¼ÔJLòx<²x4Íf³F}$™™™‘B*• ƒÒ È6X,X¶@ý›†pxxøôéÓkkk›››âÁªr`` ¯oÑÓÓ#/ a=sæÌÊÊJ¡PÞDpÍ/Ož£R ¢¹†G×bEF‹^jšf4[ZZ2—Þv¤¿~¿ßX|ccÃív«òÜÜ\WW—*뺞L&———«Y°ô—È ¿»? 
úkÛAˆ°¬Uš½Ö¢Y%ÝvñÒz3‡CÕŒŒ «r:îííÁmkkK¥RÖ Öþ2°yô—Oú „oBXéê¯ùò­ùêo•ú[öê¯øh¡PPål6kqõwuuµt/ºººæææŠ*¥Æè¿Ò‚è/0Ö ¿€þrJ „ï æþŠ5šçþJAMÞI ‡Ã湿Uê¯,žÝÂ<÷÷رc² bÀ²ºh4j´oll\YY1úíîîÎd2R^XXˆÅbooMff†=K¥²vÑ_Çc±`iÿdëÐ_@9%Ø:„b·"ˆEO~H$Î-úûûÅŒwª¿cccê ñxÜXè/þ݆°Á„Qsùòe]×5M …BétÚh<44¤¾É"f³ÙÒµ¤R©`0( Àä䤪Ìår}}}[HA^ÙmYÿ.»aÛvXiËÉ60Ö ¿€þrJ „oW²O±ÛµµµB¡pñâÅãÇßévdd$K}>ŸÇã‰D¢t-ÍÍͪÿÕÕU£ÁÀÀ€,¸¾EOOOé÷'[|u\Ùz‹Ën9ÙÆ:€ú×_fÄ£¿¶…[ßöDë©â‘𦩲ßï_ZZRå ·Û]º]דÉäòò²¹Òçó-..ª²¼^ï]ê¯E‡e·œlc@ýë/ ¿@ïF+ɨ‡ÃQº–t:ÝÛÛ+fÜÖÖ–J¥ÊvhXé®õwÛ˾ôÐ_ „;Ð_¿ß¿ººZåÆÌÍÍù|>U–‚qÙ¸ìÕ_1éB¡ ÊÙl¶š«¿Ö¢¿è/ ¿@+†°±±qeee[ýíîîÎd2R^XXˆÅb¥k‘J5-Aô×ãñ¨ÊÁÁÁžžžõõuQÛp8\:÷÷رc²abÀ¢×ÑhÔ¨¯´aÛvˆþ ¿€þ!¬Âññq§Ó¹í5WeÀ@ÀáptvvNMM•®E*ÛÛÛ5M ƒÆä‡\.—H$œ[ô÷÷onnõœN§C¡t«ëú¥K—ŒúJ¶m‡è/€íô—ñè¯máÖ7BH¶<ØQy æÁ`ß !Ùò€þæÁ)B² äý̃S!² äý̃S‚ÍC¸í­c;½·lGíçææü~¿ZÄ\æø@ËÀŒxô×¶pë[Ýèï‰'žþùÒ²m%˜È ¿€þÔ³þ:βe@ý¨Cý57æ ¾è/ ¿{ÂÍÍÍX,æt:½^ïØØXÙ¯™èëëkÜB òÒhpñâEYJ–•ŒúL&‰D\.—¦i§OŸ6¾*¹’Å y<é$f³YÕ²,E ºÝ¦d2i½êÒÆ§N2sÇòòrKK‹Z; ¿€þB=‡P}‡°˜ßúúº¸c©þ „Ãáõ-¤¥ùK†Õ—«zéGÕwttÌÏÏ …|>/•Æ×#—Õß‘‘é|mmMÇãñD"QÚ¸lyxxXW}ßvÕ¥çææ‚Á Ñm¿¨?)¨UýeF<úk[¸õm!ôù|™LF•¥Pª¿Ò`qqQ•¥àõzKKKF½4+]¯˜hSS“…þúý~£“ ·Û]¥þ¶´´[UóªË6>qâÄ•+W¤póæM]×/O&ÛÀXP{úËóPÐ_û^}‹¬´T‹hšf]???ßÕÕår¹ÔŒ‡Ãa¡¿EÓÊ6.[.ÛÛŽVýÌ3Ï´··K!‹Ó'È60Ö ¿€þrJ¨óš¯þ.--•½úk¾Ê»íÕ_¿ß?;;›Ïç¥,¿­…U›gèZ+ïÛÛ]ý­´êJ—ŠC¡Ð… @¡P ÛÀX€þúË)Á!ŒD"Ù-z{{KmUM^__—áp¸hî¯ZÐ<÷×ãñ¨µˆ×ž={ÖZGGG»»»•/,,”(l1÷WVažû[iÕe ÓÓÓÒæ‘G!ÛÀX€þúË)Á.!ÌårçÏŸ×4MÜqll¬tnƒ4H$Î-úûû9²Ò@Ú«‡6Äãq£>•Jµ¶¶:]×'&&¬õWp öÆÓ¶Õß··îÉkll4?ù¡ÒªË6fffÔü² Œuµ­¿ÌˆGm ·¾ÝeoÞ¼)&jŸ7-‰LOO“m`¬¨yýôáŽB800Ífß|óMó܆úÇ®\1?û Ð_@Á.!œ˜˜ðz½.—ËüíõÓéÔuý…^ 9è/ ¿@Ð_À<€ú{4aF<æa[¸õÍ⯇ûC8É6@í,è¯m!<¬2(ñyô—Oæ„ýÞC €þò©À<€ð£¿À{äÐ_>˜~ô—ló&yô·"̈DŽwn¿ucü›³°úV¶|uö°¶ È6@9Ó ¿@ø÷1„·¯¾ü7½CS?ñËOü«¤~ê½'ߘ½Êq!Û@Ð_@9%ÔU/ üáWzú¦~êãO4>0õžûÔE_‘àýv_%ÞC €þò©@ðB÷‡p’m €þò©@ðhß¹úû“{¢ñ+ï>qWlyôwaF<úk[¸õ­šÞzîÅžûûÞ“¿ï—1`² ä ôÐ_ „Û†P=ùái÷<ùÐ_@ŽJo=÷â~?÷Ð_À<ŽVïÜ~ëÕäÔÆë·x'ýô! 
¿GfÄc¶…[ß!Ùò`Gýåy(˜‚}'„dÈú ˜§BHÉ6ô0N „È6ô0N „È6€ÚÐ_fÄc¶…[ß!Ùò`GýÌ!!ôêÙ<ˆ%!Dýô]&„è/ ¿`CýÅw !ú u«¿ÌˆGm«¿Üú†þ’m vÔ_ž‡Ræ‘J¥‚Á Óé “““ª2—Ëõõõ5þÿì½Pc÷}ï»rª(”«*¥ŠÊhÎÓ¥õe(OèûÞfã:ÄÙ&uhJUªË»;zÔ£Ù§i³ÃÓ¤y.-׳&v7e.¡Ô—r·™°ÄuÝv-{³N‰­¤ž\û±‰îKãl¢ö9Û Ý†‡U²Ìúr.¦Šâ÷ ÿÍ?§âHXX$}>Ã0ýùŸ¿ç|Ï9ýùsÎ&R—Ev[¤¹êå EÍÊt855eš¦a¡P(“ÉÔÀŽ „èo­î_ è/GE-˜GSS“j¿¼¼œH$TåÀÀ@8^ݤ§§G^V¢¿eêËtFWVV …ÂØØØ±cÇj`GBôÝò€þÂá5Ó4'&&­•>Ÿo~~^•¥àõz÷¨¿e:Ô#ÁbÀ†apI@lyô—£öÑ<2™Loo¯ÛínmmM¥R¶«­t×ú»m‡¶/ ?ú dÈ ¿‡fÄטyÌÎÎú|>U–‚*ÛŽþ:ŽB¡ Ê¹\®’ÑßòV—þò¯oèo­Â‰È ¿Pûæ‹ÅÔ´Ñ_Ç£*{zzVWWEmÃáðÖ¹¿G•7^^^ŽF£º¾¡¡aiii«ÎnÛaué/!Dý…j5d2ÙÖÖfF0Ô“Ö×׉„s“þþþ"=Íd2¡PÈáp˜¦yîÜ9]?>>.‹ØÞù¡|‡è/ú €æ„Ð_À<0 „€þ<̈Ç<êþõ’m õ¨¿Üó`Gð»B² äý…ê3ßÿýßïíí=ÐpïÏ¿¸ØΡ¿è/ºäý…j5¦¦&ýH6ô—𣿜dÈ ¿µlO>ùäÉ“':Üè/!Nì@ªN™_æ!î{ñâEô—ð£¿À‰È ¿Pæašæââb)ƒ´>ÀbjjJ† …2™Œª___?}útÃ&R—ºýÄÄ„Ïçs8onN±ˆÅbN§ÓëõŽŽŽZßehhÈãñÈ¢Ñh.—³]¼T³2Ýú è/`6ˆÎÚúîVýï\YY) cccÇŽÝ^“p8¼ºIOOõiÆ>ø öTýÐcÕLw;<<,‹K·ù|>' ÛÅK5+Õ- ¿€þæ±WýÕ#»bÀz)ŸÏ§ÿmN ^¯W·[Õý477/,,èfº[¿ß¯ë×ÖÖÜn·í⥚•êÐ_@ó°'T8ù¡’z­Å¥Úo]ÜŠžê°µ}…Í ú èï~ÁŒøÚ0“'O&“IýR̲P(¨r.—ÛV}>ŸuüÕ:úkm_fôwyyÙ&ýÿrñRÍîÖè/ÿú†þÖ*œØ<ú»û« T‹yüÅ_üEOO~yôèQéA Xt3n«¿zö­¸r8¶Îýµ¶WÍr›H3ýÓ‘‘‘îîîl6+å¹¹¹X,f»x©f¥º%üè/m €þrT`öäóy¯×{ýúuõ2“É„B!‡Ãašæ¹sç¶ÕßõõõD"áܤ¿¿ccö½ÔÇãqÃ0<OÑ-Dm€¼cGG‡‡Þ*²¶ÍÊtKøÑ_ Û@ýå¨À<ì³<ø'_pI „@¶< ¿€ypI „@¶<ú{XaF<æQ·ð¯o„ly¨GýÌ!!ô0 „„Ð_¨Eóà1„ýôjÙ<ð]@ ~õ—ñèoÝ¿¾¡¿dÈ@=ê/÷C© óÈf³‘HÄår†ÑÕÕe}¶ðàà Ûínllœ˜˜Pî«)²áõõõÓ§O7l"y©LMM™¦)‡B¡L&£êS©T0t:@`zzºÆv!Dkuÿy@9*jÁ<ÚÛÛÓét¡PÈçóâ»úyÂgΜ^YYÙØØú"ß-z900‡W7ééé±>ú8J'ÒÿØØØ±c·×¿©©I­¤Øv"‘à’€þÙò€þÂ]0‘ÔÆÆFUnnnžŸŸ/e ýõù|º±¼^¯n G‚¥sÃ0TÙ4͉‰‰ÅÅE. 
„È6ôÔ<Òétgg§ËåR³‡­é–×ߢz­¹¥Úg2™ÞÞ^·ÛÝÚÚšJ¥¸$ ¿@¶<¼É¿¾ÁÁ˜‡ß™ÉçóR–ïÚPw:ú»°° ÊE£¿efggeÙªÛüëú[«pbòè/Ô¾yx<= ·¯¯Oªšû+•Ö¹¿ KKK[uVôôô¬®®ær¹p8lûk«¿±XL¹µè¯¬@?ôÐ_À<î¤y¤R©––‡Ãašæää¤U@ÅbÅwõ„ññq§Ói{ç‡D"áܤ¿¿_Œ¹¼þ&“ɶ¶6Ã0‚Á žüð7ó7ìeôÐ_€º0÷¿ÿýétš½LýÀ<€ú{·aF<æQ·ð¯o„ly¨Gýå~(˜;‚ß’m è/ÔˆyÌÎÎúý~õÏjÖ2 ¿è/Ùò€þB šÇñãÇ_~ùå­e$ýEÉ6ôjÐ<œN§mÐ_ô—ly¨SýeF|m›‡u”—_Âþ’m è/ÔŽy y<§ÓFs¹œò][ŠTxppÐív[Ÿ‹‘Íf#‘ˆËå2 C=4®Tãûï¿?™LêŸ...677«wôÐ_€ý2áááp8¼²²’Ïçãñx"‘¸¿£¿EOE–­OEnooO§Ó…BAz“ÊX,Vªñììl0ÔÝö÷÷ŽŽ²‹Ñ_@ö×<ü~ÿ‚*¯­­¹Ýî õ·¹¹y~~¾LÏ"Áe?~üÂ… R¸~ýºišúQÉ€þú °_æQ4½ÁápT¨¿¶‚Óétgg§Ëå*Ó›æ _øB[[›b±˜ž>è/ ¿‡fÄ׆yøý~ë ÝòÊûæv£¿ÒÛÌÌL>Ÿ—²|ßv¨8 =úè£@ P(TÑŽà_ßÐßZ…;@w„j1‘‘‘îîîl6+å¹¹9=[·Â¹¿¢ÎÖ¹¿G½»Ô÷õõ•o,\¼xQÚ<ñĵ´#!ú[«ûÈúËQQ#æ!GGG‡¾öú+ 444XïüJ¥ZZZ¤+Ó4'''Ë7.]º¤æ?pI@ly@¡öÍ#‰\¼x‘K!² äý…Ú7 .Xï}Æ%Ùòèñ˜Ç^p:¦i¾òÊ+Õ¸#ø×7ô·VáÄäÐ_À<! ¿PÇæa{ߪ?ØŽp¸¡¿€þB}˜ÇNͯüc/Ð_Bˆþú 5e{4ÅÃ)šè/ú èïö0#ý­[ýå_ßÐßZ…;@w„j1ëd†©©)Ó4 Ã…B™LFÕollÄb1§ÓéõzGGGm'? ºÝnë#-²Ùl$q¹\Ò›zÞ›ZD£š­¯¯Ÿ>}ºa)ÈKݹtåóùÇ››OÓƒ²@`zzzëoaûv»ø¥?úËI†äÐ_ŽŠ:Òßh4º²²R(ÆÆÆŽ;¦Õ¶§§gu)lÕ_õ@cYÐú@ãöööt:-]åóy©´}–ò››‚ ‡Ãºsy©›=øàƒ¹\N½ljjÒÏRN$[‹2o·£_Šð£¿œdØ@ý娨#ýÕƒ¯"‹†a¨rssó‚*ÏÏÏoÕ_i õeÞBzkll´Õ_ŸÏ§—•‚×ëÕÍÄYu3Ó4'&&+ùŠÞnG¿áG9ɰ€<úËQQGú»»z[wL§Ó.—KMuPsÊô¦ÐzZTŸÉdz{{Ýnwkkk*•ÚûÛ•_ Âþ²È ¿60#¾Nôww£¿~¿ff&ŸÏKY¾—zŸÏgíÜ:úk»¶³³³²ÈÖú ßnÛ_Šð£¿u'v €þú{{šln“p8\jîïòò²uî¯Çãѳuûúútㆆ†¥¥%ý.z®êÜ:÷׺2±XL¶è¯ô¼õ·(õv;ý¥ýôê]Ejãñ¸a¢˜¥îü Ú*^k½óC*•jiiq8¦iNNNêÆãããN§Ózç‡D"áܤ¿¿_ÞËve’Éd[[›¬C0´üPêívúKú è/`„Ð_À<€B¨+ýeF<æQ·ð¯o„ly¨Gýå~(˜;‚ß’m è/Ôˆylûß`;ýw±µŸõûýjky¿éííýøÇ?NøÑ_ö/yô—£ý=Pý=~üøË/¿¼µ¼ß|ýúu¯×«o@AøÑ_ö/ô—£ý=ýu:¶å ‰<õÔS„ýeÿ@‹aF<ú»úkm|À7åM&“'Ož$üèo}‰È ¿Pûæ±±±‹ÅœN§×ëµ}ªÅúúúéÓ§6‘‚¼Ô ÆÆÆd)YVzÐõÙl6‰¸\.Ã0ÔÓàÊ[ìÐÐÇã‘N¢Ñh.—S-m)êdppÐív[ŸµQê­·6¾ÿþûEsõO›››Õ»/--™¦IØÐ_@¡6ÍC?þwuuUÜq«þ „ÃáÕM¤¥õ¹ÄêyŪ^?½=N …|>/•bÆeôwxxX:_YY‘Æñx<‘Hlml[VOZ–­OZ.õÖ[ÏÎ΃AÝm¿¨¿~yÀÓ-!ú g>Ÿ/›Íª²¶ê¯4˜ŸŸWe)x½^Ý`aaA×K³­‹‰666–Ñ_¿ß¯;Y[[s»Ýêoss³^+[¬omÛøøñã.\xsóßÝLÓ´þ»ú‹þú 5kEVºU‹†Q¾>Nwvvº\.5cÁáp”Ñß¢é ¶m˶½íè­¿ð…/´µµI!‹ééonN„`òú è¯ Ìˆ¯ ó°Žþ.,,ØŽþZGy·ýõûý333ù|^Êò½¼°Jcë ÝòÊûæv£¿¥ÞºÔPq(zôÑG@¡PøÑf¼p}CëNì@ýÝýUªÅ<#‘Hn“ÞÞÞ­¶ª&¯®®Jƒp8\4÷W-hûëñxÔ ˆ×öõõ•×ß‘‘‘îînåßsss¶…ËÌý•·°Îý-õÖ¶…‹/J›'žxºJÜø 
ýå$@ý娨óxãæëóö‚|·V®¯¯Ÿ:uÊ0 qÇÑÑÑ­s¤A"‘pnÒß߯çÈJi¯nÚÇu}*•jiiq8¦iNNN–×_eÀ@@Úwttè»1l«¿onþO^CCƒõÎ¥ÞÚ¶±péÒ%5ÿAÃc/Ð_N2l €þrTÔ‚yܼüç|”‹©t™DþÄDëg{F"‘‹/ZkNž#üè/pbòèï60#þ0›ÇÍËßxù‰§{Ÿv¿è>ñÌÛ;ÿiær©'''½^¯Ëå²>½¢¶q:¦i¾òÊ+„ý² äÐ_¨bóxtà÷¦>OzNˆøªßçÞùÁgÿÕûʸ/À áÙ³gÏŸ?ù2‘ôößù¤\ñ¯]»†þrÙC‹õ÷È‘;“¥;ÕÎǦv(ôw×úû‰÷ÛgΜyôÑG功L&¯\¹‚þrT ¿U£¿ÕëÓ„¿&‡€lú[úûŸûß?öØcbÀgÏž=þüåË—Ñ_Ž ô÷ðêoÍŒ~@w€< ¿wKŸ>ó‰?ú£?úØÇ>V“3Ñ_@ ?Ùò€þ¢¿‡þèYuú»¾¾~úôé†M¤ /U}6›D".—Ë0Œ®®®ååeU¿±±‹ÅœN§×뵕×RË ƒƒƒn·»±±qbbB­†¦Â“SSS¦iJç¡P(“ɨúT* eÅÀôô4á Û@öCå2„þ¢¿PÝú;00‡W7ééé‘—ª¾½½=N …|>/Î*Ê«ýUšéö¶ú[jÙ3gΈ ¯¬¬ˆCK}ÑšT¸bÒ J'ÒÿØØØ±c·ý¦¦&õ;Šm' ‚»Ö_=4ãp¼Åçû©‡úÍÿñ?þôý…Ñ_ŸÏ7??¯ÊRðz½[;ÑlllTåæææ……Ý~Û© EËê÷Úº&®˜4Ð#ÁÒ¹aªlšæÄÄÄââ"‘`p`ïú«Ëÿ÷—º»;c±_CÑ_¨ý-²Om“étº³³Óårýð㯣¼­Z©pÙò–Z±Rí3™Loo¯ÛínmmM¥RãÐf ºôW¾þÛ»ìr9‹~ôÚkŸD~Iê ã­]]¿xãÆuƒ?ÿó=Ú&õòÓÞÞTò#ùzî¹ñŽŽãtþ¸×ûŽ¡¡ÿú2ÿé? ú|?åp¼ý¸úëóù¬£¹zÕï÷ÏÌÌäóy)Ëwݾ’Ñß2Ëîhô×vŶõïÙÙYY–` ¿ûª¿íí?ûå/?ñ½ï½òÏÿüÕ͉~?ÁýÊWþLÊÿý¿é·~ë7Ä’·ýÑç??yüxðÊ•¿”òw¾ób8|ÿ™3¿¥—êéyŸ¬£¿‡þèYuú«çòær¹p8¬§Øz<=•¶¯¯¯¨}nio«¿¥–Us¥Ò:÷·¡¡aii©ò+¥¿râQn-ú++@øÑ_ Û@îˆþ~ë[Ÿ-?ùA$¸±±A7øö·Ÿ×?zã¯8?¾í:;á›ßüŒU¸}¾ŸÒKýÃ?Ì0ùËÜ1ý]__O$ÎMúûûELU}*•jiiq8¦iNNNêöÒ †!ŠYêÎ¥–}sóÚÄwõ„ññqykÛ;?Ø®X)ýM&“mmm²bÁ`ð®L~ ül v(‡ZÒ_ý¯oÍÍ?ýÐC¿ùÝ曆ôWD¶»»Óårê–¥üX×”ù‘x°þ2Œ·–ïýå¨àL´ýÂÏàm@+œü`û£cÇþçÇëã¯Tî¸e~$²ûÏÿüÕ]Üj ýå¨àL„þ~² äá ô×úh_ýêŸïQ;;áOÿô1ôýô—KÙòpHõ·­í_ÿÉŸ<¬fA=Ú¶Gýýâ?éóýÔ³Ï~â{ß{E¾._žîéyú[úË=Ñߺ…ðÙòPWúû_ÿë“ÁàÏ9o1MßÿñïîQåK”÷ĉÿUMÿ•Âç??‰þV‡þBíéï¶¶Øãâ{ìàŽëo}¡¿€þ¢¿p÷a°Ð_ôýôý%“è/ú‹þú‹þ¢¿è/ú[ÕúË=«ES©T0t:@`zzZ׺Ýnëc)DO§¦¦LÓ4 # e2ÝxhhÈãñH'Ñh4—Ë©ÊX,&•^¯×ú\ŒR«°ÖÛv( de|>ŸÃá üè/m è/úËeÕØþ655éç' U©J¼²²b}(±Ø§È¨T …±±±cÇn¿Ýððp8–ú|>Çu'úaÅ‚*×ßRJƒ|PÛ0áG äýE9*8íXMÓœ˜˜X\\´V677ÏÏÏgîÈ‘õõuU6 C•ý~ÿ‚*¯­­¹Ýn݉®—Þ*×ßRJqb. 
Ùò€þ¢¿°{ýÍd2½½½¢˜­­­©TÊÖP·ÕV+zfB%“lë+ìðm è/úËQÁ™hÇú«™õù|ª\jô×ö¥ßï_^^ÞÚa©Ñ_ÑÙB¡ Ê¹\Îvô×¶Cô€ly@Ñßà ô¬ýÅbÊtE=ªTsÅA‹æþÚÊèÈÈHwww6›•òÜÜœt¨êÕÜßÜ&ápX·?zô¨¬†°ôF·êo©«E ?Ô*dj5Jÿþ?!Y½_²þè/ ¿éo2™lkk3 # êÉÂÀÀ@CCCÑlõW k p8Ò¡ªuŽÇãÒ³XµõΙL& IcÓ4Ï;g;)¶CîŒû§¿µñ…þú»½þ08µÊÚk7®ê¹õïlóoÓk Kßzü/Õ×÷žþ¿~êÿî·ÿóoÿ{QÉêûúô“è/ ¿è/ÒL‹©´œ‹žyû{n¼ôµJÚŸ?þìÙ³gΜyì±ÇþhxüÑ?ø£}FÖ\Ö_~ ù]._¾Œþú‹þú õÅ·Ÿ|AM ¸ä}àÕ?øÓ7n¾^¦q2™”kå£>*ù±}à÷?ô›ÛgdÍeýå·ßåÊ•+èïÁÁ=Ñߺ…ð£¿dÈÃacñù/?mÜûƒaà·Ý—4îý¿åwJ ËUòÉ'Ÿ”ËåÙ³gÙÎüüÙgdÍeýå·ßåÚµkè/—=ôý%ül `‡B=æA°úzöí¶ƒÁW®\‘ e2™<þüãûÀcï8öø>#k.ë/¿…ü.ê6Jè/GEýê/_|ñÅ_|ñ¥¿>óŽ÷«ÂÂ3_Ô—KñÅk×®‰8^¾|ùÅ}àqã^ÜgdÍeýå·ßeuuýEýeô—ðm¨Ç<|ó㟾ðcïúÁ­ÁÞúîÏwüï3_ÿÜÏþÆ7ÿà‰òSï8L"Býå’@¶<û~Æóþ¿>úàE÷‰/ÿÛß]zñ+ßÏýˆþòáýÂ@¶¡Öòð·CçÔ$‡»2Ü u¤¿€þÜunß÷÷m÷1Ü‹þú °ï0Xw—µ×n\}ê»ß^dS ¿€þÜýL0.è/ ¿€þpb„êÔ_>Ü ¿|²ÎòdÈpb¬#ýeì`S°C<Û ýô—S[ÁàÙòl7ôÐ_NmdÈã€þúË%€ly¨jýåà úË'{² ä ŽôÐ_ôjD9¼ <ÌëVo08è/ ¿‡WñæÎ$ã€þúûfm¯™àÄÈF¨Yýåà ú[·úKø9Ë×*dÈ'Fô—½[ûú+*966æõzNg,[__/RÌl6‰D\.—a]]]ËË˺ÁÔÔ”išR …2™LùzahhÈãñÈE£Ñ\.§ÛOLLø|>‡Ã±Óu+ò`ýrccCÚËR²ìèèèÖöeV’ð³)Ø¡äí†þ²wk\{zzV7‘Âàà`‘)¶··§ÓéB¡Ïçå§¢•ºXìÊÊŠüH$õرcå뇇‡Ãá°ÔK?ñx<‘Hèö>ø ¶á­[)ý•–Ò^ú”ÅÝmõ×v% ÿNapˆË6`»¡¿P}ú»°° Êóóó>ŸÏÖ,"‹ºŽ•zÃ0Ê×ûý~ýFkkkn·[· Ýݺ•Ò_i™ÍfUY ¶úk»’„8ɰ€<0.€þrTÔ¾þZ_Z-VÒétgg§Ëå:²‰ž¢PJ=ËÔ[)ÕÏŽÖmëP¾áN2l €þòá¦öõ·ü«ß™ÉçóR–ï»VOéGÏ®P=·]7qèB¡ Ê¹\ÎvôWz¸ãúKø¡V!Û@ý…ºÐ_5MV°_ëñxT‡"¯}}}»Öß‘‘‘îîne¥sssÖ9Ä»^·£GÊï+,ëF­s#‘ˆZ°··÷Žë/ ¿PúûÆÍ×çÿìù^d~£££ê† ñx|cc£ÈS©TKK‹Ãá0Msrrr×ú« 8HWÉd²ý-¿n™L& ©u;w___?uê”a²¬ôPù¬ Ø) ú ‡Qo^þÆs¾JƒÅTz«bÞcàN¬Ûõë׏ Ï]É$ã€þÂêï7_ÿ»G>õÜ; ¾òõŸžÙ'Å<„ú;00Ëånݺ‡¥LxÐ_NŒè/njYo^þÆËHÁKA}ާ¼÷²Ú°lÊd›2y l»Ý0ô|ô×ý¾§ßúnF ?Ùòpð0iý…Õ_=÷÷ÆK_{é—ú Á?ö®‹?y&üdÈ ¿|¸©eýU5êΗš~¹Ì€ðm €þBè¯æÆK_S·?Ûzß_ôjMoÜ|ý[ɵ×n°%áNÁà ¿pxõà€3 À¸ ¿€þú À‰ªSùpƒþòÉ8Ë“m À‰±Žô—½‹þ²#€MÁòl7ôÐ_NmuƒCdȰÝÐ_@9µm Œ ú è/—² ä ªõ—7è/ŸìÈ6€:Ò_¨=ý=rdO©Ûvñ=öè/ ¿è/ÜMôÐ_ô—L0.è/ ¿è/ú À‰ªZùpS-ú›J¥‚Á Óé ÓÓÓº~ppÐív766NLLh=šš2MÓ0ŒP(”Édtã¡¡!Ç#D£Ñ\.§*766b±˜Tz½ÞÑÑQm·Ešk[oÛ!Ÿì9ËÙòÀ‰ýeïÂ^õ·©©IÕ,//' UyæÌ™®®®••QXñ`­§"£RY(ÆÆÆŽ»ývÃÃÃápXêóù|<×È‚===«›H¡rý-Õ!§6ÎòÀòÀvCÙ»°Wý5MsbbbqqÑZÙÜÜU.5úkûÒï÷///oí°Ôè¯èl¡PPå\.g;úkÛ!—² 
äÐ_>ÜÀ^õ7‹)Óýõx<ªRÍý-šûk«¿###ÝÝÝÙlVÊsssÒ¡ªWss›„ÃaÝþèÑ£²bÀÒ4ݪ¿¥:ä“=Ùòè/À^õ7™L¶µµ† õäa`` ¡¡¡èζú«„58ŽŽŽéPUŠ:ÇãqéY¬Úzç‡L& …¤±išçγaÛ! ¿{Õ_€ýƒÁ!@ý2 À¸ ¿€þú À‰ªZùpƒþòÉ8Ë“m À‰±Žô—½‹þ²#€MÁòl7ôjJmŸŽ„_ÃàÙòl7ôª[ñ]Âd€<0.€þrT ¿@øly¨{ýåÃMµèo6›D".—Ë0 õ¤7ý£ÁÁA·Û­Ÿ|qÄB‘ ¯¯¯Ÿ>}ºa)ÈKÝ`jjÊ4Mé< e2UŸJ¥‚Á Óé ÓÓÓ|² Û@ª^¡Zô·½½=N …|>/¾«Ÿ0¬ž{¼²²RÉsÂáðê&===òR7ˆF£Ò‰ô?66vìØí5ljjR«!¶H$Øw€þÂé¯‘ÔÆÆFUnnnžŸŸ/Ž] ýõù|º±¼^¯n G‚¥sÃ0TÙ4͉‰‰ÅÅEöZmÃà ¿pèô7Nwvvº\.5«ÁápØšnyý-ª×š[ª}&“éííu»Ý­­­©TŠ}WŸ™`\Ð_¸ úë÷ûgffòù¼”å»6ÔŽþ.,,¨rÑèo™Å…ÙÙYY–}‡þpb„ª×_>ÜT‹þz<= ·¯¯Oªšû+•Ö¹¿ KKK[uVôôô¬®®ær¹p8lûk«¿±XL¹µè¯¬Ÿì9ËÙòÀ‰ª^Ù»Õ¢¿©Tª¥¥Åáp˜¦999iV±Xñ]}ça||ÜétÚÞù!‘H87éïïc.¯¿Éd²­­Í0Œ`0X{“?›‚ 䡿Y{íÆõO=·þ¶úËÞ­>ý¿08D¶<Ô0‹©´l„gâ½7^úÛ ýeì² ä¡ö¹:ò§²äk¦¥çêèSoÜ|qôÐ_vÙòPËüß\¼ðcïJ÷¾Ðö¿=óö÷¤Oþ‡ CMé/nÐߺ…ðÙòP‡|{ø ög~¥òÁ`¨)ý…C¨¿|ñÅ_|ñÅ×|%ü^UXxæ‹Û6F`Ð_`ô¨&²—^zÚñîÛâûÖ{/þä/}¶ùW^=;­F·½:³Ñ_@¡^2 PKîûÏûŸq½göWÿ¯¥¿òýü÷*¿:³ Ñ_@ý¨þñügÕ ¯u¸w§Wç³gÏž?þòåËlϪ×_þè‰þÖ-„ý%Û@êußßgœ¿X4ÜËÕ¹~õ—ËúËŽ6;ÈC­²öÚ«£O}÷Û‹\Ñ_Ž ôÿ`pˆly®Îè/p€qI Û@ØP\Ñ_àã’@¶ŸÏÇãq݉,ØÓÓ³º‰*×ßR¢¿€þÀ^¯Î¦iNLL,..Z+›››ççç‹ðÈ‘õõuU6 C•ý~ÿ‚*¯­­¹Ýn݉®—Þ*×ßR¢¿€þü‡vwuÎd2½½½¢˜­­­©TÊÖP·ÕV+‡£BÍ-U_ªCôÐ_€J3 À™pÛ«óìì¬ÏçSåR£¿¶/ý~ÿòòòÖKþŠÎ UÎår¶£¿¶¢¿€þ ¿{½:Çb1eº¢¿GUª¹¿â Esmõwdd¤»»;›ÍJynnN:Tõjîon“p8¬Û=zTVC XúF£[õ·T‡èïàžèoÝBøÑ_² äöxuN&“mmm†aƒA=ùAhhh(ºóƒ­þ*a ‡£££C:T•¢Îñx\z«¶Þù!“É„B!ilšæ¹sçl'EØvˆþrÙCð³)Ø¡@€«3ú `숺„Á!² ä¸:£¿ÀÆ%€ly`CquFŒKÙòÀÕªZù£'XÝBøly®Îõ¨¿Àu¶}:ú `ƒCpu®aßEý…šÊ$gBôýôÐ_®Îof³ÙH$âr¹ ÃPOzÓ?t»ÝúÉG,ÙðúúúéÓ§6‘‚¼Ô ¦¦¦LÓ”ÎC¡P&“Qõ©T* :Î@ 0==þîþè‰þÖ-„ý%Û@`Wçöööt:](òù¼ø®~°zîñÊÊJ%Ï=‡Ã«›ôôôÈKÝ J'ÒÿØØØ±c·×°©©I­†Øv"‘@¹ì¡¿@øÙìPv(‡»puImllTåæææùùùb),¡¿>ŸO7–‚×ëÕ ôH°tn†*›¦911±¸¸xh7&ú è/á?ì08D¶<Àî®Îétº³³Óår©Y ‡ÃÖtËëoQ½ÖÜRí3™Loo¯ÛínmmM¥Rè/Gú „È6Ùòp@Wg¿ß?33“Ïç¥,ßµ¡îtôwaaA•‹FË,.ÌÎÎʲè/Gú „È6Ùòp@WgÇ£§áöõõiCUs¥Ò:÷·¡¡aiii«ÎJƒžžžÕÕÕ\.‡­smõ7‹)·ý•@w ôDëÂdÈìñêœJ¥ZZZ‡iš“““Va‹ßÕw~ÆÇÇN§í‰„s“þþ~1æòú›L&ÛÚÚ ÃƒL~ô¸:£¿ÀP‡¸:£¿Àd€3!Wgô8Àýàê U­¿üÑ“¬n!üè/ÙòUzu¶½¯0úËe ?›‚ @j\§£¿€þþÃCdÈÔóèïwhôÐ_Â@¶ö¶r}}ýôéÓ ›HA^j%•6>ŸÏápÈËX,æt:½^ïèèèÖÉR˜šš2MSÖ$ e2U_jA«ûjʯRíè/ôDëÂdÈìñêÜÞÞžN§ …B>ŸµÑTõê¡Ç+++Ö‡ÛV 
„ÃáÕMzzz¬O<~ðÁs¹œöfõ`dÕÌV£Ñ¨t.+366vìØ1ë‚Ò,(¦n;Ð[TYj•jGý€½_Å;U¹¹¹y~~¾¨m¥ÏçÓ•Rðz½ZIÅe­Ë.,,èf¶ú«‡ieM ÃÐýg³YU–B%ú[j•Ð_@¡v`p`wWçt:ÝÙÙér¹Ôä5Qáͳi+©ÔÚZT_êe©[@l[_IçE«„þú õ’I΄¥®Î~¿ff&ŸÏKY¾kqÜÑè¯uX×:ú[´ì¶£¿¶ ký•*ýµ]%ôÐ_@êýêìñxTÍòòr__ŸöH5ÍW*·Îý-ªÔ“zs¹\8¶Îýµ¾‘žÂ«šU®¿²`$Q öööÚêoCCÃÒÒRÑ{m]¥ÚÑ_þè‰þÖ-„ý%Û@`WçT*ÕÒÒâp8LÓœœœ´Ê¥X£heѶV®¯¯' ç&ýýýbƶ:+õñxÜ0 îRw~°Õ_éÿÔ©SzAÛ™ ãããòîÖElW©vô—ËúËŽ6;ÈÔÃÕùúõë@àÞýô—ðv"Û@ †¯Î¹\îÖ­[»›É€þ—² ä«s5199éõz].W,ÛÅ3,Ð_àã’@¶ÞBôtjjÊ4MÃ0B¡P&“ч††<tFs¹œªÜØØˆÅbRéõzmŸsQôÒ¶`ûŽ¥z®#ýåžèoÝBøÑ_² äöxunjjÒ=N$ªR=ÜxeeÅúpc±L±[©, cccÇŽÝ~»áááp8,õù|>ëNô“‡)ìNmßQ??YzŽD"õ¨¿\öÐ_v°)Ø¡@`wWgÓ4'&&­•ÍÍÍóóóÅFxäˆ¾ç®ø¨~ø°ßï_XXPåµµ5·Û­;ÑõÒÛîô×ö}>_6›Ue) ¿€þ²#ê‡È6ØÝÕ9“Éôööг¶¶¶¦R)[CÝV[­8Ž 5·ýÝQè/ ¿\È66T¥WçÙÙYŸÏ§Ê¥Fm_úýþååå­–ý?. ªœËåv¡¿ÖÑ_y ôÐ_vÙòÀ†ªôê‹Å”éŠþz<U©æþŠÔÍýµ•Ñ‘‘‘îînå£sssÒ¡ª×3t…p8¬Û=zTVC XúF£»Ð_é9‰¨ž{{{ù×7@ëÂdÈìñêœL&ÛÚÚ Ãƒzòƒ000ÐÐÐPtç[UGGG‡t¨*Eãñ¸ô,Vm½?C&“ …BÒØ4ÍsçÎíB×××O:¥{Ös‚ëHý€ú¼:_¿~]Ìý0¨;¨««óÀÀ@.—»uëV8–2ú `@&8ÖòÕyrrÒëõº\®X,¦oކþI΄\kQù£'XÝBøÑ_² ä¸:×£þrÙãcG›‚ äöõê¼7;lïˆþúË%¡š`pˆly€ZÕßòo½O+†þúKøÈ6ôw7\»víž{î© áFýå’@¶ŸÃáx³ôƒÖ¬õCCCÇétF£Ñ\.§*S©T0”Ê@ 0==­G"‘K—.©ÆÆÆ¼^¯´±Þ­L âÇ ›HA×Û¾õàà Ûí.õ,º­Xuë/ ¿°ký}ðÁµ,n«¿ÃÃÃápxee%ŸÏÇãñD"¡ê›ššTÏËË˺²P(ˆÔ*£•zzzV7‘‚ˆ¬j300 êzý ‹­o}æÌ™®®.yë ½øîV ýôª‡v­¿¢Œ[}·”ƒúýþ……U^[[s»ÝªlšæÄÄÄââ¢uñÙÙÙÎÎN݃^p~~Þçó©²䥮÷z½¥Þº¹¹Y·Üº†;Z1ôÐ_¨ñLp&,£¿•¼´¬¨)B&“éííélmmM¥RªrxxøÌ™3¶=†Qa}ù[@ìnÅÐ_@ý@€Xc¡PPå\.g;ú»¼¼\æfggõÈngg§¼Ô=”ýµÖïeô·ò«zýåžèoÝBøÑ_² äî¬þ=zTÚˆ‹JF£Ñ­:22ÒÝÝÍf¥<77‹ÅT½”›Šez<ž77g 444h™Vss›XçþJAÍ –úp8¼íÜ_Y1Û¹¿•¯X-è/—=ô—l v(¸#ú›ÉdB¡Ãá0Móܹs¶D4€´éèèH&“ªR mmm†aƒA5ÇàÒ¥K‘HÄúF£££êÎ ñx\VÕ¯¯¯' ç&ýýýºÞö­EŽE©KÝù¡ÂCýå’P 08D¶<Àa»:‹ËŠ@—òìCú è/á Û@¸:ïŒööö«W¯¢¿`@øÈ6‡z¼:;Nô÷ŽÁ=9ÀêÂdÈpu®Gý0Øöê|w¿Ð_@î$ æ«3ú `õ~b@Ñ_à@Ð_ô·ø£'XÝBøÑ_² äÐßzÔ_.{`ì`S°C<ú‹þúË%¡Ž`pˆlyôýô—KÙòÀ†BÑ_@¹$m \·ðÆÍ×çÿìùÎN|“}ô—ðm 5|u¾yùŸý™_‘‹©4ŸaªCý€^߸ùúÕѧþÊÿ«ê©lÿøé™»µnè/ ¿;†Á!€Ê¯Î7/ãrx é¼ï™·Ý—|Û{žyû{þiæò]\7ôÐ_€Ú?±üÕùÑßûTøÃÉŸþ€øî…·¼Kjžý‰÷>Ûxÿ~»/ú è/'V€»pu¾»_èï„?z¢¿u áGÉ6¨üêüèÀïM¿÷Á§:ÕЯ|}îg{.ºOÀèoÕÁÏý%ül `‡y¨«óâ éßuêéM ¾ôÎ. 
ýô—KBõÁàÙò»¸óÕßùÃϸOÀÐ_Ž ô?ÙòpX®Î‹/¤Ÿûéöõ¾¿è/Gú „€ly8\Wç7n¾þ­‰äÚk7Ø’oò¯o€þ~² ä«3ú ``\Ñ_à・ƒC\Ñ_à2 À™«3ú `€þpu†ªÖ_þèÉV·~ô—ly®Îõ¨¿\ö8ÀØÀ¦`‡y®Îè/p€qI¨#"Û@€«3ú `‡û¸:r„ðm \Ñ_Ž °Z3Tô€lyàêŒþÞaø£'XUøî~¼á‡Z…lyàêŒþú €þ¢¿PC˜èàÔÔ”iš†a„B¡L&£444äñxœNg4ÍårRÓÚÚzõêÕÛowá‚*HÍ=÷ÜSÔm6›D".—KºíêêZ^^Ö?t»Ýj4E†º¾¾~úôé†M¤ /·]gëï566æõzeýc±˜uÙòkXIç apýE¡úôWìvee¥P(ˆ/;vû-†‡‡Ãá°Ôçóùx<žH$¤ò#ùÈôô´E766¤,²¨~j¥½½=NKŸ²¸ø®¨ª?s挸¦t+ËJ}‘’½uXݤ§§G^–_ç¢Nd½ìÖ÷*µ†•tf€3!ú‹þÂaÔ_=2*Âg†*ûýþ……U^[[s»ÝR˜™™5”ÂøøxKK‹Rá“'OJ}™·nU¹¹¹y~~~ë:ؾôù|º±¼^oùu.êD¯¿,+]Ù¾×Ö5¬¤s@ÐßzÔ_þèY3úkûòÈ¿ÄápHe>Ÿ-–BGG‡¼Ë}÷ݧDYꋺM§Ó.—˺x)û,³Özm¢¥Ú—©Üºl…khÛ9áGk² äýE¹ìÕ¯þŠÔZ'ìjxàçŸþÞ{|O¥RúЇ¶6“Ågff”ËwÝíNG­#¸ÖÑßJô·üèo©5¬¤sÂϦ`‡yôý…ZÓß‘‘‘îîîl6+å¹¹9=5vllìž{î9wå{KKËøøøÖn=Z qè¾¾>Ý­šû+•Ö¹¿ KKK[×A¨ù»¹\.[çþV¢¿²lnÛ¹¿¥ÖýÝ ¡;@ýEáð`oÜ|}þÏ^ï•è¯2à@ àp8:::’ɤª|õÕW øuë–”å»üTj¶¾W*•3–Ÿš¦999iíV,V|WßùáÍÍ™ÄN§ÓöΉD¹I¿úO»ÊõwttTݹ"o]¶Ô¢¿ÀI€< ¿è/GEÕ`7/ã³?ó+Ò`1•®—ãg?ïïKøÝòèo=ê/ô<üØ7_¿:úÔ_ùU~$_ÿøé™::~öS ?Ô*dÈú‹þBµ`7/ãrx é¼ï™·Ý—|Û{žyû{þiær]m+§ÓI`ýE¡Æ°G~ïSá'úâ»Þò.©yö'Þûlãýõæ¾p`08°íÕùìÙ³çÏŸ¿|™k1ú ûp€Ù~}õÃ#â(RP¦B™2eÊ”)S¾ƒåR×_ýõøã'“É+W®`2è/ìËèïô{|º¡S ýÊ×ç~¶ç¢û£¿pW2 PWœ={ö‘G9óóxä‡H¸ï“O>ùâ‹/^»vMTõúË=¡þêÙE‹/¤_|ש§7%øÒ;»0`ÂþÙò°ßœ?^®Â½ãý ©I&“r]¾r劺•>T·þrÙ;Ìú«jÞ¸ùú•ßùÃϸOÔá?›Ø¡@˜Ë—/Ë%øqã^ü!R#â{íÚ5qßÕÕU6ú û®¿šÅÒÏýôuuß_Â008D¶<Û ý…迊7n¾þ­‰äÚk7Ø’„€ly`\ý娨}ýÂ@¶<úˇô?Ùòè/ ¿è/ ¿VôÐ_ “Œ ú è/ ¿œ¡ªõ—7è/Ÿì³<ÙòœëHÙ»è/;ØìP ÀvCýåÔVG08D¶<Û ýô—SÙòÀ¸ ¿€þrI Û@ªZùpƒþòÉ€ly¨#ý…jÑßT* Ng ˜žžV•Ùl6‰¸\.Ã0ººº–——s¹\SSÓêêª^Pj¼^¯ªòx<ÒI4•z›¼9255eš¦t …2™ŒþÑÖe[[[¯^½z{µ/\P©¹çž{ØËè/ÀžôW¤VÕˆã& UÙÞÞžN§ …B>ŸŒÅbRù‘|dttT/(å‡zH ÃÃÃápxeeEÇãqÝI‘þŠÝJésllìØ±Û«j»¬¼‘ñÅÅEQð )‹=Ûö ‡‡ý…C§¿¦iNLLˆh–ZP„µ±±Q sssÒX×·´´d³Y)øýþ……U¹¶¶æv»mõw}}]wh†*Û.;33#®,…ññqy¥Â'Ož”zör-e€q@á.èo&“éííïlmmM¥Rª2Nwvvº\®#›8Uÿ¡}èÒ¥KRï§NÒjkE7.Ò_Û—¶ËæóyÑb)tttÈÚÞwß}J”¥ž½ŒþpbDùp{Ò_Íìì¬ÏçSeqÍ™™¥›ò]Ûª´Q2*ß_}õUÝxyyy›¼–ÐßRË>ðÀÏ?ÿü½÷Þ+eù.^.æÍ'{Îò@¶ŸßÕ>s挸æÊÊÊÆÆ†ÔÙdÑËp8¼ºIOO¼Ô ¢Ñ¨t"ý‹ã;vÌÖPe½ìÖ÷*µ†v^ycÂÇapˆË6`»¡¿pèô׊¨acc£*777ÏÏÏ—²É¢—>ŸO7–‚×ëÕ ôh®tn†­¡.,,èe¥+Û÷Úº†v^ycÂ@¶<0.€þrTÔ¾þ¦ÓéÎÎN—Ë¥f58Ž2öYJ‹êµ\–j_¦rë²®aùÎ+iLøÈ6@ùpSûúë÷ûgffòù¼”å»öÂŽþZGp­£¿•jùÑßRkxhõ—ðC­B¶<ú µ 
¿GÕ,//÷õõi/Ts¥Ò:÷·¡¡aiii«DJ57—Ë…ÃaëÜßJ U–Ímb;÷·ÔZýô¯þ¦R©––‡Ãašæää¤Õ ÅbÅwõ„ññq§Ói{ç‡D"áܤ¿¿_ŒyG†:::*Ž+ËÆãñ­Ë–ZCô÷ðÃà ¿pèô÷îG­×L0.è/ ¿€þpb„êÔ_>Ü ¿ât:ùdÏYÈ6NŒPõúËÞEÙÀ¦`‡y¶ú ‡]ïÔT„zžÒ@ø5 ‘m ÀvCýåÔ@¶<0.è/TþVèµè/ÙòP_úˇô—OödÈ@é/T‹þnllÄb1§ÓéõzGGGµ¶f³ÙH$âr¹ ÃPSR«)ÓLµ“>¥gé}}½|ûT* ¥q ˜žžÖ«744¤žˆFs¹{ýØ“þêç RÐ^ÛÞÞžN§ …B>Ÿ—6¢°Úk­‹—ifíV?ʸTû¦¦&ýdãD"¡*‡‡‡ÃáðÊÊŠ4ŽÇ㺪‡ý…C§¿ÍÍÍ ªŸ—²|×õE^[¦™íèo©öšÙÙYkc=9˜OöœålyàĈþ²wá诚û›Û$kõxÜ ¿µÌ7?þé ?ö®Ïü䉿þ_bÏ6t¦Oþ‡¢Á`µ Ùòè/ ¿umÀ²IŸ~뻿úÍó×Ê ú wGùÚ¿¯Ï¼ãýª°ð̉¢†Á!@ánê/£¿wŠÅç¿ü´q¯vßgÞÞyÉûÀÿû±O1ú»£L0.è/ ¿Õä¾Ï¼í¾¤ó¾—ùÿ\zñ+ßÏ-ƒþpb¬kýåà ú[“|ûÉÔˆo™á^ÂÏY¾V!Û@81¢¿ì]ô·¾P÷ý}Æù‹å‡{ ?›‚ äØnè/ ¿UÏÚk7®Ž>õÝo/þÊapˆË6`»¡¿€þrj Û@ôÐ_. dÈ@Uë/nÐ_>Ùm u¤¿€þ ¿€þBÁà ¿€þ™`\Ð_@ýàÄU­¿|¸AùdœåÉ6àÄXGúËÞ­ý=r„?D~6;ȰÝÐ_ö.úËŽ‡È6`»¡¿P‡ú[‡~LølyÆÐ_@ÙdÈ@Më/nªE‹ôT¿”ÂØØ˜×ëu:±Xl}}]ÕollÈK©”ŽŽêöÙl6‰¸\.Ã0ººº–——U'ýCCCGzˆF£¹\ŽOödÈ@-è/Ô€þöôô¬n"…ÁÁAU/k½nßÞÞžN§ …B>Ÿ—6¢È¶ý‡Ãá••ilj;Ð_8ú»°° Êóóó>ŸO•›››­õ¶sD‚mû÷ûýzñµµ5·ÛÍŽ«aôªI­õ†a”oŸN§;;;].—šêàp8Jµ·¢›Af€q@á.è¯h¡PPå\.·ëÑ_¿ß?33“Ïç¥,ßKi´4SÓ‚Lpb„šÒ_>ÜT‹þ=zT*Å€ÅJ£ÑhÑÜßÜ&[çþªúp8¬Û{<Õ³ôÓ××§ë–––ôÛŒŒtwwg³Y)ÏÍÍé)Â|²ç,dÈ'F¨nýeïV‹þf2™P(äp8LÓ?88¨ŸB\JÕCW7‘ÂÖ¹¿e†i›ššôƒ‘‰„mû ×gxx8¯¬¬H³x<®{+Zg[?ç`‡y`»¡¿ìÝúÕ_+"åõ·¹¹yaaA•çççw¤¿¦iNLL,..Úö¼£õñûýz5ÖÖÖÜn7á?„08ÄeȰÝÐ_8tú›N§;;;].—u8åõwÛú2:›Édz{{EU[[[S©”mûÊ×ÇŠnFøÈ6ÆÐ_Ž ÎDåô×ï÷ÏÌÌäóy)Ëwm™"”…BA•s¹ÜýÕÌÎÎú|>Ûö¥Ögk3=M¹äAÂä² äÐ_>Ü ¿Eúëñxô|ܾ¾>m‡G•ÆbÀRF‹æþæ6 ‡Ã[õ´¡¡aii©HCU!‹‰1+ý•÷µm_j}ŠšŒŒtwwg³Y)ÏÍÍé)„€lyôÐßrú›J¥ZZZ‡iš“““Z73™L(RõçγÞù!†!žºõÎÂøø¸Óé´²êr2™lkk“eƒÁ žüPÔ¾ÔúlíV 8HËŽŽé™ ¿€þn¯¿ûƒC€þú d€q@ýô€#Tµþòáýå“=p–'Û@€cé/{ýeG›‚ äØnè/ ¿œÚê‡È6`»¡¿pèôwuuõ£ý¨iš†a466F"‘/}éK·Cf÷„kåµk×zzzdÙãÇ_ºtiÛ•zEùz‡Ãár¹:::¬wÿÝÝR„€ly`\ý娨_ýíêêz衇ßܼ§ïÅ‹ï¿ÿþJô÷úõë"ÍSSS²”¼üú׿F·ýQ©‡®m[_(^}õÕ‡~ØëõêÇÎín)Â@¶<úˇ›úÕ_Ã0ôÃ+QRë#Ü&''m,ó£]ë¯fddD?ãmwK~² äÐ_¨_ý=qâÄ©S§ÒéôV .¯¿n·{uuÕö½Êühïú+=766îe)@¡~õ÷Ö­[?üp0t¹\~¿ÿ¡‡’šJô×0ŒRïUæG»˜û[¦ÿÝ-ƒC€þ¡Ó_+óóóýýý'Nœ(#‘‡CîÖèïÊÊŠô¿—¥àd€q@á.ë¯B’«W¯Z$/MÓTåX,611aÛC™í]?þñïbî¯u)@81¢¿|¸©_ý=qâÄ¥K—Ö××ßÜ"èììT?9~üø+¯¼¢^~ýë_¿÷Þ{¥R½T·wøä'?™Ïç°Çÿîïúúú¶ýÑ^îü òýðÃû|¾ÝùaëR„Ÿ³½¼¼¬:11 M¤P4¦{íÚµH$¢–=zôhÑ}m´»ûþ N§3 
ZWowK~6°C<°ÝÐ_önýê/TÈÚk7®ê¹õï¬þÊapˆË6`»¡¿€þV1‹©´lÆg\÷Ýxék„8Éy`#0.€þrT ¿µÏÿ÷—-[R¾>óŽ÷ÿíÃòÆÍ× ?p’ò€þòáý­e–^üJòÇQ¶çÓoy×Óo}÷ßûÛEƒÁ„j² äÐ_@ëÝ€ÕWÒ¸·Ì`0 ¿pwô—¯ýûúÜ¿«ÂÂ3_$Їý…»©¿ŒþÞ)¾1ðø…#ïÚœü—Þwúù{"Ÿû¹ßøæ<ÁèïŽ2 À¸ ¿€þVûþ•ùkÿå½¾èþ¥/ÿÛß]zñ+ßÏ-ƒþpb¬kýåà ú[£î;q{¶Céá^ÂÏY¾V!Û@81¢¿ì]ô·¾¸}ßß·ÝW~¸—ð³)Ø¡@€í†þú[õ¬½vãêèSßýö"ᯇ¸ly¶ú è/§6² äq@ýå’@¶<Tµþòáýå“=ÙòPGú è/ú è/Ô ú è/IÆýôÐ_NŒPÕúˇô—OöÀYžlyNŒu¤¿wwïÎÎÎúýþ#GªxŒ|+þ~6°C<°ÝÐßúÚ»Z?þòË/W÷þáïR¹£¿œÚ ‘m ÀvC÷Ë‹p:U¿ƒ·üjÛz0úË© €ly`\ý­Sý­êiè/—² ä úôwwnÄ𦦦LÓ4 # e2U¿¾¾~úôé†M¤ /UcU­õ÷ß2™Ôý/..677çr¹JÞTòxÙm €þÖÊou䈈æÊÊJ¡P;vì¶Ï „ÃáÕMzzzäey•Ô…ÙÙÙ`0¨ô÷÷‹YVø¦ÃÃÃò¦RŸÏçãñx"‘Ê|ä#ÓÓÓʤEmÅY¥,ö¬~j¥½½=NKŸ²øàà ¨mùu–6ò«éßqëïÂè/ ¿5¨¿jdWq4 C•}>ßüü¼*KÁëõV¨¿onþœ¦½~ýºišJX+yS¿ß¿°° Êkkkn·[ 333âÊRoiiQ*|òäI©/ó{I·å×¹¹¹Y¿üŽè/Ô ú»½þn«³‚6ÔJÚá _hkk“B,›˜˜ØÑ›Zq8R™ÏçE‹¥ÐÑÑ!yß}÷)Q–ú¢nÓétgg§Ëå².^ùïˆþB ÀÔ@ÆÐß]ê¯Ï糎ŒîhôW…B>úh ( •¿©H­ž°kåxþùçï½÷^)Ë÷T*õ¡}hk3Y|ffFi±|×ÝŠëÕÈårŒþú À‰jAwý¯o¶/õ¼XñÅp8¬çþ644,--m«¿/^”š'žxbGo:22ÒÝÝÍf¥<77§'ïŽÝsÏ=çΓ²|oiißÚ­ÇãQ¢)Ý××§»=zô¨8¨°ÔG£Ñ¢ß1·‰üŽ[—¢_ýå“=gy Û@81¢¿U¿wK™èúúz"‘pnÒß߯çïŠwJͶ¥—.]Róvô¦Ê€€ÃáèèèÐwxõÕW øuë–”å»üTj¶v›J¥ÄŒå§¦iNNNên3™L(RõbÏÖ;?ÄãqéY¼yë¶þ²è/§66°C<°ÝÐ_ö®=‘HäâÅ‹u~D¡¿œÚ‡È6`»¡¿wcM.\°Þû ýE9µm Œ  ¿5{T8NÓ4_yåÎDè/—² äÐ_>Ü ¿@øÈ6@ý@ýØ ú»«_õÈ‘;Ø ýEád€q@Ñ_ôÐ_NŒPú»»7©T* :Î@ 0==­¤V£Úd³ÙH$âr¹ ÃèêêRÏ%ÞÚLòx<Ò[4Íårè/úË'{Îò@¶ŸÔÏ".j6<<‡WVV¤Y<×½ý‹Xô8µ±)€ äí†þÄÞ5Msbbbqq±HRKµ nll´mæ÷ûTymmÍív×ù…þrj;x"Û@€í†þnC&“éííUmmmM¥R¶^›N§;;;].—+!–%ÑIDAT©u8¶ÍŠtu3ôýåÔ@¶<0.€þº£bvvÖçóÙz­ß™ÉçóR–ïú§[›©iÁå6"“€KÙòèï]üp‹Åæçç•þz<UÙÐа´´¤ÛH½žÜ××§mµ¨ÙÈÈHwww6›•òÜÜœž"\·g"ô—OödÈ ¿‡Žd2ÙÖÖfF0Ô“ÆÇÇN§Ö\©oiiq8¦iNNNêú¢fÊ€€´ìèèžÑ_ôÐ_@ö ‡ýôÈ$ã€þú è/'F¨jýåà úË'{à,O¶<'Æ:Ò_ö.úËŽ6;ȰÝÐ_@9µÕ ‘m ÀvC·auuõ£ý¨iš†a466F"‘/}éK·a»§QX+¯]»ÖÓÓÓÐÐ Ë?~üÒ¥KÛþ¨Ô/Ê×;—ËÕÑÑ100`½Óðî–B9µm Œ @ýêoWW×C=´¸¸(å‹/Þÿý•èïõë×E𧦦d)yùõ¯=nû£RxÛ¶¾P(¼úê«?ü°×ë]XXØËRè/—² ä ôwwn ÃG¬\Iue,›œœ´]°Ìv­¿š‘‘ý<¹Ý-…þòÉ€ly¨ýÝ'Nœ8uêT:Þ*Áåõ×ív¯®®ÚöYæG{×_é¹±±q/K¡¿õ«¿·nÝzøá‡ƒÁ Ëåòûý=ôÔT¢¿†a”ê³Ìv1÷·Lÿ»[ ý…†Á!@wÀüü|ÿ‰'ÊH¤ÃáP…»5ú»²²"ýïe)ôj¦üÿípUžÇшYTDV„KatU–Ó,‹Ür5ògYå@á)‹È¢Ç"–Š!Hq å&‹{àJ`×¥¨I 
!7`@©lÄ`Р„sS@"·£‡C–ƒ€.9ðÌý°÷ºÚ™é7===“™îϧ^¥zzfºû}¿~ïûÞtw˜Àþ†>Kšššzøðaã[ò²ÿþÚò¬Y³Ö¬Yt Š·"·¿9996®ý5~ û Ø_Fpƒýµ7¸5jTyyù¹sç:¾™"ÍÌÌÌÈÈÐÞZ±bEzzúû￯½¬¯¯>|¸¬Ô^jwX·nÝ… .îýÏž1cFÈ·"yòƒ˜ï%K–¤¤¤„õä‡ÀoaÙÓʹ ä #¸Áþ†ŒnkkGaáÅ¿FöîÝ;mÚ´nݺ%%%‰Gœ;wî‰'ôw׬Y“––ÖõdÁoN·±±qêÔ©Úw‡ â÷Üß oÙ{œºyÚþ¶¶v,[ÖѧÏEã+eˆý¥i ·|`^Ühkj:ÆŒéèÚµ#)©£GŽ+®èؾbéÈm \dep£O÷ŠñÕf|eùê«ñ¾f³³³óòòjd8ŒìÈm ûë,;wþÝòR:«ˆßõ+«W¯.--=pàç`G›ýíÕ«ã²Ë˜ý)ÙÙÙ˾¬ï[PP°{÷îÆÆF$‚ØÀäxËþêTWwŒ}Ñ_rIÇ5×à€£N^^Þêo#kJKKÅû8pàÓO?E"ˆ \À¼€Gí¯†>Ì“¢MMMÍîo#kÄø666Š÷5ûÐØ_FìoT7ÕÕüÏýe|‰ò´ò@nù@Ã.±¿VþëÛš5ÇŽJN0”G  @> xÀþ!@y×Ãä¹ ä öÊÛ@>0/Ø_ (@nùÐö—Á !@yrÈÙ_ì/x&‡û ‚Kü`^û asrÿ!±Ζ֚ƒ Ø_Fì/ƒ›¸£í°¯¼×ÈÿÜøð… YN•‹~SÞë§§ê‘—ä§•rÈFì/Ñ#¾<Ö²=uì‘Õ3ô¾ZiÌÍ©è7þìGü >’)€€ù€nØ_¢œ?ufçm“ëÞï¸÷ÕJõ¼ùe©Öìóù|­­­íííhNòG“Cä6€nØ_°ÃWçþgwúô}3ï;na”ìïùó‹^Ÿò‹m¦¼ûVMSS˜ä ·|`^ûËYÑ9|}á«·Ç?¶çÞñÑó¾Zio_´mø?oü`mÍ^0É@nùØ_7Ã{³ï¾ãgíg3­Ù.]ºø-X,gN.zõGS^ñ˜ä ·|ì/tŸùMå­c¾8þtX.Ö¶ý•ò×O–|ÿÞm“Ÿ¨­­ÅöbÇáß°=ud›ïÉ6×Aû+åøá…[®½íç™8`ˆ&‡û Vù¸ðõò^ÿÝð¸•Y^gíïŇï_¸åše/ÇC$pi óØ_°Äg;ß)ïu÷‰w³x‘×_>3sfZrrRïÞW¼ðÂÈ öwÑ¢a={~G>óàƒÿpêÔS!ðáÏ_qWù¿­Âö€†ûËà&êçÆÁÅ÷[¿Æ733ý¾ûn9yòI)²h—.½sâÄ›?ÿü‰sç2gÏ4oÞ+sÀo/xXŽä[ËpÀ$?­<Û@>Ð0b‰nù¸è¥×RîT\õëçnûö½ê/™«-=:7Ðþöë×]ÿ@[ÛÓ=z$‡ô¾g=µ­ÏÝ…¿X¸iÓ¦ŠŠ 0É@@|@7ì/Ñ".}²òÖQçN=cã ë»|›¤¤KB<ølæŽÁ?+žøØÆ7lØPTTTYYYWWçóùÚÚÚˆÉo&‡Èm ݰ¿`ÿÝ;õþ·'Ý£øoaÍþ~öÙ|ë·¾ýiÂÄ’¡nÙ\´iÓ&qÀùùù¥¥¥UUU ­­­Ä†är€|`^ûËYá<_ûëC~Z¿pšÅkOzJÊĉ7Úßœœ»Ç¿éã/ÞKwäÈœ™3ÓÞ÷ƒg,½~\É+ù%%%[¶l),,Ô'€ëë뱿$?Ûä`ÜD‹ÿú°¦¼Ï]Góf†|òÃìÙƒºv½´gÏï˜=ùAðõ×÷HJºä‡?ì]\<ÉÌû-˜ýêu#KÖ®/ùì/É@nùØ_ˆÍÍ;¶nëUÃ>ë_í=Á7¬râÝÇJ®º«dÙ*Ýûrñ`!vˆ×ljjz?w‰•ADX¾yÔÈ’_.ñó¾Üú¶ar°¿íííš~÷™ ù ˆHŠö¨‡’û/ùÄòæçç‹÷-,,,//çÁg`. 
`^û 6ðG5VOœ¬~D$E{ÔCIQ±ß%¿Û¶mÛ³gÞ°¿4ŒØ_71uÀ‡þcß®ÛÇ(a»õ _ö Þ×ï’_¼/ÉO+On#4ŒØ_¢;|ñ6¸··—÷¹ÓìAöJࣴK~Y~ã7êëëe×x_’)€€ù€nØ_¢SÚÚÚ|>ßÁ²—E[gKࣸÝäw&‡Èm ݰ¿`ý6¸ÚÚÚŠŠ íú±ª² ¶µ$ +Í1z_Y]p»É@nùÀ¼ö—³"ްþpÍû±:séÆpж#”Íâ}I~rÈÀþ2¸‰;\YYYZZºiÓ¦‚‚‚ür.ËÙˆlJ6(›Åû’üä6€ý…¸sÀuuuUUUÛ¶m+ÙˆlJ6(›ÅûöâÎû|¾††q«{ö쩊ÙˆlJ6(›Åû‚#09Ø_pÒ·µµ‰OmnnÃÚ1²Ù”lP6‹÷GàÒ@æ°¿€ý a7Ú_7„åVžÜòh=d‰.!@y@  ä öÊ{&‡Èm ݰ¿@P€ÜòyÀþ!@yrÈ€„¶¿ nÊÛ@>DÝþž?u¦¥úÊùÓg# Ÿ+Es±2¯>…ü¤ ¿S}hBÚ_©ö–.C)'Þú q‘‘ð¹R4+ãñêSÈO ú;Õ‡&°ý=òÛ•-Õ[¼Y޼¸Ò)ÿäe Ÿ+Es¥2¯>…ü¤ ¿³}hÛ_‘àë¯x³|þ§-Nù'/ËHø\)š+•ñxõ)ä'ýíC±¿ û]/®¯­­­¯¯ojjjnnnkkÃ?>Ds±ýõlõ)ä'ýíC±¿ ûÒçVUTTTUUÕÕÕIø[[[ñO„Ñ\l=[} ùIAgûPìoǾðÙœ¢¢¢òòr ¿ €dôƒ"|ˆæbûëÙêSÈO ú;Û‡b8ö‹²7nÜ(á—Pmm­ÏçÃ?>Ds±ýõlõ)ä'ýíC±¿ û’e+%öùùù¥¥¥2úijjÂ?>Ds±ýõlõ)ä'ýíC±¿ø'š!ÂGûNõ)òý±¿Ø_ü“Ó¥K—.~ ‹³Ç–èö·S"…ývõãùŒÿbE=gÖ·ëÜÓ3®¤p¥þñ&,ö×4~¹¹Y={^-ýâwç? üpFÆíÆÐž>]³`ÁC½{·k×ËRR®•eYc܈FRÒ¥Ú»ûÛ»AsEGÛΜ9SZZªÅ?韜|¹H1uêèÂÂ_%ýí ïØ_Ûá³XÍĵ¿ííï-Yòè7~OÎÊîݯejj^‰¤R1“©îMÚ«eË~9`@ª(Э[òرÃvìȼ.Žèí¼µÑÄÆþ*‚mÏçTÇ ?ãÇþ:¸3¯bf3Ìvíˆþ6ö‹ý;û;tèmÅÅ+† èÚ[oýþ{ïWîß¿9-í&=´lùÖ¼y÷;¶K^Êßùó<øA“àÈ‘ò 2fÍšòôhn~S2)==-ü“¾|æÌÞ²²UÒ+üä'ÿ$îÁ531.ž¼·>×Ûß™3ÇË ûÉ';5+¼uëÊ#þÑ;öWª<|ø iÓŽÝ./ÏŸßÿæ›/Oš4"~ìoTó6Üî 6öWOÙ_ÛRÄø|tp/f^ÅÌfDÛþ†»_ìo|Ù߆†­Z—&‰õᇥÆÐJoçw.ÉË’’ôÐʸsÆŒ{ü6(ðüóó‚&ÁéÓ52Bµxz$'_nö®¾FrîÞ{ï’mÊØWl·[§û'½Lž·ë݆vžÈ(MëÍ7_VlÓ¬Žúä|¨®^¯¯¯ªZ'ƒcý3>ßëÆËô9u‹×þ*¶ ûݳ'OK–ƒÚ_³¯+4‰·ðù­ïÞýÊÿ$B½ŒÒø³ GC4…2r¾÷êuÍŒ÷äå='yeœv ´¿Æy ‹Š)ªc–]ûöè£k)ÒûŠ9ŽÞä·½Ó­Ÿ¼!uPí¼ Ü—º;P4D~oÛ uÝÃ Šº1ô[ˆ¤Y¶²;³ÀÅ&?­k±ÑûbIy)ãÀ¾Þzíe{H¯ÔfÄÌþZÙ¯ß9hÖ3Úë4Ê7ÙßÊʵÆÙ# ^à’s’ýRçtƒþ6*kŒÔ¯ G»`ÁC_|QkeöwúôqfnãšÅ‹Ñî““Á«výq\ù']YЋˆ¦ ò÷euÍ¢`6üPªzö×lMà~ƒÚ_…30Ó$ÞÂ'Ñ„ ¢ªžÌA¿¨ø˜Y€Ì¾bôhˆ¦þu¨¥¥jóæ9±ìÒå˜Ýúf] ¿ œ,žÆìÒ»ji”¤?è,{a%ÉCž¼!uëÜt6oƒîËzw`ÌsE4í»:(VC+»Ù,Ûh{cœŸV„µÑ‰˜½´G{Ù®ö*f6#f·¾™í×â9y§éT¾yÈþN›6ÆoÎoòä‘~jʈ_â*EÿÅV}ñƒ¬8ð†ï ’Ï•Wv³âá,yé¥L±Ë½{wåʧãó×sÉQíРTÈv6hÝþ*4‰·ð zÛóÏÏÓï(2«âcf2ûŠYУ!šõ'ûìÚõ»oüž•NÑ¢bŠê(²K¢±c‡É‚ìBo”âóâ‡'oHµ¿æmÐ})º{ö7ܺ«ƒ–ý°Yîtû«–аÚ_ëq´—íV¼Šõ Fã⇠oY<#ï4±¿áÙß“'ßêÞýJãm¢¬¬‘õ~jJõŸBŒoI\gÎx·ø’%Fheeœ:5ä'Ÿì ºÙãÇwýe¿ïÒoÌȸÝxÁ¢ß/Aâ*,nÓXGã füÝJ–ýõû½L–ò¿ Mâ-|Æù°ýû7#¿f=èÇ?˜}Å,èÑͺý5Î,úÕ=°‡³¢˜¢:Šì’".üС2ý1rk‘qJ&Üʺávíæ­Ù¾ÌºEC$ 
¯¸ø!¬º«ƒb¥1´²k³fÙOº°ÚÞç§amt"f/»‹<Û­{•x³¿ŠNÁ†ýµ×P`ƒÄ/77K¿À@/²F{¨ž•ÐJ:ÊI2oÞýÚÅ@òwþüú÷OÑ’Ò¶ý•þcúôqº±–aŸœägÎìÕhÈúç'LÈ–Tze)’ú´G'ú'1îee«¤c;v˜>P«ªZ—’ríÖ­+µC­©yE¹ï±8vl—~OÈ:êؼ9GÜ€v‹’üMMí+kb`ýng0 5,û«Ð$ÞÂ7pà Ú­Ð>ßëÆôëÛ÷º}û ôïš}L»õM &”>sfö³ GC4…2£FýXU{Ž¡¸·9s¦<ðÀØ u÷ ´EÅÕQd—Öv üƒ_ÿú‰X>XJŽ0èƒÏl4PÖu°xnF#oÃm" ‘Mý-ã­oáÖ]+¡•]›5Ë~Ò…ÕöÆ8?­k£1{©Ø]äÙ¹Wé,û«èlØ_{ ö7HüĹo.ÑÇåÚCõ,¦”4úYY³$¿e”£=R¿.>,ûë÷é§žú¹~MKK•tºÉÉ—Ë[Òç¯0Þº+-¾¬ïÖ-yêÔÑf—~Çìÿ&tízY¯^×È‘èm‡^$SÅLhWíÈ‚qjAú­Aƒn‘Z}ð™YŸY#kä¯,[1¯Öo}SlAö%Ç,Ç&ûýýïë×¢YüºB“¸ Ÿ´Ýii7iZ»v‘^…’’$óu¹Ì>& ÒŠDšPÆá»ÙW‰í¸h ed_âíääHúõë³`ÁCšÉ ¬»_ -*¦®ŽYvi¿•â?ã8øo–/Ÿ+}˜ì]Žpܸ;*+×úe²ʺêv8ªykc„lÖIÙ°a¹~ H4­´Š{a+¡Å&(h³8ëmoŒóÓŠ°6:ÅK³ÝEžíŽx•N±¿ŠNÁÞÅT6 ìo,þ©éñã»Å¹Çynzæ3ÿ;Z]¶Jø.çB¿ìÚ±#wΜ)Þ©>…ÓÑпsûPìoˆRUµNÆŽúÅØ_w—Ù³'ihw’žž–ý8á£}vvµ´T t‹ñW&º7 §'ý±¿œ?ø§•ââ¤&'_.cž(y_ÂçÙö=hvÉËž=¯6»ä{Aáô¤ ?ö—óÿDø°¿^ì_±ò“‚þØ_ì/þ‰ða±¿Ø ùIÁþbÉuüá£}Çþr^SÈOôÇþbñOÈHøhß©>…B~¢?öû‹¢"|´ïTŸB!?Ñû‹ýÅ?Qí;Õ§PÈOôÇþÆ¡ý=òâJ‘À›Eêî”ò²Œ„Ï•¢¹RWŸB~RÐßÙ>4í/ÅÿD!|.ÍÅÊx¼úò“‚þNõ¡ iÏŸ>{â­´²ëÅõ¥Ï­*|6§`Q¶há­²¡ ’Ø##ás¥hnVÆãÕ§Ÿôw¨MHûk¤¶¶¶¢¢¢¨¨h£W‘º‹¢ƒÏç³-42>WŠæJe<^} ?ýíCÒþÖ×׋ë/// ò½‡ÔZê. 
ˆÍÍͶ…ö¸Œ„Ï­¢¹OWÈO@ÇûЄ´¿MMMuuuRy±ÿ¥ÞCj-uD‡ÖÖVÛB{\FÂçVÑܧŒÇ«ä' ¿ã}hBÚ_ñûRm1þµµµUÞCj-uD‡¶¶6ÛB{\FÂçVÑܧŒÇ«ä' ¿ã}hBÚ_©°X~©¹ÏçkòRk©»( :´··ÛÚã2>·Šæ>e<^} ?ýïCÒþ`°¿Ø_€xàÿ¢Èó6õζ¤IEND®B`‚python-watcher-4.0.0/doc/source/images/functional_data_model.svg0000664000175000017500000025313313656752270025103 0ustar zuulzuul00000000000000 python-watcher-4.0.0/doc/source/images/sequence_create_audit_template.png0000664000175000017500000007376713656752270027006 0ustar zuulzuul00000000000000‰PNG  IHDRcãÉÉšì)tEXtcopyleftGenerated by http://plantuml.com09–zTXtplantumlxœµ’]OÂ0†ï—ðN¸‚‹™± "Yˆ~2dáC/Œ1e=jÖÍîðßÛ" #Ä ¯–ÓvÏû´çœçÄédVrJ‹)UÐæ‰"'ÅLe— à¶ üÈ(~Gð¶ MX¬K¦¹ Â$›1BˆÚO Y‚-ÞR6kÁ“ë®@øöéj-8›²õ\rŠàݤv´JŠ£ñwÌË&§’1eU^µ¶Å_v]FlÊr´ !~hÌ Ä+X1À¥¹`B_ŸÚ'®{5èý)vsïÑP1û™Jç‚#¯þ‹Hç»9{Ïù§'¸ß``‹™Ln»UK „uO‡HZÉ•Æ0´:7ãqa3©©qTûSY¯Çò«s”|5ôN4c’&ýæf€,¿áÕ½)ó*WJ@;SàûPk4ýzÓo@çÒ  ïÕNªNå: !OµŠ¸Sm «Î Cy×íÁPK Â¥œ •Ê%m÷Nêî…1¡2ÁðÐwjG§GÞËÙ±;õ<'R/¿ö_%9 BˆªuçIDATxÚìÝ \åþøqT8¦ ÈEC‰Ÿt MOÞRÑD¢¼D¥¦u~þ³òÖÉãïtާò–YêR:ÙÅ /© (HhfŠ‘¥yEIACDEM”«×ÿW¦3ìÙÝYÝ]>ï×¾x=;ûÌì3ÏÌ<Ï<_fgì®ü‡UTD @" ŠD Φ8»)wIJ^BêùŒÃl3êåF ._,Íœñq²×£±vAº¯”»‡Z{íò6uÎB#¿í>”ìþŸÐ@øöáãüßIß?96Ù+T™øå}ËsO°ý°ýHÁÙô w<kôuð°“©±×®íÒ}å%Æl$Ÿ&y$X€G .ýv!És`¬]ÐŽQ/_½ô³^˜@y]º°ckÿ?Kž/ž<]p²¨¨¨¼¼œm € F vMŠŠµ Jí3J+L  ”+ R_ÌÎÎÎÏÏ'^€­E ®”U$ÜÑ[Æÿçv­3&P† 9W·˜––¶{÷îÜÜÜÂÂB‚ØN¤ ?å;üoî9¢Ú0òRnp¸váÇ[¶lQ‚EEElTl$Rppþç2òßó·×ÍŒ|ÿäXÉ¿â/3Ö¬Y£ òó󹬉dÎX,#ÿŒéoš)øiÌdÉ¿lôß—/_ž˜˜˜–––ÍeØH¤àÀœ¥2òß;e𙑂íÃÇKþÏ'L‰‰Y¹rå† vïÞ]XXÈvÀ"y ©2òßÚÿÏfF ¾ð —ü+ßz7&&F¹¬`ûöíùùùlWl!Rpùbi¼cÏ8‡à Ù_U&8™kçÚ/666&&fÙ²ekÖ¬IKKËÍÍe»` ‘ñãÿ¾)ãÿïŸk:LpõÒÏ›v#RðÔ¤¸¸¸%K–,]ºtõêÕ[¶lÉÎÎf»`#‘‚’¼S wô6}·‚«—~Þ>òeå‚‚¸¥Ÿ)Àf#¢`ãŽ8‡n±vAÛžkø3„s»Ö}Ýk¤|ëØ#nî{qqqüú[Ž(Á‚Õèu#`¼©÷³ßŸ˜ÚgÔwOûò¡á7&Ê«U߸w¢ã*­\¹’;`Ë‘‘·7ëËA“âšvÿ=4 ¾{Ä>:6î³¥J˜ 66vùòå<%egg³ñë5Ÿ³ê¾ˆX» UO®|uvìòjŒ`åÊ•J˜@¹  --Mf‘Ù®ØZ¤ ¼¼——wäȽpÀÖ oÇÚÉ_I§ü·d!²(Y ÷;€H‘ˆ)€H‘ˆ)À¦#&ðìn "  ‘P…H¨B¤T!Rª)Uˆ€*D @"  ‘PÅ "—/–nx:Ö.hsÏ®]¾B¤€úcé‘‚k—¯lòj¬]òÚ9ñ_D ¨?–)Ø5)*Ö.(±õ€Ü%)ñŽ=%ýí?)€[àÒoNmÝÉËÄëÒù‹ì'Í-ÀQl{‰EG ²ÿkçЭpÛy{xùúWØo™ûñöJéééåååP¤ Q/éâeôuú›ì'Í-ÀQl{‰åF Ž'¥Å9t“8»IÞ–——¥O]t#vàÔsÓûK¿ÿþûôôt™H°ê¯³?øîüS[cyé½FÍç| Ù¡å8Šmµ1±ÐHÁÙô 
wô–Òïë35Lpúôé¼¼¼/ÿ‹L_íÒ/-á‹ôôt™H°ê¯³—îäÚµ]¼ô^'Sc9_hvhyŽb[mL,1RPœ{"É3LŠþÓ˜ÙÊ5LpäÈ‘íÛ¾K zV>]ÓþñSÓd¢,à°:û[ÜÅnŒZ¼}ûöÝ»wgggçççÓ4;´<à(æ(¶ÆÄâ"ê3·öŸxíò•땨a‚|ÿý÷ßmÞº¦Ã`ɳîO#dÿ¢ ¸¬èìoq»zFdrrò–-[~úé'ée ÙmšZpsÛ@cbY‘‚k—¯¤ö+…þò¾—/–^ÿÏïÔ0Ȫ”±í‡„ÖÜ„<>I>’ üèìoq»üŸo¯\¹211QzÙÝ»wççç³Û4;´<à(æ(¶ÆÄ²"?þï›Râ$ϰ’¼S×unO † ” ÀñJ7~»ú½n\}0ám™Î €Îþw±Ë¦Ì^²d‰ô²ÉÉÉÛ·oÏÍÍe·hvhyÀQÌQl‰E 2g,–â&ÜÑû·Ý‡®ÿ÷] uÃJD@ù(ãó/bíƒe®ïßþXýˆ`ÐÙßš.6nú|éb—.]ºzõê-[¶dgg³Û4;´<à(æ(¶ÆÄR"G?ÿòFYºå§|§&P.(Ð ”WR2¤¿óé§AÚïY™B°èì9_hvhyŽb^¶)ý)Ρ›”õð‡kÍ (s©Ù¾›0çÆýþÐ+sÓ· Àf:{;;;ÎךZZpÛÆ!l9¶ŽHAQVîšVý¤ {þ¶Ðü0^°`sø+²„µw=zð‡  ^;{??¯ŒŒxõm|ü¿¤““¿ê”ýû×Ü}w»›ïë£ï,-ýaæÌqüc‡fÍš¶lÙ¼ÿ‡’“šø:­2p¾Üâ1†U·<òŠŠú›£cSù«÷] i‘üý½_ýù’’&ŠAËNný!¬§NNÍÚ·o3xpŸ•+ß¹zug]•„Hqeg¾ð¹ñ¼Ãò{àà¿ÃÂôÈ_ œÌË_ÿàs²œäû†ý’Q5 Ç*Ôyg?aÂÓÿú×_Õ·/¿<ÜÛÛsâÄaê”èèè¾µœ¾³¬ì‡îÝ;=(33¡¼üÇsçÒÖ¯/,¬‘Àò#ÖÛò(¯ûïïøÙg3:wþ£ß%­ÓÎ+ûôy`üø§‰€“K‹( 9s8vì˸¸¹ÁÁ¡¡Ýä°%RP¾}l²qsÏÔg"ê… Ìy¨,8žyhßÐKùšz—@wö_|±ð‘G‚Õ·râûÁ¯ßwßÝꔡCû¦¤DKâàÁĈˆ~..w¶lÙ\&ž>ª›W(³$%½û§?ÝÓ¬YS9oX¼xšÚw~þùì.]dº»»Ë¨QnU¿Ef‘3où¨C‡»Þ|sü¥Kéê\‘‘ÿçååѨQ#½’Ïž=qȾ5ꪉ)°Þ–G^»v­êÙó~%^ i­æ×_¿ôðp%RN,ê6<e.Y9©PÞšY`£Ù”œRwßÝ®I)ÕgŸÍP¿Hk–ììur>ãææÜ´i“®]V­šSí Z_¤ (+÷ÛÇ&—œQüzaykÎï”ymݱ1dì¾owÔh^@:ûââíwÞy‡ü•ô¹siÒUTüÔªU ¥»råç¶mÝ”O;uò߲壒’¿ýöíÄ‰ÃÆŒj´Ó•³‡6mZ''/¼pá;éüž}6\ÍöÇ?vؼù#™~üøWÏ<6bÄ@å£mÛ>“…Ë_ù"™eÀ€no½5A«oßÝ`Ø/Þýý÷KˆÖ)°Þ–G^¯¼òÌŠo+ÿ5•´‰H lˆ€“‹:„Œr:!'JÚÌ›ÈÖ¾}›­[KSS?öööܸñ}Ó³tí0þä³g¿)/ÿñ‡–=ñDHµ+hÅÏ>¸þŸßè… Ì¿. 
v×#jqÊþÈ#Áë׿'‰uë<þøÃ’ï¥üÚðçŸWêþÓ@}?¿ÍËËÃhßÙ­[§Õ«çí›wî\YÕ«Üìêz§’îßÿ¡Ÿ~ú\÷ôZýq£ÌµgOœÑ~Ñѱ©t«D klv¬·å‘áÐ}÷Ý-'ô’–&¨];™bxUóîݱ!!Žû$‘pò`Q‡°ÑƒQ†âNNÍjT`Ù¤BÔÖ®ìÝûO¦giÑ¢¹Þ0‰´‘H¢÷нǂHÔGg?þä¿üe„$þö·ç"#ÿOóæMR~^( eмrs×?ñDˆ››³ré½}c£}§ôµgÎl5Ú7_¹ò³ÑÞºuëV²4y‰FÉtI¨y”3r"€E ¬´å‘óþ×^{^};bÄ@™¢w§´¦M›øùyM™2ZùŸ*‘pò`9‡°V¤àp¬QMd;w.MÍ&'*..wšžEÚ Y——^ŠX²äÍüüMê¼&VÐ"yÿQ‹A¾îsj½@µ}ffÂÿüO{I<øà½Jäþ§Ÿ>W~mÖcÿþ5J¶>}Î,/oã¥K饥?¨]¦^ß)=¢Vg¯5EÆüº]£™wâ×€UG ¬´å<¸Þ/–eJµs)'rýè»ïbÔ”šY`Ù´"Z³ÈkÏž¸¹s_yòÉþÎÎ-Õ ‹‰´…Hªv#|5Xp“˘>e÷ööÜ·oµ››³¹¿|ùçV­ZHÿ¤{w¢æÍ.\øNIóÍ'jçà`/ùuûNÝç$™ÓÙ÷èqÿ¢E¯Õ´³ë­ ÜѰÞfÇ[žS§¶ÜyççÏoÓ½„X¦Èt"àäÁ*N ?’b„†v{ç¿Ô¨ÀZÙ }ððÃ]MÏ¢û:rä iRª]A«èªõð^ ÔÉ¢€Î^«³ñÅ'†í«þgLùµáäÉ£^z)BÒ¥K@däÿoOO_qÏ=¾jסÃ]_~ù¾zq`jêÇwÝåž’mxS"­Þúë¯?tq¹3&ffaáÖ‹¿—·>Ú³Úξ´ô‡‡ºïùç‡df&TTütî\Ú† ÿæ)‰€E ¬®å‰ŠúÛÈ‘êM”)2H8y°Š“õ#9sÈËÛÿ/ë>%ÑÌke3¼£¡œœ˜žeàÀî_}µHVAfyÿý>ðÀ«]A뎔ÿ·:\‘¨óÎ~íÚHé±”“]å5oÞ$'§fêïoåµsçÊ®]š6m"ÝÞÂ…W{¸„„ù¾¾wÙÛ7Örÿý%§OÛO>™^mg//éSûö}°ys'ùRIHwhÎs‰KJvLŸþR@€|W‹Íy$ø‹/êý`X÷™FD ‹ŠX]ËÓ¹óÿlÚôÞD™¢\·L¤œûì³:4iÒäî»ïþðÃÕáwå-ùúú6mÚ400pûöíË–-ó÷÷wrrêÕ«WNNŽÞp]+V¬èÒ¥K³fÍÜÝÝGuæÌ½<¿üòË!CÜÜÜd™]»vU>Õ¥L‰ŠŠòòòjÔ¨‘¼=tèPDD„‹‹KË–-‡ZXXht.!å÷óó“u‘¿ü±nLAw@gÏ‹óu€f‡–à(nè‘‚W^yeåÊ•’xï½÷$­N߸q£Ï7ß|sñâÅ­[·z{{ëŽü{õê•••%ýãÿhÕªÕÃ?¬¾}ì±Ç #üã¥d’!??ÿ™gž1b„^ž®]»FFFž;w®¢¢âÇ|â‰'ô2¨oûöí{ìØ1åm§NRSSKKKÏŸ??qâÄ1cÆkíÚµ^^^R€ .È_I'''] ÐÙóâ| Ù¡å8Št¤àÒ¥K÷ÝwŸ Î%-£ôvíÚÉå#ü«Ãi‘˜˜¨;òÏÊÊRÒ2—ÞÛV­ZF víÚUµo:åêꪗ§E‹yyy†%4ŒìÝ»×èºyyy«{÷îR~ÝÀA=ª] ÐÙÓÅr¾ÐìÐòÅ .R ãç×^{M};bÄuDíââòÛo¿©)ut}íÚ5ÝÑ»Þ[ÃÄÕ«WŽÿÕÄ”)SZ·nýÒK/Éjœ8qÂD¤@‰k(Ž=úÄO¸¹¹)?4°··7:—³³³”_w]dŠÑ=/ÎךZ€£¸AG ¬÷«~™¢Ž®MD ŒûMD ô¾×èG{÷î7oÞ“O>)_eÎwõéÓgÊ”)Ç¿|ùrYY™Ö7šŽp´ ³§Cå| Ù¡å8ЉÜpúôé;ï¼³¨¨H"i™"Ó¯W÷ëƒúˆ¨rrr¤JÚÁÁáÊ•+ZßÕ¼yó‹/*é´´4õS½¹ºwïž””¤».º¿>àhÐÀ;ûƒQó¥;á¥÷’já| Ù¡å8Šmµ1Ñ ¿ûî»#GŽÔ›(Sdº$6lØ`⎆u)8pà¦M›Š‹‹åë-ZôÀ(Ó;tè°qãFõÇ z‹êÒ¥KTTTIIÉÏ?ÿ|Ï=÷¨Ÿê͵víZ)¿¬…º.ºw4ähÐÀ;{^&^œ¯4;´<G±M6&š#áÎ;ýõ×zeŠLWÒŸ|ò‰Ñ§$Öy¤`ýúõ½{÷vttlݺõO<¡>jqÍš5¾¾¾öööêSu—³k×®®]»6mÚTÿÑÑÑê§zs‰>øÀÏÏÏÁÁÁð)‰í¬Kç/žþf§òÚµxõŒÈåÿ|{Ù”ÙÒ¯ðúý³Œóu€f‡–à(¶½Æ„‘0 zÛ·oONN^¹råj‘Ê‘*ÊÍÍeWhvhyŽbhLˆª·{÷î-[¶$&&Jw²:¤B¤Z¤r¤ŠòóóÙUšZ€£Ø"€êeggÿôÓOÒ‘$''¯†©©©©¢ÂÂBv€f‡–à(¶ÆÄj"™3s°Àí’ŸŸ/]ÈîÝ»·oß¾å¶Z2öµ-–D*DªE*GªH÷Al¦Ù¡ålà(¶½v 
¾«‰ÄÚq°Àí"Gaa¡ô"¹¹¹Ù·ÕÂ&]³-‰TˆT‹TŽTQyy9» `{Í-`G±íµõݘ)Xº´<hê‘ý4Ðò ¨B¤@? ´<hªpGC€5¡;@Ë€v ¾ñ”DP…H¨B¤T!RªpGC€5¡;@Ë€v ¾ñ”D€5¡;@Ë€v ¾)ÐO-Ú*D ôÓ@Ë€v  ‘ý4Ðò ¨Â µ«ÆÎR*ÇBJb9v*¶!ã¾bhyÐ)àô½ª$··HŒg8"j7—‚ Ôª„†Y9ìD ¬rÌcÉ×èM¹5E­ö[¸ö£¦>6Çíú^kÜc­®®8Zˆ) R`cS§NEEEuîÜùv­‚|õ»ï¾+ۨ«[£â)¨“¢š¹Y-§®ÌÜIˆ)¨~~~™™™JzÉ’%JB¦ÈtI:t(""ÂÅÅ¥eË–C‡-,,¼®sÕ«z’zùòå©S§z{{·jÕjþüùê)ì|о}{GGÇàààŒŒ eúÕ«WgÍšåãããìì}ú>|øÜ¹s“&MRó 2$77WN”gΜ٣Gezddd¿~ýrrr$ó¨Q£&Ož¬æ-‰ÀÀÀ”””îÝ»KZ¦(W̦§§‡„„8;;+×ÖÚÛÛ==utt,++3s Ã<Ýëu7n¬f¸víšÑBšY õ­ny$QÓH‰š9ÎÑýv)Œ^3WG«æÔ¼nMj-§‹±cÇŽ±cǺººöë×oéÒ¥†¿PЪ«šnY3÷%uß^²dIß¾}¥`ãÆÓú§}µ…—JVY »;éífê†6s¥Ì,žáºë]¯ûvçÎC† ‘úûûoذAk º»ÇMîZ•“””Ô±cÇ+W®Œ1BäZ…¯]¤@ë¯Ñfµðº2s'!R€Ú"õµaúôét#ƒü•ÓV91U>òóó““ì³gÏÊY¬üUÏJõn% §ÚFÿ³mô­œçææV;"Ò¥U 9óVÿÿVPP`ô_Ž™™™†# ½òë½5³„&Æ3ê·KÂðš3kU«5­y­åÔa12 Z½zuxxx«V­ž{î9½û-¡V̬j­²ÉWKî¼óÎÇ{,>>^žÕ®ð5½¦ Ú•ªQñj1”ÁmJJЧ§§Ñ ª·À›Ü ´*GÊøê«¯úøøhýÆ^ëøÕš^í^£ÍjáueæNB¤#´D êkÃÌ™3ç®»îRþ—µpáB//¯¹sç*É tbb¢œ¤9r$""B=+uss;pà€º„Ù³g÷éÓGòèýZÞèí‚ ú÷ï/³Ë9ñ¾}û”ß>˜>åÕ*F=fΜY\\œ““3hÐ Ý0@½S€áHC¯üzoÍ,¡‰Húí’P¾®f0³VµŠ¡Ëœš×ZNCÏÉ“'###;uêTm]i•Á̪Ö*[çΣ¢¢j÷ìÿþúëêÏËCCCÕo—éê} BBBÞxã 3WªFÅ«ÑPpøðá2•ÚÑoÛ¶mnP½Þän U97ÚœØXyûÉ'Ÿh•VëøÕš^í^£ÍjáueæNB¤#´D nЉHìÞ½ÛÁÁáôéÓ×+Ÿ%&é={ö(­_¿¾cÇŽMš4ñööŽŽŽVÏJ圻U«VêÛK—.½öÚk^^^ÎÎÎrvkztwõêUYT@@@³fÍ“’’ª=åÕ*FFFFpp°r÷òE‹éÞ”~ìØ±RwwwÃg–_ï­™%4)Pž} 7nœúÏ@5ƒ™µªU ]æÔ¼Örê°5îV[fVu”­Zeee/¾ø¢²;½ùæ›RZuúË/¿¬<û@êõðf®TíªÎ´U«VÉWËÑ¥K—ÔÔT£To7¹hUŽˆ÷÷÷¿|ù²ViµŽ_­éÕà5b]uU·‘î+À¢NDÐ)@ÃØ'xn™íÚ»w¯¯¯/»“9•óøã¯\¹’}¦^ëJw÷(É;•—z¥¬‚ê R"¨w“&M*((8zôh¿~ýþú׿ÞâÝI÷.€VQ9W¯^ýè£Õ‡ÿ¡ÎëÊ莑—šèºcäÔ¢¬\j€Hˆ -X°ÀËËËÝÝý…^0ú;*G·räðññIOO§rn}]å%¤Æ6޵ Zï‘óé:.1 RhèòRZöµ ŠkïØ“K n=[¸£!À–ä%¤®mk¤¼âºé^b@w€Q´D ~§ž2òâÅ‹¯†øjôP¢{è©­;yV€Ûr"J%´D Ø0€Ûæô7»â{*‚5-CÖy†%{=v`ÎÒ²‚3t8@;@¤€h˜ ¹ÝãñÍ{ñ‚;èp" €v€Hh˜`u“é^D@w€Q´D ŒãF2`Û ·íYsgˆáEt8@;p‹ñ”DÀíW’w*ûßñF/"À-F¤T!Rª)U¸£!ÀšÐ å@;PßxJ"ÀšÐ å@;P߈è§€–í@"úi å@;P…H€~ú6õÁvv ¡l³gÏo ûgXXØ;ï¼ÃqJË€v€HÁ-Âd6Ö¨£q ÔU‘Š‹‹[·nÝ@öϬ¬,77·ÒÒRUZ´D °ˆÁ­ÂÃÃã¹çž;sæŒúQFFFXXØ•$±oß>õ£óçÏO™2ÅßßßÉÉÉÙÙyðàÁ›7oÖ]š®ú›ó·&¬Pí·˜YŒeË– 4Èò÷œ:\šì`«V­âx)À‚Æ{Çê©§žyæåí/¿üâææYP)**JÞÊDåÓÐÐÐ^x!;;»¢¢âôéÓqqq½{÷¾5#ó†) 
±|ùò)ˆ‰‰1bÇ#€H–5Þ;uꔫ««’9räôéÓusN›6mÔ¨QJÚÉÉéüùóµF:t(""ÂÅÅ¥eË–C‡-,,4:‹ú¶¢¢büøñ’ßÃÃcÞ¼yz¿>0çâì[~£_g¢&£¢¢¼¼¼5jd:›9+k´5ªRÝÂlÞ¼¹K—.òíÛ·_¼¸ê’Ôììl???ŽG‘,7R ÃrÝÁª·žžžJ:,,ìé§Ÿþî»ï´~[nbÜÞ©S§ÔÔT™ñüùó'N3fŒéHÁôéÓû÷ïüøñ¼¼¼¾}ûÞ§ ×DFFöë×/''çܹs£Fš­L—·&®)P]»vMÆ®þóŸõégbôžžžâìì¬|¯½½½éH ›Õ˜…$ê$R ƒRÝëç7nlº †ß¨[$)¡^3×Q«¾¾¾ê-!L|‰o‘íRë ×{kf]™È¦[˜;w2ÄÕÕÕß߯ êôììì:p¨6œ–í‘6 À¢#F§1Âð>#GŽ4šù·ß~kÑ¢…™‘??¿¥K—ž={öÊ•+òW7" þ#½  Àè5™™™†åð¦éå騱cnnnµµa"R I†×h­£™Åxê©§–,Y¢û½F¿Në[ôŠmf6­·Z…¬]•ª¦””ÝÀSLLŒz= !  RÀ†Xb¤ààÁƒ­[·ŽŠŠRŸ} o:¤|Ú»w︸¸“'O^ºt)''祗^ 33R ãÃÄÄÄòòò#GŽDDD¨9{ôè1sæÌââbYà AƒÔéS§N0`€ú+}î››ÛL×€^ž È¢dJEEž}û† VÓHZ$IL›6M/ƒÖ:šYŒeË–=öØcºßkôë´¾E¯ØffÓz«UÈÚUéðáÃ÷ïß/yRRRÚ¶m«N_±b‡*#´D Ø0ˈ½{÷8°y¥ÐÐÐ={ö¨mÙ²eèСÎÎÎŽŽŽ¾¾¾&L8{ö¬™‹]¿~}ÇŽ›4iâíí­æÌÈÈVhÑ"uº qÇŽ+ßåîînøìƒë•÷ÒkÕª•éß èå¹zõª|u@@@³fÍ“’’j)PF ¥7nœ”P/ƒÖ:šYŒ’’ݸŒÖ×i}‹^±Í̦õV«µ«ÒU«VIad+wéÒ%55U K¹¹¹iÝŒÐ)¨cÜH@wP÷çvõ~&ðöÛo?ú裷ìën/YÓwÞy‡ýÊZ”äÊKH½RVAË€vÀZ#À#·ñë€jå%¤&º‡þ>>ÎÎΣG...V¦WTTŒ?ÞÅÅÅÃÃcÞ¼yêìzƒIõ­Ör6oÞÜ¥KGGÇöíÛ/^¼øºÎÕ FÇ¥ÊDyÊËËÇŽ«lîܹj™þÊ+¯xT’„z=¡C‡"""$Ë–-‡ZXXhbTlt-äë>ûì35ϧŸ~*St—`Î:æÑÚ@’øàƒ$›dÎÈÈ0̬µuŒ®¬aaLÔ‰T©»»»|4aÂùÓù®”Öž),4RpöìÙ7Þx#(è÷{žEFFöë×/''çܹs£Fš|X÷±2Õ "4nÜX™îèèXVV¦¤%Qm¤@k9;wî2dˆ«««¿¿ÿ† n>R SY [0½Ë[%žžâìì¬ÌÞÞÞÄòµÖBL›6M¦ÌŸ?ß°¨æ¬£Ñ<欸Ñrjm3WÖD¶Õ¡Ñ•2Q‡à „H€~šî€ÅE ÇŽóôô¼pႤ;vì¨^_ K÷¿Ö™™™ºò’’%]PP N×ZŽâÚµk)))ò¥ÊÛFÕ:RPÓk $±téÒ³gÏ^¹rEþš^¾ÖZìÙ³ÇËËkíÚµwÝu×/¿übt 欣^žZG ´¶ŽÖÊêÆD¨‹•Dµuht¥Lï à „H€~,1R ž|òÉ?üP ,èß¿ÿ***öíÛ7lØ0%ÃÔ©S  þd]½G3gÎ,..ÎÉÉ4h:]k9Ç—1§L”ÁdÛ¶m•‰nnn’³v‘‚×_]½OAhh¨šM¦«÷) yã7”é2‚MLL,//?räHDD„éå] YÙÀÀÀ´´4I'''wéÒ¥´´Tw 欣Ñ<µŽhm­•Õ+Œ‰:Q+ õ^Zù®”ÖžÎ@ˆpj ;`Ñ‘‚¯¾úªk×®×+ïTЬY3'%%)”G 8;;»»»ëÞ]?###88X¹Ýý¢E‹tŸ}`t9«V­êر£ä—vjjª2122²U«V&ž}`"RPVVöâ‹/*{óÍ7›4i¢Nùå—•gHB½Š~ýúõRÉæíí-%4½|£kñÜsÏ}üñÇjž¨¨¨Ñ£Gë.Áœu4š§Ö‘­­£µ²z…1Q'ʳdÉãÆSå¡•ßèJií ¸I%y§òR¯”Upb­‘na ;`c-‰; ÜF{÷îõõõe»£ÈKHMtÝ=)ª(+—3ë‹¶v.nI‘‚I“&=z´_¿~ýë_Ù:h8~ÛÛø¡X» MAÿ{,v“9—)`û‘‚ xyy¹»»¿ð êí†,ˆwìk”èÒM‹¾Õ^b@¤÷kÜæ§^±vAòŠwz8¡yï†|‰‘ÀeäÀ‹¯†øjôP’ûÀS[w)°PÜ @w€–@=)ܶ'¡e_5FÐüáD×G2ßü¤¬àÌuîhh±x,€î-€z Ä5龺YÏ­ýÆlÜÑÛ"Î×€–hÐa‚ÕÍzÄ5~H÷"‚Þ)p¾´<@à $´ 1¼ˆ ·D œ¯-ЕäÊþw¼Ñ‹x;À Ö„î-ÚúÆS@"  ‘P…H¨Â Ö„î-ÚúÆSÖ„î-ÚúF¤@? 
´<hª)ÐO-Ú*D ôÓ@Ë€v  w4XºÖÕòddd„……ÝQIûöí«:·«:—émÚ´™?>µ pB¤°Y¿üò‹››[dddA¥¨¨(y+õ"Û·o÷ððX±b5€H`ËFŽ9}útÝ)Ó¦M5jÔï'â•‘‚M›6yzz~ýõ×TËA¤¨‡Ö"o===?·³KHHh×®ÝÎ;©+…HP/ìííËÊÊt§”––:88ü~"ng'é5kÖPQ, w4XºVÔò¸»»›¾¦àÓO?uqqIKK£’Î@ˆÔ§Ð°®–gĈ†÷)9r¤)¿ëÖ­svv–¿Ô3À‘6 €î€·<lݺuTT”úìy{èÐ!ÝHøöÛo]]]cbb¨j€3"lÝ[hy._,=úù——~»`øÑÞ½{ؼRhhèž={ªNÄíìt³µmÛvÞ¼yÔ6À‘6 €î€·ûLÍóé§ŸÊÝš4g óhmnI|ðÁ’M2gddfÖÚÖfÖ›ÑlJN©.wwwùh„ ò-¦ó])­¢Ë××÷ðáÃæïBZÓÛmd»¿òÊ+•$!oÏž=ÛºuësçΩ‹’)²™¢~‹VeÖô@ÙÙÙ~~~´]D €zÈØæ7Þ úýúäÈÈÈ~ýúåääÈhgÔ¨Q“'OV¦OŸ>½ÿþÇÏËËëÛ·oµ‘­åÈø9!!AFY2–~þùç.Dkˆk4ƒ9 ”·ƒÎÏÏWÞvêÔ)55µ´´TFò'N3fŒÑ¹´VAª+,,,¿Ò€”¹6nÜxÏ=÷ÈðOÉ#%™3gŽ^Q.PF†?üp\\œ¤ccc%}éÒ%ݘ³‚FóhUã!Crsseh:sæÌ=zfÖÚÖfÖ›‰lÊb…$f̘a:¿Ñ•ÒÚ(ºœœœJJJÌß…´ ¦·ÛL:U>Í«$Õ2mÚ4%У»¡ßyçI“&é~—VeÖô@»^yÿ5¢"@½D T2$“¡£2= ààÁƒJúäÉ“íÛ·WÒ~~~û÷ïWÒÕF ´–Ó®]»èèè_ýÕè\µˆ˜³@y{ôèQ£³yyyKk:tè••¥¤¥NÔ¹|ðÁ+V\¯ü߯,Sw°jz’öõõ2eŠü•´^aÌYA£y´ª±  @IK Ž<µ¶µ™õf"›ºØÌÌL£ÿ×Íot¥´êP—££cM#F ¦·ÛÈv×­%›ì ÞÞÞ—/_–´üõññ9vì˜îwiUfM4"D €[)¿×®];|ø°î=áe(¢Dhܸ±:ú*++SÒ’¨6R µœ;w2ÄÕÕÕß߯ 7)0gòVÖT}›žžâì쬔ÍÞÞÞè\&ªB½1žnU$%%uìØñÊ•+#FŒ!®aQµ(¦M›&SæÏŸo¸â欠Ñ<æT£ÑZÕÚÖfÖ›‰lº‹•o1ßèJ™¨C•¯¯ï/¿üR£HÑ‚éí6zÕ¢f ‹•ĪU«žyæ½ïҪ̚hJªC‡´]¶)àVº–ÖòèŽ@Ž;æééyáÂIËXW½¾@—î¿:333uBêÿo ÔéZËQÈ,%%E¾Ty«ü¼v‘s¨7¯¬ËÒ¥KÏž=+£zù«~ª7—Ö*h]S e |õÕW}||Ôߺ›³À={öxyy­]»ö®»îR‡¸ze6§ÆôòÔ:R µ­Í¬7­lºÿº—„ú¯{­üFWÊô~¥xê©§–,YR£HÑ‚éå7zMøê«¯”‚ïÚµKo^­Ê¬év½òŽ†Ã‡ç Äv"< @wÀÒZ½QГO>ùá‡JbÁ‚ýû÷?pà€Œu÷íÛ7lØ0%ÃÔ©S  þ–[½G3gÎ,..ÎÉÉ4h:]k92Ô‘L”á_Û¶m•‰nnn’³v‘s¨7¯ ;ËËË9¡~ª7—Ö*¼þúëê} BCCu+o?ùä£E5º@©ºÀÀÀ´´4I'''wéÒ¥´´T·Ì欠Ñ<µŽhmk3ëM+›$ÔÅJBù¿‰üFWJk£èZ¶lÙc=V£HÑ‚éå—í®Þ§ $$ä7ÞP?º÷Þ{ß{ï=ùÔðKµ*³¦šW~Þ‘‘¸‘‚¯¾úªk×®×+oÉЬY3Á&%%)”þ;;;»»»ëÞ’=###88X¹Aý¢E‹toínt9«V­êر£ä—!qjjª2122²U«V&ž}`b˜gÎõæ]¿~½ÌÒ¤Iooo)¤îMætçÒZ…²²²_|Q©Š7ß|S–£.9>>Þßß_ùÕº!£ |î¹ç>þøc5OTTÔèÑ£uËlÎ ÍSëHÖ¶6³Þ´²©%7Ný‡V~£+¥µQt•””´nÝúСCæG ŒL/¿l÷—_~Yyö$Ô߈ŋ7nÜX"Êժ̚htssS¢H¿¯fÞ©¼„Ô+eœ))‹hy,áÑô–`ïÞ½¾¾¾êÛÇ|åÊ•TËm÷öÛo?ú裶´3Ëê¼óÎ;zóR“<ÃöümaQV.g D œ¯‘‚ÛiÒ¤IGíׯß_ÿú×ë•ÿ"þè£Õ%Âj–Ö¼3çÄ|Ûø!9ÒSûŒ=»Éè%D ,·°Ð°±–§G ,Xàåååîîþ /(7t” ñññIOOg¯#Rp«ƒŸ¬‹sè¶Ö¹ßúÿyr­Ë#†—pGC–œOÖ­vèkgßmCÀSkû›¸ÄÀæ)L‘‘/^¼àkÍ!±ƒÝCOmÝI¤€†¥pÛžµ.$8õú=RÐè¡„æÿøÿf•œi€µA¤Ð )a‚ÕÍzÞˆ4þ2pıU_5ä ᎆkBw€–@‡ âzÅ6 2qw4´P< @w€–@‡ Z†|Õe”é‹xJ" 
4€îhyÛW’w*ûßñæÜ‰€H 4€îhy4Üv€H€~hyÐTᎆkBw€–í@}ã)‰  ‘P…H¨B¤TᎆkBw€–í@}ã)‰kBw€–í@}#R ŸZ´Uˆè§ ¾ZžŒŒŒ°°°;*Ibß¾}U'âvU§â2½M›6óçϧ¶Î@ˆ°atl¶åùå—_ÜÜÜ"## *EEEÉ[™¨)ؾ}»‡‡ÇŠ+¨j€3"5Ãdt¬«å9räôéÓu§L›6mÔ¨Qº‘‚M›6yzz~ýõ×Ô3À‘ÀÆyxx>|XwмõôôüýDÜÎ.!!¡]»v;wX"@½°··/++ÓRZZêààðû‰¸¤×¬YCE°4D €záîînúš‚O?ýÔÅÅ%--º`QˆõbĈ†÷)9räï'â•÷)X·n³³³ü¥ºXîh°&t¬¨å9xð`ëÖ­£¢¢ÔgÈÛC‡éF Ä·ß~ëêêCUœ)¨‹ ;`™-Ïå‹¥G?ÿòÒo ?Ú»wïÀ›W ݳgOÕ‰¸n¶¶mÛΛ7Ú8!RÀ†Ð°â–çlúô±s’<à ·í¡¢Î@l‘ý4Ô¬å¹|±ôð‡k7=ðgùhu³'7ýH-œ)`Ãè4Ä–G¹ˆ`Í!_u}6©Í£qÝò×ñä€3"· ·°Ð¸]-zÁº6nzèÿ%4X† „ Î@ˆ Q^BjBóÞ«›tKn7(Ö.è÷WㇲÆÊàAÒÊ‚4iÒ¶&R Ê¹]·†Œsè¦F ’ïzl]›G‹²r©¶‡H`–k—¯dÎüd]›Gc=k”èJ°€M"RÔŒz‰A\£‡ÝC °1ÜÑ`MèXNË£\b°þîˆ$÷ Î@l OIXºØòçž8´ öÒo¨+€3Û@¤@? ´<hª)ÐO-Ú*D ôÓ@Ë€v  w4Xº´<hêOIUˆ€*D @"  w4Xº´0\5£+bæêK†E‹ùúú6mÚôÞ{ïݶm[LLŒ¿¿¿RYYYj¶¹s纻»K©&L˜ %Ô+sÝnÓ›C—V]™Y›Æèújåß¼ys—.]¤œRÚÅ‹›¿²´D n?n$ ;`i- ðŽ9¢5Ü}zÿþý?.KîÛ·¯á} jqMÖêK¶ðððÇË*Ïž=[VdРA²E”·½{÷V³)E’˜1c†Þ·ÔíV0½9tiÕ•™å1‘ÍèújåoÓ¦MBBByyù±cÇžþyóW–v€H}eeeZcÝ£Gªo<¨¤Ož<Ù¾}{ÃYŠŠŠ¼¼¼ŒŽ Í™]f)((PÒ%%%NNN&æ]·nݰaÃ$1gÎ777å?ÉO?ýtrr²‰õ5QBõ­ŸŸßþýû•tFFFD ´V_²8qB]e­éj‘t¯›0Z’›ß ¦7‡.­º2³<&²]_­üíÚµ‹ŽŽþõ×_kº² R@Ÿék ®]»¦¾•¢îÕã7V¦§§§‡„„8;;+ÓíííŽ µf71ºVß÷âÅ‹ÞÞÞ’ LIIéÞ½»¤eŠáEæf–P}ëèè¨FO$Q'‘­Õ×*ƒá7êIJ¨—¡>¶‚é²™®+3Ëc"›ÑõÕÊ¿sçÎ!C†¸ººúûûoذÁü•‘úžyæ™·ß~Ûœ±nÇŽsss ³ùùù-]ºôìÙ³W®\‘¿ê\ÊÝ ªÝœq²Ö¼}úô‰‹‹ ºñëkù›””Ô·o_óK(ãÏ’’%]PP`ôš‚ÌÌLѳުeæê›)P‹$ Ãk êc+˜)Ъ+3Ë£•Mk}µò+®]»–’’âééiþÊ‚H}ÙÙÙ2²ŠŠŠÊËË+//ÿñÇÕgè Ã,XпÿTTTìÛ·O¹ò_È쉉‰2ï‘#Gd^u.777É\íìæŒ“µæ3gÎ]wÝ-é… zyyÍ;×p±Z%ìÑ£ÇÌ™3‹‹‹srr ¤NŸ:uê€ÔÉŽœõVÍ(3WßüHZ$IL›6M/C}ls"Zuefy´²i­¯VþáÇïß¿_V*%%¥mÛ¶æ¯,ˆÜ~Ü @wÀ[J :´U«VM›6}ðÁãã㎠¯^½*cò€€€fÍš&%%)Óׯ_ß±cÇ&Mšx{{Ku®ÈÈHY¦î]÷ÎnÎ8YkÞÝ»w;88œ>}ZÒ§N’ôž={ «UÂŒŒŒàà`å†ù‹-R§Ë@tìØ±ÎÎÎîîî†Ï>0\5£Ì\}ó#ʳ¤TãÆ“êe¨­`N¤@«®Ì,V6­õÕÊ¿jÕ*™.›²K—.©©©æ¯,í‘‚ÛÇbèÐò 6c;®¤†q%y§òR¯”UÐè54ÐÎ×€–Ç–ÇXŽô$ϰÌYŸ½Ä€H 4€îhy€$/!u­Ë#«›öã=ξÛÖ~ô.1 R`¡¸…€î-€ú $¹…Æÿ¡W¬]ÐW£‡t/1àŽ†ªü>làÅ‹W{%4ïÛø¡D÷ÐS[w6´vH ¡Ë_—çÐM‰|yïð¿'¾h?øÀœ¥eg`m)4hJ˜ ¡eß]ž•¿ß?9¥`㎆\!D Wþº´ÕMºË«!_D ‡;¬ ÝZu¨pÛž5­úU{w4´P<œ@w€–@*É;•ýïxs."à)‰4к åÐpÛ"úi å@;P…H€~hyÐTᎆkBw€–í@}ã)‰  ‘P…H¨B¤hxÃ;;Š@óx´–‚r#ÝZž:–[ÎøœHh,¨‰°–‚òpÝëjydèzíÚ5 ÃDÁnM™«ý–ZÃè,¦£$†ŸÚUjÔ¨QË–-ï¿ÿþüã§OŸæ¸  RÀ†Ð 
å©ÍÐwĈï½÷‘k(‰âââ]»výå/iÛ¶mnn.‡í‘6 €î-O‡©¿ýö[ÇŽ³²² ¢W¯^5k–³³óèÑ£e*ýüü233• K–,Q2E¦ë-üСC...-[¶:thaa¡Ñѯú¶¢¢büøñ’ßÃÃcÞ¼yzÃ`;†k¡÷‘Ñ’+9-ZäëëÛ´iÓ{ï½wÛ¶m111þþþŽŽŽÁÁÁj%H¶¹s纻»Ka&L˜ Ó+ªÑU3¿õ)PM:uÔ¨Q´D Ø0º´<5ŽÈß­[·>ðÀ—.]ÒvFFFöë×/''çܹs2ìœ}âââ‚‚n\ê,“’’úöík˜ÍÏÏoéÒ¥gÏž½råŠüÕ”””(é‚‚£×dffƒ5j¤µ"zi•ÜüHZI^S µjfC—Ôù?ü ;eÇŽ>>>æ|jú>Ï>û,{8ˆ¨}¤@,\¸ðÏþ³:qÁ‚ýû÷?pà@EEž}û”\¯üÝÁ]wÝ­Ìâåå5wî\Ã…{zz&&&–——9r$""B]l=fΜY\\œ““3hÐ uº n  ÞÀpìææ&…1º"zi•ÜüHZIL›6M/ƒÖª™Y ]ï¿ÿ~``à7ß|STiëÖ­÷Ýwß¼yóÌùÔ°ŠJJJvíÚõÊ+¯ðì)P‘‚k×®…††êÞº?:::  Y³f2^MJJR¦ïÞ½ÛÁÁáôéÓ’>uꔤ÷ìÙc¸ðõë×wìØ±I“&ÞÞÞ²u±ÁÁÁŽŽŽíÛ·_´h‘:]ÞcÇŽuvvvww7|öÁõʶjÕÊèoô>Ò*¹ù‘åÙR˜qãÆ©×ÿ«´VÍÌbèÕyLLÌ™ZzÜ]Ì2µ²­n¡´#µµ¢f  ˆhV ’‰&*xÝ"zMÈ W¹Iç•©aZ7 ÊZüÏ<<ßš53ë›Y3ï7ßˬíÄmÛ¶}î¹ç~ùå—[ùMÕÇ’M;··Âœˆàކ §Ô¥9Pû Ò¡š3gNß¾}É)ÐÏÜInY¦ vÓœ={öÏþó3Ï|X)§¥¥Õ%S`bž;wN]Sc+.ãÕšh/—0X«f¬5ÊÜIj”)0g0vëÚe ¤sû—*f®oÇŽµ_½ºÁ;uêdð_ý5Ú¯Œ-¼î[ÆœïW[Cc{¸‰ÝU{,s" €LeàF2€_ëvGCù[YYyòäÉlܸQ/=mŸ¤I“&5ÍÈ2o8™9‹2X“‚‚)tïÞ=>>¾_¿~R–1ú—Ö§¤¤ <ØÙÙY™ÝÎÎÎôÇ988\½zU)K¡.™‚ºoCmM¤b:˜¹jƪ¡Õ´iSõzxõu*¬¿“Ô(SPÓ .{ŽÂÍÍm„ 999f~–ÎW¯npíø›µ_雵Þ2ƾ_c54¶‡›Ø]µÇ2'¢yà1€ÆÑài:!gΜi×®r%—.]ÔkéüVYzEEEJùüùófö kÔ'4VÿÈÈHåJxù ?™··wxxøÅ‹+**䯶çf°æÚÿ¸¦§§ë¯‘Á_k|ËXÍÍϨ5‘‚þ5ÆVÍÌjhuíÚuïÞ½Ú1{öìñòò2½“4ØLAv{•±ûwîÜYÿš37¾9×ÔqËû~ÕÐØnæî ýĉMhl™1vìØO?ýT }ôQ``à‘#GJKKúé'åRáêê*#Õéû÷ï?þüÂÂÂS§N1¢>2Æj²páÂöíÛ/[¶LÊK—.uww_´h‘þb¥[SRR’‘‘¤.ÖXÍçÌ™óøã«wÐ_#- ¥ó–±š›Ÿ)Pk"í/ÛM¯š™ÕÐZ¾|y÷îÝwîÜ™W%11±[·n‹/6½“XY¦à­·ÞRo%0xðà·ß~[¿`ÁÙÈÚû˜¹ñÕñÆ^÷-cìû5VCc{¸™»+) Ñe ¾þúëÞ½{ÿZu#té„ûøø4oÞ\ú±±±ÊÁÁÁNNNê\iii~~~žžž¡¡¡õ‘)0V“ÔÔT{{{åÚò .HùСCú‹Ý²eK—.]š6mêáá!˹aÍ¥g5yòdggg777ýgèo-·ŒÕÜüLòì©ÌK/½¤½[¾éU3³Z•••aaa}úôi]E ááá7ÜILßS g ôk~õêÕ—_~Yy<ÔKôËÊÊfÏžíîî._DHHH6¾Z0¶ðºoc߯±ÛÃÍÜ]€LÖ£¼ øôÚ¯Ê.ç³),4‰nël)åF2ÓÍÁÅ”#ß}3¶í°Ü]‡ØP–t.B¦œˆ )¨N0Ø”ŸX¹Ùs„¼µÁai2'¢ˆd ´¾~AÐë›Xoß7¢‰_´ÓÒ8@ Shtí´zÁúæýå¥ ‘v}IàDq€L ‘¶Óîx4²É#JŽà÷L_æªøôy+¥¬üŒ2eÊ”oV™Qd .n$PšƒÂÌs?ÎZ¾Ñ%0Ömèú¦ý”dAôþyG3Ù>8@hD™´*Ë+²¢O‰j1(Æ%dÀÍB¦`ÙÔK "íü¢Z "YPGd Ö@½Ä Æm(É€º S°*…™çŽQv9ŸMP;ÜÑ`IhyêOIXšDÄúF¦@; DÄjd ´Ó@ä@¨F¦@; DÄjÜÑ`IhyêOIÕÈ€jd nëÖ·±¡J€†Õ1d4„nyÃ韓)€ÆÞWµ”ŠÖúÒõ:theeeìëWÃDÅnMoø)µ¨†ÁYLgIôßµ©bkkÛªU«^½z½ñÆ999À@#Ä}ÅyÈü¦Ö¥îåøñã?þøc2–ž)P 
………|å•Wî¹çžÌÌLbÐØð¬2DÄ27!Spùòå.]º=zT¿#zíÚµ÷Þ{ÏËËËÙÙù…^^¨ŒôööNOOW&Xµj•R12^gáÇ rqqiÕªÕèÑ£sss ö~Õ—¥¥¥S¦L‘éÛ¶m»xñbn°†þZè¼e°æÊ”¡¡¡:thÖ¬Ù<°k×®°°°Î;;88øùù©A&[´h‘›››TfêÔ©R1ª\5ó«Q™Õœ9s&NœHÌh§€È€8@¦ Æ™ù›˜˜Ø§OŸ²²2ngppð!CN:uéÒ%évΜ9SFJg^:ÛRÈÊÊjÙ²e~~¾”—/_.=j…÷èÑ#!!¡¸¸øÊ•+Ó¦M›4i’éLÁܹsÏž=+K0§?l¬¿m°æÊdÇ?yò¤tÚ,X õ1bDFF†òrРAêdJM„æÍ›§ó)f®š±jÜ‚LÁ™3gÚµkGÌh§€È€8@¦ 6™ñÚk¯½ùæ›:#}||Ž;¦”þùgOOO)lÚ´iܸqRX¸p¡««ëʕבòôÓOÇÅÅ™ø ¼¼KÌ2µÏˆ¥K—>ÿüóêÈ>ú(00ðÈ‘#¥¥¥?ýô“ò£ƒ_«~wо}ûeË–)³¸»»/Z´HáíÚµ‹‰‰)))ÉÈÈ RÛ¿ÿùóçž:ujĈêxéÜ>þøãêÝô»Á®®®Rƒ+¢ó–±š›Ÿ)Pk"…wÞyGgc«ff5´–/_Þ½{÷;wæUILLìÖ­ÛâÅ‹ÍyW7/¶í°Ü]‡øFÍýâmlØFK©¨þ $Ê ŠO~ºqË}c#l|×7ïOš f_<™–‰ûŠ ò )øö¡ÊEëFµxTÆGµ M@¦@#Á³ÊyÈT1êE×M‘1¤ €vˆ<ˆ7S°áŽG#O(C¤_æªøôy+¥¬\ B™2eÊ”­»Ìù:zˆd ª¿˜ÂÌs‡^]Ýzpt«5Y°ÑipÞÑLv\ "â@ãʨ7¨,¯ÈŠJØ1àïnhÞŸd4*ÜW ‘q€LQê%‘M‰j@²€º³øà«—ĸ %Y@YÏ£ò 3Ïÿ(¢ìr>_*µfÃ&*Ë»£! 1£9@ä@¨o–ô”DöNÍ"â@}#S "â@52Úi ò T#S "â@5îh°$4ˆ<ˆõ§$€jd @52 ™€ae—ó/$`01”])`?;D€£Øú‚ w4&MH„/ƒ‰!gçö€°Cä8Š­/˜)˜jì}¸äBbƒÎp,d çëa‡Èp[k0á×S½4'••t†Ÿ"8_;D€£ØZƒÉMËØØØÜôékºLý­lb·…¬LNNNMM=qâDvvv^^» @Ø!ò€£˜£Ø ‚É»â~ø¡ƒƒƒüm€™‚Ú¥H@ýMib×Ï Ž‹‹Û±cǾ}û¤•ÍÍÍe·;Dps[A0¹qŸ¹W¯^aaa={öl€Ýo2@c›Ø5o~°nݺ˜˜ieSSS³³³Ùm‘ÅÅVLnÐg–  ä ¤¬}ëóÏ?ïØ±cÓ¦M;uêôé§ŸªÝo)„††vèСY³fÝ»wONN^½zuçÎxêÔ)øâ‹|°yóænnn'Nüå—_t¦ùÏþ3jÔ(WWWYfïÞ½#""”wµ”1!!!îîî¶¶¶òòøñãAAA...­Zµ=z´’#ÑŸKHý½½½e]äïŠ+´9í€ÆžAÛÄ®žµ`ÕªUÒÊÆÅÅI{—™™Énvˆ<à(æ(¶‚`rƒLÁôéÓeN)|üñÇRVÇoÛ¶ÍËËkçΉ‰‰ÚžÿÀ=*o½ñÆNNN>ú¨úòÉ'ŸÔÏÜÿý;vì ²³³Ÿyæ™ñãÇëLÓ»wïàààK—.•––þðÃcƌљ@}pæÌåe=Š‹‹¯\¹2mÚ´I“&œkãÆîîîRüü|ù+eÙR4ö Ú&6rîibÃÃÃׯ_/-ȉ'Øm‘ÅÅVLLe ÊÊʺuë&s)K/ýÞ{ï•1Ê[ÒùW»Ó"&&FÛó?zô¨R–¹t^:99ég ·ÔÄÇ«çëÀ-îcXtä‘!$äU‡fòW糑:wöxë­¿í1Q "8y¸õ‡°zœ::6÷ô¼{äHÿuëþyíÚ›UËË|øá‡&LÐ)cd¼¶nÝj⎆7=S0lذo¾ù¦°°P>.44´OŸ>ÊøŽ;nÛ¶Mýñ‚΢|ðÁ¢¢¢ýû÷wíÚU}Wg®7Jýe-ÔuÑÞÑ£½Në2uêÓÿú׫/_~ù/í¦M§ŽY¶ì íˆÓv^½º·_¿ž/¼0"==ª¤ä‡K—’¶lùø‰'ú“)~¦Àr#2ôêÕåóÏçõìyŸÁÏ’ètàÀ:ÿ>S¦ûÌËËËàSoz¦`Ë–-ƒ rpphӦ͘1cÔG-FGGwèÐÁÎÎN}J¢v9ìÝ»w³fͤó¿lÙ2õ]¹Ä'Ÿ|âíímoo¯ÿ”DŽv4ö:­ËæÍK{ÌO})'¾Ÿ|òV·nÔ1£GÄÇ/“±c1AAC\\Z·jÕBFæä$hsó e–ØØz¨kóæ±Û­\ùŽÚv®]»àÁ}d¼››ËĉÊÍMT?Ef‘3oy«cÇöï¾;¥¬,E+8øîîmmmmuj¾`Á´Q£jÔT“)H¦Àr# ~9`@/%_ ecæÿþï«¶mï"SNÔ!¬0Ê\².rR¡¼4³Â'S¦”MѩӽM›ÚK­>ÿ|žúAÆf9qb“œÏ¸º:7kÖ´woŸ/¿\xüÉ÷)ÐØë7ö……É­[ß)¥|éR’4`¥¥ûœœZ* 
XEÅþ{îqUÞíÑ£óŽÿ.*ÚsùòwÓ¦›4i´ÁFWÎî¾»M\ÜÒüüï¥ñ{öÙáêd÷ßßqûöËø³g¿~æ™'Ʀ¼µk×ç²pù+$³<þxß÷ߟªÎððéÓ[õÛÅîÝ;íÞ½ŠL`‰™Ë<2LŸþÌ_| ü×TÊ&2Ò±!SNÔ!lð`”Ó 9©PÊfVØÄdžžw'&®” '$¬ððh·mÛrÓ³ôîí³dÉÌ‹w–”ü°wïê1cßpÉnÅ)ûcùmÙò±6múè©§•Âðá•_îß¿NûOu¸re—»{[ƒmgß¾=Ö¯_l°m>p`]u«öóö»îj­”Ù·o­öôZýq£ÌuèP¤ÁvÑÁ¡™4«d K ;–y¤;Ô­['9¡—²„ {ïm+cô¯jNM<øáɓǒ)' ê6x0JWÜѱy*lb2Ù ê[7ôéYZ¶l!•ןÆÄ ’)ÜŠÆ~É’™¯¼2^ ¯¾ú\pð?¤°xñ åç…RPÆÈ™¹e̘Á®®Î¿?­¶‰Á¶SÚÚ_~I4Ø6WTì7ØZ·iã$K“AØÚÚÊx)¨Ó(gäd +ËXhä‘óþÙ³ÿ¦¾?~˜ŒÑ¹SZ³fM½½ÝgÍzAùŸ*™pòÐpac™‚;îp¨Q…MLvéR’:™œ¨¸¸´6=‹Ä Y—_ ZµêÝììoÔyM¬ uf .¦9ñMæªø¬¨„+i'9üà¶7öééQ÷Ýç)…‡~@ÉÜïÛ·VùµáOô?|8Z™Ìß¿4fYYÛÊÊRŠ‹÷ªM¦NÛ)-¢±ÆÞØéók›F3ï į‹ÎXhä9Ò_ï¹ãþ7œ‹L8yh ‡°Á·¾ÿ>L½A©™61™±L±Yd8t(rÑ¢écÇ:;·R“,&VЪ2åÅéóVƹ?)ë â;9þQDeyÇ!ÜÆSvv?ý´ÞÕÕYÉÜ——ïwrj)í“öîD-Z8æç¯”wîüLmáìíídzmÛ©}N’9}ÿþ½BCg×´±ÿý©ÜѰܰc‰‘çÂ…­[ßyåÊ.í%Ä2FÆ“)'qò ÿ–Tcèоÿüç+5ª°±Éô}ð製MÏ¢226KH¹á ZO¦àrêq5Gßé©ä¿Lýá¯3vç>LùU·ñ…™ç8àv5öÿõ_cFPÿ3¦üÚpæÌ‰/¾¤ŽyðAŸàà&§¤|ѵkµ…ëØ±ýW_-W/LHXѾ½[|ü2ý›k­¿ýöS—Öaaóss vËË?ýiÀ ûââ½<Òío•žUZºïÒ¥¤­[ÿ‡§$”)°¸Èòê„ Ò)cd<™pò`'ê[r敵mÆI‡\û”D3+ll2ý;ÊɉéY† ë÷õס² 2Ëòåoöésÿ WÐJ2SŽDÝ9Hªþ­ß_dtÖ*+&l«ÏHy7¶Ý$ àv5ö7K‹¥œì*ÃâÅ3›«¿¿•áÀu½{û4kÖTš½¥K_W[¸¨¨%:´·³k¢Ó«W™ÒËëžÏ>›{ÃÆ^iSnÑÂQ>T Òšó\⢢=sç¾èãã%ŸÕ²e‹ÇóÛ¼y©Î†µÏ4"S4¨LÅEžž=ïûæ›OtFÊåºe2àä¡áŸ<¨g×Ç8bÄ /¾øàÚµ5­°±É¤:ÛÛÛÝÞÞN¦WWÄÄ,ññË zHêÓ¦Ó˜1ƒ326ßp­!SPv9?¶ÝRï=_¹V¶ßàŠ•åïIò¼L³ÙçÏ9çÎËË+))á°€[|ÊÞ˜Îבà(¶â`Òà2g„H¥ü'K¨É‚-]F\Ÿòµ`Y¥ììlò@cÏù:@Ø!òÅ Ö–)¨¸ZªüîàÒÁM7\½¬˜0™rý=Ã’’’RSS333sssI=çëa‡Èp3XO¦ ;þ{©ñöãÍ\Ã8÷¡2ýÆ¥+dÅ”dA^^Ç'ÐØs¾vˆ<G1ƒ•d Ž-Y+5>ôê[f®áeú/^™­$ ²³³¹¬hì9_;D€£˜ÁJ2éóVJÓæ¾kæî›4S¦_ýÂëkÖ¬‰‰‰IJJ’Õã² ±ç| ìyŽb+ÉY.5þqÖ;f®aò_¦Èôk§Î [·nÝÖ­[SSSsss9D€Æžóu€°Cä8Ь!S• 5N |ÞÌ5Œï4\¦_÷þ‡aaaÊeÉÉÉÙÙÙ¢@cÏù:@Ø!òÅ Ö)(/(Þà0 ÒÞ/ÿÄ׿®Þ]C"""ÂÂÂV¯^”””™™É! 
7«±?²Dâ-ƒÎ ›…óu€°Cä8Š­5˜Ø4´}뇿¾+•Þ=v²é4Áµ²ýßú»¾zžI¸€zjìL œ¯„"ÀQl•Á¤Áe в.DÝ9ÈôÝ ®•íß3ñeå‚‚Èðµd  >”])ÈÙy@¶…¬\?/xÍ›¬žµ@چ߆°Õ4@a‡Èp[_0±i€»×ùm{"íû*Wèÿ áÒÁM þ¯§ ûG.ú822’_@}KNNŽ‹‹[·nÝ*è‘Í"G6 @Ø!òÅÖLlæ^u~Ûž -^OØû%>¿ç™i þ“ÇMÙ>`üoWJ8Žüç²È*²nÜÑêUjjêŽ;$ÌJÈ ‡†lÙ,²qdÑ„"ÀQlÁĦÁîXY?Ý6rÆúæýtMá8 røäÈÏÕ4ADDÄš5kxJ"Ô«'NìÛ·O’¸¸¸õÐ "›E6Žl" €°Cä8Š­#˜4ÜLA^^žTwç¶o£__øe· ß/»]÷Ú‚ˆ5_¨9‚uëÖ)iå‚‚¤¤$™Efä°€›+;;[ljjjrròhÈ‘Í"G6 @Ø!òÅÖLn¦ ¤¤Dª«\²²aÜÌßµ£¦†UQ~Pö»5kÖDGG«—LÈŒ–psIã‘››+1633ó4dƒÈf‘#›ˆ ìyŽbë&6 |ß’ªKÿóó³¯_SôÊš5kþýïZE +V¬Ðþ²B&&« @]4èLAIIInn®ôÿ·O~/ÂÆ7fÂë111aaaŸU‘BxxøÖ­[“’’”4LLV«Í(É‚¼¼¼=ÿ‰°ñÝ>ù½ää䨨¨ˆ*RˆÕþ¬‚4Vž)Pzû“ß}¯/ÍÎÎNJJRî¾ …={öð-n¢Ûœ)¸x }ÞÊ þ“#l|寔§~ ô'–eò½`‘™éØGØøÞÜA–É÷ €Ef ¸¦25¦\z \,päø ¸‰P¦ ///'''+++##C'8õƒ_ù+åø?:b„,D% ä~‡) S™2) S™2Xu¦Àž}À­A¦T#Sª‘)ÕÈ€jd @52 ™PL¨F¦T#SªY@¦ ¼ x«ÏÓ6¾Ûü½²¼‚Lõ§¡g *Ë+vz-ÂÆWLû™êOCÏœaãÓæñÌUñHù»7—’) ž4èLÁ‰ÿÙaãiß7w×!yyrÍ–ëWØùíX´"¹JJJJII ß"ÖŸ)8›iß7ÂÆ÷LÄ7ò²¤¤$///eNèõÜã€o–‡ïÞ½;%%EF’,ÀÊ3SŽDÝ9(ÂÆ÷ðûŸ«i‚œœœ¬¬¬ÍO½"ã×» IŠÚœ’’"#I`Í™‚ÂÌs±ížˆ°ñÝ7i2FMddd$ïú>Æ÷Yy7Úó©’d¤’,à»À 3ê3§U–WüZuAš&8räÈîÝ»¿ßžÝq¤L³é¡ 'þ£$ ¸¬kËT–W$øOްñýªÛøò‚â_ÿÝš&G«¤íÚÕæ1™rÛS3ä-™€ß `m™‚þú®tþcÛ=Q”uáWÍí Ô4’8[娶ïÖß1ðúÕS?ñܰ«Ê¤Ï[)Ýþ¨;]N=þëïb¨M(å­´µ›#ìüd®Ý¬Pß"Y€Åg N¯ýêúãíûfǯ“&P.(Ц Jª(¤üóeÆ;¿CëâI` ™‚ ‰"íûJ‡ÿä§ÍI(s©“}?uáõç&Þ10ý›ïH`Ù™‚¼£™ÑNC¤«èÕ¥æ§ t’Û‡O—%llÿ§c{,ÀR3WÏÿ²Ùëúówõ[âàiaºç¯$ ~ÎÊÞòðs²œ¸nãþ“V= _0–”)øîəҽß>àïê3uÒæ<Ô@IœM?¾É{ôõ¥M˜­^†ÀeXR¦ ïhæwOμzþµÃ¯“&—æüŽ@™÷xâžmƒ'ÿôÝžÍ J¦àYƒªßè¤ Ì¿. 
v×#€†ž)PÔâ^Ú{(È`Ù™‚¬ßÕ¢“¯}nB­™‚”)PÕ®‡¯& ê¸ÈÜþLV­»÷J²à¦, 2·MÉÝÄE‘)ÀL6l "Sª‘)ÕþZùÃc”­ IEND®B`‚python-watcher-4.0.0/doc/source/images/sequence_create_and_launch_audit.png0000664000175000017500000010136113656752270027245 0ustar zuulzuul00000000000000‰PNG  IHDRšæu]¬ )tEXtcopyleftGenerated by http://plantuml.com09›zTXtplantumlxœ…’ßOÂ0Çß—ð?\|‚‡‘± "Q uÀÄ_¤l'6a¹]Á?ßvL`ˆ1}i¯÷ýÜÝ·íd,ˆu²¨8v‰ˆS‚nœH%3&aN6\ €{ 'ï‚£O$¸ îO  ëâ(t,"BÁ®€Ë<0eL– º¶°²vÖ sX8~~ÙªKA"AFÊj{Â4Zm¿Ùö[pÛ7ÿÈ÷g5§:ÈRMBl­—3͆PsÆKT½G˜hÅ2AÓÀJRªT¼½;kº7¦íg$;èÛÐiÔÏëÞôâÔyžH¥¿¾— µt«Á €IDATxÚìÝ \TUþøñAA‡- TÌ Pr¡ÝÐl["©eUÒEÔL«¿e¿ým­Ûî¶m¦¬™è/HiÅ6wµñ!·UÌL­H1AžRЀñ TðônwggæÎŒ8>ï×}ù:sç>œ{îùæ~½sGs@U44PÒ@eHg•QA:ãLvþ‰”OK“·”¥eÏ=Ê9 “³ßtÆå õyó>ÈÔ=–¢ 4œ¶Ü=¾hIJóå+œ<:';MgœË)ÊÔù11fß“Ñ_ý¿Wö>•©%ÍüäÞ'õ¥?pþè„ì1q&;?íöGS4ÿ š|2+¥¹ù áT–ž´Í?B¼›á9šŒÝ¥3.«Íð¢ Ü?í›.}c”˦Kµûw…=+–Ùìÿĩʓ555 œK: »Kg|%>E˜2M)—!g4¤{4²~W\\\QQAR€N¾ÒW.6¦Ý>,Exöà& ¹ ù['bÉuwŽÞ½{wNNNiiiuu5 :<ûJgTlù"E¸ãá)VsÒ$=tãÒvîÜ)e4jjj8©tlö•Î(|ç£Mà¡Wÿlc:cïQbù3oÆ RF£¢¢‚4èØì+‘7oEŠ&0wî_lLg|=c¶X~Õô?¬^½:==}÷îÝÅÅÅÜ @Çf_éŒüØ•)šÀo_{ÓÆtƾ'£ÅòÍœ“””´fÍšmÛ¶åääTWWs^èÀì+Q––•¢ Üö¬éŒÍ¾cÄòkÞz7))IºAcß¾}œW:0ûJg\¾P¿^ûpªcPmñ?­æ2Nf¥¤hS{HIIIJJZµjÕ† vïÞ]ZZÊy ÓØ[…¾úIÑî}"Êr.£éÒ7Ÿ>8ùZ:câ+©©©ÉÉÉ+W®\·nÝÎ;‹‹‹9¯t`v—Ψ+«J»}˜å'h4]úfßÔÿ•nÍH]ùé :Ö©rûþTLJR4{2ýÖÉÙƒ›þõÈTñnŠ68uÑ{©©©|Ù€NEcŸÕªÜ¾ÝO¹–³èôé°§÷>9++dÚ_úäÁ'¯Í“Khê„ÔëÖ¬Yã@è<4v[³²o >‰x%µÛÐç/äIœòXTê‡+¥\FJJÊêÕ«ù¡V:ûMgÔÔÔ¶ý_þ»öÞÈMàÚ€'Öü~AÊêåDÆš5k¤\†tkÆîÝ»Å*bEÎ+˜ý¦3***rrrvîܹ~òìMàGãg&]—|]ÒV¯^½añ˜XX¬"Vä¼Ðiì¹r555¥¥¥999›Ÿ}ýÚÝ‘¿Y½zõßþö·÷¯…>ø`Íš5éééR.C,Ì­txvÎhhh¨®®.--Ý5?E˜>õéééIII¿NV®\¹mÛ¶Ý»wK¹ ±0·fÐáiì¼~ 555ûŸ¢ Ü5ß¾}iii)׉BFFFNNNqqqEE…XŒ\Fµ<ôÆòMà×XZQQ±{÷î׉Âþýû«««IdЩÜâtFÕ®yóVX²B¢R4â_QÞ5ómÃÉta±MÎ+Ø-NgäÍ[‘¢ lÝIl“ó @ÆÝ@eÔñì é&é¶‹üÿÆ) ³±£tFMMÍ©S§ÊÊÊŽ;f”³Ø5óíM øW”·ü·|b#bSbƒ<(€†tPÒ@eHg•!T†_6*C:¨ é  2¤3€ÊÎ*C:¨ é  2¤3€ÊÎ*C:¨ é  2*Hg\¾P¿ÍRŠ&pÇÃ/4_¾B:€NÎÞÓÍ—¯ìÿûM 4˜õ¤3èäì=qð•øM`zŸ‘¥É[ÖkåÏÿ´”t:˜Kçj«v`²0]:^ÁDÇAÊÔvc'”qŒ–Lª‹ì:Qü×õ)šÀTLJª÷/®Þzí®A;}°ïºìì솆ƨˆXù$&³Ó©ÏÐ+˜è H™Ún¬à„2®‚Ñ’Iuc‘ý¦3Ê3v§:>$ŽäDʧâeCCCMMMöœÄk ç‡?]¶rïÞ½ÙÙÙb& tŒQ¾ðÝwªv¥0M…ñïtæt½‚Ž‚”©}Æ 
N(ã*-™T7Ùi:ãLv~ÚíÃÄayëC9—qêÔ©²²²Í¿þ˜¿®×ˆÝi›³³³ÅL2裼ˆÞææƒLFÓɬ”ΜΠWÐ1@2µÏXÁ e\£%“êÆ"{LgèKÈð ÇðõŒÒ9—qìØ±}{¾H|Z¼»Áû×_eí3¥Œ#å;ðˆ¶=~ž}ûrrrŠ‹‹+**:CÈÓ+è H™Ús¬à„2®‚Ñ’Iuc‘Ý¥3äŸeÝ6«ùò•«×oÍsùùù{÷îýbÇ® Ɖe6ýbê±â奈7h€Q¾hëæÅeffîܹó믿ƒZuu5½‚ŽÑ9;R¦6+8¡Œ«`´dRÝXd_éŒæËW²B¢Dí?¹wÊå õWüš‰œË ®ËÝóeZŸ_]KÛüúñ–X€¯œ€Q¾h«ÿôöš5kÒÓÓÅ –““SQQA¯ ctÎŽ‚”©Æ N(ã*-™T7ÙW:ã«ÿ÷Qõ Ï𺲪«ÌsRæ¢üºÂퟯûÉ#×îã˜ù¶˜ÏC4À(ßG´U¯-HNNƒZffæ¾}ûJKKétŒÎÙ1@2µÑXÁ e\£%“êÆ";JgäÍ[!êvû°s9EWÿûñŸ†¹ )m!½•ûÑæ”®Ab­½o ¿EFŒòoDKûŽÑV®\¹nݺ;wÓ+è³c€ ej£±‚ʸ FK&ÕEö’Î8þÑ'×*íøPÅ–/ŒrÒ­†¹Œ†ë¤²þãÚ/Óv :´f  0ÊóéŠ^AÇR&ÒŒ«`´d"Ñ~]<Õñ!Qé£ïo´%—!­%/öÅÌØk ùÉ#yŸ~NFŒò-˜4 Ÿ®èt €¡»ãU˜tã*-Õ2`Z­¤=E+ÖMe錚‚Ò .#D½ºÔö\†QFcǘ—Å6ö{¬ðËd4ÐaFy__]nîzùåúõÿ'F ñ¯<çÈ‘ wß}×Í,m18Ö×óÒÏ~6 {÷n={Þö`fæR »Sªé Ó6QuÇS|ü«Zm7ñ¯Ñ¾$¢Ã èõç??_W·ßB5øØ †îVP9 »{{÷7.dÍš…MMZ«&jOg(]AA¦ ‹™†Ç{îÜç¿ýíT//O''GñïìÙÓΟßcËhö ¢&ôûßÿ}òôé]¤3ÀG ˜v’ÎИC:ã†]¬<½ÙçÚO®î}âµg7þ;—!XNOH“e[ùŒØN潓¿ËýÏ* Põ(?sæ¤ÿû¿ßÊ/ŧñhÖ¬Éòœ„„?¾´Ÿ!þâÅ/‡<}zD^^ZCÃWgÏîÞºõ½ðð`Ò­ò·_½Cšî»ÏïÃç üS³ûçÀ5!!DGO"†îvþt.ĸ}âÄ'©©‹Ä5ù¨Q‰¨$aaìò÷÷Ùµk…á̬¬î¹§¿|¼zý>±–ø›XT”!ÚVüûÜsㆠñ7MÚš€J X_ÿe~þÆ^x<,ìAÒà#ŠÌÎ|»\Lg|>v¶¨ëއ_–Õ(—aËO–Hò¼¢M¾®mmêëò Ü Uò›7/ýÕ¯‚ä—âÃÐòå¾÷Þ»å9&„nÙ’ ……é‘‘#zõº£gÏÛÄÌS§²LÓ®Ò*ïþâ÷tïÞMüµX±âMyDûè£⣕˜ïîÞkڴǪ«ÿó¿=bñN¼5`@¿¿ü%úÒ¥ly­¸¸ßétF5_°`Öøñ¡74€’ΰýo¿z;†˜\ûðÃ÷I¢¬Ô¾ÿþÞ¤3ÀÐÝžjkb-q,bH—^ÚXa³‹IKЦ¸ûœE­>üpž¼#¥UŠ‹7‰¿&nn®Ýº9Ý¿ÿÚµ±V°ÒÆ®?~[Î×KÓèÑC׬Y(·ç¼yQcƽë'?Ñ*½+ϱЌŒ«à#M ˜–‡)¥JZŽYʆòõÕ‰áTüû·¿½aX1 GjK:ÃòÞÿ Ÿ;Å~Å`.ê oǰЂ¦Vk:£¦ ôó±³/Vž–³F¹ ñÒ–¯HëíÚ¿}xÔáÏ÷ßк€ÝŽòzý¾;î¸]ü+ÊgÏî#xcã×..=¤üÊ•oî¼ÓMzwР;wþ­®nÿ¹sŸÏš5yÆŒ f‡*ñ7£oß>™™Kkk¿åÓO‘ûÙÏìØñ71¿¼üŸO=>eÊhé­={>ÿЉUFŽ|è­·fÊk…†þòøñm¦ÃP@ÀÝ{÷&“Îh£¿ýêíbzùå§ÄGéÿ[DÙB:Cü $†îöŒP³±&s1¤Ke+la1oï¾»v­ÎÊú@|ˆß¾}™åUÄçøwÞ™}æÌg _}ùåªÇnõÛ(aaìçTœˆC‡R¥999)?ÿ¹¯˜)7‹¸6ûüó6¸{÷?Ä!X-Ü!®î¢¢žïΰÎPjFÆUð‘¦íL ›µPIË1+/°aCœNç!§¦føW”7mZbõHmOg(mAÔÙÇçNiÿŠ!Ýl:£M­ú_6¹úã×LŒr¶ßaѲ;;;åõ« ­[ß1Hýú×JÿŸ#}¥ð›oÖæ³åéüù=bP3;T=ôРuë›¶XóŸAääŽÞ½ïÊâsÒ×_døKþ£XKþôf4iµÝÄL:£z…z;†øŒ">Ö‹¿Í¢,zÈ]wyˆ9¦7mŠ‹áÃ)>¦“ÎCw{F¨ÙX:»ßP…-,&àÓÆqÆýÂò*=zÜ&*oºŒ…l‹t†Õ±kíÚXùóú“OŽJIYdØ,âo¢é.Ĺa- €–¿ë.êĉO¬¦3”š‘q|¤i»ÓÂf-TÒrÌÊ :X ¡òü ₃ï³z¤6>;ÃÂydˆü,<é”™Mg´ ©;T:CÒ‚ç_>wCB:jåßygöo~3E^}õ™¸¸ß‰ÂâůHß!iŽ˜JK·>þøp77Wi0êÚµ‹Ù¡VŒ°fŸ&ýç’Ùá²O±51 
b¾(ÈËHŸíHg´ÿß~•v ñ§÷õן—_ŠOÿòcùOi·nN¾¾º×^›.ýo é 0t·[„*}:—¿Î`c…-,vöìny1ñg¢W¯;,¯"†q,/¾™œü—ŠŠOåu-`[¤3,]Òÿ!üÓcÇ6‹I¤ÇZMgÜ~ûO¬€J'HœÄ¼¼´±cyæ™±VÓJÍȸ >Ò´Ý€ia³*i9få\]{~Òe1Çê‘Ú~w†ÒÄ^Œ†q³éŒ4uÇIg”ý¨™Ã_EiñF»åŇ•ŸþÔ[~ùËŸK™Î¯¿þHúJaxxð‘#¤ÅBB_YÙöK—²ëë¿4Y¤I|p´úXù²I[ÿíWiÇ7.ÄèÿÄ[þ¬Ò1ÀÐÝjö­/¾H’~ic…-,¦”ÎPZEL‡¥.ZôòO„‰Ñò……l‹t†-cWjê¢èèI/½4QþOW«_6‘Ðâgþðÿ\\z˜}×ðÛ.JÍȸ >Ò´Ý€ia³¶TÒlÌژθ™Þ–·`c:£MÝqÒ²–¥!äŒÆMn°ŸQÞËËóðáunn®R¦óòåoÄg1>'é¶Ûœkk¿ÊŸ}öwyÔptì*–7üˆiøÛW¶ [ÁÁ÷%&¾~£Cü[oÍäQ mÚ+ÔØ1ªªvÞqÇ톿M(ÊbŽ˜O: Ýö0t›¾%ª1jÔC þæ†*¬´˜é—M}ô~Ë«NÇŽm#†Õlõt†cWSÓÀÀ{Å$ÿR£üVLÌKf:gÎ 7™Î=J¾‹[NžÜ!¿õÍ7k¬6#ã*øHÓv¦…ÍÚ^I£˜5ü²Izz¼ápjøe“¶Kg}ÙD”o(aãЭât†¡ç ¤ŒF«l °‡Qþþçñ Båÿ’>Íž=íÅ#å9C†øÇÅýN¯ß—ý±áà Ðï“O–Éw|ee}ЯŸû–- JO2wþõ¯÷{õº#))¦ºz×… {ÅËÇ{Øê8X_ÿåƒÞûüóãóòÒ¿>{v÷¶må‡Z[÷o¿ê:F|ü«S§>f4SÌóIg€¡Û†nù-1n—•mŸ¶ÅGOÃß´±ÂJ‹™> Tüi°¼ÊèÑCÿùÏDqb•eËþôÀ?³z€­žÎ¸ù±K—8Àçž'NŸh[ñ¯øûØ¿¿sç>oq:Cþ²‰ü@Ä)SF?õTxyù?EƒìÜù·Ÿýl€Õfd\iÚnÀ´°Y«•TŠYÃGŠ!TlGN ÚvéŒÌÌ¥bìÚ½û¢nâ_Q¾¡t†C·ZÓ ÿ­7E:ªå7nŒ£€ô±Iš/~ÅÙ¹»á€Xsÿýþݺ9‰áléÒ?È£FZÚ;b éÚµ‹áœûîóKúøÜù÷¿Ïµeà=CCyÛmÎb§¢ †[>~ÕÕíŸ;÷E±¯=nûÕ¯‚6o^ªôð!Ò-øÛ¯ºŽ1xðO?ýt¹ÑL1Gº-“tºoùÐ-ÉZíµßŒˆöñÇoË÷Ø^a¥ÅD!1ñu__£cW±¼| V÷‡ û…¨OŸ>.?>üرÍV°ÕÓ­2v?¿çÕWŸG'µ’¸«¬ü—-×Jî“~ìÖð)âò@\I¿ì(zËG-°¥WÁGš60-S–+©³†õ‘‡SÓjµ1aáQ ¶°|ùŸ½½ûJ?ÔúŠC°=aãЭÖtÀ(o˧™Î6‘ΠÐ1@2µÃXÑn'ôûï?ñôìýÅIŒ«`´dRõ´wo²¿¿Ï­‹Z-¡ÑhZ}ùÝ&À(ÏU+½‚ޤLöœÎÓÖ­ïyyyVWïb\£%“º¦iÓ;|x^¿oÿþUƒ \´èe{Og¼ûî»Z­Vük‡éŒ–å;È’€Qž«Vz H+8¡Œ«`´dº¡)))æÞ{ïþÉO´÷ÜÓ?.îwF¿ÉjéŒûî»/))iðàÁv˜# Fy>]Ñ+è|ìAÊD:ƒqŒ–Lp,²raŸ““óðÃKI Q6|ëÃ?0`€““ÓÝwßýþûïË9‚ëÏyJìß¿·nÝöíÛ·jÕª:;;?òÈ#%%%F9Qøøã‡ Ò½{www÷iÓ¦>}Úh™ï¾ûnüøñnnnb›÷ßJJŠô®!iN||¼N§spp/‹ŠŠ"##{õêÕ³gÏ &TWW›]Kõ÷õõÇ"þýàƒ †åùtE¯ c)é ÆU€Ñ’I錗_~yÍš5¢ðÞ{<ûöí>>>Ÿ}öÙ… víÚåååe˜žxä‘G Ä[üã]\\}ôQù娱cMÓ?ûÙÏDÅO=õÔ”)SŒ–¹ÿþûãââΞ=ÛØØøÕW_=þøãF È/CCCOœ8!½4hPVVV}}ýùóçgÍš5cÆ ³kmܸQ§Ó‰ ÔÖÖŠE933ÓìFy>]Ñ+èAJ:ƒq`´d²÷tÆ¥K—î½÷ÞÆÆFQ>{öì]wÝ%æHo=úè£ò5¿žžn˜ž(((Êb-£—...¦éŒƒþ§»WUõîÝÛh™=z”••™©½I:ãÛo¿5{,555:ÎìZC‡õ7Ìn[Ý À(ϧ+z Ý)é ÆU€Ñ’ÉÓâ"ÿõ×_—_N™2E¾ìïÕ«×¹sçä·¤´…œhnn6L1½4-455™MRÈ…×^{­OŸ>/¾ø¢8ž~øÁB:CJ¾HŽ?þøã»¹¹Iß+éÚµ«Ùµ\]]Eý EÌ1»A€QžOWô :@’Î`\-™ì=1nÜ8£'Mˆ9r ÀB:ÃlnÂB:øNæÞúöÛo/^üÄOˆ]ÇÇÇÛ²¯×^{­¼¼üòåË/^TÚ£åtåùtE¯ c)é Ú 
`´dRM:ãÔ©SwÜqGMM?þ¸ük¯6lè߿׮]åj5ÜÎÁƒï¿ÿþnݺyyy%$$Èï­%,_¾Ü×××ÑÑÑô‡Z€Ð.¿pê³Ò´=~źyq«ÿôöª×ˆ0fú÷”´ª³}º¢WÐ1@2µçXÁ e\£%“êÆ".×û²oß¾ÌÌÌ5kÖ$ÄhÑ8¢‰JKKé c€ EœPÆU0ZBcé À¾äääìܹ3==]DïJ "šE4Žh¢ŠŠ zè HÑFcE'?¡Œ«`´„ZÆ"Ò€})..þúë¯EÜfff®ƒÑ ¢YDãˆ&ª®®¦W€Ž‚m4Vtòʸ FK¨e,RM:#oÞ âAEE…ˆØœœœ}ûöí¼¥’£^ßiODƒˆf#šÈðG—ètŒNÛ1@v’0oÿ±âÖžÐ[ÞþŒ«`´dÈU˸­štFŠ&øGg bµººZmiiiñ-µÔéþb{"D4‹hÑD ô :i' óö+ní ½åíϸ FK†\µŒÛ¤3t c Ìi„¼ÊÎ@ÐŽ€0§ýò*C:A:œö@È« @ÐŽ€0§ýò*õ•!T†tPÒ@ex(‚t „9í€W~¨A:œö@È« é èsÚ!¯2¤3t c Ìi„¼ÊÎ@ÐŽ€0§ýò*ã@•›Fc/c?5Qw_·ãf´Ïºñ\"Ð1´?BÞ~/£hû¿Â”kB^££6#gnì2ª“_Ùªâ Ó´&í\·Žq±Ý²flŸc·ºòð_WIô°Ig´vsµºªªªøøøÁƒßÚfTW:C4×»ï¾+šŽ¡ @¿®WûøúúæååIåääd© æˆù¢PTTÙ«W¯ž={N˜0¡ººZº2”IË_¾|yΜ9^^^...ï¼óŽ|¹|ùrooo­V”››+Íojjš?¾««ëôéÓõz½¼¼¸üÖétF•4[ ÓkTùeccctt´XÞÃÃcñâÅFß’0­¿!¥c1¬›Ò!ØØ\J«[ Ô¦Ö¯_?vìXQÿ§Ÿ~zÇŽ·¤å%m;;{øðá®®®Òú]»v5{q¨Õj/^¼hù:V~).M oþïÒ¥‹¼@ss³ÙJÚX ù¥a}DᆮՎŰnJ‡`c=•V7Ú£á—#”¶lhÿþýQQQ½{÷1bÄÊ•+M¿ÃÒžÍ¨Ô ,œ}[:(VIÔÐhV9"(’““CCCEc¾ôÒK-¾;ƒ±t €0í€'ц§9$$$5550ðÚâߌŒ q!'½åëë+.ŒÏœ9såÊñ¯|Mhôx‹š½£ÁìK??¿ÒÒR«×½†”ª!®fëêê¤ree¥ÙÛ òòòL/¿MÏqCÇ¢t66—Òê(mÙTCCúuëÆŒãââòÌ3Ï>;£=›Qfã±ÛžÎ«$ ¦wgÜä)Í%íŽ;î;vìúõëEc2¶‚?ºsÚ!O:ÃNOslll¿~ýDyéÒ¥:nÑ¢EÒ[žžžéééâ¢îرc‘‘‘òÅ¡››[~~¾¼… „„„ˆeŒž7aöŠtÉ’%aaabõÆÆÆÃ‡K_u±œÎPªFpppLLŒ^¯/))‰ˆˆçÏ™3gäÈ‘òL¯ÃêoÈ–cQ:›Kiu ”¶lÁÉ“'ãââ tKšQfã±ÛžÎ«$ o¾ù¦Ñ7y ßZ¿lÂØ :@˜ƒö@ȓθY‘’““ãèèxêÔ©«×ÝS”:$½µuëV???'''//¯„„ùâP\'»¸¸È//]ºôúë¯ët:WWWq5hùŠ´©©IlÊßß¿{÷îVÓJÕÈÍÍ ÒjµÞÞÞ‰‰‰ò|q5%*ãîînú“¦õ7d˱(‚Í¥´ºJ[¾!íÙŒ2Ýöt†ôË&¢V/½ô’|÷„¼@Û‚Ö :tft €0í€'t²xÓÜúˆ«+«*K˺r‘ßp æË+šh¿xÓØEÄ•¥e¥»Ú?uNMA)'€*/¯hàªÁO±j‹½ØÉñ–¥e¥t JÑnYòMܬ@eq4Ð9•¥e¥õ MѦv Z¯}˜›5¨HGx(€–)KËÚè2Ö-X°`̘1ä䆇‡/\¸Ž€0§ýò*þ¯–Šòˆ€ kùH÷c:C)¯¡×ëûôéS\\ÜINnAA››[}}=£1œö@È«õC>M Ù‰‡‡Ç3ÏõÔSÒËï¾ûÎÍÍ-..®òºøøxñRÌ”Þ5jÔ /¼P\\ÜØØxêÔ©ÔÔÔaƵÝåºå-ÍG±zõjµ4{«HJJš2e j½*¡ ÜÌuuUUUïÞ½¥òÔ©SçÎk¸ä›o¾9mÚ4©ììì|þüù–]®EFFöêÕ«gÏž&L¨®®6»Šü²±±1::Z,ïáá±xñb£/›˜ÞÒ¿ÿ£GngÑ¢Eîîîb 3gÎ[³ZøøxNçàà`y±ÄÄD±¯nݺýüç?ß³gORRÒÀµZmPPPAA´XSSÓüùó}||\]]§OŸ®×ëÍÖÙìb¦•Ù±cÇ!CÄ.¼½½W¬øÏÝŒÅÅž¾¾tfÎÐÙÓ†IA¼ôôô”Êááá“&Múâ‹/”Ù`!1hР¬¬,±âùóçgÍš5cÆ Ë錹s熅…•———••…††š>;ÃhEgg纺:ÃíH« ¢0oÞ<«Õ7n\EE…ÕÅÆŒ#ÚD¯×/X° GÇŽ“^Ê÷ªÄÅÅ1¢¤¤äìÙ³Ó¦M›={¶Ù:[X̰2}ûöMKKkhh8qâÄóÏ?/¯.v*ŽšÎ 
Òm‹G¤ötòuµ¸f~ò:ée×®]/^¼h¸¤¸¤wtt”Ê555sæÌkôèÒÙÙÙÇwuu•öÛµkWËé ­V+'VDÁj:£ÿþò3>¤w W[³Z qP-®­ÑKgggï–téÒÅìò3¬ÌÆß»wïnÛ¶Mž_\\<`ÀFc„9í€'Ái:Q:Ãìü)S¦˜>;cêÔ©f>wî\=lLgøúú®\¹òÌ™3W®\ÿ¦-ä»**++ÍÞ‘——gšÅž+!›8qbrr²aMäÕEA¾;C©F5·q1¥—~~~¥¥¥¦`Tg¥ÅÌ6cssó–-[ SKIIIòm5ŒÆsÚ!O:ƒÓ tÞtFaaaŸ>}âããå_6/‹ŠŠ¤w‡ –ššzòäÉK—.•””¼øâ‹ááá6¦3ÄuxzzzCCñcÇ"##å%ƒƒƒcbbôz½Ø`DD„<Μ9#GŽ”~ašPpssËÏÏ—·¿jÕª±cÇÖD^]Þ|óMËÕ0ª¹‹)½\²d‰¨³¨^ccãáÇ'Ožl¶ÎJ‹möÉ'Ÿ{ÿÐN€ñȨé¤ccYZVJ—S4Û=u"åS[nÖn͇vš +KËZÛ£)šÀ4çGÒnÆÍ°O¤3ÿ¥,-kÃí!)š@1¥v Z¯}˜›5`ox(ó¤«Y&¦Of¸®Úu€ÑèðsÚ!O:£•¯¬8[A‡vP½çô}é#2}Úh¾Lž?jÔ¨ææf³5”–tppèÙ³ç}÷Ý÷Ç?þñÔ©Sf³°¡²²2::ÚÇÇÇÉÉÉÕÕuôèÑ[·n•ß _»v-àÃ~P¤ *Êt˜!!™8qâêÕ«í¿ÎòÆËËËEŸzê)Ë;ó§L™òÞ{ï™Ýˆ\ÐëõüÍo~sçw–––š.fa#¢&ÞÞÞ¿ûÝïòóóëëëOŸ>½yóæQ£FÉK&%%‰ÕFöƒ"Mh—/_ž3gŽ———‹‹Ë;ï¼#_zÅÇÇët:ñ²©©iþüù>>>®®®Ó§O×rÒbEEE‘‘‘½zõêÙ³ç„ ª««¯þ÷ÿ{K‹)­nt±·|ùrq™§Õjƒ‚‚rss-¬ëëë›——'-œœ,Ä1ßh³fkhz5+¿lllŒŽŽË{xx,^¼Øè:ÖôД.zÍ‹!¥ÙؤŽkÑ¢Eîîîâ­™3gнX^~ÇŽC† õµ]±b…íç«ÿþG•_šnÇÆ:›½?Âl«š­ê åJªªªz÷îm5qîÜ9??¿‚‚ é ™ˆiÓ¦™.fa#Ï=÷ÜŸþô' u...6íÉÎ࿼õÖ[!!!âÒôìÙ³¯¼òŠ|é5nÜ¸ŠŠ ée\\܈#JJJÄ2ââmöìÙÒüAƒeeeÕ×ן?~Ö¬Y3fÌ0{Õ§´ºÑeäøñãKKKÅÅsLLLpp°…u£££E¡¬¬¬Gµµµ¢¼lÙ2qo´Yk(¿œ;wnXXXyy¹Ørhh¨éu¬…[ —1{,†”vdc…-,&mV…yóæY^¾oß¾iii 'Nœxþùçm?_ÎÎÎuuuòK³Û±½ÎJ ×J»0ëfÒâß]»v=ðÀ—.]²zöEe<==Í.¦´q EEEBRtѼ MÒ·H:ØyǸûî»Mo—^Ç—_úûûJå“'Oz{{›n§¦¦F§Ó™½ê³eu±Jee¥T×ÉòåœÙu7mÚ4yòdQˆuss“þ¯~Ò¤I™™™ÁB å—¾¾¾GŽ‘Ê¢YZœÎ0{,†”vdc…-,&oÖìí*FËßu×] ßÿýž/­Vk˜Î0»ÛëlK:Ãì.¬`úqõŠŠŠ'¯Sʃ-ÿûßÿ^¾‡ÂÂÙ¿té’“““Òbf7âèèhø¤OÓL‹ÓŒÿüý@È“ÎhMü€ @ÐÁÎ;†¸.½xñ¢éE áƒ ÅÅ•á…_—.]¤ùÙÙÙÇwuu•æwíÚÕìUŸÒê¶\Äš]÷Â… ^^^¢°eË–¡C‡Š²˜cúµk(¿4l Qhq:ÃêU½ÒŽl¬°…Å 7+öbyùŒ?¾wïÞܶm›íç«ÿþß}÷üÒìvl¯³-=Áì.lIgHÜÝݧN*?¹ÓòÝW¯è¿øÅž={®Z»;£oß¾JÀìF<==MïÎ0Ürqqñ€ÿùû €'Ái[KC\š½;Ã𥟟Ÿü¼CC¾¾¾+W®ûìk¯½fa¿IIIò]$Œÿüý@È“Îà4Œ­ c˜·`Á‚cÇŽ=;Ãp™%K–„……åçç766>|Xú¢ÇÕëÿÏœžžÞÐÐ VŒŒ”×rss []Ý–t†Òº±±±ýúõKHHå¥K—êtºE‹™nV©†ÁÁÁ111z½¾¤¤$""Bž/®NGŽ)?{ÂôÕèÐZœÎPÚ‘Mª´˜(È›…7ß|Óòòâ²Y\o‹¶Ý²eËwÞiûùZµjÕØ±cå—f·ccS:5fwaýÓM¤3¤®õì³ÏšžÙºººƒ¾üòËJ¿lba#ÇkÉ¿l"ânóæÍ†«Œ3æã?füçï/Bžt§`l,uŒK—.½þúë:ÎÕÕ5>>ÞìõXSSSBB‚¿¿÷îÝ222¤ù[·nõóósrròòò ÈkÅÅŹ¸¸þ²‰ÙÕmIg(­›““ãèè(}w 
ªªJ”:dºY¥æææI?“‘˜˜(ÏÛQQQ¢)ÜÝÝMÙÄôÐZœÎPÚ‘Mª´˜üË&bË/½ô’Ø‹åå×®]+æ‹v2dHVV–íçK\Ï÷éÓGþÒ„ÙíØXg£Å”NÙ]ÜL:Ãò³3$ÍÍÍ£F2|Wppp¸ýöÛü‡?üAô=«Àh#W¯Eå¹çž»óÎ;E¿íÝ»÷„ öíÛ'½UXXèææføpº²ª²´¬+ÿùû €'Ñ®xD @ÐŽéí·ß~ì±Çh‡Ö%štáÂ…F3ËÒ²ÒÝGå¼_SPJ˜3Ì äÕŽjè,¾OÝ‘ÒåÁMà§ÿïDʧ¶Ü¬€}"Љ|Ÿºc½öáM`z¯° =B­Þ¬€}"й|Ÿº#Íù‘M ˜Ö;?švÛ0nÖ¨é À ®˜˜˜:àäð`†ûèª]è¤3ZH:Ð1´Šê=‡Òz†Ê‰Œ´ÛMïý«¼¿üýbåiœa!¯üP+‚t  ‘s©NC×ux׈èÊíû s†Y„¼êÎ@ÐŽtÕ{­ëœÚåAÃÛ1s†Y„¼‘Î@ÐŽt ×ïËnz;aÎ0 €W#Ò:Ð1€Ž¯®¬ªø¯ëÍÞŽA˜3Ì äÕˆG è@Ç@˜Óþy•á‡Z€ÊÎ*C:¨ é  2< A:œö@È« ?Ô € aNû äU†t‚t „9í€WÒ:tÞŽ¡Ñh:ÉNÆÚ!ßÁÎ@Сóv 9³Ð)¥m’ÎaÚ!ß ŸµÔRQ‘t c´þ_A“ÌB;äHg€0í€o…ÏT4 óþ$ Òr4 =þÞh4Ë—/÷ööÖjµAAA¹¹¹Òü¦¦¦ùóçûøø¸ººNŸ>]¯×‹™¾¾¾yyyÒÉÉÉRAÌó6[TTÙ«W¯ž={N˜0¡ººÚlÊ@~ÙØØ-–÷ððX¼x±Ñ—M4¬¦!”¾¨b:_i§hùÇKšÐo4šñãÇ—––êõú˜˜˜àà`i~\\܈#JJJΞ=;mÚ´Ù³g‹™ââ?11QÊÊÊzôèQ[[+ÊË–-›9s¦Ñf ”••U__þüùY³f͘1ÃrŠaîܹaaaåååbË¡¡¡¦© ¹†§3”v €–¼¤ íñ÷F£©¬¬”ÊuuuÎÎÎRÙßß¿°°P*Ÿé ¥Œÿ ýBžFh1~¨A‡öèJ×ü~~~¥¥¥¦Ë‡„„¤¦¦^Ûø7###44Ôt1__ß•+Wž9sæÊ•+â_à B]]T®¬¬4{£D^^žiêÁÁÁAé”¶©4ßêNÆÐþ!O#´é Ú£c(¥3–,Y–ŸŸßØØxøðaé;&W¯ͤ_¿~ ¢¼téRN·hÑ"ÓÍzzz¦§§744;v,22RÞlpppLLŒ^¯/))‰ˆˆçÏ™3gäÈ‘å׉ýš¦ÜÜÜDeÌ‚Ò6•æ[Ý)ÀøÚ äiÒ:ØEǸ|¡þøGŸ\:Wkc:£©©)!!Áßß¿{÷îÒüœœGGÇS§N‰rUU•(:tÈtw[·nõóósrròòòÛ‘7›››¤Õj½½½åù QQQ®®®îîlrõú£I]\\Ìf”¶©4ßêNÆÐþ!O#Î@ÐáwŒ3ÙùÙQ±£«÷¢¡ÆÐþù6Å£@t¸©ŽqùBýÑ÷7n,S76¥KÐzíÃä2Ð>HgnŒ|;ÆF×\G¤hÅD.í‰tà”¥e­ÿÉ£©]ƒRºI‰ 1¥v *ùGfÞ¼¢,Ý3I™2eõ– è€ô¥?|ýü‚kI LJä[32ÜFÕ”Ò8h< A‡–tŒæËWN¬ýt{ÀSë‡^{h·`2ã?h„|»á‡Zt¸©ŽñŸ›57öú €ñ´?B¾Î@С:†t³Æ?‡<á>šŒÀøÚ!ßÖHg èКC_úCÑ’”Kçji+€ñ´?B¾íÎ@ÐŽ€0§ýò*ã@t c Ìi„¼ÊðC­@eHg•!T†tP € aNû äU†j@ÐŽ€0§ýò*C:A×.£­FCÇ@˜ƒö@È·ÚlN3‚®=FÛÓjÏkÐ1ÆÐþy»ø€ÍipK‚®ÃÜ­ÐâãµÐfß2LˆHz÷î=f̘âââölRFc€ñ´?BÞ.>`«¥¢<"è`AG:ãfÒRáôéÓsæÌy衇ڳIÆÐþy»ø€MhõëöøøxNçàà ^655ÍŸ?ßÇÇÇÕÕuúôéz½þªÁýJ_Á0œo¸5ñrùòåÞÞÞZ­6(((77×lÌ.c¶&¾¾¾yyyÒÉÉÉÿþ»’—'æm¶¨¨(22²W¯^={öœ0aBuuµåš766FGG‹å=<</^lt¤¦-Ђt†P[[ëììÜžé €t€Ž™Î7n\EE…ô2..nĈ%%%gÏž6mÚìÙ³-gL/Ý ·&^Ž?¾´´T¯×ÇÄÄ›­€ÙeÌÖ$:::11QÊÊÊzôèQ[[+ÊË–-›9s¦Ñf ”••U__þüùY³f͘1ÃrÍçÎV^^.¶jšŒ¸ù»3Μ9óÆo’Îé ¸ÙtÆñãÇå—þþþ………RùäÉ“ÞÞÞ7šÎ0ÜšxYYY)•ëêêäŒÖ5»ŒÙšlÚ´iòäÉ¢ëææ¶bŵûý&Mš”™™iákjjt:åšûúú9rD*çææ¶n:CÖ·oßÒÒR«:ÚuM •‡¦¹¹Y~éììlxùÝ¥KËYÓKwíY^Ëò2fkráÂ///QزeËСCEYÌ‘¾Šb(;;{øðá®®®Òê]»vµ¼;­V{ñâE©, 
7”Îprrjhh0œ#¶`Ôt¢YŽ=úðÃoܸ‘tHgØ)‘¨%èŒ.ªýüüäÛ Iω‹ÿºº:©\YYiõ™-Hg(Õ$$$$55Uúʆø7###44Ôt1__ß•+Wž9sæÊ•+â_ô…ÙšÞ‘——gzDF-`èž{îùòË/ çìß¿ßÇÇÇôOœ8áéé)}G†G Ìi„<é »ÃØj :£‹ê%K–„……åçç766>|Xúf‡àææ&fÊ‹ÇÄÄèõú’’’ˆˆˆ¶Hg(Õ$66¶_¿~ ¢¼téRN·hÑ"ÓÍzzz¦§§744;v,22RÞ¬RÍçÌ™3räÈòëÄ~MȨ -[¶, à³Ï>«¹n×®]÷Þ{ïâÅ‹ÍàO<ñþûï_å‡Zæ´?Bžt§€Õ »|¡þøGŸ\:Wk9›ÐÔÔ”àïïß½{wq‰ž‘‘!Í‹‹sqq‘ÎÍÍ ÒjµÞÞÞ‰‰‰m‘ÎPªINNŽ££ã©S§D¹ªªJ”:dºÙ­[·úùù999yyy‰íX­yCCCTT”«««»»»é/›˜¶€¡æææ¤¤¤xàŽëDaåÊ•JøÏþóþûï¿úßÏÔPúÍFc„9í€'Ái:éØz&;?;*6Ã3¼zÏ!ŠŽ€0í€o¤3Øt—/Ô}ã§¿xF¼µ®ÛГŸ~E+Ñ1æ ýò·`=è¤Û1Òz†nóŸ˜vû°uÝ‚ÉeÐ1æ ýò·¿ê@‘|;Fºû¨­?}"Exý¾ rn1ÒÌ+KËJ»íÑuŽ¥õ •bJítôýyóVˆ²”K¦Ü©ÊüHg°wŽ–}1áëºËéŒ.#2ùéWœN€±t „9í€ï T™ÎnÇØpÇðÞÿtFßÇRªØ´›s 0¶‚Ž€0§ýò„š*ߎ±©ïcŸ>ø\ÚmŠsO.h» £@ÇsÐþyû¤štFYZVÚmÃÖ9=”yWDŠ&ðßS—‹—¦ˆ ÊR? L™r«”XÎhgîêøœÎÈì7vSßÇj J9‘têûåÎæËWòbþ¾©ïc)¦hÓÝF‘Ñ SѨ·êòÍ©¦»"£@'¡¦Gš/ݬ±õîÈ ÷Ñd4€v:Ð1h€0퀿åTùC­féK(Z’ré\-'hŸ aÚ!«h8Í:Ð1æ´?B^]Hg è@Ç@˜Óþy•!€ aNû äUFõ@ÐŽ€0§ýò†&êB:¨ é  2¤3€Êð(Pè #5ÖÿJ.X°`̘1æpÚn§ááá .$‚sÐþùVû˜¡–Šò6AUwŒ[r-ÝZ—âJ•×ëõ}úô)..î‡Ó*Wš_PPàææV__O1þƒö@È·ÎÇN3‚­Ò1Ô˜°¸ÑKt£9«V­Šˆˆè0‡ÓÖ; _»v-AÄøÚ!ß:38Í:´JÇè„錉'®^½ºÃN[ï4))iÊ”)ã?h„|ë|Ìà4 èpóCc@ž¯Óé /k¢¢¢zõêåáá±hÑ"«ó›ššæÏŸïãããêê:}út½^ovûfEFFŠmöìÙs„ ÕÕÕf/ãå—ÑÑÑR/^lôí Ócìß¿ÿÑ£G ·³|ùrooo­V”››ká|}}óòò¤’““¥‚˜#æßªÃ±éPú¢Šé|¥ ÅÅÅŽŒÿ ýB3Á#R‚vÞ1L/tÇWQQaôîo¼^qÝÈ‘#­Î‹‹1bDIIÉÙ³g§M›6{öl³Û7kРAYYYõõõçÏŸŸ5kÖŒ3,_“Ï;7,,¬¼¼¼¬¬,44ÔôZÝhEgg纺:ÃíŒ?¾´´T¯×ÇÄÄ[8qÍŸ˜˜( b_=zô¨­­åeË–Íœ9óVN«¤3”vzõú£FD‹DŒÿ ýò+°÷¿(&ºÇ7}wÀ€RùÈ‘#VçûûûJå“'Oz{{›Ý¾U555:Îò5¹¯¯¯ØµTÎÍ͵zý¯ÕjÒ•••RYÌ—/ÝͦM›&Ož, ±±±nnn+V\û43iÒ¤ÌÌÌ[u8­’ÎPÚ)é @: ŽtFss³é»Z­¶¡¡A*_¼xÑê|qløµˆ.]º˜Ý¾YÙÙÙÇwuu•ÖíÚµ«åkrQ±kÓ:(]Ì÷ïßÿ»ï¾³š0{.\ðòò…€€€-[¶ :T”ÅùÛ4í8­’ÎPÚéÕë_60`‘HgìˆÑ3,”®{oôî ??¿ÒÒR«×Ûfùúú®\¹òÌ™3W®\ÿ^rËwUTVVš½³ //ÏôZÝè'Nœ(?öÂÂ!+BHHHjjj`àµ/ÍŠ322BCCoááRÚ¦Ò|«;½zýQ O>ù$‘Hg숛›[~~¾Õkû?ÿùÏò32Feuþ’%KÂÂÂÄ–>,}AÃÆt†§§gzzzCCñcÇ"##åU‚ƒƒcbbôz}IIIDD„<Μ9#GŽ,¿NìÔôZÝèW­Z5vìX«‡¬t±±±ýúõKHHå¥K—êtºE‹ÝÂÃ1|Ki›Jó­îT3fÌÇL¤€Î•Îà)A;ïqqq...V¿•pñâÅÿùŸÿquuuwwÿË_þâäädy~SS“¸à÷÷÷ïÞ½{@@@FF†í錭[·úùù‰Myyy‰È«äææiµZooïÄÄD£_W‘ê`úS ¦ÇXWW×§OŸ¢¢"ˇ¬t999ŽŽŽ§N媪*Q>tèÐ-<÷”¶©4ßêN ÝÜÜêëë 
"ÆÐþùΕÎàl‚¯c|ûí·ýû÷·}¾zûí·{ì1:†U¢•.\H;0þwZueUeiYW.6ÒþùÖB:A‡öòJeeåñãÇGŒñÛßþÖê|Œÿ@YZÖF×û§Î©)(¥ýò7t‚íÝ1–,Y¢ÓéÜÝÝ_xáÃ:Ušoý™9*þÛܱŒÿ}Ÿº#ÅáAÑÂ[F–üc“Ù›5h€!¶~dâ4 è@Ç@˜·ïSw¬×>,9µK(˜Þ¬Aû ¹°@ÐŽ€0o?ß§îHs~D\ÃHSªãC†7kÐþC.lÄÍ«€ þfbbbbj­iË€ ©]ƒ2<ëvà ÎìÎÁßÄIYŒ ðÝ£_ÞðÔfŸqù±+/Vž¦qÀv¤3€v"å2¶Ý3ù³‘ÿ»ÁeÄÞ'^«Ü¾Ÿf€ ´‡¼y+Öuº¾{0·cÀÍãQ :Ð1æm®zÏ![nÇ ý†\؈j@ÐŽ€0o[ueUÅ]oËí´?À ‘Î@ÐŽ€0§ýò*C:A:œö@È« é èsÚ!¯2< A:œö@È« ?Ô T†tPÒ@eHgh÷ëõ+‘ Œ3¦ÃNÛí4<<|áÂ… UÊrû£ C@-å)A:œö·ŸkéÖºWª¼^¯ïÓ§OqqqÇ8œVÙ¸Òü‚‚77·úúzµ7£JYh†Ü¶;µT”°:Ð1æ¶ýÕ˜°¸ÑKt£9«V­Šˆˆè0‡ÓÖ; _»v­é2¶4£Umë#ÊÍÍGqûu¢pøðaÃ]5ª¹¹Y©>•••ÑÑÑ>>>NNN®®®£GÞºu«[n•](µ?C.é N3@ÐŽ€0'ѹÒ'N\½z5é wš””4eÊÓeliF{Hg|÷Ýwnnnqqq•×ÅÇÇ‹—b¦¼kqtï½÷žÙú”——{{{ÿîw¿ËÏϯ¯¯?}úôæÍ›Geã–o~ÚŸ!—t§ è@Ç@˜wÞö×爫2Nçàà`xÑÕÐÐÕ«W/E‹YßÔÔ4þ|WW×éÓ§ëõz³Û7«¨¨(22Rl³gÏž&L¨®®6{Ñ+¿lllŒŽŽ–ê°xñb£og˜cÿþý=j¸åË—‹KJ­V”››ká|}}óòò¤’““¥‚˜#æßªÃ±Pú¢Šé|¥ ÅÅÅfΨwìØ1dÈц¢%W¬Xa¶ªf›Âl'4[[Ó]ØbêÔ©sçÎ5œóæ›oN›6MÞø¹sçüüü L÷øÜsÏýéOjñ–o~ÚŸ!—t§ è@Ç@˜wêö7½t7n\EE…Ñ»o¼ñFxxxÅu#GŽ´:?..nĈ%%%gÏžx³gÏ6»}³ ”••U__þüùY³f͘1ÃòU®¸¤ +///++ 5½*6ZÑÙÙ¹®®Îp;ãÇ/--Õëõ111ÁÁÁA\ó'&&Š‚ØW=jkkEyÙ²e3gμU‡Ó*é ¥^½þŒ Ñb¦û2jƾ}û¦¥¥544œ8qâùçŸ7»ßm £—J»0K^ÝÃÃÃ0í"ˆ—žžž†ßµk×}Zcî¶(Œ^šÝ„UUªT)**2œråÊwww£•ÿõ¯U¯’P'Êl†·á4ʕظæ’l¢$é Bn…Hg÷¬Ý¤ëhxÏBõ]é»êõze\ºpV§KÌð_å•+W6»~³ÒÒÒºwïîíí­,+Fí^®ìƒÚ«4ÜKùÆ«78ÐX­ÙC¸|ù²¿¿¿Œ´jÕ*11±cÇŽ2.SÔ_Ó8þpJ%ai£·îüØ¡I“&¦Û2*Æ={ö„‡‡×­[·Y³f7n4»Ý»- £—f7a•¯¯¯-×P?òÈ#Û·o7œ(³™^:¡¾kãšK² òé  B3º‡…¥žäÝ^¤Óé¬ö·Í \¶lÙùóç¯_¿. 
»Üêå999f¯,8tèi7ÒèŸ}öYõ¶‡léBCCcbbBBn_É/ããã»uëV†‡cÈÒ:-M·ºÑ[wnE9dÈÓm£âæÍ›‰‰‰j—ÞhW-…Ñl–öÖì&¬þØdèС¦w¸>|¸é§/%мyóK—.©GŽ9qâDKuØÆ5—dåÒ@…æããsäÈ«}ûI“&©÷ÈèÕ«—ÕésçÎ “58p@ù†é 驯ÅÅéõú'N 8P]¤S§NÓ¦M+((8yòä3Ï<£NŸD^ªD}úòŽ9R˜™™)R;’——·~ýzõ]×\’Mh”?HgZDD„———Õ_%½òÊ+ÞÞÞ¾¾¾Ó§O÷ððО~ãÆ éðW«V­U«Vñññ¶§36lØ$«ò÷÷—•¨‹PÒDv€Ó)P1ÐÌAùƒt†ùtÆÑ£GÕ)?ü°LÙ½{·Œët: rL:ƒ\é ";Àé¨hæ üA:ã^î…Q­Z5£•xxx8 A.ƒt‘àt T 4sPþ q錀€™røðaGÞ ”\é »à®HT 4sʨ éŒ3fÈ”üôÓO§OŸ–Þl·nÝìšÎ —A:@:£Dé ±xñâvíÚÝGXXXll¬ýÒä2HgHgÜ]:£l‘Ë €+¥3ÈeÎà¿éŒÎ;¿ëôÈeΰ;îŠÐè@Å@3§üJg¸ r¤3ì‹gV4:P1ÐÌ)À%Œ9Rú¢¡¡¡S]¹ ÒDv€Ó)P1ÐÌAùÿé‹N:•¢@Iªé 4:P1ÐÌ)ÀÑ}QÒ(a"€F*š9å8º/J:%¬BÜ T 4sÊpt_”tÆïúÞnnålC¨B<¨à辨Ò.ÔuWwÕ–}.Éq‘ÎàÞû¢%Ig˜}¢ªK—‰éþ—Ö­‡t÷Þ-•«3ÊGçüéŒ{ªB¤3Žî‹Ú#!/###ýüü*Uªdø®^¯3fL:uxà™3gZ~ãÆ>ø Q£FÞÞÞ/¾øbAAÙõ:~üøÀeUµjÕêß¿nn®v6¡¸¸øµ×^S6=kÖ,£›h_ub8óÂ… <==;tèpðàAÓ9ÖciK‡L:Ãî¸+@£ÍœòHgôë×/;;ÛèÝ÷ß¿OŸ>ÙwôìÙÓêôˆˆˆ=zœóÌ3êôÉ“'÷ìÙó̲-Ó …ì@ÉÓFë±´ˆ¥C&awÜ  ÑŠ€fNù¤3̾,**zå•W¼½½}}}§OŸîáá¡=ýÆóçÏ®V­Z«V­âãã­¦36lØ$kð÷÷—eÕ9<Ø¡COOÏ€€€ =TEÙ´é“Mnݹ7§———Õ'›XMg­ÇÒ"–™t¥œÎ¸û÷ïoܸ±íÓá´UˆtÀÑ}Q§3ÆŸ“““™™Ù£G·ÞzËêt8"pt_ÔÁ錹sçúùùùúúŽ=Z½7§Æt8"pt_´¬~l‚rS…¸(¨hæ”?àè¾(é ”° ñ V4:P1ÐÌ)ÀÑ}QgNgh<ÁÎS…Hg ÑŠ€fNùŽî‹’Î(ݽrª}¶÷ÎÎ@£Íœò­„éŒsçÎùúúêõzuJxx¸:^TTäãã#󔤧]нq·;*UªT«V­¶mÛ¾ûî»ûVæÜL8s"€F*š9å8º/Z’«3úõë'=Xeüüùó•+WÎÍÍU^FEEÉ»Ú=v[zõ¥˜ PF öîÝûæ›o6hÐ@§Ó9çGã*¿²áV ht b ™Sþ@ÙôEK’Έ‹‹ëÛ·¯2¾~ýzYÛ×_­¼ìÙ³§¼+#Ç8p`:ujÕªÕ¿%ßazÝÁµk×&Ožìïïïåå5{ölµW¿pကOOÏ:ø Q£FÞÞÞ/¾øbAA:dd¤ŸŸ_¥J•¬&ds#FŒÐ^¡¥½RF¶lÙÒ®];Ù7ÙÃÅ‹½«×ëÇ÷À2¢^Æbé lIgXZVV>fÌ)dÙÖÌ™3ÕÅ Gîª$+b:PAÒW¯^}ðÁÏž=+ã'NìÙ³ç;ï¼#ãgΜ©_¿¾¼+ã­[·NNN¾råÊŋǎ;jÔ(³=ö?ü044ôßÿþw^^ÞøñãÕyÂÃÃu:t³§M›Ö©S'ezDDD=Nž<)31b„ êüýúõËÎζ%ApêÔ)ÙIíZÚ+eDŽ}íÚµz½^VõòË/½;yòä°°°¬;ºuëö·¿ýMû lLg˜]öý÷ßïÓ§Oöò)˜MgÜUI’ΔÛt†7nÜœ9sd¤K—.©©©J?yÆŒ2Ýtæüü|???³=ö¦M›š^§ óäää(ã………Õ«WWƃƒƒ;¦ŒÿòË/êü™™™–¦¹íZÚ+e䡇š?þéӧ;ۤI“Ç+ã²’ÀÀ@íƒ2Z‰Ù{gXZV¶uôèQe\6j6qW%I:PžÓééémÛ¶-..nÙ²¥¼lÞ¼¹^¯ ’éÊ iiiÝ»w÷ööVºåUªT1›_ðôô,**ÒÎA¨/¥7nØÕ¯\¹²:ÃÍ›7mLgœ:uêÁÔ^¡ö^íÙ³'<<¼nݺ͚5Û¸q£Ñ»†Ëʈ¼Ô>(«5–••«?f‘m™MgÜUI’Δçt†hÛ¶í矮üÚâ…^˜={v›6mÔw¥£{þüùëׯË_µ#mt{‹fÍši\aô2((Èì]<5î iöÞÏ?ÿ¼ö mÙ«›7o&&&ª¿[±åêŒROgØruÆ]•dELgpW$€F*š9åT¨tÆœ9sêÖ­»bÅ —nmÍš5•ŸŸ(¤Ÿ§×ëOœ81pà@µ#íããsäÈu¶>ú(44Tæ1{— £—sçÎ 
“Å‹‹‹80xð`ÛÓ………{÷î7nœá“M,­P{¯† røðaY$11QÖfôî¤I“Ô{gtïÞýý÷ß·_:C¶¥Þ;£W¯^¶§3,xELgðÌ*€F*š9åT¨tƹsç<<<¤Ó.ã:NÆeŠúî† ‚‚‚d¢¿¿ÿüùóÕŽtDD„———úòêÕ«ï½÷žŸŸŸ··wdd¤v'üƲªàààjÕªµjÕ*>>Þ–t†¨T©RÍš5Û´ióÎ;ï(w0Õ^¡ö^­^½ZÍÓÓ³]»vÉÉÉFï½ñÆÊ“MdDýá‰é K÷Î0»¬¬ü•W^‘ôõõ>}ºzO« Z:pÒht b ™Sþ@9OgÀ©ìß¿¿qãÆŽ¬B¤3Ðè@Å@3§üÇ!QžŒ?>'''33³Go½õ–#«é 4:P1ÐÌ)ÀqHg”'sçÎõóóóõõ=ztaa¡#«·@£Íœò‡tJ¥ ñ V€£û¢¤3PÂ*D:àè¾h×®]§÷D*é @¤3€"pœmÛ¶MýÍÈ‘#¥SÚ¿ÿðððg»1eÊn €F*š9å”ÔÔÔ„„„¨¨¨¥0ñùßçPVIå‘*$I§Ó¹d:ƒgV4:P1ÐÌ)À大§'%%ÅÅÅI§t~ï矧´Iµ‘Ê#UH*Rvv6é 4:P1ÐÌ)À222víÚ%ÝÑ„„„5ø½¿µx’BÐ&ÕF*T!©H¹¹¹¤3Ðè@Å@3§üGÈÎΖŽhzzzjjj~ïƒ:!‚6©6Ry¤ IEÊÏÏ'€F*š9å8‚tAsss¥/ªÓé2ð{ó<ÚSÚ¤ÚHå‘*$I¯×»d:ƒ»"4:P1ÐÌ)4y(xP+p1¤3€‹!\ é àb¸(¨hæ”?š¼‹áA­@Ù»záÒÙ­{œmFç<;sõâejƒŠilæ‹Îó–mùWA´$äºÐ9é  ì)ŒAc8·mµ‚Š)ƒýb(qDK—‹EüØp–(lÎì³[£Œ†c‘³+r:ƒZAÅ”Á1±‚”¸ ¢%ƒËÅ"n 8K”—Ö{óæ^£á—ä芜ΠVP1@#epL¬à%®‚hÉàr±¨ÔÒnnn¥>ÿÝ® ʗ׈¶)rqjjjzzzFFFvvv~~>µ‚ŠQ1+h¤ vŠ| ÄU-\.YÏÌ™3ÇÓÓSþ:a:ãÞòdI@”w¹ˆ¶fjDBBBRRÒ®]»$¨åææR+¨³b€FÊ`§XÁJ\Ñ’Áåb‘õŽ}Û¶m—,YÒ¦M'ÌÎQ¾‚D´ÿ;#***..N‚Zzzzvv6µ‚ŠQ1+h¤ vŠ| ÄU-\.YéØË’;wV’2nøÖ—_~Ù¤I¦M›~öÙgjŽ@F,XиqãªU«¶jÕ*55uùòåÍš5«^½z—.]Nž>ÞðX lBBFùÀ@¿ƒ¿R_~õÕߥNÊ_uÊáñM›>TòÈbàxåÊÓ¦ý¹E‹&ÕªU­U«FXØc ó46giHg˜–‰KW "#ßöô¬*¶¥ Ó¬™ÿ¤I/îÔØ N»Aè.õª6ÃêÕ«<د_hTÔÇ7nì)­=qõt†¥ØÕ¡C+Ó™e¢áñ^¸ðÝ[o ÷÷¯ïáá.'Lqñâv[ ÙHÈž4iÒð7†üúëVÒà”Æ…¦“¤3ÜÌ)WéŒ9sæ >Üh¢L‘é2²qãF[–z:£wïÞ›7o.((Í-X°àÑGU¦7iÒdÓ¦MêoUŒVÕ®]»ÈÈÈÂÂÂÝ»w7oÞ\}×h©uëÖÉþËQ¨Çbx+PÊ0Ê¿þúsÿû[êK9k‘s ±c«SæÏ×ð¥ó„ø¢¢:vlóâ‹Ï:´V¯ÿ1//eÆOûôéD:£T¾û]·b(CÛ¶A_~9µM›‡ÍnK*Ïž=Q¡¡¾öÚs¤3@èvðÙ¹2"qûÔ©obbfJŸ¼W¯Ç¥U’ÎЈ]ÁÁ¶n]l819yQóæÕã-(H•¥ä;ñøñx)[ùûÒKýÚµ 6MÚš@KxåÊGެ=z@XØc¤3À) ÌŠ|¹œCÓmÚ´ùöÛo&Ê™®ŒñÅ52û ÖROglذ¡k×®žžžõêÕ0`€ú´×ØØØÆW©RE}P«ázöîÝÛ¾}ûªU«úûûÏŸ?_}×h)±páÂÀÀ@wwwÓµ€P†Q~ýúyO>ÙA})'C NjÙ²©:¥ÿn‰‰óe䨱¸{Ô©S»V­2ñܹdÓ´«²H|üœGi^­š´‹ú‹ÿMh+W~$§V2Ý׷ΈOåæþ÷¿=²ˆœÃÉ[Mš4œ>ýµ«WÓÔ¥""þâç÷@¥J•Œöü£Æ†‡w»«J:Ãöï~×­2ìÝ»ºsç¶JÇ@Æ-U€Ó§¿y຤3@èvd 5mk²”‹„tå¥;lv6eN)ЦMòðp—½úòË©ê†,-’‘ñµ|›øøxW­êѾ}ðêÕŸX=@;¥34bתU3Ô|½2ôîÝ1*êcµ<§NóôÓ]ŒV(Sd·­@íè×_·ÞwŸ§¥wÕ)ÅH\§4v ˜ÚaÊÒNj·YÃJAúI8•¿Ÿþ¾áŽi©-é í5|öÙû5íJ0—}P×c8rEm—{g(Û(_PZ»vMù+ãyy)Á‹‹wyyݯDðë×w7hࣼۺu³¤¤Ï w^¸ðÝØ±ƒGêo6TÉwƃÖKH˜wéÒ÷(Ÿþiu¶-šlÙò¹L?sæ_Æõ:´·òÖöí_ÊÊå¯lHéÙóñ?|]]ª[·?dfn4 C­Z5ݱc)é 
;}÷»nÅaܸarê¯ü¿EÆ5ÒòH:„nG¶P³mM‚¹„teÜÆÖ˜- àÁ­[Ë''/’“øM›þ©½ˆœÇÏž=áüùmzý?ü°|À€îVÐNé Ø%Ÿ©|ûöÅ(SÒÓ£ÿçe¢Z,Ò7ûî»/V˜’òrV ÆÕÒ»3fzu†F:ÃR1WÁ)ý¦Æj5vR»Íª3ÄÆFøù= ‡“Ÿ¿]þÊø×_ϵz¤¶§3,­Aö¹Q£J—¿Òͦ3Igå3Ê?ùd‡ >• RüãÊÿs”ŸîÞe˜ÏV‡‹·KP3ª¼õš5³Ì†­={¢þD~ÙR·nme\ΓvíZixŽ¥þ‚Q–RÏÞŒOϪ‚IgØ©V¸nÅs9­—ïf—òÐCÈÓ‹6¥3нûä4tÝŽl¡fÛšœtV¯^í®vXc6õ„[†uë"ºv}D{‘ûï¯!;o:ÆÚ#a5v­^ý‰z¾>dH¯è虆Å"߉¦w¸)jÁj@íߺKA:õÕt†¥b$®‚SûLÕjì¤v›UgèØ±„PuzllD§Nm­©÷ÎÐXC—.íÔ{á)™ÙtÆ=5é  |FùÙ³'¼ùæPyûí?EDüEFfͯü†PF”)2èt èîããýÛc‰+› µaÍÞ3Lùç’ÙpY¯ž—¬MQ©R%™.#ê<ʹé Ç÷»hůÞ÷Þ{Y})gÿê—±úUZµªG` ßĉ/*ÿ!B·ÃZ¨¥³sõç 6î°Ælyy)êlò5Q§NmíE$ȱ¼úêÀ¥K§ggoV—Õ8@{¤3´c—ò?ä6m>qb½ 2¢ÜÐj:£fÍû¬@K|ˆ‡­íÛ·ËŸþÔ×j:ÃR1WÁ)ý¦Æj5vR»Íª3x{×2<Ó–q™bõHm¿:ÃÒd+FaÜl:㊚tP>£¼œ¬<üp€Œüáÿ£d:wíZ©ü¤°OŸN‡Ç*³…†>*/+kÓÕ«iW®ü`Y”ANµ£§é9 ³tê£ù±‰½¿û]´bôëjòüïP[¾V© t; …š}ëûï—¨7¿´q‡5f³”ΰ´ˆ ûöÅÌœ9nР09V;6ht†-±+&fæk¯=÷ç??«þÓÕêMÔÜó½ þù[/¯û;køkKÅH\§4ö ˜«µe'ͶYÓ%9ñÖ^ƒéŒ{(êòœÎ8ŸväTôfÝÒĬµÉþ›ˆ€Šåýýë8°ÆÇÇ[Ét^»¶[Î]$Þ'©Fê—.}¯ŒoÛö…5ÜÝ«Èü†§˜†Ï¾²%luêÔvÁ‚÷î6ÄøáëÜ Ô®µÂ+ÆÙ³Iµk×4|6¡ŒË™N:„ngݦoÉnôêõøÇ¿yW;li6Ó›<ñD{íE ‡'ÖKİz€¥žÎ°1vݸ±'$¤¥ ê“Õ·¦Mû³Ù[Nž<º„é ©QêUÜ2òË/[Ô·vZŒÄUpJc¿€©±ZÛwÒ¨ÍþØ$..Ò0œþØÄ~é £›Èø]¥3l Ýå$qíò•CS'ø=%c8$6 ?>7úæµë„T(ÿÊ+ú÷ï¦þ#H9š0aÄ«¯T§´kñ—‚‚Ô´´U†‡kÒ¤á7ßüS½â+9yQƾ‰‰ó-ÝyÈ4î|ûíguêÔ^²dZnîÖË—wÈ˧žêl5^¹òÃcµ|ùåðC‡ÖïÊËKÙ¸ñ<¨µt¿û]®bDF¾=|øSFeŠL'B·3„nõ-‰ÛYY›äl[N= Ÿ;hã[šÍôV òÕ ½HïÞÿõ¯r²È?ÿù¿>ÚÂê–z:£ä±KŽKð¥—úÉÇ'e+åû±qã†.|wÏé õÇ&ê ‡í=lXŸ3gþ%’”ôy‹M¬#qœÒØ/`j¬ÖêNZj³†·•*ëQéá­@í—ÎHH˜'±+%åÿdß䯌ßU:ÃÆÐ]ÒÒ'ø=ý[þâéÔ!¯ýøÂøƒÆ$øõR&~ÓrHîg¢*B”_·.B¢€rÚ¤ ³f¯^½šá€öì‰jß>¸jU góæ½£FµkgK ©R¥²á”¶mƒdÎF|ñÅ[Ÿœzvëö‡5ªËFeDB-§_……;§Ly58¸‘lëþûk<ùd‡õëçYºùéŒ{øîw¹ŠÑ¦ÍÛ7/4š(S”Ë2Ig€Ð]æ¡[Éžž·ŸøÌ3]W­š¡^k`û[šMF,x/0ÐÏݽŠÌ¯ˆÆ"rºßµë#²?õêy ÐýĉõV°ÔÓ¥».^ÜþöÛ’£SJI:c99ßÚÒ¯°të>åa·†wÙîtŠ”';JmY¹ò#[Š‘¸ Niì0µÃ”öNZj³†û£†SÓµÚ˜Îи¨Æ.œð ò Ö9sÞ–C°=acèvùtÆù´#kk>!Çðm‡Ár0F‡—·dcð3òn|ýÞd4PA¢|EHgP¨ ‘28 V8ì=}ú›úõë~ÿýâ*ˆ– .=ìØ±48¸QÙÆ"§Kg\½p)¾~o9€#Þ¸qu·Ù#¼ziçÖ°‘2ÏúàAçr~ÉÏÏ×ëõD åéµR+¨ÄÐHœY&»¾âßl<ÔÔ!¯Éü+_Ÿ¼dÉ’¨¨¨7¦§§çææ5@”çìŠZAÅh¤ ¤3ˆ« Z2Îp¬µÉ²ë[ÃFÚx¨‰MŸ–ù£>œ³dÉåÔÔÔììl¢ˆòœ]Q+¨”tqDKÒríò•¯<;Ǹw¸”ñ/[³nèèè%K–,_¾<666%%E§Ó5àŠQþXäl©Õ FƒKENgP+¨ ‘28&VðWA´dp¹XäælÕýǦËÞï4F;—qãêîÍ ¾}œÏމ‰!ì¢Dy¡Â¦3¨ 
‘28&VðWA´dp¹XätéŒÂ¬³kkvÕ¾ƒÆ«»S‡¿¡\š³l%é ¸º«/ŸÛ¶G¶þsyüŒOWOžµ|âG ê°lá"iæQQQ ©©©á",j4RGÆ >Pâ*ˆ– .‹Üœ°ÆçlÚãþx´[È÷ǘþê$oï×[ž~;gãÙ)fæ§111üØåIzzzRRR\\œ´Þe0 "Å¢>’™Z*h¤°S¬¨à(qDK¸J,rsΊž³içšûºÜÎYTéðmèó;‡M‘:øµM†üçú¯n1ϹCŽ“[¢ÜÈÈÈØµk—´Û„„„50 "Å"…#ETÑ`D­ b€F GÆŠ þWA´„«Ä"7§­ëYû~óÌø˜ªHãÙ)ú©11_.SrÑÑÑ+V¬àA­(7²³³¥ÅJMNMMM‚))))¢üü|j¨ ‘ÂN±¢‚ ÄU-á*±ÈyӲDzëÛ6}ûÎ'«[Œv YÝjPÔ_?Š^±JMdDEE)¹ åÒŒ””Y„° —&877W­N§Ë€))))"½^O­4RØ)VT𔸠¢%\%9o:CöXö^ùyÕWƒ'D»…¬ }ÉKïXò›+VÄÆÆª?ª!ìP¾¹9óÎåççëtºôôôõ#ß»}uÆÀ7W¬XñùçŸv‡Œ,Z´Èð!23—fPî9u:C¯×çææêtº-c>ˆv ‰þN\\Ü’%K¾¸CF–-[¶qãÆ””%—!3siåž›“ïŸ^¯ÏÏÏßù—Èh·-c>HMM]»vmô2oxwrTn.±—ûÞ_í²ëyÙÙÙ)))ÊNedçÎܦ€Š¦ŒÓg·î94u±Õ!9tL´[ˆü•ñ­¯Ï0Lg–uò¹PŽ•q:ãÐÔÅÑn!¥;È:ù\(Ǹ:¸׸w†r‡rÙÅ‘ßã# ¢q¢tF~~þ¹sç²²²Nœ8a”³ØúúŒh·ù+㉿wÄY‰¬JVÈB(gHgC:¸ÒÀÅÎ.†'›C:¸ÒÀÅÎ.†tp1¤3€‹!\ é àbHgC:¸ÒÀŸ@:ãÚå+ƒŸ‹v ÙÒyôÍk×IgPÁ9{:ãæµëÛÃÿí¢ {Æþtœ³§3öŽŒv ‰«×S·4ñ+ÏÎ2þÝÿÎ#@EæÔ錌|íãþxîö}òòß+6ܾF£J‡¤™‹RïHKKÓëõ|ŠT(ΛÎ8Ÿãþx´[È©èÍòR¯×ççç§M^p;ÁQ½óæ.Û±cGZZšL$£@…â¤éŒóiGÖÖìírøÃ/oý–Ë8wî\VVÖú?¾)Ó×Ô鑲v}ZZšL$£@…âŒéŒÝÏñõûD»…ìõ‘2EÍeœ8q"uû÷q!ÏË»±ü19E&* >K*§Kg¨eÝ6öæµë·î\š¡æ2Ž9²cÇŽï·lmÒOæùú‘á'2~R2\ @á\錛׮'‡Ž‰v ù¦åÐk—¯Üúíg&j.C½ãàöÖÖ{RæÜôÇñò–ÌÀON¨ œ+ñã Ó£ÝBâë÷)Ì:{Ëà–j.CÉ\œ¹ãئïÖÜ×åöu¯ÏéÜD€ ‰Ò‡¦.Žv Y[³ë…ôã·~ûOÃ\†’¶PÞ:¸r}t•²ÔŽ‹Ô·ÈhP¾9K:#så7·ŸÀêþxvâ÷·~ŸËP.Í0ÌeèïPfHûøÿdÁè*öE%’Ñ "pŠtÆÙ­{bÜv ù÷gënÙËP–RgûþõOn?ºõ¾.‡6GF€r¯ìÓùGu±^=¢ÝBö½=ï–͹ …:ó–§ÇÉÖ5|êØ{ÈhP¾•q:£(ç×õn?ruÇ ‰Ê£\†ÐNO(_²²7üáO²ž„–ƒ:øßEø€(Ê8ñ]ß Ñn![:VËj”˰å‘%JFãÌ¡ã_ö¿½¶áï©tpåOà image/svg+xml watcher decision engine watcherdb message bus watcher applier nova glance ceilometer monasca datasourcedrivers modeldrivers actiondrivers plannerdrivers strategydrivers goaldrivers watcher api watcherdashboard watcher cli scoring enginedrivers API call RPC cast notification extensions workflowdrivers gnocchi cinder python-watcher-4.0.0/doc/source/images/sequence_launch_action_plan.png0000664000175000017500000006637313656752270026276 0ustar zuulzuul00000000000000‰PNG  IHDR2Èòt‚o)tEXtcopyleftGenerated by http://plantuml.com09qzTXtplantumlxœ’]o‚0†ï›ðN¼Ò L%Î9ãŒø1ÝDe“m—¤@›@!¥uûùk™q0ovGOû<=ç-ÓR!U–ZÈB$–¹7Ég¥D¯L¹Q{­w"ã#0÷[0‚ÏóRó,çEJ8TZÿTBS 
•bÉÄøšxÝçú•Ïwƒùjl[ë$½÷—»ÅãnÕ©)*¢®XI"RR㩺 ÃucÛ¾jc>8_gÐ8}a¿iX/¹Ûgfª4gR¢x| ë“Õ{4 ™á~‘F8E‘2*þï± Ÿ4{%±€WYÀ­Hðͳ1òHÍæÀB”' }F;ÕßÕ/ƒ $_·èKJcâ>ލ$¸ý ˜†8ô†#§?r†0_hoÐAí•ïA™+SHL°,R¦Ú”?-6ð¢¸d…%?1‘óŒryÙôí“p ÂL÷¶E½îm‡w7v„1òW_ß)™ãƒ[×tîkIDATxÚìÝ \TuþÿqѱŒ¸¢(èBýRÓ´H,ÍûH®×l+÷—×ôïvq5o›—ô'¤´«mî†Z*( ¡˜kŠ™E¥)ˆB²ˆxC¹+öÿÄÙ=ÍιμžóðqæÌ™3g¾çûù2oÏÌ™f?€ÒŒ&€X±ËUWŽ¥žÜŸµqOvtÂõä³3±¼ÎݺYœ²øƒ8÷a‘ÍמötJ[yçÖm€X^'®%¥Å¹ÿOž8vÚ·¿ŸýÕsSâÜ+ ?}dlaÖ¿8~by-»r,5ºÍÓ’½?ós1!òÎÚSvLÄ^ß‘ro¬Û’9€X^›Ê¯Ý¼-©ûë‰3+Ê¿ÓÉäÊT~ãëC_uvû>w)÷bAAAii)Ç@,¿['f‡IÞN˜h,“«É\9gžðzhzzzNNá@,¿+·KÊ¢Ûô“°}õÄ'&2¹úivYs{‡!‡NJJÊÊÊÊÏÏ'™ˆå5”³çKIÚúŽ«2“+“r¸]k?8xð ’Ì 8¨byMœYý±Ä쓯Í73–õÜYË«‹wîÜ©$óœœN˜ˆå5‘²xƒÄìäE63–|8++‹ã –×Ä·¿ÿ³„í¯ž›b:“W”·ÿ‰1¿Äòѳ£¢¢6nܸiÓ¦íÛ·ÿÎáê¤ñ6%êÃMJ&ŒŒ”@Τˆåµ©   ==ýó}Ÿí|ãm„H ßÖõ¹­¯/‹üh‹È%Š+™\9U~øðayˆ<ã –ß•ÒÒÒœœœ¤¤¤ƒî3GbùÇAÓ#*m¬ñ’ÉwîÜ)«ÉÊòy Ç@,¿[YYY¶w¿0ï—³å!¯JÿÛßþö~%™ùàƒ¶nÝ£drY™Såbyí(--ÍÏÏ—°}`ÊÛËc&¼! <""âï•dfÓ¦M{÷î=|ø°’ÉeeN•ˆåµ™Ì ¾þc˜Är 牉‰ÑÑÑ‘•d&66VyzzzNNެF&ËkßÉ·ÖK,?úÆZ‰ß‡>XIf¾þúëüü|9€X^y‡Ž§,ÞPå”0Eb¹ü+ó‡¦/מôW–mr\ÄòªIŠÖýYò»žd›W±¼jœ-Ë;夺r<õ¿qÄòZPPPpéÒ¥ìì쌌 ì}húr‰åò¯Ìïùo©FÈFdS²A. 
–Ë –@,'–@,€XnWbˉåˉåb9±b9Ärb9ÄrˆåÄrˆåˉåË –›çÖÍâ½¾¿“X~ ïîܺM,Ëë‰äð#A¯K&W¦ã3þX –ד³Ã$Ç´”µqÏM_™ÿâOk‰å°rå×nä:Îdb*¿~“~Bÿ§è!LÐQ9 5Ó5TP£ŽåéÙ!9<ªÅ“ùGNÊͳÅÿrÎÜÖïàÊ+;v¬´´”Ú†µ‘ÁEý “ÁéÒçÇé'ôJ€Âd•JQ3L1YC5ÞX~!ö°riÄó‘ûå¦Äï‚‚‚c ÖýÔ[÷Ýÿ×M_}õ•ÄrYH2‡uŽægÞ]w(’Ig:¶š·/ôJ€XN±˜ŽÊ¥¨¦˜¬¡‚i,¿r,5ºM?iÁSK?T3ù¥K—²³³wÿöUY¾ÝiÀáèÝËe!ÉÖ9šË@sçÎ &ébB$o_èÿ”±œb1•JQ3L1YC5ÆX^˜õ¯X·¡Ò|G'/S–¨™<###ñÈ—1?/÷îôüí· ‡e¡’Ì©p0š3©ƒï¾° ‰‰‰IIIééé999 ôJ€ÂÔD;*”¢f˜b²† jt±\ý9´CgܹuûçÊSåj&OMMýꫯ¾Ý_|qdJJtié·W¯ŽoèPb9ªõ½é–€2=ú¨Ï‡.îÞý7ŸKÊäøñ­½¦Mû±ÜªFHuƒ2<ž?ÿiTÔJÉ–ƒ?)]‚Xn¢p|};:´A{aBÂ=ÔI}½……‰ò(ùÓ“–+m+ÿ¾ôÒ³=zøêÿÏ—~õkÀââoRSwýá£|‚Xë¦LcƒŸÏ'–›å‹s¤™ôýƒúsh:™ÜœK¬+ÉüBJÚ'ÞÁ¿lmÂ<õ;'ÌaU£ùîÝkŸyÆO½)Ãâúõóy¤³º$8¸ÿž=á2sæLLHÈ'§ûííï•…—.%èü¿:ÆÆ¾ûØcµjÕRþ0lذP+?þx™¼w‘å®®N'ËÏÿõ´€[·®PÛsñâ)Ç?¥³AY"»]eõ™>@—/ºç±{Õ%&š‘¢f˜²¤¿bÆFÓwéWPÍvŒXn–‚ÓY’ÌKr/ÿlè'ÊÍÿArå±i‡¾Þ8å‡/¾®Öc‹Í ï¿¿ü+óW¯–Áº¬ì¨ƒÃ}Ê`}ûöw:¸(÷vëÖåàÁ¿}}íÚ3fŒ™<9Øà»ùóо}Û¸¸µ7n|)ïž~¸ºÚÃ{8ð7Y~áÂ?Ç:nÜå®#G>”Ë¿òDòAƒž\ºtºú¨þý{Ÿ;·WÄìÚµóW_m$–ã.cyÓ-™fÍ/)B9"ó&b¹¼…"–[Õið@˘)#§2oæ›XÍÓ³ý¡Cd‡>÷îûöýÕôC$C®^=çÊ•ÏKK¿ýæ›Í£FVùë(–›(9¦r NžŒR–$%EþÏÿxËBµY$í|ñŇ:<|øòª¬>gË%/M™òœz¶ÜD,7ÖŒ5Ô…ý36˜¾Kg¦Æ;F,¯~D7ôåæŸñ®Ù™vÀÂbÉ3ÏøÅÇ¿'3Ÿ|²æ·¿}Zùå[Iß}·Uû¿`Õéúõ#îîí ŽæO>ÙmûöUß‚?¾õ×ñîâgçû•yy#rôèÇÚobÔ/AÉ£Ô·G:“FÓRÞ—Ëq—ý¿é–€¼ñ’„ oÍe^jáÁÛÉý0K® ì-ïø‰åV5B<ÐòÆ´uëVÕÚa«Iƒ¨wíÚÚ¯ßc¦rß}÷ÊÎë¯câÖE,¯²p¶m{G cÇŽŒ\©Ý,ò§Gÿà²DmXÕgì»å i¨óç?­2–kFŠšaÊÂþŠaLߥ3S³#–ßU,WÔàûáÚßKWËam£ùêÕs^}uœÌ¼öÚ¤ÐÐ?Ê̪U³•¯!ÉŒ²D¦¬¬øQ£]\•7¶¶Í ŽæòÖÄàEk”³ßd´më [“IØØØÈr™Q×QÆkb9ê.–7Ñ÷"óæ½¬Þ” !KtÞñ·liçíí>wî‹Ê©b¹õŒÆb¹ú1i3wØÄjW¯VW“ÑØÉé~Ó‘~(¯å•WB6nüsNÎ~õ±&^`]ÄrÓ…£œ]ìÞý7»e’å:yUÆò6múŒ 9ˆ))Ñ#F<5iÒˆ*c¹±f¤¨¦,쯘±Æô]µ²cÄòšÇòìÿ¨A¢Ö¾Š{74éÑ\Þ üæ7ž2Ó»÷ÿ(ÿzôèÇÊ·’†õ?uj§²Z@@/y7½¯¼üXqñ7Æ.w)ƒ£±ÑÜØy—cì½…‰ó6|ˆµË›h <ûl€ÎÙ6YR壈åV2B¼ëË/#Ôë*™¹Ã&V3öÎØØCd:y2jåÊYÏ=7ÐÑÑ^ &^`]Ärs '*jå´i¿›:u´zΰʱ«_å­ñÕ³þõ¯Ïî3x¯ö§è5#EÍ0eaÅj%–×lLjå5媚Åi5™ßåv€¦K<<Ü~øa»‹‹£òÿ ·n}'od Ó¾dȽ÷¶¾qãKeþóÏÿ®g-ZØÊúÚïá´–ÜÑÜßÿÑuëæUwÐ\ºt:—|C­ôÿ¦Xyyï¿¿öÏ2ɼ,‘åÄrFHƒwÉn ü䊯Vk‡­¦ÿ9Ò§Ÿîiú!ÚSFÆné®U¾ÀZåfNEÅñÇD&õ'åÔ»–,™jð’o üá.c¹ô(õ#Á2sñâõ®ï¾ÛZe3RÔ SöWÌØcú®Z?‰å5ŒåÚjœ¥•d^+›šâhþ¿ÿ;*8¸¿úŸ”Ê›Œ9s&¾òJˆº¤GßÐÐ?&;¶Eû×b¼¼øôÓ¿ªŸkJHøà\÷ì ׿Rˆ±ñ³ÏÞwrº?"bI~þ¡›7¿’›Ã†õ­rÐ,.þæ‰'yùå ””è²²£W¯Þ»÷/ü@jË›\ 
„…½6aÂ0…²DùéWb9#¤z— ÙÙûäM¶¼=Õþ43wØØjúW]’ØôC† éóÏ®“— ùë_ÿÔ«×ÃU¾ÀZåw_8òºä¾ôÒ³rø¤må_ù3Ô©Ó×®}QãX®~ˆ]½׸qCÆzáÂ?¥AüÛÃ{UÙŒ5Ô…ý36˜¾«VÆOby ã´¶ZܱV5šïÚ*Ó2*ÓªU³[·n¥~ÉG¦ãÇ·öìéÛ²¥Œ€k×¾¡gÑѫ剭msí%>ê#kvìØáï_Tåh.“ ¯ýû÷¾÷ÞÖò¤2#ã¦9ƒfQÑ׋½âëÛQžë¾ûî}æ¿Ý»×»š±œþo"–7¹èÞý7û÷¯×Y(K”(Ë!Õ¡O£ùå'ŽFŽì·eËrõܯù;ll5™Y·nž··{‹¶²¾úBLþx™9ÍHQ3LYÒ_1c#Œé»jë&±@ƒÅkžxûBÿ§ˆåôKê¨õv@úéS77ç/¿Œ ¨Á0U‹SuÿsÙj+¨Öb¹4k­¯_ÝmŒæL¼}¡ÿSÄrz±¼fS|ü{nùù‡(j0LË],÷Ýw5üÛcyÍr;iŒæ¼ÕýŸ ‡0Ë)j0LË›L,ôÑG#""ºwïÞ³.±ŒæL¼}¡ÿ3Qôb9m†)&KŽåIII}ûöU¹Ìkßõá‡zyyÙÙÙuîÜùý÷ßW³nåw÷×uêÔ©eË–]»vMLLܼys—.]Z·nýÔSOeffêdc™Ù²eK=Zµjåêê:qâÄË—/ë¬óã?¹¸¸È6{öì©Ü«MYæîîncc#7ÓÒÒBBBœœœìí탃ƒóóó >JÈþ{{{Ëk‘?øàí¯½A€ÑœÁôJ€ÂD,§¨Á0EÕk,Ÿ5kÖÖ­[eæ½÷Þ“yuù¾}û:vìøùçŸß¼yóСCÚ1[â÷éÓ§å®7ß|ÓÁÁáé§ŸVoŽ1B?–?üðÃÒ:²BNNÎøñãǧ³ŽDñÐÐЫW¯–••}ûí·£F2xÞ[nöïßÿüùóÊÍnݺ%$$_¿~}ÆŒ“'O6ø¨]»vIð–¸qã†ü+óqqq70š3ø‚þO ÐC˜ˆå5¦¨ ú‹åååå<òˆ$a™—HüàƒÊå.IÚjv111Ú1[B¸2/Ò¹))]?–Ÿ8qâ×nš—çì쬳Î}÷Ý—m`ïõbù÷ßoðµHÞ6ø¨>}úÈþk§tÿ*70š3ø‚þO ÐC˜ˆå5¦¨ ºåVçÍ›§Þ7nœ_œœ®]»¦Þ¥Äo5ÊÞ¹sG;*ëÜÔŸ©¨¨0¶Õ™¹sç¶mÛö•W^‘¦ü׿þe"–+ÿ‰ 8wîܨQ£\\\”Ï«ÛÚÚ|”£££ì¿ök‘%70š3ø‚þO ÐC˜ˆå5¦¨ ú‹åÏ>û¬Î7±e‰eMÄrƒÛD,×Ý'Cw}ÿý÷«V­zî¹çä©ÃÂÂÌy®€€Éó.\¸uëVII‰±g4Ë8ÀhÎà ú?%@a"–SÔ`˜¢‚ –_ºtéþûï/((P—ȼ,‘å?Wõ!öºˆåªÌÌLÙ e¾E‹·oß6ö\÷Þ{ïÍ›7•ùÇ«÷ê<ªOŸ>±±±Ú¯EûCì h´£ù™°Õ2Ð0éLÒ,¼}¡ÿSÄrzˆÅtT(EÍ0Åd d4v¾ûî»&LÐY(K”0ß»w¯‰K¾Õz,2dÈþýû åéÖ­[׫W/e¹——×¾}ûÔÏÀëlªGaaaEEEß}÷ÝC=¤Þ«ó¨]»vÉþË«P_‹ö%ß8ÐhGs&o_èÿ”€•Çr&‹é¨PŠšaŠÉ*ÈhììÞ½ûgŸ}¦³P–¨?`þ÷¿ÿ]’¹ÁH«õX߯_?FÓ¶mÛQ£F©¿²¶sçÎN:ÙÚÚª?¦½'NôìÙ³eË–’´ÃÃÃÕ{u%Ö¯_ïííÝ¢E ýHcà@#T~ýæ¥Ï+Ó¾° Û‡~ô§å›ç.“‡éßSÄfÞ¾Ðÿ)ë,zˆ…uT(EÍ0Åd Dìš¶ÄÄĸ¸¸­[·n„iii¢¬¬,º ýŸ ‡ ©wT(EÍ0K­ b9д%%%Ú ˆåº¸ä@‘”èc ýjXˆå˱byáÒE P ö¨byƒáBüE P ö¨b9‡E P Ñþµb9Š @íP Är?Š @£ýj–ËëÿÒÍšYæeê-õuÑÚ ¾W\ÿVŽ}Œö¨Xx,·Ú@Uë»A,¯ÏvhØÖ6öìô€XÞ"YCE—º~ÞºÛ~Ͷ\oíÜ ԪΖ –Ë-9–çåå………uïÞX^[{%ùî»ïJÃ2ÄòòööNIIQæ7nܨÌÈY.3iii!!!NNNöööÁÁÁùùùJ&Q)ëߺukÁ‚«W¯V£Ëúõë===5ŸŸ_rr²²¼¢¢âí·ßîØ±£££ã‹/¾XXX¨®/¡ÑÝÝÝÆÆFg' î†ÁçÕß7u¦´´tÖ¬Yí*ÉŒÜ4½Ÿ²|åÊ•®®®ò¼Ó§O/++ÓÙ ±½2¶AmèÑ£‡¬ «mذÁàž›Ø¾ÚPúÒ'¯tÇŽ#FŒVzþùçå©¶aݺkmc!_ÿµ[.û3mÚ4yvy«V­R—KcJ“Þÿý¿ýío£££ÕÝ@,×eìÒ6Ö­['3ÙÙÙ÷Ýwß7dþ¯ý«D#™éÖ­[BBBqqñõë×g̘1yòdƒ1féÒ¥gÏž½zõêìÙ³Õu‚‚‚²²²$x/Y²Äßß_Y:`À€ÌÌLYyâĉsæÌQ×öÙgsrrôwÒØn{^ƒKræÀ³+õïßáÂ…¦÷S–Ëú*ÉÌâÅ‹u6h¢q 
nP[ûöí%ÅI‚=þüË/¿lpÏMl_»¡Lòo¾ùfêÔ©ÎÎÎ7n¼yófchXc¹îZûîcù¢E‹”ÝS^£ÎúÒ°ÒbÒÔRPÒìµ[¤€• @£ýjË]ˆÿ“O>3fŒÌ¼óÎ;...Ê™Ûßýîwqqq:k¸»»Œ1;wÖ?K)ëäææ*óEEE­[·Væ}}}Ïœ9£Ì_¼xÑÓÓS]ÿܹsU¾íÝ0ö¼ozyy:uJ™—G)0±Ÿ²\]_ýø€±¬Ó87¨íÁ ÿé§ŸL칉ík7”‰GÉ]_}õ•9=¤ÞÖØ®Ö]kß},—ýÑ~ÆÚü‹/¾«ñïùµ X9Jô1Ú `¥±üæÍ›’Üd¦k×®{öìéÓ§ÌËå³åÇŽ tttT>½lkkk0Æh4š’’3#'íOD7oÞ\]áÎ;wÒØnTëyµW–¹Ye$3½¾™c0¤?~<((ÈÙÙ¹K—.{÷î5¸¦‰ík7”‰øõ×_O™2EžeÀ€›6mR¿/а k,0×]kß},×yúgË7nÜØ¿iê©S§Öøl9c4x›B#€>FûÔ¬1–‹€€€¨¨¨Çÿeù766V†r—···Ä¹+W®Ü¾}[þUÓˆÎ׿%[šrÕÇÇ'++«ÊÈ¤ÍØn|^}3礮±H¦®/3úëÛ+󃢤ë={ö¸¹¹Üs3·¯ÿU|¥¥¥Û·o>|¸ƒƒÃ¤I“´¿[Þ k,0×uk+麨¨H™ÏÍÍÕNÝ—<[ž’’¢ýÝriÒûï¿Ĉ;vìP¿WÏ P ö¨by5ÿ;ï¼óÀ„‡‡ËüÚµkÝÝÝW®\©Ü%‰1&&FÂFFFFHHˆšF\\\RSSÕ-,[¶L²½¬cÎW‘׬Y3pà@yxYYÙ?ü |„Þt 2¶ŸWgßÔ•çÏŸ¯~:00ð­·Þª2–4Hù¶³ÌheÚô^™ÇŽ+1OZ@by‡ î¹™Û×y” /^ íÖ­[Ã6¬±X^w­­ò÷÷_²dIaaaffæÈ‘#Õ•-×þþ¼º{òbµ¯ÄV[WbgŒoShÐÇh€Z€%Çr—HJJjѢťK—~®ü -™?yò¤rW||¼‡‡‡äv5HºsppPo–——Ï›7Oò¼£££¤Óy©¢¢B6åëëÛªU«®]»ÆÆÆV¨Œí†ÁçÕÙ7u¦¤¤dæÌ™ÊÃeFýL²‰X®\\6>uêTí Œ›Þ+s‚â¶mÛ䱦G ÷ÜÌíë<ªZ¤aÅòºkmUrr²ŸŸŸrüuëÖ©+[®}µù)S¦È¾Éj_‰½Þа”èc´?@-ÀÂc9ª}h›qpiíZP”—p»„ßNˆå (ÒÚ AbyŒËà¯',(8űˆå (6d«ê«õÖ6ö, ›Ì#›ûE6{<¾KHæ?>áä9@,PßÉ<Ú¾¿$ó¨æ~;4}9yXW,çÒ@cHæ»%™+ST‹'µOžS¤°r”èc´?@-ÀÂc¹š˜˜˜×dóDŒëà¼CÇùµ X9Jô1Ú `ù±œ£4¬KŸŸØ¡é«¤ñöŸ¸ s‘úΦ’ÜË)@ €>FûÔˆåê#“Ç=øÛ÷öû*äÍÜ}_S¤%úíP –¨L¾Ý®ÏöOjŸ§HJô1Ú `E±œK %ÿÈÉ÷꟧HJô1Ú `E±@ƒ(ÊÎKÿ˃§ÇË –b9ÄòÚÇ¥Š @íP ÄòÃ…øŠ @íP Är?Š @£ýjÄr)@ €>Ú ˆå~i-ËÍj>2/[¶løðá´a :tÅŠ”fAûÔ±Ü.-P¤M:0×ó¶mÛ6==ÞXc§OŸvqq)..¦À0 Ú ˆå,*Í*Úµk7iҤ˗/kß»jÕ*[[[ùW}mjj5j”““SË–-{õêµcÇQöîStýäðÚzÒÍ›79²V6^[/ÜØ¡7v|efðàÁwîÜ1¸3Êš666ööö>úè›o¾yéÒ%ƒ«™ØˆÈÍÍ6mZÇŽíìì‡ ¯Þ;tèÐmÛ¶Q¶€XÀ¢b¹2sáÂ…Ñ£G?^½«¢¢¢S§Nááá^^^2o:¦§§Kº[½zõùóçKKK¿ùæ›b¹JÚö£>jl±Üà¡7¶}Y>nܸ÷Þ{ÏX,Wf Oœ8ñꫯvèÐ!++K5‘=ñôôüãÿ˜ššZ\\|ùòåÝ»wKŒW׌ˆˆ‡S¶€XÀc¹ÈËËsvvVoîÙ³§gÏž2Ó»woí3–Û„º¥K—š# .LKK“0ïäädooœŸŸ¯,¿uëÖ‚ <<<$ùÿüßgtu6XZZ:kÖ¬v•dFnª+¬_¿^RŸF£ñóóKNN6Ý,+W®tuu•™>}zYY™Î³ÛUcÏÒ©S§³gϪÛ?pà@=dYsÆ _‘Á§Ð_­¢¢âí·ßîØ±£££ã‹/¾(©ø.½‰X~íÚ5ŸÓ§O›ˆå*9d'NÔ_ÍÄF^zé¥?ýéO&ö9==ÝÛÛ›²ÄrVˇ öá‡þ\yŠRæMçjÉÀwË»uë–P\\|ýúõ3fLž®®®&LP/ÓõÚk¯éd3ÉQ&2d­œ-?vìX`` £££òŒ¶¶¶j¦ÕÏüÆb¹öÊ2#7M¯olŸMoÄØ®{–N:ýøãêòãÇ9;;wéÒeïÞ½kæSHLÕ>LÍ›77?–<ô¦Ï–‹²²²Ç{ìÈ‘#?Wu¶¼}ûöÆb¹Á¸¹¹éŸ-×Þrzzº——§À{!Ðþµ@,çðËõ*§ÇÕëu‰ÌÌLõä¹ÁŽ?~ùòåw˽½½7mÚtåʕ۷oË¿ê:’]õÏ–ÛØØÜ 
‰³åÕŠåêFdF#ÆvÕØ³Œ=zãÆ:ÏrçÎ={ö¨ŸôÖyEÆžBg5í#¥ŸºÍù»9-£½\Ú䡇ºqã†éï–?ÿüóÆb¹Á¼ð sçÎ5ñ¼êY}þN÷B ýjXÎá,6–Kø1b„ΡC‡jKý+±K¼ ËÎÎ.--ýöÛokp%vÙBLLŒ<<##C®®³lÙ²€€Y¨ýÝr—ÔÔTý Ο?_ýny``à[o½U³X>hÐ å»å2£ýuÓ»jìY6oެݤ’-%”–••I,ïСƒÁWdì)tV[³f¼^Y"[ûá‡ÆŒcîߘ»ˆåbíÚµ’¢õ_xQQщ'fÍšeìJì&6rîÜ9y”z%v9Ü»wïÖ~Èðá÷lÙÂß)ð^´?@-Ë9ü€…Çò^½z©Ÿ¬VÅÅÅõîÝÛÄ%J;88´lÙRÖ¬Áï–ÇÇÇûøøØÙÙyxx„‡‡«ë”——Ï›7ÏÝÝÝÑÑQ’¿²044TžK?ï•””Ìœ9S¹»Ì¨gø«Ë•+±Ë3N:Uûrî¦wÕØ³HXmÛ¶­ú ímÛ¶ÉÃ5M= ¾"cO¡³ZEE…ÜëëëÛªU«®]»ÆÆÆÞ},7ýÝrÅ;w¬ÿ»åmÚ´éÞ½ûo¼‘——§ÿ\¦7òsåGß_zé% ç-Z´pvv–•˜˜¨ÜuæÌí/Ÿów ¼íP ÄòãÒEjiãoü(úòåËõ/hóIë­X±B{IQv^vtÂí’2J ³´?P ÖË€XŽ!±<ÆuðwSß)8Ekb94½ømð3ÛÄò¦•Ì#›?Ùìñ}ÝÆŸÜoÎÉsb9µ™ÌwÜû´$óèÖOEßÛ“ç€X@}'óm$™ËÕÜo‡¦/'Ï€År.-4rJVab²ºÉæ‰X×!y‡Žów ¼¢ýj˹?@‘ +ÿÈIåsìÊÙò]ŽbÛ I}gSIîeJ ³´?@-€X€"ê#“Gß°½õS_Œ˜“»ïkJ ³´?@-€X€"ê#“o·ëeë§}zœÃ,íP – HúÈäÑöú§Ç)0ÌÒþµëŠå\Z HúW”—þ—OS`˜¥ýjÖË –b9Är`ù±œK )@ €>Ú ˆå † ñ)@ €>Ú ˆå~)@ €>FûÔˆå(R€} ´?@-Ë9ü(R ©—@³f\–>Ú ˆåUáÒE Xa ÔO`&–3Ì‚ö¨b9h°„\wOjæ–kkšýG»ví&Mštùòeå*uùàÁƒïܹcpg”5mllìíí}ôÑ7ß|óÒ¥KW3±‘››;mÚ´Ž;ÚÙÙ9::2$>>ž® –@,·ÀX®Ì\¸paôèÑãÇ7½}Y>nܸ÷Þ{ÏX,Wf Oœ8ñꫯvèÐ!++K5‘=ñôôüãÿ˜ššZ\\|ùòåÝ»wKŒ§«ˆåÔ“´´´'''{{ûàààüü|eù­[·,Xàáááàà°zõêŸÿû¤®Nº+--5kV»J2#7ÕÖ¯_/ÁO£Ñøùù%''›N­+W®tuu•™>}zYY™Î³ÛUsžåÀ=zôdµ 6|9·¯¿ZEEÅÛo¿Ý±cGGGÇ_|QRq þ //ÏÙÙ¹ÊX~íÚ5ŸÓ§O›ˆå*9^'NÔ_ÍÄF^zé¥?ýéOT€X@ƒéÖ­[BBBqqñõë×g̘1yòdeùÒ¥KΞ={õêÕÙ³gŒ‚êM „̮Կÿ… ª+eeeIv]²d‰¿¿¿éÔ*¹PIf/^¬ó,ÆvÕœgiß¾}tttiiéùóç_~ùeƒ/ÇÄöµW 0`@ff¦´ŒÄà9s樫t7±\þ=tèP¯^½ÊËË«ŒåòÒÜÜÜ ®fl#Ò,iiiTÀb9—(RÀJ   ÀÝÝ]™ïܹ³þigc±ÜËËëÔ©Sʼ<ÊÛÛ[]!77W™/**jݺµéX®n$%%E{#¦wÕœgyðÁÃÃÃúé'/ÇÄöµïòõõ=sæŒ2ñâEOOOsßÖüg;999c+Ëó:ë¿þúëê9m±\R·±Õ n¤E‹ÅÅÅÚ{¨ó_ ³ü™¨ði(R nKàØ±cŽŽŽJ³µµU–k4š’’3c¹öÊ2#7M¯o,µšÞˆ±]5çYŽ?äììÜ¥K—½{÷\ÓÌíKì׎ÐÍ›77?–+\]]'L˜ ^¡ÍôÙrQVVöØc9räçªÎ–·oßÞX,7¸777ý³å5‹å ³ü™¨b9‡E Ô¤¼½½7mÚtåʕ۷oË¿j$“øª¶ÜÆÆÆ`~3q¶¼Z±\݈ÌèoÄØ®šÿ,wîÜÙ³gúIo—clû:«ùøø¨WV3˜ºÍù»9;¬½\䡇ºqã†éï–?ÿüóÆb¹Á¼ð sçÎ%–óg @,@‘ V’QcbbJKK322BBBÔH¶lÙ²€€Y¨ýÝr—ÔÔTýü6þ|õ»åo½õVÍbù Aƒ”ï–ËŒöÔM憎gQçÇŽ+¡´¬¬Lby‡ ¾cÛ×YmÍš5òbe‰lí‡~3fŒ¹okî"–‹µk×JŠÖÕEEE'Nœ˜5k–±+±›ØÈ¹sçäQê•ØåXïÞ½›XΟ9€Z±E Ô_ 
ÄÇÇûøøØÙÙyxx„‡‡«‘¬¼¼|Þ¼yîîŽaaaÊÂÐÐPýÈWRR2sæLåJì2£~½º±\¹»<ãÔ©Sµ/çnzW͉åÛ¶m“Çj4š=z$$$|9ƶ¯³ZEE…ÜëëëÛªU«®]»ÆÆÆÞ},7ýÝrÅ;w¬ÿ»åmÚ´éÞ½ûo¼‘——§ÿ\¦7òsåGß_zé% ç-Z´pvvNLLTï-ÊÎËŽN¸]RÆ0ËŸ9€Z –7v\Z HJànÿê7ãX#‰å1®ƒ“f‡œÎb˜¥ÆjX,÷¯>±¼±ú)ê@dó'"›=¾ÿñߟÜoÎÉs±4ÞømðcÛÄòFžÌwhúJ2q¸ó¾þUž<ËPËÉ<ºõS’ÌeÚÑúéè{ûqòˆåÀ,J”bbªýÉæ‰X×!y‡ŽSe@,o`\Z HJ–-ÿÈÉhûþj ¾÷éçgRþü÷’ÜËô1j ˆå ñ)@ À2y”]Ÿí­ú0-wß×ô1j ˆå~)@  >2ùöVþQÍŸÐ>=N£ÆjXÎá@‘”ê#“GÛ꟧Qãµ@,çð HJu«(;/ý/; ž§Qãµ@,o\¸´@‘”èc ýjXˆå˱byáÒE P ö¨byƒáBüE P ö¨b9‡E P Ñþµb9Š @íP Är?Š @£ýj–˹´`©EÚ¬Y3c ›™M{åvíÚMš4éòåËú›ýôÓO5Mûöí_~ùeu;wî¼ñÆŽŽŽsçΕ›¦—ü}Œö¨X],`±ÃñX^³u.\¸0zôèñãÇë?ä™gžÙ»wïÍ›7óòò^yå•áÇ+Ëßÿýž={fV’™ 6˜^ËË®#©ÛÙÙÙôóÞ¸qÃÞÞ^™òÉ'%®+ó2ãïïoz9@,@,¿«XûÔSO)ó’Ïóóó•ùK—.988˜^ËXN,7öuñšÅòœœœ±•L<é‰'ÜÝÝ“’’”›Í›7///WæeÆÖÖÖôrÀêb9—,µHkñl¹ÂÕÕu„ —.]2öŒ‡’L~øðau gËÁß)ÐÇ@ûÔ±¼ \ˆ°Ô"­‹±›ÚÏÈÈ:|ûí·Ú ùn9ø;úh€Z –søbyÇòÐÐPÔÔTåëׯ7xÅucËþN>FûÔˆåšX‘ÞºY|îãO˯ÝhÀX®ÿ ö›7oþ\ùû䯿þºC¥7ß|SûwË .ø;úíP –h2EzåXê±)ïĶ’ä$ Þ¦ô1Ú Ð$c9—š\‘ÞºY|öý]ûºOá{»]ŸKŸŸ •`U%ÐÇh€Z€EÅrMˆrz|G›~qî#"›ûíÐô%“ÄruK==¾ËqÀNǑ͗‰LËÔ¹ìè„÷<eëÙÜO ä2E5÷ËüG\Êâ 2¯|ä‰yæ-u€X fýëèËË~ ç-žTO•Ǻ .8EãM;–s.h*EzçÖíóÛöïë:~{‹>¿\ì­¥?ÉVU}Œö¨Xf,çBü@“+Ò_OžÛ<¾Ëé’9¬­úíP –hø"UNžÿ³Çó±®CHæàm @£ýjÄr S¤…YÿJ[Y~ímÞ¦ô1Ú @,@‘”èc ýjXn6.-P¤%úh€Z –b9Är@,€X^g¸´@‘”èc ýjXÞ`¸?@‘”èc ýjXÎá@‘”èc´?@-€X€"(ÐÇ@ûÔ±¼Iþäää¡C‡¶©$3?üðﯿٯ- ËÛ·o¿zõjºÀ P ö¨bù©ñ¥~üñG—ÐÐÐÜJaaarSêÄòÄÄÄvíÚmÙ²…>Ôs‘”@£ýjXnÉ&L˜°hÑ"í% .œ8q¢v,ß¿¿››ÛgŸ}F‡ËkS»víΞ=«½DnJWcyttôƒ>xüøqz€X^ËlmmKJJ´—·hÑBå2¿sçNº€X^û\]]MŸ-ÿÇ?þáäätøðaz€XnX/-0nÜ8ýï–O˜0Aåòï'Ÿ|âèè(ÿÒ!€ú/R€èc´?@-Ë›€_ˆÿÌ™3mÛ¶ S¯Ä.7ÓÒÒ´c¹øâ‹/œ#""è@=)@ ô1Ú ˆå–pøoÝ,>÷ñ§å×nèßõý÷ß2äÞJƒ>yò䯯_ëwËeµ:¬ZµŠn0F”èc ýjXnîá¿r,õØ”wb݆æ9É£Jô1Ðþµ@,¯ÃëfñÙ÷wíl’ܵ½eŸ‹û¿åpŒÑ%úh€Z –× íK (§Ç£íûïõݦßö–þdr Q)@ ô1Ú `i±üg­Óã1®ƒãó\d³Ç+Ï““ÉÄò:–}ïÓÛ[<mß_ ä2E5÷“ ž²xƒÌ+ÿmÃ<óÌ7È<ÿo Xx,7ÏfüÆöVþj,ßå0 ÖmHÁé,$€X^îܺ}vÝÎ=^ÁQÍý$™G· ™ˆåuKÿ#²êÉó(›ÇcÚ"™­HJ ÑþµË‰åÆ.įœ<ßûÐïb]9g4Æ"(€>FûÔ,9–« ³þ•¶&²üÚ *À P ö¨b9‡`Œ(€>FûÔˆå(R€} ´?@-Ëï—(R€} ´?@-˱b9 –@,¯3\Z HJô1Ðþµ@,o0\ˆ H&ZÍšÕÇ_Ûúy–ú{ƒr/gÙ²eÇ·¼WZìº3C‡]±buÍŸ9€Z –søÆh€XÞèby#9………mÛ¶MOO·¼WZ‹Ã¬±'U—Ÿ>}ÚÅÅ¥¸¸˜ÒæÏ@-Ë9üc4@,oà Ù çákü¤›7o9r¤E¾Òú<[þså ómÛ¶QÚü™¨b9‡`ŒˆåÄòj=zôG}D,¿û‰ˆˆ7n¥ÍŸ9€Z 
–ÿŠK )ÐDK@:iii!!!NNNöööÁÁÁùùùƒ™zSfÖ¯_ïéé©Ñhüüü’““•å·nÝZ°`‡‡‡ƒƒÃêÕ«M¯l±=Ñßr3-:»WZZ:kÖ¬v•dFnÖ`Odå•+WºººÊÎLŸ>½¬¬ÌüF3ø,:u:{ö¬E¾ÒV-ìŒ=‹‰.dz¹ìÆ´iÓäI奭ZµJ{ýôôtoooJ›?sµ@, éÿ­ýOÔéÖ­[BBBqqñõë×g̘1yòä*³SPPPVVVaaá’%Küýý•åK—. ðyõêÕÙ³g›^Ù c{blËwObíÀ³+õïßáÂ…5ØYY6r¡’Ì,^¼ØüF3ø,­[·.**²†WZ+±|Ñ¢EÊ^)/M{}y:iLêˆåXN,×VPPàîî^evÊÍÍUæ%jª©sçÎú§L­\%í=1¶eƒ7½¼¼N:¥ÌË£Ô3«ÕÚYYÝHJJŠöFªl4ƒÏ¢Ñh´c¹¿ÒZ‰å²Ú/XÄr,9–;v,00ÐÑÑQù¤´­­mÍ2•$Ï’’3ƒ™AÆö¤Z[Ö^Yfäf öDî5½‘ê6Z§N~üñGkx¥µËu^šÎ‡Ø½¼¼¨_ –`9±ÜÛÛ{Ó¦MW®\¹}û¶ü«Ô¼¹¹¹Ufª.]º˜¦× c{bpË666·lârµÂªº™Ñ߈±]5ö,£GÞ¸q£5¼RmƺP•]KûlyJJŠÎ%߯ŽKý±üW\Z H&ZjÔqss‹‰‰)--ÍÈÈ Q—ûûû/Y²¤°°033säÈ‘Uæ±eË–ÈFÌù^´AÆöÄà–]\\RSSõ·<þ|õ×o½õVÍÂê Aƒ”o\ËŒö×¶M缾gÙ¼yóˆ#,ò•*}LûYªìBUv­ ¨{%¯Q{ãÇß²e ¥ÍŸ9€Z –ÿŠ ñ)ÐDK@:ñññ>>>vvváááêòääd???Fãéé¹nݺ*“gyyù¼yóÜÝÝÃÂÂjí‰Á-‡††:88èïUIIÉÌ™3•ë“ËŒúYèê†UåúäòŒS§Nվȹé]5ö,EEEmÛ¶MKK³¼Wªô1ƒ±ÜXª²kÉnL™2EvIvLûJìgΜqqq)..¶øÊ-ÊÎËŽN¸]RÆŸ9€·|Är?À PVùF¤~*|ùòåÆ ³¼WZŸ}LpÅŠVÒ %–Ǻ =ùÚÚ‚ÓYÔ8Àß;b9‡`Œ(b9¯”>Vß¾ó/Q¶~Ò SÎGî7xòœö¨b9‡ HJ &QP_£ÚË‹åu÷JfëÔ#æD6÷;Ø÷Ÿúߘ¶ƒôOžÓþµ@,çÒE P ¡Ž“y³ÇeúÌï¥#ϾÛnˆöÉsÚ`,"–Ш)ïæ™˜˜,fŠi;è³'^Œ²õ‹ušwè8£b9P‡òœÜqïÓj,jî}_Àw3þ¯$÷2€XÔS&²}òŸ=Ÿ¿s˜f@,ê#“o·ë#œÓãˆå¦pi€"(ÐÇP™|Ç=OWyzœö¨b9â(R€} µ¬(;/ý/;Ì9=NûÔ±œÃP¤%úh€Z –søP¤%úíP – HJô1Ðþµ@,¯?\Z HJô1Ðþµ@,Ärˆå€X±¼Îpi€"(ÐÇÐØÚ¿Y³úx;]?ÏR ä.^βeˆn ¯´îvfèС+V¬`,"–×â(R€} ­ý-,–7ò—SXXضmÛôôt‹¥uñ¤êòÓ§O»¸¸3Ë9üïJ Ë8Ý5ªÜX¥Í›79Ò^i]ïÌСC·mÛÆXD,çð¼_(€>F,'–WÃèÑ£?úè#bùÝïLDDĸq㋈å~€÷‹%ÐÇ,'–§¥¥…„„899ÙÛÛççç fêM™Y¿~½§§§F£ñóóKNNV–ߺukÁ‚«W¯6½²AÆöDËÍ´èì^iié¬Y³ÚU’¹Yƒ=‘•W®\éêê*;3}úô²²2óÍà³têÔéìÙ³ÖðJMd{ý—`l¹ìÆ´iÓäI奭ZµJ{ýôôtoooÆ"byµqi€"(ÐÇÐØÚ_:ݺuKHH(..¾~ýúŒ3&Ož\ev ÊÊÊ*,,\²d‰¿¿¿²|éÒ¥>¯^½:{ölÓ+dlOŒmÙàîI¬8p`v¥þýû/\¸°{"+ËF.T’™Å‹›ßhŸ¥uëÖEEEÖðJk%–/Z´HÙ+å¥i¯/O'ÉXD,šþ›iCŸR.((pww¯2;åææ*ó5ՌԹsgýS¦ÆV®’öžÛ²Á›^^^§NRæåQê™Õj퉬¬n$%%E{#U6šÁgÑh4ڱ܂_i­ÄrÙ í—V[±Är 1ÆòcÇŽ:::*Ÿ”¶µµ­Y¦’äYRRbf03ÈØžTkËÚ+ˌܬÁžÈ½¦7RÝFëÔ©Ó?þh ¯´Vb¹ÎKÓù»——õK,,'–{{{oÚ´éÊ•+·oß–µ£‘z‚777·ÊLÕ¥KóÏôdlO nÙÆÆÆà–MœC®VXU7"3ú1¶«ÆžeôèÑ7n´†WªÍXª²kiŸ-OIIѹäÛØ±c©_b9`9±ÜÍÍ-&&¦´´4###$$D]îïï¿dÉ’ÂÂÂÌÌÌ‘#GV™Ç–-[ 1ç{ÑÛƒ[vqqIMMÕßòüùóÕo\¾õÖ[5 «ƒ R¾q-3Ú_Û6½«ÆžeóæÍ#FŒ°†Wª=o¬ UÙµ,X 
î•¼Fí>|Ë–-Ô/±¼Ú¸´@‘”èchlí¯Føøx;;;ððpuyrr²ŸŸŸF£ñôô\·n]•y¬¼¼|Þ¼yîîŽaaa5ˆˆÆöÄà–CCCô÷ª¤¤dæÌ™ÊõÉeFý,tuêr}ryÆ©S§j_äÜô®{–¢¢¢¶mÛ¦¥¥Yü+Õž7Ö…ªìZ²S¦L‘]’Ó¾û™3g\\\Š‹‹‹ˆåÕÆ…øŠ @íß”’Füj÷òåˇ f ¯´îH®X±BgaQv^vtÂí’2jXÎáx¿P}Œö'–óJ€Äò]޾ž° àtµ@,çð¼_(€>FûWõ5ª=±¼Xnñ¯ô§¨‘6OHoï’ùO žÚ¨ EÙyéÙaÎéqjX€"(ÐÇ@ûÔ±œÃ€"(ÐÇhÀlï¾ûn€Ex¸™ý /¼°ÕôùçŸËP¤%úh4 ´Í`Å/^l ±œK )@ €>ÚM:–÷íÛ÷ͦLö_^…¼–Å0[¿~ý,'–@“Žå’l¯6e²ÿU&LèX´h±ˆåÄò†Œå/¼ðBbbbRRRzzzNN±,'–oÙ²¥S§NÍ›7—§ –7ÎX>zô踸¸ƒ=zT’9±,'–»»»ËÆ9[ÞhcyppðÖ­[cbb$™'%%5ÕXÎe6Š @íb¹>åZß|ˆ½1Çò   7J2‹‹KLLlª±œ¥(R€} ´?¬!–+1{åÊ•<ð€,¹råÊš5k~øá{î¹§sçΫV­’%Ú+«ê.“Ÿ9sæ7¿ù ±¼f±|ôèÑË7mÚ´}ûöƒËP¤%úh4öX>~üøsçÎ)KV¯^-KÆŒ#K^{í5™ «Ï³åj&÷õõÍÍÍå€ËP¤%úhXx,?}ú´ºD‰Äß}÷ÌgeeɼO½År29±œ¡ HJ Ñþ°ºX®½¤U«V:V·³³«ŸXN&'–ÿŠËl)@ €>ÚÖË===eÉ©S§êù’odrb9˯._¾\–Œ5êÇüé§Ÿ$Úõïß¿®c9™œXÄòÛ°aC=î«4pàÀ;wÖi,'“ËÀJcyƒ#“Ë€XN&'–72\f HJô1ÐþhÒ±¼oß¾o6zdrb¹Qü(@‘”èc ýѤcySA&'–3¼_(€>Fûâ¼ð ’Í$œ/n ÈäÄr†B€÷+%ÐÇhX`6“ÄKSˉå(R€} ´?ˆå –W—Ù(R€} ´?ˆå –šj,—ÝhÒ-Ùàû_Ý –±¼^cm>E­lün6B,ˉåõî×üg\µj•­­­üÛ 9\ýµ¶víÚMš4éòåˤňå@,¯óX^QQÑ©S§ððp///™oX®Ì\¸pA2íøñã‰å Ël)@ €>ÚÖËÓÒÒBBBœœœìí탃ƒóóóMçC™Y¿~½§§§F£ñóóKNNV–ߺukÁ‚«W¯6½²Ž={öôìÙSfz÷î¯.×ßf3-:;VZZ:kÖ¬v•dFnš¿Ú/6//ÏÙÙYg¹‰V2ãU®lðÕ|HEEÅÛo¿Ý±cGGGÇ_|±°°Ðc9?JP¤%úhXC,ïÖ­[BBBqqñõë×g̘1yòä*cyPPPVV–DÁ%K–øûû+Ë—.]pöìÙ«W¯Îž=ÛôÊ:† öá‡ÊLDD„Ì«ËmÓàŽI€8p`v¥þýû/\¸Ðü}¨2–›h%ó7^Ý=1ñÐÐÐdffJËLœ8qΜ9Är)@ €>ÚM2–k+((pww¯2–çææ*óEEE­[·Væ;wîlðô¯Á•µedd¸¸¸”””ȼü+ó’6MoÓàM//¯S§N)óò(oooó÷AÝHNNÎØJ&ZL§•ÌßxµV6ý__ß3gÎ(ó/^ôôô$– HJô1Ðþh’±üرcŽŽŽÊg§mmm«Œå—k4%Z›¡µ½öÚkÍþÛ믿^ƒmj¯,3rÓü}PŸÚÕÕu„ —.]ÒY¹º­T­¦3¿ÅÔ›’ϵ[¬yóæÄr)@ €>ÚM2–{{{K~»råÊíÛ·å_í”[TT¤ÌçææV™-»tébþ™m•rz<++K]’™™©ž<7¸MƒÛ4q¶¼ºaX¹±VªõXnìÕéÜôññÑn4CÏ%ßP¤%úh4™XîææSZZš‘‘¢®ãïï¿dÉ’ÂÂBÉÉ#Gެ2[.[¶, @6bÎ÷ÀU#FŒÐY8tèPI•ƶ)¡=55U›óçÏW¿[øÖ[oÕb,7ÖJµ˽:›kÖ¬‘+k–••ýðÃcÆŒ±ÀX–ËuÈÂøøx;;;ððp5ø%''ûùùi4OOÏuëÖU™-ËËËçÍ›çîîîèèffíÕ«×Þ½{uÆÅÅõîÝÛØ6CCCô÷§¤¤dæÌ™Ê•ØeFý@{­Ärc­Të±ÜثӹYQQ!»áëëÛªU«®]»ÆÆÆË ±ÇrXÉ¡'–±Är –ƒXÞDc9—Ù(R€} ´?ˆå –7~” HJô1Ðþ –›ÏØUÓVãÜ+b9C!Àû€èc´?ˆåÖË­$ŸËP¤%úh4ÆX~éÒ%WW×ÒÒRuIPP:_RRâââ"ëÜMä®õÜ»jÕ*[[[ù×òþ³€XÎPð~ úíëŠåâÙgŸ•¦Ì_¹r¥yóæùùùÊÍ­[·Ê½wnk7WTTtêÔ)<<ÜËËKæ‰åÖ˹Ì@‘”èc ýaI±<&&fĈÊüîÝ»e…O>ùD¹9hÐ 
¹WfÒÒÒBBBœœœìí탃ƒ•ÜÞL‹²þ­[·,Xàáááàà°zõj5¯_¿ÞÓÓS£Ñøùù%''«ûí·ßîØ±£££ã‹/¾XXX¨®æîîncc£ÿöìÙÓ³gO™éÝ»w||¼vÌ^¹r¥«««ìäôéÓËÊʪ\®3S³ý!–î*–———·oß>//OæçÎ+Qü7Þù .¸¹¹É½2ß­[·„„„âââëׯϘ1còäÉ:™V±téÒ€€€³gÏ^½zuöìÙê:AAAYYYt—,Yâïï¯, 0`@ff¦¬üðC™‰ˆˆyíX>pàÀ •dF}&–ëÌÔlˆå€»ŠåbÖ¬Yï¾û®Ì<õÔS‰‰‰Jr^¾|¹,×ßHAA»»»ÁXÞ¹sgõd¸v`ÎÍÍU拊ŠZ·n­Ìûúúž9sF™¿xñ¢§§§ºþ¹sç îFF†‹‹KIIÉÏÿùÞ»¤hõQ§NRæSRR¼½½«\®3Sƒý!–j!–'%%=úè£eee<òˆÜ|衇JKK}||d¹²Â±cÇ•¬ÛÚÚŒåFÉÌ:±ÜàMÉçÚƒoÞ¼¹ºÂ;w îÿk¯½Ö쿽þúëê£Ô§–Ù“*—ßýþ˵˅Äò¿ýío/¿ü²Ìÿþ÷¿_½zu÷îÝÕ{½½½%Ë]¹råöíÛò¯eu¾nÝ¥KƒgË Þ”ØŸ••¥¿'Æ.Ʀœ×~Hff¦zò\û¬¸Ì<[®³ü.÷‡XÞ`¸Ì@‘”èc ýay±üÝwßuvvþè£d^’[›6m”µ+ÜÜÜbbbJKK322BBBÔ¤*©855U]mÙ²e²ŽÎwË ¦Ü5kÖ 8P^VVöÃ?Œ3Æt ŽˆˆP/M§:t¨ì­ò¨Aƒ)ß!—™… ª[3¶ü.÷‡XÞ`øQ €"(ÐÇ@ûÃòbù¥K—ììì²³³e>++Kæµ®<>>ÞÇÇGzxx„‡‡k_&ÍÁÁA½Y^^>oÞÚÄrËÄrË€Xb9ÀœlÖ¯_¿E°2rЉåÐ(b9¬—|@‘”èc ýÑ>ÿüóÅÿñ /HH   ë°hÑ"~ E P öG£˜˜·uëÖMÐÚ×nDMÉA—C/€X€"(ÐÇ@û£Á$%%Ú    ??_²YVVVzS³wöòtÔˆn9èrè¥4ÕX€å!–@,€Xˆåº¸Ì@‘”èc ýjXÞ`øQ €"(ÐÇ@ûÔÂÿoï^à¢*ó?ŽëŠyG(K4kÍÕP³Í &šnZ–Šn…ºµ•­¦®ºæ–Ù¿Íú§¥ ®ºi©+^¹I*ਠŠºšZ"ñEÄKûÓÓž‡¹c13Ÿ÷ëyñ:sæÌpæ9Ïóã|™±œÃüò*K.œM˪kM&iÝÙ™ÊóeŒÆ?S€â`­N±Ÿm ÖúËö?×(SÔ"ÇžAÄrÀþ(‘f¦îÊbœ0þ™Œš T(“š2Es†Ä{Ë{­æ'-<›N3h'BrúÂøg Ë!3P9 LjÊÍfŸÄØk5—BsãF6Í }ŸÎé ãŸ)@,g„8Ì@å€2©)S4g˜A6‹åõêÕ³ùöÕ½O€jNSŠorÈJ­V›“““››«ÓéJKK6Œ¦#„f•ʤ¦LÑœaYν‹-rss“Ÿu0–×,·“öA5wøâ9'8...%%%33SêoQQÆñÏ`„Ðìq r@™Ô”)š3Ì Ëµ[·n¡¡¡~~~u0ëËA5§-¾ëßž#õ7''G§Ó1lÿLFÍ*”IM™¢9à ²Pe§ýýý•p.ËúW­^½ÚÇǧaÆ;vüì³ÏÔ¬+ Ë–-ëСƒ««k×®]µZíºuëxàww÷~ýú}ýõ×ÙX6nÜØ½{÷FÝ}÷ÝãÆ;wîœÁ6_}õÕˆ#¼¼¼ä>{ôè®\«OYâíí]¿~}¹xòäÉQ£Fyxx4kÖläÈ‘Ê?EªÞJÈþûúúÊc‘Ÿ+V¬ÐðúwPÍí¥ø®›5wÍš5Rãâ⤠åçç3lÿLFÍ*”IM™¢9à ²˧M›&{/ K—.•eu}rrrûöíwíÚUVV–––Ö®];ý˜-ñûøñãrÕ[o½Õ¢E‹Ç\½8lذª±¼sçÎ)))²N§3fLPPÁ6Ńƒƒ‹‹‹¯\¹²ÿþÀÀ@£Ï{ËÅœ:uJ¹øð禦–——Ÿ?~Ê”)ãÇ7z«-[¶Hð–¸pá‚ü”e9ZFï šÛKñxo¡ßµk×FFFʼÎÍÍeØ0þ™Œš=T(“š2Es†d.–WVVvéÒE’°,K$nÛ¶­¬Q®’¤­fW£³%„+Ër+ƒ‹’Ò«Æòìììÿ Ó³g[µje°MÓ¦M Œì}•X~øða£¥´´Tò¶Ñ[õéÓGö_?¥÷íÛ×âTsŠ/ÿLFXΤeŠtgc¹„ÕÙ³g«ƒ‚‚ÔøêááQRR¢^¥Äo5ÊÞ¸qC?*\¬ºpýúu£a[]˜5k–§§ç„ ¤+Ïœ9c&–+ÿDP|óÍ7^^^ÊëÕ4h`ôV-[¶”ý×,²ÆèTsŠ/ÿLFXΤeŠôóÅòáǼ[Ö¨QÖL,7š±ÍÄrÃ}2vÕáÇ,X0zôhùÕ!!!Öü®€€Éó§OŸ¾zõêåË—MýFó±œÂªyÕvëßm_8ïÙ 
S€b§Ã£îì0±œIÍ2Ê”5{Xû]µûX^XXؼysý¯t“eY#ë´ô"ö;ËU_ýµì†²ìââríÚ5S¿«qãÆeeeʲF£Q¯5¸UŸ>}bccõ‹þ‹Ø)°£jîëë}äÈfõâæÍŸÈ–Ÿêš/¿ŒîرmíËô¨æååûÞÿõÎ}5rmÖ¬ñ AÅÅ-6óëLí§/Î|6c×S@ZHÈL77Wùið»25x Ý;ï¼zéR†™Ý` 8^…TÇ€»{£ûï¿wøð€°°®_ϲ՞Ø{,75qzõêZucY©ÿxKJvÿå/cÛµkݰ¡‹üœ1cÜùó{¬™}F=ññióç?¿pî\±Nu"gj2ËkË-Z4vìXƒ•²Fùó¤¤$3ùfóX>dÈíÛ·_¼xQ~ݲeËzö쩬÷ññINNV_opWÝ»w ¹téÒÁƒzè!õZƒ[mÙ²Eö_…úXô?òÂ;ªæ“'?÷É'Q/ÊiœdL™ò¼ºfÉ’·ô/Öj~ùò¾>}ü^~ùÙ£G£**ök—Ú—XŽj£ÛïPZ·n¿^½zŽŸßƒF—L“¬¬°€€ž“&=G,wª ©Þ¡”ÇS§¶EDÌ—lù䓽eHËÍLœNÚ§¥­Ô_™šºâ¡‡:¨÷âE­ÜJþôœ<+}+?_yex÷îªþç«êì3ÕååûŽÛòÚkƒ=F,‡ó”)3“ñ>ßîc¹ŸŸßŽ; VÊõ ÌW­Z%ÉÜè¤Ù<–'&&öïßßÍÍÍÓÓ300Pý–µèèè:4hÐ@ý‚4ýûÉÎÎîÑ£‡«««$í%K–¨×ÜJ,_¾Ü×××ÅÅ¥ê¤Q8`GÕ<>~ñàÁ½Ô‹R—/§K—Žêš‘#$$,‘…'bFzÂãy³feeaaªÁÿûÕj»è‘GjÔHæQë•+ÿ¦ÖÊ æÊ¹‹¬¿ûnqãž**úßÓr©Ër•O›>˜TYy@½UpðÞÞ÷Ô¯_ß`ÏçÎ2bÄ€j•fb9ã¿êÑ·ß) -;{“¿7å´F–M õo¿ÝvÏ=­ˆåNU!«h¹•<©œÊE+wØèfÊ–Ò;¶mØÐEöJΪÕ_dê&¹¹[¥h{yµtumØ£G§M›>¶øïP,73q6nœ§þ{WiC†ô ûHíÏ9s&>ýt?ƒ;”5²ÛgŸùtî\Ú]w¹™ºV]c¦™Ô”)Gú+fªÂ˜¿ªê ªÙŽ9Â{ËØW5¿xQÛ¼yù)ËÅÅ)ÖW®d¶hÑT)Ö×®¼ï>/åÚ‡~ %åóK—2JJvO™òüøñ#ž=ÈŸ‡{ïõŒ‹[|áÂ^9{øÃžV7ëÜÙgçÎÏeýéÓ_Œ34(hˆrÕž=«åÎå§ü"¹Éï~×ûÃ'«·0àÑo¾IªZ1»v혞¾†XŽZÆrûÒ¦M#)By2D–ÍÄr9…"–;U…4z ¥fJåT–­Üa3›Ýÿ½ii+e‡SSWȹ{rò§æo"ráÂ?ü°«¢bÿ¾}ëZ|€w(–›™8rLå@:¡¬ÉÉ ÿÍo|e¥Ú-’vvï^mp‡Í?å!Xœ}fž-—¼4qâhõÙr3±ÜT72©)SöWÌT…1•ÁBwŒXàˆ%ƒ÷JL\* [·þý™gWþñ¯¼+éàÁ0ýÁªíüù=ÞÞ÷­æ½{?¹Àè)HVVØÿêÝ÷;[µj®,ˉHfæý“õMPr+õôÈ ¹¹¹Êy ±µÿö;äÄK‚œšË²Ì…¶mï‘5U_À,¹bàÀG匟XîTÒè–Sw÷FÕÚa3›I‡¨WmÙÜ¿ÿ#æoÒ´icÙùªÛ˜y€w"–[œ8›6}¬†^x2<|¾~·ÈŸžªï—5jÇš™}¦Þ[®Ž:uj›ÅXnª™Ô”)û+fªÂ˜¿Ê`¡f;F,ðËTó… gL$ 3g¾ü†,,X0]y’,(k¤åç'ôòjù߯ü•Ñj.§&F?´FyÂèI†§g ¹7i¢~ýú²^Ôm”zM,Ç‹åv:ä\döìWÕ‹$dÁ¿«kC__ïY³^Vž*!–;O…4ËÕ—I[¹Ãf6+.Ö¨›I5öðhnþ&2å±L˜0jÍštºíêmÍ<À;ËÍOåÙE?¿óòâ¥É‚ò9ycy“&wYœ}¦Ä£G£† ë÷â‹Ã,ÆrSÝȤ¦L9Ø_1SÆüU6Ù1b¹-ýpàØ©ðíùk ¢RÏù730SÍålàÁï—…GýòÐÌÌ Ê»’†íûå—ÑÊf=ål   ¹²ò@yù>Sw)ÅÑT57µFÎrL[˜yÞ†±ÃV±ÜN§ÀðáU¾‘4Àâ­ˆåNR!^µwo¨ú¹JVî°™ÍL›º‰´C‡"æÏŸ6zô –-›©QÁ̼±Üš‰1Ò¤ç^ý÷ês†_Ä®¾•·ÆŸžuæÌŽ-š½VÿUô¦º‘IM™r°¿b6‰å5Û1b¹ \-+?:geœ÷0éGý–Ð1ðäßÃo\½Æ”ÕÜhõi×®õ¿þéåÕRù?èÕ«åä@ ™þG†4nì~áÂ^ey×®Uj9sqi ÛëŸÃé-‡5Õ¼oßnË–Í®nÑüðÃÉ|äl2þíq œ=›Ò¼yý¯e’eY#ë‰åTH£WÉn<ùdï>šZ­6µYÕב>þxó7Ñoyyñ2\->@›Çr+'ÎõëY¿ýmiêWÊ©W½ÿþëF?òíÝw_«e,—¥¾$X¾ÿ~§zÕÁƒa»‘IM™r°¿b¦*Œù«lR?‰åµU’sR ä 
ŸÑ¾0yÿ§§žç=DY¹­KÐÅü3ÌjPÍ«¶?ý)päÈê?)•“Œ3ÆM˜0J]Ó½{§àà7.^Ô8°QÿÛb||ÚlÛö©úº¦ÔÔmÚܰ¤ê'…˜*ˆ;v|æáÑ<4ôý¢¢´²²t¹øÔSþ‹fyù¾Çëòê«#Žºr%³¸X“”ô¾ 5‹åv7BBfŽû”ÁJY£|õ+±œ ©^%å±  YN²åôTÿ Ò¬ÜaS›UýÔ%©Àæo2dHŸ/¾X&Anòé§o÷ìÙÙâ´y,¯ýÄ‘Ç%ð•W†Ëᓾ•Ÿòg¨C‡6%%»kËÕ±«Á4d̘¡§O!’’òyçÎ>»‘IM™r°¿b¦*Œù«lR?‰åµòÃcQMúK÷íèõ‚ô£AÏÄ„&u.ׯ¶J2ÕÜèÛ{¤<)¥Pi Lwwo¤¾ÉGZVVX\]J\¼øMµœEE-”3’ ~¥¿¦[·_Ë–íÛß·jÕ{«¹4)¯<Ú¸±»üRYºiMѼt)ã½÷&têÔ^~WÓ¦î¿ØÔ§éËÿfb¹ÝM?¿·o_n°RÖ(/Q&–S!ÕÒçævó+Žž}¶ÿÆóÔç~­ßaS›É²e³}}½]\Èöê1s9ËïßÿÙOÏóòâ->@›Çr›Lœóç÷Ìœù¢<:¥—$Þ|÷ÝŽ|´Jùž'ýw¡K„,¤| šŒ– æZÓLjÊ”#ý3UaÌ_e«3Lby U–\¼-}—1nêõʃF;·òBFê/É6ñ~_øÝ÷¥¥¥ÌpPÍiœ¾0þiLFˆƒ ÔŸí€~ûí¶Ö­[íÝʤeʆ­ºÿ\vÚTçbyöôé¸Ô€q¦2¹šÌýìÍ-ÿ,ݪÓéç šÓ8}aüÓ˜Œby[bâÒvíZ¥1©A™"–;u,¿vùŠòòõâì­»¸ &T¶Œ¼oˆF£ÉÉÉÉÏÏ/**"™ƒjΩ§/Œ¦±œB,gRƒ2E,'–×.a¯ôÚNÿ +{9ÎûIÙ~ËâÒ¹J2/--eªƒjΩ§/Œ¦±œÁ@,gRƒ2E#–×ĉ…¤×Í|ÇÊ^N=Q¶ß8uNtt´’Ìu:O˜ƒjNñåô…ñÏ –3ˆåLjP¦hÄòš8:g¥ôÚ‘÷>°²—3ÇÏí×½üæúõëcbb4t1O˜ƒjNñåô…ñÏ –3ˆåLjP¦hÄòš8öñZéµÃ³þfe/k_˜$Ûo˜ünhhhXXXRRRNNNQQ³TsŠ/§/Œ¦±œF,gRƒ2Eµ!–W[ATªôZÚ —¬ì儎OËöa.’X®³ÎPÍO„,”Y@3hÒ-œ¾0þ™ÄrFˆÃ T(“š2Es†Tç¾·|ÿ?ŽK=Ñ|&¿^ypG¯çovñï§GDDP§à„Õœf¦qúÂøg 8y,§9Ì@å€2©)S4g˜Au.–_*8«|u¹™w˜K&Ï÷gå©òˆµˆåp6•çË we)-9deäœàõoÏ[7k®TÚO-teñÏpÎ)Àq°ÊeRS¦hÎ0ƒêÕÁ‘ú]rF„Koå9óª¯f/ÎÞš0îf&wï1©dr^Äg¦ÕjãââÂÂÂÖ  ééé"ÊãŸ)À T(“š2GAõêæ•d¾¹q¿›ÙÛ¥WÚ —2ÆL‘(®}~ÒNÿ Ÿ^‡Ðr`ÄGK"n‘þå#ßà´rrrRRRdðËDX =Ò!Ò-Ò9ÒE”Æ?S€Âp€êä”IM™‚Ï zuvŒ>ž<|zd£>†o p÷xzbÄêµJ&—@ΤÁiåææfffJ‰‰‹‹‹„éééé"ÊãŸ)ÀaH8À@uòʤ¦LÁgPÝ奥¥Òe»’wD¿ùñ¦.£$oê::ì¯sÃ×oT¹Dq%“+O•k4¹‰Üç¡ÓédØçäähµÚè‘‘n‘Α.¢,0þ™Œ†„ T'? 
LjÊxÕÝX^QQ!]¦¼®cóó3$–o19ô彡ÿ%™<::Z}A‚Üç!e¥¨¨HF~~~~.ôH‡H·HçHQÿLFCª“P&5e <ƒêÕña*Ý'a;þ¥Ù7Ÿ-5UøçŸþÙ-²°bÅ ý7 ÈÆüï`Gêt,¯¨¨(**’°½sâÿK,û¦$ðÐÐÐU·ÈÂÚµk“’’4’Éecþw –Û2™—––f¼"±\¹V«ŠŠ ¿Ebccõß!@&ËmïÐÿ-—Xžùæb‰ßFy×¾,ddd𱼆Φe³ÒbK ˜(±\~ÊrÚäyú­êÆrŸW±Ü2IÑ_K^û&÷ÉqË-ãÙr±¼®SžTWž?v;!€Xn¥¥¥………yyyÙ;mò<‰åòS–nw̹¹+¹C>@,'–@,€XN,€X±Ü,>‰@,'–@,'–ˆåÄrˆåˉåË –Ë –@,'–@,€Xn«eåIž“X¾ÓÿµW¯ËÄòŸ‰äð=#þ*™\iYS>!–ˆå?“ìé!’Æc<—¿&a³›¿,ï~{1±@,¿ãrÿ±Yrx„Kï¢=‡äâ¿×'Þ|μA¯”ù+´·8p ¢¢‚£ –ÛØéXrÉá§Â·ËE‰ß¥¥¥Þ]v3¨»ûoÿtmzzºÄrYI2Ëmé‡Ç¢šô—þ凫ÕL^XXXPPÿÌTYéñ„&*^b¹¬$™ˆå6s1ÿLlë¡’½3ÇÏUÖ¨™ú§òeæ‡ÂHæbyµMËR¾¢üߟm±&“+·R7Û;ùã›_™vW¿£Ûw“ÌÄòj(=žÝâ ÉÕ‡f.¶>“$óOO“{ØÒæ©û²Hæb¹U.w.¾ýͯ:K=ë§”~{&æc¶’Ì¿/Ð%>ú¢ÜO\—ç¿:rŒ/3Ë-Û=l†déþ¯©_‡fÉ­ùˆu%™Ÿ>zr«ïÈ›÷6v¶ú;O˜ˆå&•Ï—d~ù»s?ûŠrë¿\¹íÉ´Œäÿµ;£Z·ÀIcùmÝØW”[ÿŒwÍži€X~[,WÔàýáúïKWËÄòê…jE µþ§¸×øNpÞX®ªYœV“y-ï§‹åújœ¥•dn“»ÀYbyÅílxWÄr±Ë –àäþ væþnðdIEND®B`‚python-watcher-4.0.0/doc/source/images/sequence_architecture_cdmc_sync.png0000664000175000017500000013331313656752270027146 0ustar zuulzuul00000000000000‰PNG  IHDR‹Áñ‰½½5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gU[zTXtplantumlxœTMo1½[Úÿ0â‡Ø6¡•*á£-D¨K{«*³ÀÅëA¶—|üúÚ „$Mvo»~~ïÍÌÛ¹´ŽWä*bv)õŠžCÎïFh-Ÿc*’z=bã™#•+‘K-­3Ü¿VÂA» -äJ>p'IC»¾¿€Bü*=̤ ¾žKhAW ,šµÌÐú«àÂåÏ–sC…0áv i¶@Q(44[ŸrÍÂ{…dA«« ëÐÀˆ*è’RXÖ?$.<¦C_ð5—ŠOB¶Å î8äáR(ã ćËj *4ƒ·¸%+¹öTRûê²kSD+˜ù¯¸FsÐd;ŠˆŽ5äÀ±ZÀ…€IBf`ïu¶0¤wøEÓJf°Í5M-Œ‰Tå4õ=ÞKu7$AdÃŒZDLài“ØB“‘oÑ‘L „Âm½¸%³œ)ºÝåû_ýñÛÛžŸ óèæj<ØúºlQ ØäÉL ©¸ÅkI{ªVÒÄ'×x³Wùχae1Ò7×þ7ŠsÌÉÿ4+à}ˆã}÷HãóDž2”ˆ]ú$—˘×îÛh>¥å"lÖß¾«¦…†7Po@rÑJš­ó÷ÐíOàM=iÔXõóx– “!ˆ°wå´!«±k¿ ª“Q Ò>|-´“9úͺ–>>9jWžÃréŠ\‰kœÇé õÙûý>bÉYó¬þó"‰§Iƒ ¥.î~ä[Ùš ë€IDATxÚìÝ\\õÿûlÇé8EJi);—ÎKéZ¤)ù¥MQSLÓíZ~)‹ˆìzù¥)7ËÚ›²®FJ›Ú$¦MÓ4RÖF“h4æ1ÁdT.&b:­©iêŸMA˜«EJ ¥˜û‘¯ž=?g˜æÏëù8<Μ9f¾çs¾3ïœ9‡y‘aM$4 Hh301òÇ~§ëÕÚ]'×Þ'ÿö<Ú*SØmHhauað-Ieû/Ëß;o¡~)/ݾEžeç ¡…Ãù#/´/U‘ìðÿâ·¾}ríú—nÿÞ“W–ì»$O&6¥|¥ßébÿ ¡…<ž©Sg’ÇÞI\'×®0¡Éœ2ÿÃEÿòðÃ755=û쳜F@B ‚ŸC{àvïÞ}øðáöööÁÁAö+ÚlÍì:´Ý·Ü) MFs¹\}}}ìW$´ÙšÁ½÷^zÍÞíJB{衇Ÿ}öÙ®®.ö+ZhÍàÏU¿wí ÿ(sî+ü—}ûö=øàƒ;wîlhhhmmíììd¿ ¡Á…Á··¼{5Ú-ß5i/üëwdž½‰×îÛºƒ„€„*缨~ëxøÿ¾Áó玿åð“Ÿÿ‡wã™)o_Í$žíÝ»—_9 ¡…2¤%ùÝ6oaóEÏÝð¿[¯]v¬´òð•%û>xõ{gÏ¦â™Ø½{7w @B 
¡þSgZþùÎ}ÖE*§ý÷pé5ï^{6õãFuM²wÛ@B ¡áááÎÎΣO¶4~¯v÷go|÷®úŸ½q÷-wîÝþ –Í$•©xÆ_¬@B ¡ .ôõõµ···¶¶6üí’Ð.ú—¦<8å÷Ib 8漘ÁÑÍ@ÏGÄŒûɈNhÿyo£D²G/]4ôË“òð_¼ôhB¾LyººÖ5å׿þõ… "ð•Ë.ÑÎû1„8ú"ûˆâd`˜óbG7]D<3î'#7¡u?|xß%WÉ{{}ÏÓòP’Øðððo·7í5åÉDçêM¿øÅ/$¡ÉÄ iªà^Û|÷ù#{Â9¼ö“»§Õ)°(N†Ð38ºè"âóˆ˜e?¡ í­öSû/»VÞØË?خų³gÏ>ýM2}ŸuÑ‘í{%¡ÉÄ iªàd÷¼óÎ †pçžÙ;­N}Ä@q20„®˜ÁÑÍ@ŸGÄ,ûÉHLh£gÏ?n+wõÒí[Ô-ž9sÆår5—Õ¼ûëÇ,q=vX&ªFÌ Oþä~©“öööÎÎξ¾>¯åÁ>b 8B]Ìàèf ‹ˆÏÏ»Yö“—Ðþ<6Þ’·Bݶñ‰?«hZ<{å•W~1å@þ?É<}êúÓ'_Q!-¢N£ÑÏíñаnÓÁƒ[[[?.‡Äàà ûˆâd`1ƒ£›.">?ïfÙOFVB“HÖöµÛÞMœW.ûÓ[ok¿oÔâ™xuÊË/ý扫þ_™Sþ=ýÊk2CDýÖ‘.xn‡‡¿{×îÝ»›ššähooïëëc1Pœ á/fpt3ÐEÄççÝ,ûÉÈJhß©—7sÐþµ±7†ô—ŸiñLF$ŒõNé~íôc¶¯¾ç ÿíLç鈺 .xn‡‡¾³áÁ”CâàÁƒ.—«««‹}Ä@q20„¿˜ÁÑÍ@ŸŸw³ì'#(¡½ò£ß½È%WÉÞr»;ˆ>ž©$¦žzõÐÑË5ïþ‘´ÿýCí©HitÁs{<ì[{·;wîlhhhmmíììd1Pœ á/fpt3ÐEÄççÝ,ûÉHIhÚ½õeÄ-ž©húxvaŠšáäî'Ôý÷Ÿÿ÷‘Òè‚é(NŠÝ t$´hMhÿõëWÔ½õOmÞHÛ”òwò^üVmàñÌ-¤=÷;{¿”ÄÅ¿u„fPpóÞg±|05õ£%%_~øáFQB›çMŒ%4ý[3›/±Ù>vóÍ7œ?ßW mžo!j„ ž¿ãŽŠŒŒOH›'&&È¡ñÜs;HhžÃÿø«µk¿™™™. eµZ®»îª'ž¸gö/2()+Y´èJÏéùùÿ#t•ãkƒ† õ·œ`5& -®šì—… ?æOïOfö}i[§ï n½ëËXøî¹§&9ùÃò¯×j4™> _ W­úºì&ã—Ý íOo½ýô羡î­ÿç±ñ‹ù§ÏÔågƉK iÏ”¾ûGÒ¿ü†S/¼4ç$͸à´ñ·ß>öØc›äøÿÛ¿ý‚|=“ÿ|üÿ¶q{}}OË¡˜——·çÐfÿYÃòå_+/ÿû×_wª´¶ÿÝ‹Ž„æ™c¯¾:§ªê¦ÿüÏfyø§?ýúé§ï-*Z9ߨæÏÿÔóÏ?¬Ÿøë_?’ý7aþ–cÜP$4Zd~=(++¸ÿþ5aû‰üþdfß—f–Ðè»"0¡…ùˆPƒÄÂ={6.Xåk»¯½ÖTX˜/ßXb9¡¯Ø ¯þÿ[ôÏ*ž¹ýé3uÍï 1ÒÎ÷¿qøšzïÖŽïŸv›«ÓhûWCqñ—î¼³R{øøã›sr>­þÓhõêÒÏ)Oåæ^a6_âpضo_ç¶N)š¥K¿hµZd™mß¾{nTj]¦þó;ô/ì‘Gî’ŠTÿgSZú¯g Ž ã5ÈvÓÓÓäM]~¹C¶ëÙ•/nÐ&¡NhjM«‘³gŸ,)ù²jä‚‚kÜ^¤ÁÞ‘Azÿ´´?ðÀ~Ûäž{jl¶™Lð»× úhƒö”—çuÓvÙ¾! 
tkmEþüç|½_m;³šñõöÛÓïš=w\ЋsÆoUTÏàÚ¸—ðzštoSž’híöeBÊVé…Œ+ÓàõL·¡9¦Ù´×ÊôÚ˜›3hÌi½rƒ®if­”bÆ´>zd¯É>ú¯ÿ:êµ ¯/À mgP3o?„f°f·ŠâÌÎþýgÆ ¾QùÝG³y›j%ò"_yå15EFäa€½Á®1~=Óm¨@Ž©@6í÷¨dsÆø+÷uTθUƒR̘îG|ë­¬,õ,_»^öÚ#Ü¥Í/eÇÔCÔõ'ß—|½G·i ú® ö]Áý2Î#B†êêo¨ÿhÈ'ãñ›Ðô¼þé³ÀσÍìü[„$4©ÉôÚžµBÍö1í©§Ÿ¾×`ËÌ Çê‘#÷kÓ[[·j¿ñ•yººéϳkg¼Í` ²]ý—r÷šÐ|-nÐ&¡¾M‘ð ~}ç¹ãÙ;ò–“óiýJŒÛDý¼!½fð™j°;ô+Ôoz?{Ð7‚qz6‚~O¸””,[ö?·o_'ïQ>Í mgP3o?„f°f·ŠâÔ:Š£òÛKÌæmª•<öØ&íÿ€%6Ëž °2Ø5Ưgº È1ȦýöÉlθ1徎Ê·jPŠ3øz ý¤úVÈóøã›¯»î*mºŒËƒzˆºþÄàû’¯÷hð™å«)軂Øwý¿ËÃvDÈtÙ–úúñöÛÇìö¿Ö¾Šè[¦³ó€äÀÿ•£×„¦ÌàZ2ý5lJ%4훥Œhƒ¨êªDãŸië\½úŸÔm-öìÙxö쓞3¸­AÆ þÀëK5>‡ækŠçv½&4ƒÏ_mžsheeÚÿ0I$(,Ì—vÓ®õ»wÖ®ý¦ô†øƒËàG}^Ûd6{mf»#À=ë« *Ðk#¸ çÏ·>òÈ]²ø¢EWJϨÝ)ÄoåO«foy¯íi°fãfŒ„æ·—˜ÍÛÔ J—AFï… vÍt_qCrLÍ –|5æ´aãÎÖ`UvM³iUZx¾È—ÑÜÜ+3^ÉœÿÊQö½þ´n}œß#꥗öýô§Õ’(¤(ï¾û¶ØHhmžëÐ.\x>!Áª]Búƒ¬Ô®TÖw…¾öŽ$ ‡Ãöä“õ1“Ð|5‚AzmƒAæÌÈøDà•xÍÌ2¡¬9 ŸX³ÿU’ß^b6oS{ê±Ç6ɘ 2”o9Ó}=Æ 5­o9³ì“ÖÐ|•AlUZؾTTÿǬðÀ¹å–² ¾%#?úÑ¿VUÝä·—ŽºþÄ×÷%_ïÑmÁ@š‚¾+’ZØŽˆÒÒ¯¸ý„JýBr…ƒ M3³d¥…´Y®'üw ‘ºÑN=ïÜù¯;[žòú×ë:ßx£Eû&m)PýIaä÷r³Ohn'£e|Z Í M“Ðþð—Ö˜úÿwùõ¯Ñ¿Hƒ½#*iiןj¤Müî5yUú»Z½þº3ö\´èÊYþÊÑW#W g#ÿxRÿ“Å+?š1xû´§ÁšÃð‰à•ýoÄ×>rûÅ™½MýSR¥úàÒ ìšé¾ㆠ¤' dÓ¾*Ó­1§Õñ¿/ƒUù:*ƒØª$´°}=<’™™.ÿràHך•õIu¥ìúÀ{é(êO¼~_òõݶHSÐw±ï Å—±0ÿõ_Gô¥.ùV¦¨û”Ä{BÓ›q¬R!-(« u,ßû{lÓâÅŸ»îº«´ÿçhmÝj³}lÿþ»åª Ï=·CKðú+;Ïž}R»tR[gaa¾ô jA9,µÿgÒfxä‘»22>¡îÇ ÿ¦§§i×S†4¡¹]D+‡Ù´šA›„!¡É§QYYÁòå_Ó.ŒV· êê:´`A–6³ß½óÚkMú{Ò&~÷ZiéW¤+ûícªÇÑ¿¿»C[á îâ«üV [#è‡%K>/‡ƒú#òIóÍ7ÜtÓu¶í´jÆàíÒžkÃ'–Û˜åx½;¶Áñµd¿üò—éÓ?ƒ·ÈSõl°k¦ûzŒ*ž0MûªL·ÆœVÇkܘ«òuT±UIháüÜûï_#È£¾‰þô§ÕêÎÆõ¥ý‰×ïK¾Þ£ÛÖi ú® ö]!ú2ê#âž{j´ëY´A¦¨?Œ× íÂ_ âª"-¡i 9%å#%%_Ö—”ä(’o«ê§Ã2¢ÿé¡rr>m2}ÀëýÜeNé¿äY«Õ"kÖ.âô¼9©L‘õe"ð„fp§ƒ5ȶä5Ëk“íþǬÖ~iàâmÒ;…¨?Mxë­ÿKûÁ±t^rÌ«]ð³Ÿ}Gÿ"÷ŽúI´$1íw ~ÛÄï^;¾UbŒ´‰¬$7÷Š={6Øž’‘dUÚ¦§›Ð|5B èÖú],ßdAYÜnÿëU«¾®¾ضӪ_o?Àöôµæ0|b½3õÇLï¼³R>l¤`ä\søðÏÜ^€Áñµöíû±|6ëè¼ÍŸòUÏÆ•9Ý×cÐPô„Ö’×ÊôlÌÀ;^ƒÎÖxU]S[•„ΟØäåexàlØð­¤¤Ëþýßÿ-zˆ¢þÄøû’¯÷è¶õ@š‚¾+ˆ}Wè~ÐÒ#báÂÏzÞhZ¦¨?Œ× -Nº`usyùŽŠ5ëKp,µ C„ Ñ[œ $4¾00ðyÎ~2h MdÐçŸî:é‚#yX±¢HÝ]¢H^^¶ºˆ3Î;…ð´ ŸX $4¾00ðyS ­®®.99YþÀ„6³†àGì9ìÙ³133Ýbù`FÆ'BE¢«SO›0ð‰ÅÀ@B#¡10ðyS máÂ…{÷î]°`Ažï"¡1Ð)0Pœ $4Ÿwq”Ð^~ùåÅ‹«œvòäIýS;vìHOO·X,—_~ùÖ­[µØ##õõõ‡Ãl6ÏŸ?ÿرc»ví’yäannî™3gÜb’ŒìÞ½[ 
Ì`µZKKKÜæ9uêÔÒ¥KåYµy“êY=5¥®®Îf³™L&yØÛÛ[RR¢–*((P«õ\JlÛ¶-##Cf“å½è³œ~…tÁt ' „ÆÀÀçÝ\&´êêj‡$·È¸6½±±QB—Êlò¯}âºîºë$MNN®_¿>55UÒ‘öpÉ’%ž MâÙñãÇe|ddäæ›o–Xå6OVV–„¨‰‰ ooo/++óz6lêOË«‡’].—lW¬©©)//÷ºÔÞ½{µ÷"+—÷"‘Òë é‚é(N Ï»9Kh’mrrrä_•ìv»yyyGÕælkkÓ'®žžm ò°¿¿_{hµZ=Zww·¶ªññq‹Åâ6Lòû{Ey¨£ó|/‰‰‰^—r{/Ï<óÌÂ… ý®.˜Nâd ˜Ih|=``àó.¬ Íét®Y³F{X^^~èÐ!5n6›µ´¦%1_©ÉëC_ó{}jõêÕ6›íæ›oÞ»wooo¯ß•+ü %¾ÿ‡­L^gó|/ž1’.˜Nâd` ¡‘Ðø¼›ã„VZZêvÕVqqñœ$4ÑÑѱeË–²²²ÔÔÔM›6²­… þð‡?7Þ" Nâd` ¡‘Ðø¼‹ô„öæ›o&&&jñFLLLÈ™.ã‹-2ø•c(šæÜ¹sÚÏ Ýnàá6¿þÙ^xA{Öm) rú÷"ãú_9Φ ~í'wËîaç m>­N}Ä@q20„®˜ÁÑÍ@ŸŸw³ì'}&ººº+V¸M”)꣩;…ttt¨³[nw zB+,,”89塇š?¾šž––ö«_ýÊ×¶²²²Ô»»»,X =ë¶ÔîÝ»åõkï%==]¦%¡1ÌÕx§ÀÀ@q20„®˜ÁÑÍ@ÏŸwÁOh .”Pä6Q¦hM¶'ÁÆd2É¿÷Þ{oHÚSO=µxñbÙ–Õj-))ÑîÞ!ïVâ–þnûúõH ËÎΖ¥G}}½ö¬ÛRÚÝöå¡ü+ã¾^|àþôû‘£/ªáÉŸÜß°nÓÃß½ë¡ïlýÄŽá‡üì#Š“!ÔÅ Žnºˆ¸þ¼›i?9î2Ô\.×ÁƒwïÞý ÂKÚ\Z^Ú¿««‹}Š˜ÛbG7è"âóˆ˜A?IB ¹ööv ÍMMM²{v"\¤µ¥Í¥å¥ýûúúØG 8¹-fptƒ."ˆ™õ“$´ëìì<~ü¸ìIÏ imisiyiÿÁÁAö(N`n‹Ý ‹ˆÃ#bfýdÔ$´“kï‹Ò.Xâ²ìÉÍ.—«5ª<ðÍï¶F-imisiyiÿáááXÝG”Å Š6ZŠÝôt‘pD„¹fÖOFMBÛ;oa”vÁ²3$1Ë^éêêêŒ*[Ìÿ£3jIkK›KËKû_¸p!V÷åAq‚¢–bG7=]D$a®„™õ“$4Ðæ <@Ñ •)Hh ÍAy€¢@*„ÆqE›ƒò(Zô ¢4¡EïB¢mÊ-zP $4ˆS$4 ¡Hh@B›®ï¤ÍAy-zÄ|%p·}Ðæ <@Ñ •@B㸢ÍAy-zP $4Ðæ <@Ñ •@B㸢‚Ay-zP 1‘и¾“6åP´èó•ÀÝö€„ ¡ 5Ž9²ÁpôèQÊ ƒ ë;isP'‰ó ëÖ­£œ>¶@%Ä`Bã©´9(„?¡-Z´¨3uÍ5×Ð>¶@%Ð@›ƒò@КČ71SÒz$4€-P $4Ðæ <@B‹ „öo|Ãårµ··wvvöõõ S][ Hh ÍAy€„67 ­´´ôàÁƒ­­­Ç—688Hu|lJˆ…„Æõ´9(Т1¡ïÞ½»©©IBZ{{{__Õð±*! ºšº“áÌžõ:[€‹ë+¡]ýõ>ø „´ƒº\®®®.ª @B-¡%\…?¡Íø5ÌþWŽ’ÐvîÜÙÐÐÐÚÚÚÙÙIuHh€p'´ d¡ †¨Pç1€„˜ã„¦ÆëëëÓÓÓ-ËW\át:Ýfsû Î^óÒücyh2™>üá¯X±âw¿ûñ94Ï? 
m¼ª@^Ã÷¿ÿ}»Ýn6›ÓÒÒdÜíýz}$4@Ú<þR€ M‚ÐÀÀ@SS“ŒKŒ™Á¯ׯ_âÄ ‘$£îP௟zê)Ib—^zé´Vå9E"™ŒßvÛm2~ûí·Ë¸Ò Þc íÆo¬®®^ºtég>ó™~ô£’©+ˆgÓHhÜt à–.Lh¯¿þºöÐd2Íò:4™~Ùe—²§Ó)sJæillœÖª<§¤¥¥É¸:á&oGÆeŠß÷à9´íÛ·ßÿý’<øÂ /P]ÏHh€'4¿O§£C‡]}õÕ—^z©$ƒ"º­D–’ô%SyäýÄ@VÈ‘àç÷=’Ð!LhœpÌìW޳Lh©©©2.áJƇ††Y‰϶nݪ_m€«òœb³Ù´e^Ï¡Í8¡ñ+GÀ 娨ë˜<ØaÖ|æ+  ÍAy \JKKƒ˜Ð>ô¡Éø©S§¼Îùñ\ÆŸ}öYÉT•••$4µÂÚÚZ·×f°*ã×ðío[Æo¿ýv¿í¶Ûæy\‡6›sh÷ÜsÏøÃµk×Ê¿uuuTÀÇâ¹fr/ÇÈLh?û—JŠ6å(Mh¥Ô)/¯Ï677_qÅ&“Én·oÚ´)„æù_’~WeüÞœºAˆ:“æõ^Ž$4€-P s–ÐÚÛÛ].W+ Ž-_¾ÜWBô®å“„V__/ùðÇ?þñ–-[~øaª âÙLZ___WWW' ŽUUU‘Ђ•Ðî»ï> i÷Þ{ï®]»ZZZ¨.ˆgüÅj@ÿb5¦›Ðø‹Õ€™üÅjHh$4 - '×ÞÇÞ¢ÍAy€„FBÀÇb»¢&¡í·’¢ÍAy€„FBÀÇb»Hh ÍAyÀgB»æškj0SÒz$4€-P $4Ðæ <´„†Ù#¡|lJ ¡6åÙ:zôèº÷}ãߘQ\\|ýõ×—F¯oœ“ÍJÖ%¡|lJˆÁ„Æõ´9(Ì—ËuðàÁÝ»w?Ͷþûæ9ܺ´ž´¡´dWWð±*!`®´···¶¶655IÌØ‰é“v“Ö“6”–ìë룢$4ÀÌuvv?~\ÆÁƒ0}ÒnÒzÒ†Ò’ƒƒƒT€„˜¹¾¾>‰ííí.—«Ó'í&­'m(-9<00à5_¹-²hÑ"‘_~ùe·y|Cóºf¯/ƒ @Ñ •_ ­¼¼\å(‰FÉÉÉjâÆ Õ¯OŸ>-óxMh"''gÛ¶m×]wgŠSסIÓ_‡ækÍ^_ :8€¢@*!¾ZCCCVV–ÙlÎÎÎÖÿ¼P¢TzzºÉd’ &óøJhÏ,‹Ä*Ï„&ª««Üîåèu;^F¤={þÌý½1D_>ê@Ñ •@B›!®ï ¢~§KªsBþù#/ÒæàE €T sìwÛHH“á€}髵». ¾E›$4̙ަ#ûLWIH{,ùo,×+®6>¥€„†p„4.¹ú±¤%œRHhþ©Áê¡áƒW«‘žG[9<šw\ß ÿùóƽõÉþŸ…>±´ù“׿ò£Õ94Ú’ hЀJ ¡yÇ=RCÏ>xÍ¡ÌÒý‰_úÅß9×òü;¦ÍÁ! Š=¨ÇUX½ºq§:u¦?iF›ƒC-zP $4Ž«pSíÑK¹4£ÍÁ! Š=¨ÇUXž=ÿjí®?tõÓæàE €T m†¸¾“6åP´èó•ÀßC€„$4@L$4®ï¤ÍAy-zÄ|%p·}Ðæ <@Ñ •@B㸢ÍAy-zP $4Ðæ <@Ñ •@B㸢‚Ay-zP 1‘и¾3rÚ|xxøÖ[ou8f³911±¤¤äرcïÕÓ¼™TÔÌ–‡$@Ñ @ìUߌ1m«V­êïï—ññññÆÆÆÅ‹“Ðæ€Ùlžœœô´4jJoooII‰Õj•E$× hsÖÕÕÙl6“Éä¹@BµdÉ’+V¸\.Ϝ斲æÏŸ¯f›˜˜¨©©)//×f[¾|ùÈȈץ¨7ß|óŽ;îÈÎζZ­v»}ÕªU2ÅoÖ’œ–˜˜¨Í644ä+×$´HÇõ‘ÙæÝÝÝ+W®\²d‰×¬år¹òóó%È©1šL&¯³‘Ðbµ<Š=¨„˜MhÜ#5’ÛÜl6{ÍZv»½¹¹ybbBÆå_íYZ\•@Ñ •@BCÛ|É’%MMMccc2>44T]]ŸŸ¯žJHH8wîœ6grrrKK‹Œ ,[¶ÌWBs[ ’E €$4Z¶ù±cÇJKK­V«Éd²Ùl•••ÚM7oÞl±X´æt:322d6‡ÃQ__ï+¡¹-…p={þÌý½1Ä! 
>•ЀJ ¡q\Ñæ˜{ýN—ìëý—]{þÈ‹”èÓЀJ ¡„ë;is„N×C‡¥Ã’¡ù“׿Z»ëÂà[”èÓЀJ ¡˜3ý‡~±Ï|õ¾|á‰O^ÿ¨õ‹®›¾7­Sj ¡~HS'ÓšRþîñÔ¯~J $4„–ú¦ÎÏþ|Aô<Úêwf€ 9×òü£ ×6|ð½ÓhûLW5~dɉۤΡg0@|%4®ï¤Í†x¶ï’«ÞÍf—\õôç¾Ñw°í‰?˜ÁHh O@*!¾ßÿhs„Îë{žVçÍô'ͦUòì† ¶oßþÜsÏÑž O@*„Ú3¤þZƒùj·“fÓMhk׮ݲeKKK M ú4ô Hh Í1£gÏ¿Z»ë]ý³¬èÓЀJ ¡6G¤Ô ôiè@%ÄWBãúNÚ$4€> =b¾¸Û> $4$4b"¡q}'m@Ÿ€1_ Üm´9Hh †Ð€J ¡q\Ñæ ¡Íy8oÞÌfð» èÓЀJ ¡6GÔ$´¹M8ÚÖIhÔ0zP $4Ž+*ñ•Ðff–HhÔ0zP $´âúNÚ$4èÓÐ æ+/æ ¡iNgvv¶ÅbIOOß±c‡zJã¹`oooII‰Õj5›Í###)))ÃÃÃÚ<2%55U¦xÎì¶umÄ`ÎM›6ÉÚä–——y¾©Õ«W'''Ë eee²iJÐDkB“p¥‘PTUUe°ˆ2þ|—Ë599911QSS#©I&ÞrË-µµµÚ<2¾jÕ*_3{MhsO‘yÊmÁuëÖ É‚Ú[ ¡ˆ¾„æp8êêêúûûý.âIUbb¢Œœ>}ZÖ£MÏÈÈèííõ5óEw q›³§§GwwwÛl6·ív»6ÃèèhRR¥Hh¢5¡utt”––J°ÉÌÌt:~šËåÊÏÏ·Z­êg&“IM_ºtiSS“ŒÈ¿+V¬0žÙ3¡ùS1›ÍžkÐÓˆñ„Æõ´9b2¡iÚÚÚ<ÏPy²ÛíÍÍÍ2.ÿjsÊâ‹-’ù÷å—_6žÙsÄ`N¿çд‹Ö@Ÿ€TB%4î‘J›#&Zyy¹$±’““ÕÄ„„„sçÎyÝÌ£]·¶lÙ2ýÊsrr¶mÛvÝu×ùÙsÄ`Îâââ‘)^¯CÛ¸qcaa¡úQåéÓ§µ Ø@Ÿ€T ´9"4¡¹Ñ'œ†††¬¬,³Ùœ­ýÊqóæÍ‹Åk´“y222L&“Ãᨯ¯×Ï#ñL–’¤çwfσ9kkkÕ­+**ÆÇÇ=c§„´ôôtYV"¢¼J…> =¨hsDnBèÓЀJ ¡6  O@ZÈp}'m@Ÿ€1_ Üm €„ ŽZ€“:ä=à<ú@@B@B#¡ÐDuB z ò»B" ¡yÇõ´9Hh$4Ч @ÌWwÛmŽ'´ššš¤¤¤ÄÄĺº:·äãôB:;;Ûb±¤§§ïرãâ_þåkmþÕ«W«?']VV622¢­D6d³ÙL&“Û+///—ùSSSkkkµUõöö–””X­V³Ù\PP000àu‹^g}zP $4Ðæˆš„¶fÍ 3CCC’Ž$ª˜ÐRRRÔª$UUUyyݺuEEE²æ‰‰‰ŠŠ ýlË—/×›[V,..ž"#Ú çÏŸïr¹&''eU2¤8¯[ô5èÓЀJ ¡6Gt$´´´´îîn÷®Ç_Bs8uuuýýý^ŸUìv{OOMJJÒf“ØæõuʋёWåõGŒÀ½nÑ×l O@*„Úём†¿ ­£££´´TBWff¦Óéô5³žö›FƒXåks.—+??ßjµ¯Ê×l O@*!î×wÒæˆÒ„f|MBÎää¤ñ Wmmm6›Ík^²Ûí^¯3Hh¾Î¡Éªš››'&&d\þõ }Íú4ô â.¡ˆÒ„¦®C“(åõ:´ ÈRÒd†²²2mzyy¹Êu’Ð’““ÕÄ„„„sçÎikÞ¸qcaaaoo¯ŒŸ>}Ú×Åczê:´‘)EEEÚœ² í²·eË–iÓݶèk6€èHh¢ººZ¢Ž×{9vttäææšL&‡Ãqï½÷jÓ²²²Ìfsvv¶ö+ÇÍ›7[,}.’–žž.‹çääÈ"~š¤ÄŠŠ Y­d-ý½eêeÔ××kÓݶèk6€¨Ih ¡ ¡ÐBƒë;isÐú4ôˆùJànû ÍAB5 €T ãŠ6 m}–¿›sp÷Ч  Ö¤‚AB#¡> =¨hsÐHh O@Zøq}'m ôièó•ÀW¡Mh™êëë‡Ùlž?þ±cÇvìØ‘‘‘!sss_~ùe5ÛØØXeee‘‡júøøxyy¹ÅbIMMÕÿi±zõêäädyª¬¬lddÄ-¡9Îììly6==]¶ÈQ„ ä í¦›n:wîÜää¤Ì AKÿðꫯV³UWW O)..–‡jzMM<Ô¦klݺu2ÿÐÐÐÄÄDEEEUU•[BKIIQ/f``@{€„ Þšv‚KR™ÛC³Ù¬Æm6[ww·— rj<--­§§G›®0»Ý®MMJJrKh‡£®®®¿¿Ÿ}HhHhÝ"“ñC·éZr3˜_Ïd2¹ÍÐÑÑQZZ*É-33Óét² -˜¸¾“6Gl'4›Í¦?WÈ9´/šÇBÚÚÚdåìAЧôw ß·CÐæˆö„¦]o622RTTävÚÈ™®Í¿qãÆÂÂÂÞÞ^?}útyy¹Û eŠúÙ¤$´äädö èÓz€»ís\QÁ ¡šÐÆÆÆªªª,SV®\9>>®¦ËHEE…Ùl–”åv/G 
iééé&“)''§¡¡Ám…2%++KÌÎÎæWŽ1lôìù3÷?>öÆ}zÐhM*Ôƒÿ„„Z¿Ó%¸ÿ²kÏy‘> =Hh´& ꄆ9ö»í¤exâSůÖîº0ø}zÐB‹ë;isнMGö™®Úoýâ“Ùe~(ßuÓ÷ÜN©Ñ§|«¸Sî¦N¦útéO|Íà”$4±œÐ"pØÿ¡|5Òóh+G1€„ ŽçÐ0çε<¿?ñKþÒ{ í¯¾°?áÚ_}c] wz€„€„9ž=j:iö<çgþ±{—ó‰?Ó2ZHp}'màËë{ž~ïg¾OšÑ§|«¸SHø¾‚6 qKý=´óUÆ'ÍèÓ>Åî¶ÏqEƒ„„ÖèÙó¯ÖîúCW?}zÐhM*Ô ôiè@%Ð@›ƒ„Ч  mq}'m@Ÿ€1_ Üm €„€„ 1‘и¾“6  O@€˜¯î¶Ús™ÐæÍ U/ÔÖÖf·ÛC·~Ч •@BmŽ˜MhARyyyGeÇ> =HhWT0HhÓNhA™MÏb±°×@Ÿ€$4Ž+*$´ˆHhü¾ôiè@B !®ï¤ÍÛ Mq:ÙÙÙ‹%==}ÇŽê)çÆÆÆ*++¦Èˆ<ô»èÓЀJ ¡ ¡”ÐRRRÔJªªªÜžõT]]]TT4<¥¸¸Xú]€„€„PBs8uuuýýý^gód³Ùº»»Õ¸Œ¤¦¦’Ð -8 ­£££´´4)))33Óétú[nO™Íf ¡ ¡ùnûmmm6›ÍoÜ’yzzzÔ8çÐ -L¸¾“6Gœ$´òòrõ«EIhÉÉÉjbBB¹sç¼®¡¦¦¦¸¸xxxxdd¤¨¨ˆëÐ@Ÿ€Q] Üm´9"+¡544dee™ÍæììlíWŽ›7o¶X,¾îåXUUe™²råÊññqèÓÐ z+„Ús™Ðú4ô Hh ÍAB5 €T ㊠  O@*!×wÒæ ¡ôièó•À•ôâ4¡Eã­Dü¾æÙ¼)ƒ¿|HhHh$4$4cB›§cµZ‹‹‹ÕC ¢S§NÉjÌfs^^^SSÓlrÈŒ—ÊÉÉq›˜››;ݵM+¡ùzã!Jhä:Hhb!¡i㣣£wÜq‡g’™3gÎ8ŽmÛ¶©¿“ÖÞÞ^VV6' M2’¾qœN§L ]B3xã$4HhAÀõ´9b>¡)‹Eôöö–””X­V³Ù\PP000 e›ììl™-==}ÇŽÚ‚«W¯NNN–éEFFFÔÄòòòúúúií¡ç†ôgüŒ·+3ÔÕÕÙl6“ɤ677/^¼X[jÑ¢EÐÖ366VYY™0EF䡚.ùJÞ‚¬<55µ¶¶6íú}ã¾¶åµA¼nEÔÔÔ$%%%&&ÊÛôÚ2[Ñ· }zP њиG*mŽ˜Ohî¸ãŽüü|õpþüù.—krrrbbBò€D5=%%EmE2[UU•š¸nݺ¢¢¢¡¡!™¹¢¢B›.)bxxxf Íë†Üföµ]™mùòånÁ)33ó…^y_2®_[uuµ¬gxJqq±<Ô‚<Ô¦kól×ï÷µ-Ïñµ•5kÖHf–é åzmƒ­è[†> =¨hsDVBÓ“PôòË/{Î&9-11Q;Žºººþþ~ý v»½§§GŽŽJ>Qãf³Ùo8ô•мnÈmf_Û•Ù$À¸-uï½÷–––ÊHaa¡vRNÍ`³Ù´ ðd$55U§¥¥ië—éÚüÛõûÆ}m˳A|mE^•çå‚n-c°}ËЧ •@BmŽÈJhÚøøøøÆµsh.—KÆ­V« oÚâ:::$çHZÈÌÌt:^“ž6ólΡùÚAÂÔ¶ëu’3%ó8p@þ•qƒ× å+_¯Íïvyã¾¶øV Và;¢O@*„ÚšÐí:4‰1ÍÍÍ2.ÿzÎÙÖÖf³Ù´™µ ÕôÊËËÕ…R›–à¡òÒÅ©_ZoÈóš×íúŠ+µµµ ›6mr›.ëן+ äšñv Þ¸¯my=‡æu+žC3Þ }zP Ñи¾“6GÌ'4‰awÝuWnn®z˜œœ¬]¶lÙ2}öPñ@‚“Ì£&nܸ±°°°··WÆOŸ>­]´¦ni¸uëV•ôNž<)«rÛô‚ ä•KH“ •••oHòÕ¹sç´×ìk»Æ'”<§k×›ID,**r»mdŠL׿÷»]ƒ7îk[ž Í×VÔuhÒ\úëÐÜZÆïVèÓЀJˆî„ VšÆb±Hh¡žr:&“I’F}}½ö;¡¡!++Ël6gggk?>Tq"==]æÏÉÉ‘y´é§NR÷„”§$Œyþ=´ŽŽ‰…jC÷Þ{¯ñ†6oÞ,¯S3¼nwº mll¬ªªÊ2eåÊ•êù§~ùYQQ!¯A"¢Û½ýn××÷µ-¯?¹ôÕª’¸$’i÷rôl¿[$4—Ð@B@B ¡ ¡ ×wÒæ ¡ôièó•ÀÝöA›ƒ„j=¨Çm@Ÿ€T ´9"'¡qïuЧ •@BmŽHIh3ì¹Èu O@ÚÜâúNÚ$4èÓÐ æ+o9æ2¡iYKF¶mÛæp8ÌfsnnnGG‡šît:³³³-KzzúŽ;Ôœ5OoooII‰Õj•e Œ×)jjj’’’ëêê´‰«W¯NNN–m•••ŒŒøz$4±ŸÐ$ 
MNNnÚ´iáÂ÷6—’’¢Ö)¹«ªªÊm)eþüù.—Kœ˜˜èU^^n¼Î5kÖH“éããã2¿š¸nݺ¢¢"™(+©¨¨Ð¶åõÐÄxBSã¨Ìf³w8uuuýýý^—ò$Ë&&&¯3--­»»ÛmA»ÝÞÓÓ£ÆGGG“’’ ^ @Œ'4¯Ó;::JKK%/eff:N¯3»\®üü|«Õª~úh2™Œ×é5àÍûKÚJ¼¾€xOh\ßI›#>𦭭Íf³y}Ön·777OLLȸüëw¾Î¡i°y¥ O@*!Þ÷H¥ÍŸ ­¼¼\¥) HÉÉÉjbBB¹sç´™eºv©Ø²eËü®S]‡&3ë¯CÛ¸qcaaaoo¯ŒŸ>}Z»˜Íë }zP $4Ž+ÚQœÐ. ¾Õýðaùwº ­¡¡!++Ël6gggk?2ܼy³ÅbÑæ‘é&“ÉápÔ××r^®ººZbžÛ½%¤¥§§Ëzrrrd»/ôiè@%Ð8®hsDeB8z¢)å+2C¿ÓE3‚> =¨hsÌAB»0øÖo¾woÓǯ“§døÝÍ´!èÓЀJˆ„Æõ´9¢%¡ =ñÌâÊ}—\½ï¯¾°ï’«µ.êk~Ž}zP 1•ÐDxB[_ýÝ÷O I_ÚgÊSçÍ>xõ£ ×ÏHhÂÐÞÞgj8þÏw\{ŸŒ¨ÿ²bœñ¹€„ ¾ÚúêïÞ_\ùè'þgSÊß5¥~U%´}æ«Ï6¡õHhšдëÐÎyñùòõZ¿ØüÉë÷]r! ?S¡Í- MM¹0øÖ©Í{]^¢N¦ýç}M´!èÓЀJˆ„Æ=RisDWBÓœ?òâAû×ø{h O@*„ÚsŸÐ” ƒouÖ5Œž=OK‚> =¨hsÌqBèÓЀJ ¡6  O@Zp}'m@Ÿ€1_ Üm €„€„ 1‘и¾“6  O@€˜¯mó•+Wþ_q#''çÚ»qÞ'Öú&Ï~æ3Ÿ¹âŠ+®¼òÊkˆ$UJ# lÙ²…ï|«•@BC<¶¹ä–yD˜Å‹óƒo5 Hhˆë„VÓÔ{LOO_b7ÎûÄ:ßäÙÏþóŸûÜçòòòIª”FÀÜúÈG>BBã[ ¨â=¡I†y3¦©¶víÚ¹­y¶¡¡¡µµµ³³“côi€W×^{- TBì$4®ï¤ÍIh$4€ÏÄ@B»òÊ+].W{{»t˜}}}ÃÃô =¨„¨Lh@šúÅ`Ì'4ýåò°©©Éáp˜L&õ0®šÖnãÌÈô¿˜i“HÞ_!}ýÓ-Œà¾ÁHÞJB›?þÁƒ¥Ã<~ü¸ô™ƒƒƒ´ Hh$´ÿ>öÓÒÒäaoo¯ñR$´ˆMhÁýB¶ó -Bv("0¡eeeíÞ½»©©IúÌööö¾¾>Z $´ØIh•••³Lh~Œù_9’ÐHh$4„'¡]qÅ>ø „´ƒº\®®®.Z $´€Ú÷¿ÿ}»Ýn6›ÓÒÒd\?§¯§ÔêëëÿæoþÆb±È³6l]<{饗ÔgœÐÜî-S†††ÊÊÊ,SJKKe+žsº­Pæ¹v^ŠÌ/ òÅ/~±··wdd$!!!33S›GÆ­Vëðð°Ì\^^.ÏÊü×]wvîN¿þE‹É¿òõE¦8p@Æóóó}½ÙhnnîÑ£G 6ê9³ß/âó+óçÏ?sæŒç²¾Þ ž¼Ô•+W&&&úz¾Æ322ÔORÕDÏ=H »½žîîny;^gðlÏ-új.Ï¥ÈNÉÉÉ‘£Cæ·Ùl’í Ï×k3~›¶¹qµ¾s=W>›#‹„ó MJZÚÎ;¹|@t'4®ï¤ÍÜÐ$wÉøm·Ý&ã·ß~»ŒkIÌà)µ†¢¢¢³gÏöõõÝpà ò0D!í·¿ý­ÃáõKVœÖÁŸC“/‘ò°¹¹ùСC2rÓM7égS_¦Ý¬X±BžúÑ~ôÐCÉH^^žL\µj•Œ·´´Èø3Ï<#ã2E›ùøñã§OŸÖfv[¿Jeò-VÆ+**d\Öàëíô÷÷«¿7`°Q¯3_ àT‰×ùµÆYºt©ç²¾Þ g‹­_¿Þ 9K 󜞔”$ñ ££Ã`‡ÒÂn õï+Àv0Þ^«N¶.©FïÚµK§¦¦ž¯×fü6lsãj|çúÝ¡Ó:²Hh$4ð­q^ Üm1Ûæ³Lhêê¬ßýîw2þúë¯Ë¸Lñû”Z믾ªž:uJ~êSŸ ]<“oÃÓý…ŒqB³X,òprŠŒÈC¿ßÕ"-º»»e¤  @Æå_—)¾fö\FF†Ùl·Ùl z)E§Ó™-ó²Q¯3|7ž_k«Õ깬¯7èÙb²ƒ„æµå¥5dº4έ·Þ*!Á`úma=y#ú÷`;ï ¯U§Þ²~\»E¯ÂóõÚ¦û6½¶y ÕÈFýîÐiY$4øVƒ8¯HhÞšÛ/Õ/¸¦û”Û³Aüq£Ï´¯ésžÐyä·¯¥¥¥êô—vBL›yllÌøUmÙ²EVUUÉ¿ðÜhJJŠ<%Øí‹»×úšÙ׸ñü$4Ï78Ý„&e##n/ ½½½¤¤De¡¬¬,ƒ=è·…Ih´›ÁlïÎs|f -ð·Ä„æ¹QHh|«•@Bmò„f³Ùdüõ×_÷/^Ì®$¡|Ž€„F@#à"wÛ縢‚Ih$4€> ¡Ñ€J ¡6'¡‘Ð>G@B=HhWT0 „ÆW Ч$4zP ñи¾“6'¡ÅvB“5­éaí(çqÛ[ú4€„F*„ÐHh¡IGÓ]§6?Q ¡‘Ðæº£$˜ ¡ ¡$4 ¡!˜ 
­···¤¤ÄjµšÍæ‚‚‚5}||¼¼¼Üb±¤¦¦ÖÖÖj)È×t·È´iÓ&™Af“™ÇÆÆ´§V¯^œœ,ÓËÊÊFFF´ùëêêl6›ÉdÒ'.Y°²²2aŠŒhëñõô/¦¦¦&)))11QÖLy ¡ˆ»„Æõ´9 -Jš|q¹\“““’j$ùh §¸¸xxŠŒháÇ×t·„¦ŸGQÓ×­[WTT444$Ûª¨¨¨ªªÒæ_¾|¹>°©‘êêj™_[<4~ ÚÈš5k$mʆ$Ëi[}@B£•G {¤Òæ$´(Mhz’ÓÕxZZZOOïîîÖ¯én M?ÍfSãv»]›>::𔔤Í/iJ¿¸‘eqm=©©©Æ¯Aÿ"µAŸÐè@%Ð@›“Т&¡¹\®üü|«Õ:oŠúáE‹»|Ý/ÑWBÓ?4›ÍÚt½énK¿ãù¹2> ¡Ñ€J ¡q\Ñæ$´¨Lhv»½¹¹ybbBÆå_¿çÊfyM»ÎÍ ÑéÏ¡é×Ã94ú4€„F*„ÆqE›“Ðb<¡%''·´´Èˆd§eË–¹]o62¥¨¨Èït·ˆ¥Í£¿mãÆ………½½½2~úôiíš7_ M»ÞLmËí:4Ï×àvš¼#®C£OHhô â4¡q}'mNB‹Ò„æt:322L&“Ãᨯ¯×ß³±¢¢Âl6K„s»—£×énKžR÷l”™eí) iééé²¹œœyIÆ mll¬ªªÊ2eåÊ•Úz|½ýz$Î%$$p/Gú4€„F*!N@B‹Ò„’ž‹ËÀÐÄ(¾å€„ÐÐ m¦, { ¡‘Ð""¡ mŽq}'mNB#¡ýEçÅÁçHh @,VwÛG̶9 „6W«%=Ò§$4zP $4Ðæ$4ÚÜõ¤D2ú4€„F*„Úœ„FB#¡ÏÐ@­I“Ш‡i$4§Ó™m±XÒÓÓwìØ!Sd§hKZô÷÷§¥¥ŒŒÈþÚ¶m›Ãá0›Í¹¹¹Ú<555IIIú?íkf™.óØl6“ÉtqêoRWVV&L‘yèwqmDOMìíí-))±Z­²TAAÁÀÀ€Ûœn+™îÖAŸè@%p§Äl›“Ð"$¡¥¤¤´´´Èˆä™ªª*ikk“̦ͰråÊÚÚZ•[ÊÊʆ††&''7mÚ´pá{[\³fÄ!™>>>.QM 9^g–éË—/—¼§VWW O)..–‡~w{ý’*e j\¾W¹\.YdbbB^Iyy¹×¥´‡ÓÝ:èÓ@B=¨~™ƒÿ $¡ÍyBs8uuuýýýú‰yyy{÷î•‘3gÎÈ ½TnÑÎ2It1›Íj<--­»»Û½óò1³L—ä£Íf³Ù´ee$55Õïâú­H¶ÌÍÍõ|˲Tbb¢qB›îÖЀ„B›Ð:::JKK“’’233N§šøÔSOeeeÉHyy¹þ‡‹^sŽ×«¼œÙí¡¯$æuñS§NÉkîííÕ¦¸\®üü|«Õª~Ó¨~Hø‹ñ»u$4àkHhmBÓ´µµÙl6íannîúõëÓÓÓ'''s‹¯sh„"ÙbOOw;‹e¼øÀÀ€dÈ_ýêWúÙìv{ssóÄÄ„ŒË¿~cát·€„|- ¡Mhååå*_IBKNNÖ¦766Ê’¯)~C—ºM"“Ûuh$4™¿¸¸xxxxdd¤¨¨H%˜ñâ‹-R¿Ãԓׯ]S·lÙ2mæ„„„sçÎy®mº[@BmsZ„$4™ž••e6›³³³µ_9Ц¦&õCG¿ íâÔ-7$¹ÝË1„666VUUe™²råJuÁ[ ‹{½—£¼þŒŒ “Éäp8êëëµé›7o–õ{½—ã´¶ú4Ð@*»í#fÛœ„! Í—’’’ÆÆFôi ¡Ñ2ô Hh ¡aŽÚÞ½{õ7ÜèÓ@B=¨Hh˜›„f±XÇ /¼@«‚> $4=¨Hh˜ã„Ч„FB£•õ ë;isZ¬&4î“ú4Ð@*!ú@B‹Ò„æ7€‘ÐЀ„”ÐüwC$4$4 ¡„ÐÐZ|%4ýŸo®¬¬L˜"#òP›aÓ¦M©©©‹¥¼¼\›$4$´ÈÅõ´9 -ÚZuuuQQÑð”ââby¨Í µé555´6èÓ@B=ⶸÛ>b¶ÍIh‘–Ðl6[ww·—‘ÔÔTm†žžmºÌFkƒ> $4Ð n+„”ÐÜ®73›ÍÆÓú4Ð@­ B•Ðl6›þ\çÐ@Ÿ T $4ÌYB«©©Q×›ŒŒ¹]‡62…ëÐ@Ÿè@B‹\ßI›“Т=¡UUUY¦¬\¹r||\›¡¶¶699Y¦WTThÓú4Ð@€8¬î¶ÿ $¡ÍyB3™L´ÐÐÀ! 
mîÚ™3g¸® HhHh -"šÕjݺu+­$4$4ðAHB›û„ ¡ˆÍ„Æõ´9 „ð9èó•ÀÝö³mNB sB›Û>G@B=b£Hh ¡!8 m¿jÐ?€: ô ¡ÑšT0 z ¡> ¡Ñ€J ¡6'¡‘Ðè@€„zÐæ×wÒæ$4Ÿ»às$4Ð æ+»í#Æ?Ih$4 ¡ˆ"$4Ð@B@B#¡‘ÐÐHhHh ¡!ü íÂà[Ý–IhHh$4±Ð¸¾“6'¡EiBëâØcÉ_–ú®9ymŸ# ¡QT Üm1Ûæ$´¹MhßzqemcÒyJ†ß=ÐL­‚> $4=¨Hh$´p'´þ'Ž=ý¹ò½¦¼w'^úÅG?ôžæç¨UЧ$4zP $4ÐHhaLhÛw¸þ–Gïý«…ê¼Ùþˮݟ¸8ÔñŒþÔ Hh  Ö¤‚IhÔƒû³s;°ƒÀçHh  -¬¸¾“6'¡ExBkؾó‰YÿxÆõÿ‡2ÿA§}¦«ºy’Ö}@B£•k ¡EzB{ÿ«Æù#/>_¾þÑå;?{SㇿDH@B›³„¦¦\|ëÔæ=‡ß?™ÖY·6À $4ÚÜ$4Íù#/´-¤ HhHh „PBS. ¾ÕY×0zö<- €„FB ë;isZT'4€> ¡Ñ€Jˆ©„Æ=Ris às$ºŒv÷ŸÚ¼7N†Ñžs$4zP $4Ðæ$4ÀçHä’Ü2ç,1lCç–=$4zP $4Ðæ$4ÀçH¤'´×6ß}þÈÞ^ûÉÝ$4zP $4Ðæ$4ÀçHt$4É0ï¼s"†‡sÏì%¡Ñ€JˆÇ„Æõ´9 „ð9BB‹Ø„ö³ÒÊíÛ·KOØÒÒrâĉÞÞ^=¨„Oh „€„± í߯_±aÆ-[¶<ôÐCÒN:EB@BHh$´hèvçÑñ‚„ƒ íîüæš5kÖ¯_/!MúÃ'NÐÐ -  $´˜LhÿñÍoÿà?¶aÆíÛ·?÷Üs$4$4€„¡ íÔ©SÅÅÅ f³9//¯©©)„æ6ÈÁQIÌ×ë!¡Ñ€J ¡6'¡ESB“03<<ì+¡-_¾\‹RëÖ­+**š˜˜¨¨¨¨ªªRÓåÛŒËåšœœ”é’Ž$Õ¸e!ãÅõRRRÔ×&‰yj†¶¶6ÉlÚ +W®¬­­U+—˜'k“íJ Z¸ð½·¿fÍI‰2}||\^ŒAB«®®–×3<¥¸¸Xj3ÈCmº×•€>-æZ ék– MûO“é6ÛÇV­úúÿø+=¨ÇLB‹÷„f6›}v@óæIÔÑÚíöžž5>::*ÑÎsÉK‰‰‰^Z ‹;Žºººþþ~ýļ¼¼½{÷^|ÿ™êט²ríì–lT{iiiÝÝÝF½êû¯Êf³isÊHjjª6ƒö:eºÌFuѧ‘ÐB”дñ×^k*,Ì//ÿ{=¨ÇLB‹÷„f|Íí¡žöÓG—Ë•ŸŸoµZݦ¸¸^GGGii©¼¤ÌÌL§Ó©&>õÔSYYY§~©ýpÑså^§¼)·9µŒçk:èÓâ6¡ÝÿšŒŒO˜Í—È¿?ÿùúyä©Ë/wX,LOOÛ¹óúĵté­V‹,•›{ž}?ö¨~ÿûçd~¿5~=$4zP $´ÿÆõ´9 -Jš>ö'4»Ý®®1s#Ó›››'&&d\þõ|-îU[[›þäUnnîúõëÓÓÓ'''ڴΡéÏ•q>„æ5>íÙ³Q2Øo~Ó ã'Nì‘PôðÃ?ÔæÑž’eü±Ç6©§²²>)9êOúµZª¬¬`Z Í`£Æ¯‡„F*„Т>¡©ŸnݺUE¬“'O.[¶Ìk Ú¸qcaaaoo¯ŒŸ>}Z»Þ,99Y»xL–Õ–JHH8wîœßÅÝâ¢ÊW’ÐdµÚôÆÆFY­|gò•µ‡ê:4y%~¯C“gÕõf###EEEnסLá:4Ðòò²¹_›ÞÚºuáÂÏjóèŸzöÙí‹]©Æ-– üG‰ŠŠk¿r4بñë!¡ ¡$´¨Oh§þšº£ÉdZ°`ö÷Ð<1()+==]fËÉÉÑn±èt:322d¢$½úúzm©Í›7[,ýJ¼.®'ÿöî.ŽúÞ÷Žëvå ElJ÷R~{(ý“¦”K9i1jS[«KWÄü®¥iÜk¹Úi­ŠXNNc£xÓHijóÇü%T.IìVc ¡5IS ¦¸R‚H{?2vº]vfgöÏëù˜GÃ0;3ûÝÏ~¿ûÎì ³gÏ6›Íéééê·…’òEGŸ íoc·‘phä^ŽEEE–1………ÊåmÊ åååÊ=' Ôå@t&4³ùÜ÷ß?¤.—yõL—¬£õ«ÒÒï[­»é¦kwî,;uêiŸw IJúøí·ïÝw>wª<$4$4€„ -ôI€¬««›”My½þ ˆ0ƒ§NŸ\÷øÐ[ýÓ•Ðd:r¤æç?/ÎÏÏNL¼ðnóëF#$4$4€„FB á§V]í~Ãý‰8yò$ו!Jt7:å¿+fÞé½/O$¡ee}Ñý[…2oä[ŽîÓ[o5Y,ñ+¡éìTÿxHhHhÿÀõ´9 „ ‹Åf³:thR¶óðÃS-ôiQâÍÏÊ{_¦Ç.üÆï—þêlß™Úöí÷¦¦~òÈ‘å´XJJ’,q¿Sˆò«ßÿ¾vÖ¬õN!99ó%°½ÿþ!™6o¾'-íÓ~%4ê T 
Íè§CÐæ$4À82õzš^¬ýÈ¥ä“s.ª9÷âæËnv?¥æ×Ýöe‰ü+óîëüêWKe¡ÉtŽüë~·ý§Ÿ®Z°à˲<&Æ’—÷õ?üa·¿NMk§úÇCB£•@Bã}E›“ÐHh}Zx„4eª5_¢žRÓJhï½÷’ûeá>‘Ðè@%Ð@›“ÐB1¡1111)Ó“Ÿ²+3/8VzMh>xÇ•W^LB#¡ñ©T $41¡ñQôiÑéHñšê]vÅÞ¯-yê³yO~æÚ×îÛ¤u-..63óó¯½ö n“TBx'4®ï¤ÍIh$4€q$4ãYƒíÛ¿¹ìuñ_ÿíuwõ4½ø×‘÷•ßê\‡I TB”&4€„FBbñ¬òï5þý¤™Ç $4@B#¡˜ þ=´ó湟4#¡‘ÐÐ ÀT¹B›üVÖùÜÚ @(tY!{l‡ƒ„€„†hçt:vìØ± òyÚí·ß~çwÞ{ï½´€i·íšÂ?B’d`’á©­­a@X&4®ï¤Í'¢¥¥¥¹¹¹¾¾^FÄ͈τvË-·üøÇ?¾ûî»i+„&ùÈN# DÈ`$C’ L2Õ€J ¡!êÚ¼««KFÁ––§ÓÙŒ€9‡vûí·?øàƒ´B¶†i„ŒdH’I†§>rð©T Q׿2þõõõÉ@ØÖÖvñ™ÐJJJÊÊʪ««i+„l Ó2É$“ OgÏžå#Ÿj@%Ð@›#zП֬YS[[{øðaÚ ôi˜v\tzÐèjÞ-V­ZõÁߨ^x£úçªe‰Ä³-[¶455;vŒ&}øzP ‘ŸÐ„ˆ 6¬ùg²¤¶¶VâÙáÇ;;;i"$4 ¡˜"húg²D²Ù±cÇ$žqÉ;ÐHhHh"ËŸ^:äóŽ þN}ŽÐ°¦ ¡ÑÕ2¼!ì ¼þzýÌù'7Ý02R2YSÛö[ëg^q¦…Û‡€> =¨„ÈJh|]6GP žêÚrùkMb =¨hsxñב÷÷]½xßÕÙAgÊ´çë×>šùçç i OC°qÑè@B£«exCXzá†[š.½â¬«ØxК1c†ÇŒÁIöòD–}ׂïÒ@Ÿ>„TBØ'4“îÈK÷¤]þî[·ù´Nh2ý¥ïöÇ>Ÿ]—û#§ÓIH@B€½öß?oø·ËÚnõ™Ä&1¡ÉôNÇOvY/¯ûn1! Hh>жe{ýÌyo¿z‹‘se“›ÐdêýÖš„Ýr7! Hh@´ëÚólý̯ö¾ðCƒßf¼cñât‹Å”˜ø¯÷ßÿ5¯ í®».NH8OÖùÎw¾pæÌO|†´·~WTûÕúÿª ¤.:@B£«exCبž‘u¤ôÿ7~½YqñÜk®ùÜŸþt«L23>¡-[6ïÛßþlOφ†Šo¼1£°pŽ¡?’Vú=9’'}Œú4ô Â/¡ñuÚ“¥}GÍãÖ¯ê\æÀ’’Îÿã—(óøÃ’ñ -99N]a`à¶øx‹Ïx6ðæmµ3¿¶õß¹mÛ¶††BèÓЀJ ¡6^¯,ÿÙž´Ë‡ÎÜÀ=Ç/ŸñÏL¦ñyÛýÇÒ®Þ‘óÃM›6=òÈ#;vìØ³gÏÁƒÛÚÚxu@Ÿ€$4Z´yÔy>ïÆýöÿÐùCÕ~Cëî.2~§ÆoæU%¿zûŽmÛ¶IHÛ¼ysmmmss󫯾Ú××ÇKú4ô ¡Ñš Í£ÎûCÃOÿûÕ-w.4xÚ™3?‘éÛßþìø„vï½—}ë[ŸioÿàÖ#o¼qÓâÅé:ñìù[o¨þÿþ£fãæšššêêê­[·ª§ÑZZZHh Oäà¢#Ѐ„FWËð†ðÓ~ä÷õŸ˜ÿ‡ ‹}ÞËñÆ3ÌæsÎÓº—£„´û·x“é_þçÿLܹӮÏŽ®»¹æc—×üb]ÍèÓÀÇqЀJׄ`Òuuu½TS³+öâžç~Ø_6ók:µïGÕ±_­Y^¡Æ3¾å€„$4’8tüøñýeåõŸ˜çóÖŽœÞ¼m×Ç¿VsóRxÆBЀ„àgÏžUBÚs·üÄç­'2)7o¬¹¦¨æï$•mÞ¼YâÙÖ­[ëëë¹Û>Ðü#¤=›}½þ­'2)7o¬Ù±Óãò³-[¶ÔÕÕíÛ·x`rqÑ]-ÃÂ;¤ýþå–§2®Ò¹µcÀ“ûÍÕï7J<ó¸üŒxú4ô þÆÝöA›C i]]]/7ï},QóÖŽMãoÞ¨\~&dþ™gžiii‘]Ï@Ÿ€T ´9þa`` ­­íwÛ·Ëë>¹Óø›7rwЧ •@Bm=êiN§³¡¡Aù"¢¤)™‘dU3ž×…ÚÜã™ÌÈ.¸;èÓЀJ ¡6‡Ñ¦ÞnQ‰jÖ/Ûäe;²AÙ,ñ ôi6.:=Hhtµ oˆ´¶gÏžÚÚÚmÛ¶mÙ²eó8ݹ|³?d#²)Ù l–xú4ð! 
ô Â>¡˜âvðàÁæææ†††ºººÚ “Ȧdƒ²YâÐøÒÚÚÚ^}õU Tûöíkž0ÙˆlJ6(›%ž ¡ €!m``@¢TWW—dªã&‘MÉe³Ä3$4 ¡€hÁEGHhtµ o <Š=¨„¨Ih|]6åP´èñ•@BmÊ-zP $4ÞW´9(€¢@*„Ú”(ZD6.:=Hhtµ o <Š|=¨„¨Ih€)°wïÞåŽçž{Ž#¡ à 3+V¬ ÀHh@BøÐæÍ›W‚És饗’ÐHh@B˜Ð$T¼É#íIB›b\t€„FWËðÊ$4è%´n¸Áét¶´´?~¼««k``€z¶@%„qBãë ´9(ÐÂ:¡-\¸°¡¡¡¹¹ùàÁƒÒúúú¨7€a T ´9(Ц'¡åææîر£¾¾^BZKKKWWõ0lJ ¡6åÚô$´k®¹fÓ¦MÒœNg[[õ0lJ ¡6åà&4åÎòÓ•…ô÷¾uëÖääd“ɤ¬3e‡ª~ËQÚæÍ›kkk›››?N½a $4ºZ†7Pˆð„f0Ï„rB³Z­òÛ£GNñ¡’Ðø†-P ˜Ð$´ îÝã·$4Ð’Ðþë¿þ+99Ùl6'%%ɼÇjUUUŸùÌg,‹üvÕªU^s‹¬ k8p@æ7lØ ó’^d~ß¾}2ÿéOÚà¾ÊËËM&“ÇAîܹS@–WVVª¿R|FÆ’„FB€INh3ã3¡I€‘ùÛn»Mæï¸ã™W#²šÝn?uêTWW×µ×^+?z i?øÁäW«W¯–ye5ùWæ+**d^~kp_½½½)iJ²Ù\ðÄO9‡¦µƒi$¡]wÝuÅÅÅW_}õ¾ð… /¼P¢ •QȄƽ¶J–ð™Ð’’’dþü£Ì¿ùæ›2/KÜW{ýõו;¦u®i×®]Jn‘|uÞy祤¤X,™—t'Ëå·÷%9Ðã zè!ùW6xèÐ!ƒßrÔÚ‹Áƒ4xMrãºuë6oÞÜÐÐàql€(ABL~BuÌf³×_yüÖäå+…Û·o—užxâ ùwëÖ­ÿøÇey`û矾ü+›Õy Ÿ‘Áƒ$¡&?¡qÂ`ð[ŽÊMß|óÍ·'pM\uÕUòÛùóçgeeÉsÇÈY®®ãs_ã²±±QBÚyç·sçNƒ Mg/FÒHBã[Žÿ¾å¨P._nhh¨ 1˾pE-hsP²… IhwÞy§ÌßqÇ2Ûm·Íwmص×^{êÔ©·ÞzKb‰üxß}÷y.÷ß¿²¾r¡—ü«üXQQ¡®ãs_^ò©§žRΤ­[·ÎHBÓÙ‹‘ƒ4xí¡‡úÙÏ~¶|ùrù·²²’z¶…•ȽIh ÍAyÐôÚÛc·ÓPÎ;iÝËñsŸûœÉd’u´â™x饗”ï*'¯$Ô)g–<¾¨¿/­ƒÜ½{÷yç7Ãí>%: Mg/’„0lJVBkiiq:Í!æž ³šA›ƒò@-^¼ØkBëïï׺œ Æš×òòòûï¿Íš5[·n¥Þ‚ç‘›J#€a ¡Y $´®®®¶¶¶ã!æ©ß{´9(YQQÑø„&ñlÕªU²üßøqk" í׿þµ„´µk×nÛ¶­©©‰z ž5æ§À°…Ьþb5`¢±úüóÏÿèG?úÝï~WÆâÖD±š¿X $4ÀDHh$4 ¡Hh$4ÐÐQŸÐ.½ôÒLiOÚ{eù¯i$4ºZ†7Pˆ„†` ¡ [ "*¡ñuÚ”¦ÀsÏ=·âïn¸á ¹¹¹×\sÍÂpÝuÓ»I¿$4€a T ´9(Èét644ìØ±cSDXS¼,CÚSZUÚ¶­­¶@%Ð@›ƒò€Q---ÍÍÍõõõ*6‡¿Ÿ]ý½i?iIiOiUiÛ®®.j `Ø•@BmÊF?~üàÁƒ'jÃß²/\1íÇ -)í)­*mÛ××Ga $4ºZ†7Pˆ4]]]$ZZZœNgsø{äæŸNû1HKJ{J«JÛ Pc|è„0Nh€)&¢¯¯O²D[[ÛqLiIiOiUiÛ³gÏRc$4 ¡ÐHh@Bц‹ŽÐèjÞ@y-zP Q“ÐøºmÊ hÐ â+„Ú”(Zô Hh¼¯hsPE €T ´9(P´ˆl\tzÐèjÞ@y-øzP Q“ÐHh" Àtt=3è|Ѐ„ øÑk<š Hh¦9ªÑ¦  ¡ÑÕ2¼òÐKhòcee¥Õj5™Lî¿v8‹%11±¼¼ÜçrQZZš ¿ÊÏÏw¹\^·Ч @˜VwÛmŽ ”Çø„¶xñb÷@¥Ì”””äææÊò¼¼<ŸËW¬Xa·ÛûûûGFF ŠŠŠ¼n O@€0­hsLQB“X5þ·V«µ³³S™—ŸË“““;::”ùÁÁÁøøx¯ÛèÓЀ„FkRÁ <ôš×XîNýN#—½> =Hh´& Êc¢ Íý\YGG‡ÏåÉÉɽ½½>wЧE9.:=Hhtµ o <Ih%%%yyy®1 .ô¹¼¬¬,''G o'Nœp8$4Чã @$UŸi§s1–І††n¼ñF³ÙœP^^.3úË•–’’b2™222jkkIhHh"êCM Dœ— ‚¥ªª*111&&Æáp ù\>qÑ]-Ã(€¢@*!j_W ÍååÁ½@@Ÿ€ÑP $4Ðæˆê„6)›%=R´è@%Ð@›ƒ„6}]'‘Œ¢@*„FIÑæ ¡‘Ð@Ÿ#¸èô 
¡ÑÕ2¼òøPccczzºÅbIIIÙ¸q£,Y°`ú§¥EwwwRR’Ëå’œ³~ýz›Íf6›333[[[ÕuJJJâãããââ*++ÕPäueY.ëX­V“Éô·±?u½dÉ’Ø12£ÞRçáêŒ;eaggg^^^LLŒ<*;;»··×cMø»wЧã •® @¸˜9sfSS“ÌHž)**’™ýû÷KfSW(,,,//WrK~~~ÿèèhEEEVÖ‡Ÿ¨–-[&qH–KTSCŽ×•eùâÅ‹%ï)?Ûíö1¹¹¹êRÓy¸ÇñKª”-(óiiiN§S222"Gâp8¼>JýÑß½ ¡ @pÙl¶ÊÊÊîîn÷…sçέ®®–™“'OÊ ½”Ü¢že’èb6›•ù¤¤¤öövÏÞJceY.ÉG]Íjµª•™ÄÄDŸwß‹dËÌÌÌÁÁÁñÏK§ŸÐüÝ;ÐWkkëÂ… ãããgÍšÕØØ¨,|æ™gfÏž-3‡Ãý‹‹^sŽ×«¼ ®ìñ£VóúðcÇŽÉ1wvvªKœNçüùócbb”ï4*_¤4~0>÷€„$4SdÿþýV«Uý133såÊ•)))£££ú¹Eëš‘P${ìèèPæ=Îbé?¼··W2ä /¼à¾ZrròîÝ»GFFd^þõ ýÝ;€ á¢#$4ºZ†7Pr8J¾’„– .¯««“d²iÓ&Ÿ¡K¹M"“ÇuhFš¬Ÿ››;00àr¹ìv»û•`úŸ7ožò=Lwrüê5u‹-RWŽíéé¿5÷ú4ô Â5¡ñuÚáRµµµ³gÏ6›Íéééê·E}}½òEGŸ íoc·Üäq/G# mhh¨¨¨È2¦°°P¹àÍÈýÞËQŽ?55Õd2Ùl¶ªª*uùêÕ«eû^ïåè×ÞAŸ€T ´9¦§<òòòêêêh:Ч •@BmŽi.êêj÷îôiè@%Ð@›czÊÃb±Øl¶C‡Ñn OÃÔà¢#Ѐ„FWËðÊ hÁ‡0ЀJˆš„ bpŸ $4 ¡•FB@B€°‰p@B@B€ÁEGHhtµ o <<ØÐÐÐ’%KbÇÈŒü¨®PQQ‘˜˜h±X‡º O@*»íƒ6Ç$—‡šÐŠ‹‹ívûÀ˜ÜÜ\ùQ]A~T——””О O@*„ÚÁMhV«µ½½]™—™ÄÄDu…ŽŽu¹¬F{‚> =¨hs7¡y\of6›õ—ôiè@%Ð@›#X ÍjµºŸ+ãèÓ0e¸èô ¡ÑÕ2¼òðLh%%%Êõf.—Ën·{\‡æÃuh OÇA*!,€pá~/Ç¢¢"˘ÂÂÂááau…òòò„„Y^PP .}ƒ§NŸ\÷øÐ[ý$4‘üQŠ&0¹L& Hº’¾=ÿ²Ó{_&¡ ¡€'Ožäº2AÕ¶e0™vêš×Ë·í;CB@Bïbbb~øaÚ@Pu?õÛó%5ç\ôä§®ÙóUçõÿéqJ‹ŽÐ‚Ž®–6åP´ðiÊÉ´ú™ß|<ñ*Sj=¨¸Û>hsXLLLL!5Õœs‘2Ó±«™^|ªAøV ´9(P´?=M/¬ö#žF«1]\wÁå‡o­àè@B£5©`PE‹iˆg5ç^üA6;÷âg¿|CWÃþ¿Ž¼OË€$4Z“ åP´˜Roî|V9o¦sÒŒ‹Ž@ZÐÑÕÒæ <ŠÊßC«5_¢ÒŒã @øVwÛáaðÔé×Ë·½ÛÖísM€ðEB‘†„€„@BÀ?ã¢#$4ºZ†7PE €TBÔ$4¾®@›ƒò(ZôˆøJ ¡6åŠ=¨ï+Ú”@Ñ •@BmÊ-"$4ºZ†7PE‹èý6c÷Ǧ•e ÚÁˆB¼í (€„@Bˆl:—š466¦§§[,–”””7Ê’ ÔÖÖª+tww'%%¹\. 
]ëׯ·Ùlf³933³µµU]§¤¤$>>>..®²²RMh^W–岎Õj5™LòãÐÐÐ’%KbÇÈŒüèóáêŒ;õHJKKäéäççË1óÒ$´èjA›ƒò(Z4sæÌ¦¦&™éíí-**’™ýû÷KfSW(,,,//WB‘ÄžþþþÑÑÑŠŠŠ¬¬ÏË-[¶,;;[–KTS”וeùâÅ‹ÕìT\\l·ÛÆäææÊ>îqü’*e ÊüŠ+d^522RPP <Ð Ü+»íƒ6åŠQÄf³UVVvww»/œ;wnuuµÌœ>} )óÀÌf³×„æu­ýj-—P—Ë‘ø<~­õ Hh“ó6mÊ hL<¡yMPîL&“וµð::ûõëx&rüŒ$4FVÚ”@Ñ­„–œœÜÛÛë3YiC 8¡iíWk¹Î94¿Ög, ¡1²Òæ <Š@è&´²²²œœœÎÎN™?qâ„ÃáP–ÇÆÆöôô¨«)סIò¸-à„¦µ_­åÊue®1v»Ýçv´Ög, ¡1²Òæ <Š“ŒË1¹÷r”“’’b2™222jkk•…«W¯¶X,î)..–Øæq/Ç€šÖ~µ–K2,((0›Í ÷fôw}Æ]-mÊ hA Çô'40ÐHh ¡$4@B „ $4€„¢—bRZˆÜ<#àÃÐz`”Ü„„FWËðÊ hЦ?¡¹ÿ­jZ8ŽÜm´9(P´Hh$™¢ÎÔ<„„Öc ´9(P´Hh$4 ƒ„ÆÈJ›ƒò(Z¡”І‡‡‡ÅbILLtÿ#Îyyy111f³9;;»··WÉ9*Õ<¢‘ÇC†††–,Y;FfäG¯ª¢¢BILO]GëO]ŸÑz^ ‘• åP´˜ \vˆÀZIIInnîÀ™Q“LZZšÓé‘u$íxÍHZ«yÍQŠââb»Ý®îQ~ôú÷£’-û›Ð´ž…AB£«exåP´àCB7¡%%%utt(óííí^“Œ°¸¸8¯Ik5„fµZeGê½>Äý¨ä!þ&4#Ï‹±€„@BCh%4­Ìãt:çÏŸ£|AÑd2y]_k5#»P˜ÍfŸQ×1žÐôw  ¡˜Ð´Î5%''ïÞ½{ddDæå_­ä£µšN:²Z­î{ôëš$ÀÑÑQeÞårq„@BCD%4åz-׻ݮ&™„„eÍÞÞÞE‹©Ëccc{zzÔ‡k­æÎã!êbÊu®CSŽÊý:´9sæÈS&»ËÏÏ÷yÚøçÀTà²C|/Ç‚‚³Ù,YËýž‡©©©&“Éf³UUU©ËW¯^m±X|®æÎã!CCCEEE–1………r^šŒ’¬#‡§®ÓÚÚš™™©ìníÚµú÷rôú¼@B£«exåP´B7¡±€„6io3Ðæ <Š 0Ð@›ƒòE €„ ƒ„ÆÈJ›ƒò(Z$40Ð@›ƒòE‹ÈÆe‡ ¡„FWËðÊ hÁ‡0DTB›ô{rEÆ‚ˆJh$4Ðñ¨@B ÍK‚ ‘„F®#¡Ð@B#¡„0¸ì$´nÔ%ëׯ·Ùlf³933³µµU]¹´´4!!Áb±äçç»\.¯{)))‰‹‹«¬¬ôÈZZQ°±±1==]6›’’²qãF¯G¥µwYAvdµZM&@B£«exåP´Â;¡y NúûûGGG+**²²>ÜæŠ+ìv»,)(((**¿‹eË–eggË:ÃÃÃÕ &´™3g*‡ÔÛÛ«nÖce­½Ëj‹/ÖŠ‹£±€»íƒ6åŠ Í{pRæ%¤™Ífe>99¹££C™Œ¿‹¤¤¤öövÏOÞ¾šÍf«¬¬ìîîÖ9*­½ËjÛxé#`, ¡6åŠ M/8ÏWî¼~«ÐëÅc>ZkkëÂ… %tÍš5«±±Qke¯{çr5­Iƒò(ZјВ““{{{õ@ÿšÄªÑÑQeÞårWû÷ï·Z­^Ckï$4­Iƒò(Z„(.;D` -66¶§§ÇgB+++ËÉÉéìì”ù'N8Žñ»P®C“(åõ:´9sæÈÞ%¤É ùùùêrÙ”’ë$¡%$$x=*­½“ÐHhtµ o <Š|CD%´Õ«W[,ŸßETbRJJŠÉdÊÈȨ­­,S 3ÞIDATõº—ââb W^ïåØÚÚš™™)·Ùlk×®U—˦fÏžm6›ÓÓÓÕo9z•ÖÞIh3ðBHh €„@B$4ÐHh ¡L.; $4ºZ†7PE ¢Zèß{Ãçrûp ¸Û>hsP hÐHh ‘•6åP´Â9¡M{þ!¡‘ÐhM* <@Ñ ¡‘Ð HhŒ¬´9(€¢…a\vˆÀÚðð°Ãá°X,‰‰‰åååj¼éììÌËË‹‰‰1›ÍÙÙÙ½½½JøQé¬6>2UUUÙl6Y'--íùçŸß¸qcjjªü˜™™yôèQeµ¡¡¡%K–ÄŽ‘ùQÿEiiiBB‚ü*??ßår‘ÐHhtµ o <Š|Cx'´’’’ÜÜÜ12£Æ‰RN§stttddDÖ‘Œä5ÿh­æ‘Ю¿þúžžYM@‚–û—\r‰²Zqq±ÝnWD~Ô?Â+VÈúýýý²ë‚‚‚¢¢"ZøŽ¼f€„Ú’’’:::”ùööv¯ñF¢T\\œÏü㾚GBSOpÉ:?šÍfeÞjµÊ¨G"ANÿ“““Õ僃ƒñññ$´ðÅkHh ¡yÉ3êN§sþüù111ÊwM&“×õµV3² =–«ÉMg}wZG a“ÐtÎPíÞ½{ddDæå_­¥µZ Íjµº‰‘shZ—½Q 
$4€iÆe‡˜Èuh®1v»]7 Êš’‚-Z¤.íééQ®µZ M½ÞL9ëÐÆaYYYNNNgg§ÌŸ8qBëJ9ÐèjÞ@y-€°IhÃÃÃf³Y²–ûSSSM&“Íf«ªªR—¯^½Úb±ø\-€„644TTTdSXX(¦„JHKII‘½gddÔÖÖFUBÐ@›ƒòE €„†°÷æÎgå••©î‚Ë[Š+Ïö!¡1²òi”@Ñ ¡aÚô4½¸+ö²ÚsçÊK\cšÛtÑÿö8¥FBcdåÓ (€¢Å4à²CТ=¤}äå|Zéb÷Sj$4ºZ†7PE 9B1¡1EÏTsÎEÊLÇ®fî@BC(&4ΡEªî§~[cþðZýÌo>þ‰ìÝŸºæµû6y½,„@B Ág5çÌÝbôü¿Í+éizñ¯#ï‡×³ ¡Hh{m[ö(§ÎÂî¤ D8.; -Ú¨-Oš…kB£«¥ÍAy-Æ—{ݸ¿ëë,Tt:óçω‰ÑߦÖj ¡Ðº M;v,##cppÐýÜ—× ·´–ëœCók}ƒÛܽ{÷ÈȈÌË¿îiÐãP½® !ÐÄÃ?¼dÉuaYYYNNNgg§ÌŸ8qB½ˆKk¹r͘kŒÝn÷¹­õÝi­“ W ®–6åP´¦'¡õ8ÒðÉ«e…îF'ÍÈX@B ümÚ”@Ñ8¡í;slõÎ'SsåW2ýñ‘Ý´!c ‘•6åP´¦:¡õ8â¼~鮘¯Ö}ôk5ç^¼+f^×î4 c ‘•6åP´¦.¡­,þéºÜ%»þÇU~tAíG.‘%òï®ØËˆgŒ$4FVÚ”@Ñb’qÙ!|&4¯ÓK?(“â‘¥„˜¼y]-Ã(€¢¡˜Ðî¹ý®M_¹¾Æ2OgM_¹¡.þëœCc, ¡Ð0Õ M½­}ÛÓ_üNõ¿|Òžüôµ„4ÐHh˜¶„¦,z«ÿ…Å+ý×ùÜË$4¦9¡©Ú·=]ÿñoò÷Ð@B˜4\vˆ€šâlߙ㕵ƒ§NÓ’ ¡ÑÕÒæ <ŠÀ4'40Ð&ú6mÊ hÐc ´9(P´Hh 0HhŒ¬´9(€¢@Bc ´9(P´ˆl\vHhtµ o <Š| ŒQ“ÐHh ¡!â‘Ð $4€„@BC¤$´3føµ ¡€hÁe‡˜²„¦0¢.¡ÑÕÒæ <Š@PÚ 7111¹¹¹íííþF5cA´$4¾®@›ƒò(ZÁNhêüàààÒ¥K322HhŒ$4FVÚ”@ј愦°X,^5þËêÌðð°Ãá&&&–——{Mhyyy111f³9;;»··WÝÈúõëm6›,ÏÌÌlmmå¥$¡Ñš ÍAy€¢@Bû›ËåZºtéüùóýMh%%%¹¹¹cdÆkBKKKs:£££###²¾$:u#ùùùýýýò«ŠŠŠ¬,:.­ Ú”(Z„?.;ÄįC3gÎ0&77W~T¾xñb%†íß¿?==]=°ÂÂÂòòrÊ€„¸ìOhê©*ÉKf³Y+¡©ç©d5ÕG%''wtt(󃃃ñññ~=Üjµ¶··+ó2#AN}¸¤>õ`æÎ[]]-3'Ož´ÙlÃÃÔ ®–á ”@ш„¦µ\¡¿šÖ£&“É߇»/W“›ÇògžyföìÙ2ãp8*++©ð ¸Û>hsP hÐ %4ÏOÒÆ"VrrroooÀ·Z­ê)8shÌÌÌ\¹reJJÊèè(5¾c ´9(P´HhALheee9992âÄ ‡Ãá×ÃKJJrss\.—Ýnw¿Íãxêêêdá¦M›(­Iƒò(Z$4Í%¤¥¤¤˜L¦ŒŒ õ¦‹>44TTTdSXX¨^`6þ¨êëë•/:‚„FkRÁ <Š!„Ë-//¯®®ŽWŸ„FWËðÊ hÁ‡0Цû)WW»ßpá;p·}@B -¼Y,›ÍvèÐ!^ú@B$4Ð !ŸÐ´n2ÕŸÚg𹄞¸ì$4ÐèjÞ@y-Z°•Ï á"f,ànû ÍAy€¢@B#¡Q$4FVÚ”@ѱ„VRRWYYé‘|´þ¢tccczzºÅbIIIÙ¸q£ò+•º~iiiBB‚¬–ŸŸïr¹ÔÈŽ¬V«Édò8’ááa‡Ã!ë'&&–——«›êììÌËË‹‰‰1›ÍÙÙÙ½½½^÷èu5ÐY©`PE lÚ²eË$Ìô÷÷K:’¨f0¡Íœ9SÙŽ¤ ¢¢"¯+¯X±Ân·Ë–GFF ÜW[¼x±Ø<²bnnîÀ™Q7˜––æt:GGGeS²Ž¤8¯{ÔZ ¡1²RÁ <ŠSËXBKJJjoo÷ü¸ì+¡Ùl¶ÊÊÊîîn¯¿U$''wtt(󃃃ñññêjÛ¼¤Œú9*¯_b”çuZ«Q$4ºZ†7PE >„!<š×ã3¡µ¶¶.\¸PB׬Y³µVv§~§Q'ViíÎétΟ??&&FSZ«1ÐHh„¦MBÎèè¨2ïr¹Æ‡«ýû÷[­V¯y)99Ùë•`: Mëšlj÷îÝ###2/ÿjH­Õ@B ¡!<šršD)¯×¡Í™3G"!MVÈÏÏW—;%×IBKHHPÆÆÆöôô¨[.++ËÉÉéìì”ù'Nh]<æN¹Í5Æn·«kÊ.ÔËÞ-Z¤.÷Ø£Öj ¡Ð MKÔñz/ÇÖÖÖÌÌL“Éd³ÙÖ®]«.¯­­={¶ÙlNOOW¿å¸zõj‹Åâž‹$¤¥¤¤ÈÃ322ä!>š¤Ä‚‚Ù¬d-÷{9Ê.RSS•說R—{ìQk5Ц—bÿ@B£«¥ÍAy-¢t,ànû 
ÍAy€¢@B…ABcd¥ÍAy- Hh ÍAy€¢@B vBóysîÞÁX@BmÊ-¢—‚„]-Ã(€¢Â@B#¡1DMB ¡„†ˆGYHhF¦ªª*›Íf6›ÓÒÒžþù7¦¦¦Ê™™™GUVZ²dI왑•åÃÃÇÃb±$&&ºÿiQZZš ¿ÊÏÏw¹\$4ПÎA øNh×_}OOÏèè¨üV‚–û—\r‰²Zqq±Ýn“››+?*ËKJJäGu¹ÀV¬X!ë÷÷÷ŒŒ‘Ð@BQ„ËpBSOpI*óøÑl6+óV«µ½½]™— rÊ|RRRGG‡º\ `ÉÉÉêòÁÁÁøøx"!¡ÑÕÒæ <Š@°š‘=–«ÉMg}w&“‰„ÆX ¯+Ðæ <Š@($4«Õê~®ÌÈ9´ÞÞ^/ÄIhŒ$4Ðæ <@Ñà-?Á„¦^oær¹ìv»Çuh®1²\]¿¬¬,''§³³SæOœ8áp8HhŒ$4Ðæ <@ÑR´oùÉIhCCCEEE–1………ÃÃÃÊr™)((0›Í ÷r”–’’b2™222jkkIhŒ$4Ðæ <@ÑR´Q„Ë  ŒžÐèjisPE 9Hhˆø±€S«€„@B ¡„Ð $4HhSËá3¡MïÄ „HHhtµ´9(€¢0Y mÏ¡‘Ð "$¡QÊ´9(€¢@BCÄ7> ´9(P´HhtG R¦ÍAy-h|hsP hÙ¸ì$4ÐèjÞ@y-øõÉX5 €OÀ ¡!â‘Ð $´r¶ïLûÖ=ò/õ Ÿ€1m ­ïÀ‘Ç­WÉ ÝNê$4€IÀe‡ð7¡í;óÊòuâƒl&ÓÙÍÿ €„FWK›ƒò(ZSÐúyó%5¦¹uñ—ïú×ù]»Lã±±€»íƒ6åŠ@Ô%´•Å?]UAmÂå’Í”ófâªG?úµ`Ç3º#ÆhsPE ð–÷üíôN¼@Œ$4Ðæ <@ÑR´oùüöÃshñ_«9÷â)>‡ÆhsPE]¸ìƯC;½÷å½_/ü §ýËEu\NHc, ¡ÑÕÒæ <ŠrL[BS–(÷r¬ŸùÍ)¸—# "$¡Ð¤„¦:½÷eåžûÁû{h ðéÔ€¡„¦8Ûwæxeíà©Ó´$Hh$4LsBHh“†ËAB ®–á ”@Ñ ¡± j_W ÍAy-"~, ¡6åŠ  ‘•6åP´Hh`, ¡6劑ËAB ®–á ”@Ñ‚a ¡± j $4D< ¡„ÐHh˜Ö„6cŒÉdЉ‰ÉÈÈ(..îéé¡õ@B˜L\vã MY2::zôèÑ¥K—&&&vttЀ ¡ÑÕÒæ <ŠÀ´%4UYY™ÃáP懆†–,Y;FfäÇ?@Ϙ±~ýz›Íf6›333[[[Õ‡—––&$$X,–üü|—ËÅ ÁXÉ ¯+Ðæ <Š@°ÚÀÀ@\\œ2_\\l·ÛÆäææÊjB“Ößß?::ZQQ‘•õá¾V¬X!ëËò‘‘‘‚‚‚¢¢"^ÆhsPE ð„&Ìf³2cµZÛÛÛ•y™ILLTšz>MBšº~rr²ú ÉÁÁÁøøx^ÆhsPE ð„Öß߯&+ߪIÌc¹úãŒf2™x! Hh ÍAy-¸ì'´{ï½W½ÍjµªçÄ<ΡyMhÉÉɽ½½4>cA´$4ºZÚ”@Ñ‚@Ž %´ÑÑÑ×_}éҥ¤¤$77w``ÀårÙív÷ëм&´²²²œœœÎÎN™?qℚômcÁà©Ó§÷¾ün[w„'4&=¡),Kff¦D2÷“`CCCEEE–1………ÃÃÃú M i)))&“)##£¶¶–"ÜIÖ:¹îñ¡·ú ®ÿçWþð›?¨9÷b©1™žýò zé5àÓ9 0”Кššh+èëntJÁ<;ÿôÞ—}ƳGÏ¿L¦£÷lè¬îõòmõ3¿)i­ÿw¯ÐŸÎA Ð09ÞÜù¬rNìñÿ‘ýÚý[ÎöñºÚÿ÷$žINS—¼ÛÖ-!íÙ/ß@BQËABÃäêiz±ö¼¯ŽL»¬ö#—øöí§ÔO®9÷â£÷lðxà±Õ;åQ~]“ÆB@›ƒòE êÚªU«6lØpàÀÚŠ±ÀߦLž¿Àý”š6YØYÿœÇ£”/Iúü†dX&4¾®@›ƒò(ZÆßòúÓú«¾_[[ÛtóJ™W>µË¿ÌGü¼2¼Ö»ª¼–SÇ®æh<‡ÆÈJ›ƒò(ZþZµjÕòåË—}áŠå'KÖ¬Y³eË–¦¦¦cÇŽÑDŒy=‡öê=ÔËÒ”ëÐ^oSáס1²Òæ <Š€¿6lØ yìž ³Öü,ùàìYSÓáÇ•¿]ÆãñìƒëÐÎû꾫~,Kþ:ò¾û:î÷rìÚ}àØê~/GFVÚ”@Ñ .;„êÀÆÖ˜ÿ½éïd‰d³cÇŽI< ‰ |r¿—£ûI³ñ$¤ýßyÿÇýï¡ùÏþÆB@›ƒòE >„T‚åV»,—Ž?i¦eðÔéÓ{_öëÚ³°Lh$4SI²ÖëåÛÎZ$4€ðFB$4 ¡ ¡ÑÕ2¼ò(Zô ¢&¡ñuÚ”@Ñ @ÄW ´9(P´è@%Ðx_Ñæ <Š=¨hsP hÙ¸èô ¡ÑÕ2¼ò(Zð! 
ô ¢&¡ÐD<ZP¼wæÓ{_fš®é½?»x˜(N&¦h.fIh4]Dè7 mê(cÓtM½Ï½ÌkÄDq21QÌ4ïˆoÚT'´7V?pzo5ÓTNo<ø€_¯ÅÉÄD1Ó LL¼#¦«HhSÐäåùë_3MåÔó›j¿:^#&Š“‰‰b¦A˜˜xGLW#7¡Í˜1cÒ×÷w›$4&åýðôƒëœNgKKËñãÇ»ººx˜(N&&Š™abâj`B«¬¬LHHC0¡ᦠøÑOïû¡vEECCCssóÁƒå-Ñ××ÇkÄDq21QÌ4ïˆPk„ZVVVuuõœ9sBð| ÉëûaëOïݱcG}}½¼%ZZZºººx˜(N&&Š™abâjHB;zôè‚ ”œöÊ+¯¸ÿjãÆ)))‹å³ŸýìÃ?¬Æ™©ªª²Ùlf³9--íùçŸß¶m›¬#?fffžªººZ}.²qy.)½n„.åÈûaóæÍR½ÍÍÍÇç5b¢8™˜(f„‰‰wD¨5‚ß M²MFF†ü«d§ääde^Ì;÷¹çžS×Ü¿¿{âêèèP· ?vww«?ÆÄÄŒOhíííꦆ‡‡-‹Ç:²¤¿¿ßç÷åGõÝøççõQÏå7¿ùMVV–Ï ’Ð蘘(N&Š™b¦A˜˜xGLiBkll\¶l™ú£Ãáxê©§”y³Ù¬¦55‰i¥&¯?j­ïõW¥¥¥V«õ¦›nª®®îììô¹q…¿œœÉ„ÊÕo*z¬6þ¹Œ‘$4:&&Š“‰‰b¦A˜˜xGLsB[¸p¡ÇU[¹¹¹Ó’ÐDkkëš5kòóó+**Œì+++ëg?ûÙðð°þ# ¡ÉqNp…iœ&÷ØB°SåÆŸ²ƒ Òö'k³F¶3ñ}MAq†E±…àKãuk‘טáUÌ¡ù*hOD~à·} -ŒÚÛo¿§Æ122"Kd¹ÌÏ›7Oç[ŽÁHhªžžõk7ððXßý·‡Rëñ( rîÏEæÝ¿å8é M»ËG/ÌËûúÖ­? £„6ÛKhîOÍl>×jýØM7]{útsT%´Ú‚ÔgϾ¸tiAjê'¥Íãâbå­qàÀFÚøé/yaùò›gÍJ‘†Š‰±\yåÅO>ùÐÄrRS62oÞ—Æ/Ÿ?ÿ߃W9Zëè4T°?åLVcFvB›‚÷Úk™LçHO~ûíß“ªÙ„&ÏÊúâ !ÞŸöy)€½ÓwMn½…ï;B¦‡*IHø¨ük°3™x·à_B«¬¬¼ñÆ=Êå£)w immUÎnyÜ)dÒZNNŽ„ÀÑ1[¶lIKKS–'%%½ð Zûš={¶rcÆööö9s樿õxÔŽ;äøÕç’’’"K‚šÐÔùwÞyþ±Ç*äýÿo\$O£ä?_Cÿ¿m<žcW׳òVœ;7=jÏ¡Mü)ÙÂâÅßr8¾ýæ›JZ{ôÑ,ø2 m|Ž½ä’Œ¢¢ëÿð‡Ýòã{ï½ôì³kíö¡ó‰*-íÓ/¾¸Õ}áK/mOOÿÌÊÑo(Zô$4uþ7êsræK'Ê -??{ݺeS6‚„~Øç¥À}W&´)~G(“ÄÂ;ËæÌ™m°3™ê„–••%¡Èc¡,Qÿ0šìO‚Éd’×®]Ô„öÌ3Ï,X°@ö“——§Þ½Cž­Ä-÷»í»oGbXzzº<Êf³UUU©¿õx”z·}ùQþ•ù‰ÿå4ǤÜܯÝ}÷õÇÇ_‘ñ9å?JK¿ï¾¦ü*3óófó¹6›uÆÛ”¢¹úê¯ÆÄXdY­¦æþñ;•ZWNSÈ¿¿úÕR÷Û¾ý^©Håÿl.¼ÂëY#÷†þd¿))Iò¤>ûY›ìw|W¢ÿp6™šÏ ²keæÔ©§óò¾®4rvö¥©óêÈ$½RÒÇyänŸmòÐC%VëÇL¦s|¾j:}´N{ÊáyݵÁ.[«ŒT G#¨“<äý÷iƒVÛV3ZO_¿=}nyü 7éŹjÕ-¹ŒÐú½„×Ó¤*âÕud>>þ|Ù‚N=„Q¢ÿyÉës¿w#MAß5‰}Wø¾#dzõÕG•ÿ–œöûß×éL¦:¡!H±Úë yøðδ´O+óO?]5wnº,‘ù·Þj’bÙ²›”_=ùäCòFúío7)'vn¼Ñî±ÍÙ³?%õª”¦l!??Ûc…;ˤ֕𓤦Õï ŒýÙƒÙÊ ½óÎó7Ýt­”²¿ Mk ÒÇ©û•ê—y¯ Mëá:m2 MÞÕ·ßþ½Ë/ÿŠò£¼RòH–vû#ß6øêìÙó é¾÷íÛ`¤Md’÷¿‘WMgLÕjO‡Ô Ê¿²AšV#ø¬@FpŸ$¶9RãõtÚ6€šÑyúFšÎ–=^¸`gzúgÜÇŒ>Qù|&ò4•ÈA¾öÚcÊ™‘ öB:/þñøÛPFÞSFvíó]odwúiüȵޕ·j°‹ÙàÀä~ð2ÿØcú•l0¡éìÔ¯.wÒD>õ.Y²Ðx+É«¶}û½êú²P^8z»þDçó’Ösôx ‘¦ ïšÄ¾+|ß2ß üGƒD>™'¡E{B“Š‘L¯~éYí ”"°Z?¦þêÙg×êlÓbùH_ß^佺wï:uysóÃêw|e¶¶§Üϳ«g ^‡¦³Ù¯û‡r™÷šÐ´®Ó&Á¾M!áAùöÝø...ÖÈ«#cXFÆçÜ7¢ß&Ê׌¼j:cªÎËá¾A÷]ðµ÷FЯÀñà>É7sæ‹ýdž +ä9ºŸOÓiÛjFçéIh:[öxá‚QœjGð'*Ÿ½ÄDž¦²ù ­þ°Äfye 
öB:/þñøÛPFÞSFví³O6²;ýÆ4~äZïÊ€[5ØÅlp`ò8xõb!­JÖjÃãÇŸnêÇ2ãu¨ßå£A¤ŸT>Ui¥Ç_}å•«Ëe^–èÔCØõ':Ÿ—´ž£Î˜¥Õô]“Øw…ï;B–˾”ï¼ó|rò'Ô": -šúÉRfÔIÞ¨ÊU‰ú_S·YZú}å¶;w–:õôø<¶ ó:ÿàõPõÏ¡i-¿_¯ Mg Ðj“©9‡–ŸŸ­þ“D‚œœùÒnê5£>_åËo–ÞðÝw:_êóÚ&yÕ{9 ¾²Z S^Ác:}ºyûö{åáò!LzFõN!>+߯š1Þò^ÛSgËúÍ" Íg/1‘§é> J—IfŒ÷B:/¿Ç£ßPFÞSÔ’VcúõÖïlu6e°kšH«©§Õo­_iU²ÖÅýII¿ýöï©]_u8‰ÿYn¤AäÃhfæç ¶’Ì$&^ØÕõ¬r2Dæ•Õ´ê!û­ÏKZÏÑãFš‚¾kû®ð}GìÙó ÷¯hIs¿qŽVg¥ mÄõ—îFçëåÛ^Yþkù·cW³,‰ìo9Êkïþ Z>Îç;êÈ‘šŸÿ¼X…åÜ M§M¦æ:´³g_ŒQ/!½çžBõJe÷®PëÕ‘¤a³YŸ~º*bšV#èT ×FЙdÍÔÔO¯|ã53Á„¦³å)±&þ­$Ÿ½ÄDž¦ú«Ç«L&õ;iü”ãïñè7”_Ÿr&Ø'OYBÓzWNb«†TBÓªdƒÏ"”šL¹¿üe©ÁVúñóW­ºEfî»ïGEE×ûì¥Ã®?Ñú¼¤õ=h¤)è»B9¡MÙ;báÂ+Æý™±¯V¨‘œÐÎö‘Töèù—ÉÓsŸdÉ‘;ÖÈo#)¡IHݨ§žÝ/†öøž€×¸^·ùÖ[Mêÿ3©+HºŸ–y#ß—›xBó8-ó~%46™š„öî»Nµ1Ýÿß套¶»¤Î«#ƒJRÒÇÝOµiŸ¯š•û]­Þ|³ÑH{Λ÷¥ ~ËQ«ô+p|#èyÒý+‹+ßHÍè<}#í©³å)± ^Ù¯óD´^#ÿP ìiºÿJªÔý xFz!—ÆßãÑo(#=¡‘]kU¦GcúÕñê?/Mi½+'±UƒÔÓê·Ö·µ*Ùà³ÐÙi¯×¤=}}{gÍJ‘´’t­³gJ¹ÒF^zã½tõ'^?/i=G½i ú®Iì»Âôñ§?=ë^ê’oe‰rŸÚ?^°†äo)‘lÏçí/ßrç+ËW¹ã?ŸþÒu5ç^, ëg~³»Ñî M>÷?öXÅ‚_¾òÊ‹Õÿçhn~ØjýØ£> ŸPe:p`£šàݯì_µ… ¯®üwžWz÷ãñùr¨ àN!Zà³=Á}ºüò¯ÈÛAù#2ÒßtÓµ×_¥Á¶õ«ftž¾‘öÔÙòŒX7b–ðzwl'¢õÉëò»ßmqÿNOÓȯtêYç¥ñ÷xôÊHOhd×Z•éј~u¼ú©³)­wå$¶jzZýöQ^é'Õ3Z•l° uvÀ댡gݺeRQFZIù$úóŸ+w¶Ð¯‡0íO¼~^ÒzŽ{7Òô]“Øw…é;⡇JÔëYÔI–(„öá«¥œ:{úKyo~Âã ÿùµ=Ms¯ÿà9Ÿ{q×îá˜ÐÔ?…ÈåáÉÉŸ¸ýöï) ¶­_5£õô ¶§Ö–§`ÄúëØ3½ûî%2ØHÁÈdg_ºgÏ/<@ç‰h½F55÷ËØìþŽàiü•V=ëW¦¿Ç£ÓPFzBƒµäµ2Ç7¦ñŽW§³Õß”N×4‰­Ô»í{mùP¥¼û ­J6~&P§ý}½‚ôõ¹sÓ ¾qV­º%>þüÿþï[ÔCõ'úŸ—´ž£ÇÞ4}×$ö]aúŽÈÊúâøMË壑Ð>ørããÖlyJ‡oýéè{šéð­wÉ:u~£ëèñ³gφKBcRn./ŸÑƒ±åÉúImÂ"Sø'Ó¤ó{ï½ä~E ÂÄDµ í÷K¥œ=ùËAýgþì—ÊšÏ.þOy¶]]]!•Óè‚ÇO7ÞhWnŒ.QdîÜtå"Î(簾M˜±˜˜B­˜|ð÷;bÓ LLt$´ÐMh#®¿(ßoÿåÆñÓŸ_ÛSsîÜš˜yÏ=õtKKK[[[___ˆ„4ºàñÓÎe³f¥X,IMýdð¢Hxu SÓ&LŒXLL!UÌqq±™™ŸwÿKM¼»™˜è"Hh¡›Ðºv'ÓøÅ\ƒO^9öhñ}òœ•600@B£S`˜d¢8™˜(f„‰‰w m¼^¾MžÌË·Üiðɹã?eýíß¹­®®N i]]]¡p.˜N‰‰âd¢˜Ih¼»™è"HhaŸÐ^Yþky2¯,_iðÉËš²þVû·nÝZ__¿oß>yæ¡p.˜N‰‰âd¢˜Ih¼»™è"HhÑ{í‘GÙ±cÇž={ZZZúúúHht “L'ÅLƒ01ñŽ ¡MÏuh;~|·$4å4šÓéìêê"¡Ñ)0L2QœLL3 ÂÄÄ;‚„6 ÷r¬>ïÒê ›$¡mÙ²¥®®nß¾}mmm!’ÐÞxðyy˜¦r’6÷«Sà5b¢8™˜(f„‰‰wÄt5B˜ý=4?W­LMsÿ×Ï<ç‡555=ù`'4¦éšŒw 
LL'ÅLƒ01ñŽ˜®F„v¶ïÌãÖlyJ‡o½K'¤Éo?xÚ]PóðÆLhïýÙÕûÜËÊôôƒëjWTlýé½[îZ%¯ÓTLlñY¼FL'ÅLƒ01ñŽ˜ÞF„¦äi廎O)oü×ÿüÚåìYµinMÉ}Ϫ««Cí[ŽîœNgCCÃŽÿ×ÞÄFUÇ?h¢DMŒñàÁ›ñÆÁƒ¢F£‰Æ —¨q'5î Š‰·D‚ËÁ(D,hRÖBU,¦KÕNH¨Fhm@G%m4d0Jbýuþ0™V´í0´ƒ~>™4¯Ó7/Óÿ{søæÿæ½ÚÚùŒ­óùÿa û'8˜ øDŒï Tu¡¥H«;å¢4?¸jâU_N½gõù7g®¿·ñìk—îÙ³Bž…ø·«íJ!¥6nÜÑï-ÞçÆJŒvŒyñ.yöNp0ð‰¨æA¨öB ?mënšöÜâ S†žÐ9aòÀwÏ '7¦ ´h³j»Ú~©ÎÎÎuëÖÅŽ‰z^ÂX‰ÑŽ1‘ñö°pp‚ƒÙ€€OÄøÂQPhù|>þ“5Ÿ4Õ͘U;qêÀUõ'N­}ð¹EóæÛ,ª,åYµÝ±zÐ-~ü1ÞU¤c&“YÍX‰ÑŽ1‘ñö°pp‚ƒÙ€€OÄøÂQPh}}}ñŸ¤Ï%×=…öþ÷Ö¤s:kŠ<«««+ÎÆ «-5£˜ãmß¾½“±£c#ã?ì!aá೟ˆñ„£ ÐÒ®Šÿ*ºëÃ[žŠB[xÍcsæÌy» æÎ[zrg¬\mh#WÕ…¡ÅÝÕt÷Ì(´7=1VSSóNA,,X° ¡¡¡¥¥%åY¬\mhÿ‘BK‘–Ïç3ÌŽB‹NËd2Ë–-[T õõõ¥gvÊ3@¡qíϼ…¶öñ×£ÄZZZÒïb¡­­m´§u(´CëiÞÐñìÜaÅKíÇró}/•>þ¾rlÓ~Ú¨•ÖW¥±MûPh£f  Z m„R¤¥É±ì`v! Ð*/ŸÏ÷ööær¹îîî!Ö|ßKQhñ3–W–ý±‘ØTlÐuD…¦ÐšBšBPh Phcŵ…¦ÐšBšBPh Ph @¡)4… Ð€BPh•±ö¶™Qh­S§+4@¡§?÷íºñɳO¾0³¼!žL‘f_ ­Âöïù£éœ;ÒeÿÜ·¿¿0V̳l6ÛZðÁywÆ:Ëϸ²«#›"Í4 Ð*)’ìóˉôú䬛÷îÚÝðüÆbž…o ¶´oZ9éöX3~ve·Æ ÎuZ%}=ý͈®O¿lÏΟûK¾~V̳Xˆû¡`ÇÖ®å§]2s—?ÔÝÙå i€B«˜ìËó.r줞æ ýƒ¯Ršg©ÄÒŸ¾ùxÍ’ã&Ü$힊i€B;,ÅkëÇÂK_?û÷â*FÚg×Ü$­þÌ«·}Õî&i€Bµuw½Yõé”i)φÜú,M  ;!–"­ç§ “ïVÞü€Bû·BKÊø.YéwØ…(´ò -wPqUzȲ7 ÐZQyeUŒ´ÃÜÀÿ½ÐJ•U)Ò*²)€ÿi¡õ VÁM)4@¡ Ðeû Š» ’6¼IEND®B`‚python-watcher-4.0.0/doc/source/images/audit_state_machine.png0000664000175000017500000013326413656752270024551 0ustar zuulzuul00000000000000‰PNG  IHDRG¨†´)tEXtcopyleftGenerated by http://plantuml.com09 iTXtplantumlxœ¥“OÚ0Åï–øs„JIÊRšCµ4É¢¥á6@¥ZgK‰³ul ß¾N€%»-‡voV<ï—ç73·…¢Ré,mùþîXÖg˜†ãà~<𠯮@âO…ÂVè'¼P’ª\6È©´’Mƃɟ2à…92ä;L–ÂÔáUl‹d¼à¹€Pl¸À91*Þ]ÿ> ƒ3nMyZ,E3Ü3|R¥&gLK‰Ië¥,žû~¥rvýWKÁr&°æ"ª Ej¼æ!ÎS]ò_Sãò­•Ÿzð‹ U€Ê—¢ÐÅZùÀÊsƒ<«^t… ±ÐÖÇ *uFáÌœ¯Ë©d[óûSTøÒÇ)“7“ê ÷ûc?Œ¢¿ ËvÕ”õ4ÿMy©~{õ~œHÿ:›(1fmÊí¹5½¯‰LSC˜"Ø¡¬FεێûÉvÚÍØŒÚˆJpÚàÜxmÇë8àÇ3(ï[¤9˜FPäZ2„¤ôÃWÕ(¶Èî(˜uÏ<ˆ¸Ð{2‰/Š¶ëØŽuãvlL?Úû^÷±Û!®©N•ñÁòÄ<˜Ï¨Øhº1ŽQ?7nåÁÜÅä7`ânY»Î €IDATxÚìÝ\•õÝÿqÔLܬa™R#Eßì. 
›6š6I‰#!m¥»Ã´ûf¦ó–åRÓü³‘)!«´pJzPÄ#¢ 1¸…DAôhˆH¨ˆ ü•ý>zm×ÍpD8çðz>®ë®sëú\×¹Î÷}®ÿSfA j€T¤¤ Õ©H5]*##cñâÅNNN¶·;ÖËË+22²ººšâ¤£–““3yòd‹VØØØ„††R%€T`¤"## `q'ÞÞÞì´H5FG«ÕZ´™³³snn®N§+++«©©¡z© ›:uª-{išš={vjjªd›ââ⊊ ² @ªèN3f̰h§ïÿûaaaÉÉÉj°¡Œ© {èt:‹™6mZtt´lØ]jºG```ÇRÍСC·lÙ¢αH5ÝÃËËË¢£þö·¿…‡‡ÇÆÆ¦¦¦êt:ŽCH5ÝÀÉɩéfíÚµ¡¡¡fß¾}Êîê jL,Õ„„„(»kÒÓÓ‹‹‹©'@ªèj¸šjË–-’j6oÞ•œœ¬Óé¨'@ªèj+W®ìX¤±¶¶Žˆˆ Û¾}ûW_}UPP@=R @W;qâDÇR§§'© Õ—öFš¾}û®[·ŽTjŒBvvö}÷Ý×®TóòË/GÜFªH5FaãÆm4ÿñÿ±uëV‰4†«¤c1wîÜ>}úÜ1ÒŒ=Z"iÂÃù²3@ª0¹¹¹þþþ?þxkyÆÒÒò—¿ü¥²—FHOXXwáH5ÆB§Ó¥¦¦FGGÏ;÷ùçŸ4hfxà{{ûßüæ7Ÿþ¹’g$Æ(‘FÝQ#/”—WTTPF€TÐmÊÊÊrss“““%Ø(·×”Ü¢±xN2LDÊgJ¤‘Ád`y‰²£¦¦¦†2¤€n#™¤¸¸X 6]n]fñœ¤—Ð&Bn“@4òB" @ªè~j°‰Õ‹ç6oÞÖ„<”<#ÿ‘ÔHñg©À(ÔÔÔ¨Á&55uß¾}’j¢¢¢¶7!%ÏÈ¿d5Ò°£ ÕW¶)++ÓétZ$Õ$''Õ„<((hĈÒOªéšTÓÆ²“jH5¸sªÑjµÏ>û¬ôŒ7nÏž=­5UÕ‡µµµo¿ýöC=4dÈÕ«WëfÑD‹Í߀€›^½z)-û+VØÚÚ8pÖ¬YUUUmy‹æ“ÔÚxÆŒcii9lذààà'¯½Ó 7~øáàÁƒe°ßýîwòyò…^ضm›:̹sç}ôÑ«W¯ê½¶µ²˜÷Ï>ûL‚Ðý÷ßÿä“O¦¤¤„„„Œ9RæÎÑÑñĉz5‘žõë×ËŒ+äääè Жâè-¯üüüéÓ§Ë´=øàƒ¯¼òJYYY‹¯ª©©™;wîÛ¤G¶86R :-ÕL™2eÓ¦MÒ#­dé¿cªYºt©‹‹Ëùó狊Š^|ñÅæ‘ÃÀ/÷ò/ââbåáÚµkÏœ9såÊ//¯ùóç·ñ-ô¶6‰‘‘‘Òª–h1{öìGÒÞiЛe!=Ë–-“'ãããô£©»_ä}ýýý›¿¶µ²˜÷_üâ………’»V­ZõÀL:õôéÓÊÉ'6_ žžž:NøàƒÆ¯7@[Š£·¼ž~úéÄÄÄ7nHHóõõ3gN‹¯Z²d‰ÌBÑm2 ï¿ÿ~‹c#Õ sR4‹yä‘êêjé—¿Ò/í{Ãâ‰'ž8~ü¸ÒŸ““ÓÞTsöìYõ¡½½ýÉ“'•þ‹/6¬o¡÷°µñ<þøãAAAß~ûm‹¯êØ4èJ&77W^¢ô7îË/¿”ž‚‚›ëׯë½Ð@Ù Ìû… ”~¡<,))Qöïß¿ùR0<@[Š£·¼šª¨¨YkñU#FŒh: jY ŒT€Ž§š Xü;???ÃÂÒÒRiŽ+-òö¦šÆÆFõ¡4µ›¾uïÞ½Ûøz[Off¦§§çÃ?û¬é®|||8xðà/P¶víZ++«¶\ ùæÍ›AAAöööýúõ=ztLLŒá·hm’Z϶mÛF%Ã3&11±ÅÉkï4èÍŽr 4ì·¿ý­z±/±cÇŽ‘#GÖ××7•á²ß±¼•jÚR½‘ìÙ³G^Ò·oß¡C‡JÑZ[âÕÕÕï¼óŽr 4éQF#Õ óSÉ4M°5üòË/oݺ•Õ ¤R饚›7o~þùç£GîÀ-MAª©Æ(¦ÖÖÖ6##ƒu ¤R @ª© ÕjjH5©¤€TR @ª Õ ÕjR H5©€TCªH5¤¤R @ª© ÕjjH5H5¤€TR @ª Õ ÕjjH5©ª*Ý©Fï¿H5FçøÊM't-¦š²”#…vR"€T`Ô*OÅX»©ÁFM5iäùºòk” Õ»ŽoF~I 6Jª‘H³½ßø¬y Õ˜€âÝ)fv=:E‚ôÜŽ44½+OQ€T`¢yiç&I°‘T³ãû?Ûa9áàËÿCYR €É8¹öKM¯ŸÄþÐ]Rt;r)K9BYR €Éh¬oØn9A‰4Ûïÿiü3¿¦&©ÀÄdøø+©f×£nç4û)@ª01uå×"úÔÚMUÎ~1Ñ.Î~Ú‘k.&¤³@R èAJ“¾Itš}ëN2–?=è>;géòÂà¿E‡\LÔ˜J'S{jý:™òäɳd.d^âfžIfá‚T`þŽ-þL2@¬ÍKgB>­¿‘ÞØ˜eêÌ…ÌK¼Ãt™¯äÉ¿¯)+g)ƒT`žë¾~u4ý3|˜GžÑëN­_·Ãr|ÌcngS³***jjjXè Õ˜•£ ƒ$Òœüxùåµ»’µ+zˆKä —œ)ÅÅÅdjÌG±6E"MÖ¼…fi”îj^ÜÎÁ.‘çfÎÐéteeejÌA¼Ã ­Ý/nÖ}cö©Fº {Ã%Âíò~/==½  €`R 
€É;§Ù'­ü¢èži”îïÓ|4}Ÿß£‰LMM%Ø€T`òÒ¼þcíb XüK¿~÷9tÑ¢Ùׯ§éý«)õ_ãÆ=ÙÐðÞ¨ô^Ø¿¿aÃõðpÚºõ/7of¶8¤ñH'ÿúâ‹%&8üàúö½ï‡?üÚk?OHøÜðqhä¶z¿K°©À´5Ö7DY½˜>g¾áT£ôTWÿofæV'§±o¿ýZótÑüU¿þõ”õëµ–j”žššÃçÎíˆøÐÑqôK/=/oÑ|Hã©«ËxùåŸM:ñàÁM×®ª­M×éölÙ²râÄÞ]=ÊãË‘S·nÝ—ž.¯ÒUTT°>€T`zªt4Ïÿµ-©Fé¾ývï!·%Õ\¸p`øð~÷]¢T£v’O~þsÇU«|›i`< ¦M›ÔƒÐ¾öý“¦ïó¡¡¡fß¾}ÙÙÙÅÅÅ쮩Àô\JË‘Tsaox»RÍàÁµ%ÕÈßÀ@¿Ù³=Û’jn%¯CG¶kqÈÖÆãà0êðá-H5G?Z-3ú×õ›7oŽŠŠJNN.((`w H5¦§4)S÷5mI555‡³³5“&óñyµ-çÕÈßúúoÆŒ±OM kKª©ªJíß¿_‹C¶6þÆÿí@ª9¹If<|e@HHˆrZvvvYY«H5æ™j÷ßß÷‰'l.œ%ñ£ûj¤;t(äÙgí%–´%Õ|ï{–­åŸÇciy¿z齩5œjd–o]0`ñjI5[¶l‰ŽŽNMM-..f•©À{ï?ÿÓ½yV©­M/*Šß±ã£ñãZ»²³ñÔÔvr;eÊ„ÄÄ/®^M©«ËÌ#c#Õ¤@ªikt1|µ€¦÷Ê7îÉæwá´´¼èPë©S'~ùåŸ[» §ñ(¹híÚÿùñô½ïYöí{ŸŒÍÛ{ê7ßl%Õ¤@ª1çŽTR ©†TjH5¤€T@ª!Õ¤@ª!Õj@ª ÕjR ©†TjÚ­xwŠ4îO¬‘V~êd–o¥š…þ¤jL[þÇ·vYôØ.Âû]R H5¦­X{H÷ùTš¤éQÌ2ûj@ª0œWCª©€TCªH5¤R @ª ÕjR ÕjH5 ÕjH5©€TCªH5¤R @ª¤R @ª ÕgWw--j Ó­TóûH5 Õ˜¶²”#Ò¸/9°µ§í«Éz…̸æaç­C§„ÿú¶ÿ-ŒTR €IªÒ]Æý™O{ZªIûÃ"MLjˆˆÍ ÿüåO£ùþÏ¢öfÆÛëX+@ª0% Õµ;,ÇY°¨§¥šÝ?þ¥fÄË’jþyÚæ/÷,øp·Ã¯b¬ÝŽ.üTÂëH5&#yòÛqöS{T¤©þî ¦£æõ?ü_ªù×y5'tG¬‹äšâéw1!Õ¤PðI„Æâ¹+Y»zNª9²ôÖI5ë>ožj”š4T×nع÷©™ÒI¥I™TíR_yã û|é6 Õ€îq1!=ÞÁK:é¡èÉ3_¿ºP:‚ H5 K•&e&:ùhí¦q%+tJ°IrñÍôýˆR€TºByv~ЧŸä]¨–×ÑYê+op|“‹€Tî­ÊSEi^Kc¬Ýò5 Õµ«ºäRœýk²vQ jÀ=inö^=È5wY0y÷N•îB¬{Qd"¥©tšš²ò¬yQVÎÇo¨+¿FAp¯UœÐÉúÆõô@ª@2Lî²àèA®’jªK.Qt™²”#1Ön—3ò(H5 ƒªkó5Ò¬<ì½¼òTA×+ŠLŒµq'NƒTÚ­±¾áôÆ]’gR<ý*Nè(ºQžX’‹/×Ù©´ÃÙð½Z»i‰N>åÙùTÆ@Ò5×z©´Éù˜äx¯„ oó)Ú4]zÜ⨯¼gÿÚÅ„t R h•Ä 3i$ؘJ3Ú âM÷ÎB§¼{—ÍÂÕœB­Ý4N°©´ <;?ÉÅWÚ‹ºPm—½éêÕ«ûôé#;¥mäñƼw.uåÜ ßË 6 Õ€SqB—âécívzã®®l)Þ¼ysøðáAAA#FŒ~R ©¦í2}?âjÀ-•§Š{/䚨i¨®íâw×jµÏ>û¬ôŒ7nÏž=­µÕ‡µµµo¿ýöC=4dÈÕ«WëfÑD‹m›^½z)jÅŠ¶¶¶œ5kVUUU[Þ¢ù$µ6ž„„„1cÆXZZ6,88¸-“×Ú ;6#õõõK–,:t¨••Õš5k O­Œóòå˃ ºråŠúFòÌàÁƒå™öÖªkHüNtòájèѪK.eÍ ˆ±vË]\W~­[¦aÊ”)›6m’žé¿cªYºt©‹‹Ëùó狊Š^|ñÅæ‘Ã@ÃZþåááQ\\¬<\»v­³³ó™3g¤Õîåå5þü6¾…ÞÃÖÆóè£FFFÖÔÔœ;wnöìÙm™</ìÀŒ¬\¹ÒÉÉ©°°PžŸ7ožá•w÷ñññ÷÷Wßè/ù‹òÂöÖªËTé.ÄÙ¿Æ 6 ÕÐI†9¶xC”•³¤šnlž>}ú‘G©®®¾±ª«¥_Ú͆#ÄO=ÊÎN§>sæÌu×4—¯_¿®<_RRÒ⾚ÜÜÜæ‘C9ÕÄplPŒ5ªé»«Z{‹Ö&©µñü³ìZ­ÖÚÚº½“§÷Â̈„¢æûjZXç¾}ûžþyéqttÌÊÊêX­ºX}å ­Ý´²”#|ÌAªÀœóŒ.T+;O¿òì|#™ªwww½'ÝÜÜBCC¥güøñ|ðAUU•D©S§ªÍå%K–¸ººž¿ÍÅÅ¥yª‘\”——×–Ø(ckkk;öúë¯~‹Ö&©µñ̘1CZüò¤„“Ç{¬í“×â 
;0#«V­rrr:}útÓójZ¸é8Ÿ|òÉ¿þõ¯2X‡kÕõ.¥åì¶õ¨)+çóR fè|Lrœýk‰N>¥I™F5acÇŽm~xUllì¸qãþqû$ GGGå:`Ÿ}ö™Ú\®©©ñññ8pààÁƒ[¼@ÙÚµk­¬¬ _dLqóæÍ   {{û~ýú=:&&Æð[´6I­gÛ¶m£F’áÇŒ“˜˜ØöÉkñ…˜‘ººº÷Þ{ÏÆÆFæ% ÀðÀMÇÜ»wï}ûöu¸VÝâøÊMÉ“çò‘©³"1&ÞÁKº’ø4ªÑñ¶‘­#ÓÐXßpÀñÍÓwQ j0e)G|´vÓŠ"©©¦ç(Ïαvã84j0ùV]Чßn[]¨–kÝ’jz ¬yésVQð¹À$Už*:ì½<ÆÚ-?PÓP]KAÐ3Õ•_‹µq¿”–C)H5À”T—\Êðñäš»,¸¾òF×5:i'†Ñî a/‰:§Ù¿÷©™ì«$ÕÓPW~-k^@”•óÑ…ŸJ¿‰6úI58UF5ÍÝ81É“çæjØDj€±ç™ÜeÁу\3}?ª.¹dZmV½vl<ß}÷ÝàÁƒkjjÔg<==Õ~åŸ2ÌÝL|'6Ê-nëի׃>èààðî»ï˜6cH#zLîRyªH>׋JÙVj€1j¨®ÍÔÄÚ¸ö^.M·îmûvcªÛ·oWú/_¾Ü»wï²²2åáÖ­[å¿w9ñ›j”žªªª¬¬¬ßÿþ÷=ö˜N§3ÚTcŸ‰ý©3±Å ÕãÒXß  ÕÆX»¥xú]Í)4†¶ï‡~8xðà‡zèw¿û]mí­K¼ð ۶mS‡9wîÜ£>zõêÕ¦¯ÒÛ =ëׯ6l˜¥¥¥££cNÎ?Ïó¾yóæŠ+lmm8kÖ,Ézíîî®ôïÞ½[Ƴk×?ïUâêê*ÿ•žüüüéÓ§Ë>øàƒ¯¼òŠ{šOC}}ý’%K†jeeµfÍšŽM˜ `ccÓ«W¯;æy;///Ã#lmª”ž„„„1cÆ(w ÖûoMMÍܹs‡Ü&=êN­Öfª-©¦µ×*÷â”"Ë{É*ÑüÖ¨^Äw™ÿµvÓ¸Y©‘sšýÒDKtò¹œ‘g, —ó·IϲeËäÉøøøýèGÒZU†™={¶¿¿¿áF³<ôôôÔétÒ¨ýàƒÆ¯<¿víZggç3gÎ\¹rEÀüùóõÆSWW'‘©´ôÖQF .”$óÇ?þQúez¬­­å¿ÒÿôÓO'&&Þ¸qC’•¯¯ïœ9sZœ†•+W:99Ê{Í›7¯c&Ã{xx·%'HÞ“‰4<ÂÖ¦Jé‘yŒŒ”D!£’:ëýWâ,”¢Û^|ñÅ÷ßßðLµ1Õ´øÚÅ‹»¹¹ß&K¡ÅTÓ±E|—$Òȧ†K’j@÷;“ïàµì¥I™ÆÕh°°8~ü¸ÒŸ››ûÄO(ýãÆûòË/¥§  ÀÆÆæúõëwL5%%%J¿ Ü¿¥ßÞÞþäÉ“JÿÅ‹‡ Ö|æÎûñÇÿãö>¢ÔÔT¥¹üç?ÿYžo>pEE…LO‹Ó`gg×|¯E{'L†?{ölkµjÉúöíkx„­M•Òóøã}ûí·-þwĈêÒ‘‘¨K§µ™ÒI‹çÕ´öZy¯'N(ýò¦-¦š/â»”:cѱÅØŒj@·‘“èä#‘F‚16,,ª««•~é±´´TúcbbFÕÐÐ0sæLivß±‰ßÚCiû6mX÷îÝ»ù¨²³³jkkŸzê)yø£ý¨¦¦FÞ]žWÈÈȘ4iÒÀ•‘ôéÓ§Å7•‰Wç¥Ã&ýmL5ʱy†Ghxª233===~øá‘#GÆÅÅéý·ék›.¶œÔd`_M‹eäênò^-¦š/â»t½¨4ÆÚ­{Ï@©€ª<;?yò\­Ý4]¨Öho»Ñt_ô¨{¤e?zôh???[[[åd=z§´Öä•pÒ–óé%Õ|þùçÊ!XÞÞÞkÖ¬yæ™gÔÿÊT………]¾|YR–üUG®7  ìiã„8ɾÅój~ó›ßa[¦Jª­ÕjÕƒÙÚ²¯¦ÓSM[öÕÜÍ"¾Kùš$_¶*¤Ðu*O}ýêÂk·‚Ovùm¥mêêꪜW#=ê™B£ÑÈ7nÜØâ y䑼¼¼;¶•]\\dH‰FÇŽ{ýõ×[ÛÇüðÃoÙ²EúCCC  “¦æ~tttMMÍéÓ§§OŸ®Ž\oV­Zåää$ôxK'¬-©æúõëYYYsçÎmz ´ÖFhxªf̘!B^"©FƦ÷ßE‹©çÕLš4iñâÅ÷.ÕÈ{©çÕ¼ôÒKmO5m\ÄwI>G»m=ʳóÙ¼jÀ=W¥»pØ{yô דkÂMâüfõhüíoÛôÖ1;vì9rd}}}‹/\»v­••Õ›¼7oÞ ²··ïׯßèÑ£cbbZÛwß}×·o_i»K¿äéoz+˜={öŒ5Jž:t¨ŒM¹Þ4ÔÕÕ½÷Þ{6662/›0éF¹_„®gžyæü£r‘Ã#4|¸±MUCumŒµ[Å  ˆT:ŸdI2’g$ÕH¶1ƒ9ºyóæçŸ>zôhõâÎè æÍ›WRRröìYggç?üáF8…ÇWn:콜%Eª©¡º6Ï?LòŒ´´®•šOKÂÂÂÖÖ6##ƒEÜ£ÚØØ <ø­·Þj~-o#ùA>nUº ,,R èõ Ÿìˆ±vûúÕ…\pè2G~š5/€:jÀÝæ]¨Vk7í û|®Èt±ê’KQVÎæq¨'H5tó1É{Ÿš™èäSš”I5€n‘5/àØâ ÔTÚMbÌþ±oÄ;xkQ  
Už*ŠäjW©€.r)-'ÑÉGk7­(2‘jÆà°÷ò<ÿ0ê@ªwVžŸâékã® Õ6Ö7PÀHTœÐÅX»™ÄnAª ÛTž*:ì½\òL~ †–`„R<ý >ÙAH5 Õ%—2|ü£¹æ. ®¯¼AAãt1!=aÂ[ÔTþM]ùµ# ÖEY9Ë_NDŒ\c}Cô Wsºû-H5ÜmžÉ],-¤L߸`*Òç¬ÊÔPR =]Cu­´ŠbmÜ{/¯&9ÎþµD'ŸÒ¤L àôÆ]i^K©©ã"¹Eš)ÍŸ¿˜ïà%ôP%ŠŠºÝ¶ÔT€)Ù›cíV_y£é“¥I™‰N>Z»ix(=QVÎ\üT€±¸š{&r€S†¿úLyv~ЧŸä]¨¶±¾hî û|~ò Õ`jÊÊwþ`’Æâ¹Ëyò°òTQêŒE1Önùš†êZê 5yþaY󨩀n&¹e÷¯hzýd߯ê’K‡½—Gr=¾ryÀ•&ep|“:jèf‰N¿•H#I&ÉùwQVι˂ëʯQmQ_y#rÀD~!Õоù­Äý?ÕX<']ü3¿þ.%›šh—½OͼšSHH5ct½¨´<;¿4)Ó;™ò¶\•(?(bGÿ”H#Ý.›—#8øÉ¬sšýüò  R<ý¸`©`D.&Κ÷¡ÖnªÚÐ7õ.Î~Ú‘kZ¼ÉŒ´B¶÷›  õ€Ó·œwYOŽü’¦×Oä™ýüØâ Šà޲æœ\NH5€îWšôM¢ÓliÍï°üéA÷Ù9K—ÿµ(:äb¢Æ;™òSë×É\$Ož%stëè2‡™ML½”–9`âŽ~ãc‡¾œèä“:c‘´KòüÃt¡ÚҤ̊:ò €6*ødGӋƒTèÇ&íþX›—΄|Z#½±1Ëœ:™#™¯x‡é2É“_SVþÛ—r&·è%ñiÉ“çRR  Û4Ö7|ýêiîgø,0¿<£×Z¿n‡åø˜ÇÜΦfUTTÔÔÔ°¸{•§Š´vÓ¨©ÐmŽ. ’Hsòã5ægÔîJÖ®è!.‘ƒ\r¤“mܽÆúÙÊ_JAªtƒbmŠ|gÍ[ØC"Ò]Í‹Û9Ø%røÔ¬Ã:®¬¬Œ`à.ií¦Už*¢¤@7ˆw˜¡µûÅͺozTª‘îÂÞð[×qö~/==½  €`à.%Ož[ŸFH5€®vN³OZöEÑ!=-Ò(Ýß§ùhú>¿G™ššJ°p—2|ü >ÙAH5€®–æõ§kÃMÿòòƒøÃ¯‡µîÛ÷>ù;¾×Õ«)ê-,,Æ{²¡áßvõÈ“j¿üë‹/–L˜àðƒ 1üð‡ƒ_{íç Ÿ·}üõF-‡&¡n«÷»±±±wéäšð¬yÔTèRõ QV/¦Ï™o Ý_U•úÌ3ÿoÖ¬©ùù155‡åï›ozŒcýzšš+~ýë)ë×/j1lÔÕe¼üòϦNxðà¦k×ÕÖ¦ët{¶lY9qâÛ>þNy£ÖºèQ_ŽœºuëÖ¸¸¸ôty•®¢¢‚u@œINñô£¤@—ªÒ]ÐXü‡ß}—تÑhöíÛ—]\\ÌîP–r$aÂ[ÔTèR—Òr$Õ\Øn ÑÿÔSvnÒ{29ùo£GÛ5Í~³g{6£Þr÷ã¿û7j­;úÑj)Bè_×oÞ¼9***99¹  €Ý5:öSÑn[Ž4 -hBß›dr…e‘€É(MÊ”ýÅDF¿¥åý—.%é=)Ïôï߯i®¨¯ÿfÌûÔÔ0½°!ƒÝ¸ñ¿w?þ»£Öº³‘›¤á+BBB”ãв³³ËÊÊX=´W}åÈ» ñ}Çá‰I¤ Õt0u:òì³ö’:š>)/WÏQ‡Wt,ÕtøZëdöo]0`ñjI5[¶l‰ŽŽNMM-..fõÐÍoĹzõê>}úÈ_ã !{;SLJ¤ Õ´û1éfÍšônÓ'Ÿ~zdZÚææ£Uhïh~#éFóþI5êAh:ŽÕ@ì¶õ¨Ò]PÞ¼ysøðáAAA#FŒ~R ©Ð=©¦ígóKWZúÕðá?¼x1A}Òß®‡‡“°Ñ®«ÜÍN5K׆†††……mß¾ý«¯¾*((`õÐñ^åÙùêC­Vûì³ÏJϸqãöìÙ£>_[[ûöÛo?ôÐCC† Y½zµÚø–žÏ>ûL‚Ðý÷ßÿä“O¦¤¤„„„Œ9ÒÒÒÒÑÑñĉzuéY¿~ý°aÔrrrôHHH3fŒüW† Vþ¥R°±±éÕ«—<ÌÏÏŸ>}ºLÛƒ>øÊ+¯(Gä6UMMÍܹs‡Ü&=êEVôÆÖT}}ý’%K†jeeµfÍåÉßÎÀ¬µ8IŒ+V¬°µµ8pà¬Y³ªªªîXR ô¬TSYùõÓO|óM‚‚]µµéòwölϯ¼¬tŸ}öÞþ§»údMÍa'§±S¦LHLüâêÕ”ºº ‰";v|¤Ðöñßå‘jtD'Ù´ª§L™²iÓ&é‘p"ýêóK—.uqq9þ|QQÑ‹/¾Ø4`üâ¿(,,”vùªU«xà©S§ž>}Zy8qâÄæ©ÆÓÓS§ÓÉ|ðÁøñãõxôÑG###%rœ;wnöìÙzÿUzxx¨GÞ>ýôÓ‰‰‰7nܸzõª¯¯ïœ9sZ|•D 
™…¢ÛdÞÿýÇÖÔÊ•+œœdî®\¹2oÞ¼;¾]‹³ÖâHÖ®]ëìì|æÌyÒËËkþüùm©©zPª‘îÊ•äyó~ec3ä¾ûú<þø?üá×-Þ%S½æ¸qO6}R²ÊÚµÿóãÿè{ß³Tî³éí=õ›o¶¶wüwÿF¤÷Zš×R]¨Vé—4òÈ#TWWK¿ü•~iv+ÿzâ‰'Ž?®ôçää4M).üó¶ëׯËÃ’’õaÿþý›§Ã<þøãAAAß~û­^ŒÑ{xöìÙg§¢¢ÂÆÆ¦ÅW1¢é,ÈÝqlvvvê.—¶¼]‹³ÖâHìííOž<©ô_¼xqذam©©zVª1ïŽT eøø|²Cé_°`Å¿óóûç=:---•´£ž¦)Åpüh­§µç333===~øá‘#GÆÅŵöªÆÆÆÿ›…ŒŒI“& 8P™æ>}ú´ø*½Y‡-Ž­µ—´÷íÔ‡-ŽD2OÓ:÷îÝ»-u Õ©†T-È],Ý?þµs¦é¥GΜ9£îºiº¯&77÷Þ¥…Ä ­Vkmm­<Ô;ãEo`™6Ù^¾|¹¡¡AþªÿÕ{•}5­GEóÝ,­½]k³ÖâHFÕâU^ ×T¤R ´ pÃÎô9«þqûDwww½ÿº¹¹É¦æ·OJquu=›‹‹Ë½K53fÌìQ[[+­ùÇ{LyRÂU^^^kï"þèèèšššÓ§OOŸ>]ý¯Þ«-Z¤žW3iҤŋß1Õ¬ZµÊÉÉIFÛô”˜ÖÞ®µYkq$212y2§ÇŽ{ýõ×ÛRR jH5Ђ³á{Sg,’ž±cÇ6?Ò)66vܸqÿ¸}1Ÿ&Y ù5WdÍ P³ÄA÷ùjr8àøfÓleå,Íbµ‹µqoúßx/½àÑ<¨]øióTÓ<ü(]I|š^LjÚUœÐ5ÍT†»¼?‡6òžÜEx¿Kªp÷êʯE˜ht t ‹ûî¤ÆEi…K“]i»k)Mü‚Ov(àÈ‚uJBH±¨yöˆ¸ïy¥ñºÃr‚úd„·”!%±¨C’Œš+$á¨YB’šÊRŽ4Íòf$Uj¬oé©¶xƒdE)æÅ„ô«9…õ•7ø€Pɦ¸Û 1|…èX½zuŸ>}äo§d*“ˆ7¤ G¨.¹¤6µ•÷”}&G~Ú|‡€r€–Úà–ÄÒt÷ˆÞ^i&Ê8%ùÛnNw½¨TÙ©¢P²Š´•¥ )ž~Jéb¬Ý¤nR=)Úþ±oÈ“_¿ºP=\M¡Z.)”Œ­ÓÀÕÚ~µYç%À(§EI¤QVu á’´%óÈb’ü#kµäs ’?%Twl¡0u²Ñ–Ͳ9¥š›7o><((hĈÒOª`¼ªke¬4Ú”F¶rz‰² %yò\5¢ì°¼u¸Nô WõŒ%Ÿ(ûL”gÃ÷6Ý!PSVÞÓê)ÍY¥˜R ¥’Ê™9 ÞRcž„)©PbŒ’U$ب‡ºI麠MLªé¬k ÉJ^ž/ËNâý‘ëRg,’ô®ì@SvIÔ—Ô*Y´$>M.»wRÍ=4*õ™õë×6ÌÒÒÒÑÑ1''G++V¬°µµ8pà¬Y³ªªªZ¡V«}öÙg¥gܸq{öì¹cvª­­}ûí·zè¡!C†4¿ÇhóÉ#Õ¸å /å\ç4û•ý”ó+” ¢wä’r|—’R”Ö¤‰¦žž^yªÈ¤åêܪ–¥Q⟒ý”ÝSR@e‹üUŠ)ÏË•Sÿexå 8ã‰y¤š{}egemQöðHjMŸ³JÙ½³ÃrBô ×x/É´òY+ܰóRZNÌÿ€Y:àø¦|¢»¹EÞ,oxzzêt:É-|ðÁøñã•ç×®]ëìì|æÌ™+W®xyyÍŸ?¿Å±M™2eÓ¦MÒ"ýwL5K—.uqq9þ|QQÑ‹/¾Øü¼öÕø?M÷(Geøø+'Êï}j¦r~¼’Uä¡zŠ… ¦œP¬=¤ŠÙZn)ŠL”Ú*†IcT*©œ^¯G§žb¡>'õ”%bZ‘jºñ~5’a.gäIÖ•¤¬H²FEY9Ë &ŸVù,ËJUŸÆÇ0Eò)–­«±¥š’’’¶®_ïß¿¿ÒoooòäI¥ÿâŋÆ k>ªÓ§O?òÈ#ÕÕÕÒ/¥_RáTóÄO?~\éÏÉÉ!Õ=—ÞüÊñKÊéêñK±6îj\QŽþ*ܰS9Qž 7µ«Mi ·HÍ•Ú*;¬¤°²hÌ©¤c» §¬`²PdÌôý(yòÜk·È÷}C>æyþa²ºÊ–6`äºÏ/Ö2¶TÓâC‰7MëÝ»wóQ-X°Àâßùùù­¥¥¥’‚” DªzJhQO;V®¶¤œµ¢¿¤ ¦žnQyªˆÃTÚ®¾òFÅ åè;å¢aZ»iêñ?òÝcÞ¹…TcZ©¦¹ºòk¿eý” …ÄlÙ2DÜ÷¼ü•~yælø^ù/géFE¾kä3Û½ÓЫW¯¶¤šQ£Fét:C-–Û;gšsæÌuפ—ëׯ+Ï—””´¸¯&77·y˜Ñ›ª×׋J•ƒñ¤˜RmÉ*’X¢¬œ%½HûO£rôd©3çj“jL(Õ4×Xßp5§°(2QÖöÔ‹dU0Q¶*²ÚËJ^¬=Ä©F‚G^^ÞSM`` ‹‹‹ Y[[{ìØ±×_]oy¤ «£KúœUÒŒÞûÔLe'€rY^å|qr˽h¥IåOoÜ%AEâÊþ±oDr0Qê¯\6JÒK±ö, .ËFª¹›î\Lè­Tó— “N5z*O 
ß+“ŽoÊÆJ>5²ù’ÌåŒlÔÁÔ‘j`’”Ç•=™¾tŸßZt‘f4;îE€)‰OËÔHñ“\|¥æÊ4ÅŽ¯ÜtN³_c:ã¥k”¥‘}É­=9ÕäÜJ5Ÿo2§TÓ”ÄÙjÉæK>MÊájûǾ!Ÿ5‰=Ws ùNÚG¬£¤àº^T*Í8i"+g_(Ç/)'ŽË×üׯ.”ÍÇ/ÝSõ•7$@Ê"”(—6–Ô_Œ, ifIñ•{òsr—åIiП ù´'§š´?,ÒôqŒˆˆ0×TÓü3(›Áü@|µvÓ¢¬œ“\|eÓ'Û=Óº.9`´t¡ÚÃÞË©©è ÕµÒ2.Ö’oî¬yÝç+é%ÆÚí€ã›ÊÙ…vr“ï{­òT‘²$C&:ù(—©•$#‹ wY°dIÔ¿{?);,ÇY°¨'§šÝ?þ¥fÄË='Õè©+¿&Òã+7¥xúEr•Nzòüúý‚©¤ô,ÊÁcÒ>VîÛ0á­XwåÊWf2}?’&µ|g_Í)äì‹{­JwAJ-í¡¯_]¸÷©™ÍwÂðK°JžüvœýÔiª¿;¨éã¨yý=6Õè‘iQdbÖ¼å(ÜÔ‹ 7ìätA ]äû.ÉÅ—:j€VI,Q¯gÚôVÜÊ-êŽ.üôôÆ]¥I™4»F}åKi9Êí%LʲP®-{dÁ:v˜‚O"4Ï]ÉÚÕ3SÍ‘¥+nT³îsRMsÕ%—”Ÿœå£­µ›–áã/›ßºòkT0Lš"‰N>ÔTü“r¹zíݶÊ•|”{ÏÉw­4©9õ¥+)‘òØâ ²dq(çKC'?P#[pÚ:¦¨±¾A\”Õ‹i^ïôÀHsãÂß·?ð³G¯ˆÛH5†?þ'ׄ˦8ÊÊù€ã›¹Ë‚åSÏùo@‹.gäÉ÷#u Õ ‡ª8¡;“¬ÞT®éÁKÒh–xSyªˆ*u¥†êZÉRüÃÞËeë¬Ô'yFZ3²¤dyQ"SÏ3²pe™JOî²/"îs¼”ÝÓRMÒ+ÿ}ë:·wÔjÚ¾e_V$6@SUº »m=¨©=4Ã(-fù¦T^â˜n!•?½qW†ÓË¿nØy9#ÃÉ̉äUYÄI.¾Êuå×vÛNÝmëV{åP;öì7~J¤Ñh4!!!›7oŽŠŠJNNÖéh¦ß™¬9²Å–-†4àbmÜ{/?¾—+°¤R ÌVMYyI|ÚÉ5áj†ÑÚM#ÃiÔE&Y°.aÂ[Ê .Ó¼–*·êc¡˜k3TÚ 1ÖnòÑkú¼,ñˆûŒŸYw-­'Dš£+VÝŠ4NoFüËÖ­[%ÕlÙ²%:::55µ¸¸˜µ¥½“ ;¿~ua”•³lçe«"›}6#è±d CH50Ê ò­vÐ}¾´Ÿ¢¹*÷CÐ…jùÕ¿Ûf±öPî²`Y4²\bmÜ%^J༘Îr1{ò©”%žéûQ‹'Aå…ìÒôqÜ÷ÜëUß~eÆy¦öÊ¡”_½# Í Þ[·©;j$ÏHª‘l—]VVÆ Ó1õ —Òrd#“èä9`¢lüó5\Ĥj`Ôkaeøøïû†|ÅÙ¿öõ« óüäÍ—Y·«8¡+ܰ3uƢݶQVÎ’gŽ-Þp>&™k-ôcd¦›TTTd|±=¢ÿ„íß›³êÏõ7ÒÍ,ÏÈÿxí«ÕK9ß.‘&<<¼éágR V›NùvoÃÞË•{}žÞ¸‹ÍzˆXwÚ?¤˜€Æú†ËycÒç¬RŽ(SNÀg¤ÍÄOþF•db¬Ý$ÌH«BªåŠ =ÓÅ„tù~Íš`øp šššâââô= Q?}ãÖÑYýÇðôÉû4ðüž-5¦Û‹ Í Øï6GÛ­]4£ñá_ÕÏ4^§ùþ϶ rÙ:v†ÆwÙÞmÙÙÙR©kN'’uï|L²l‘"LLñô;¾—o ˜7ùæåFO¤©«9…ò=$ÑEÙ#aF"Íé»Ê³ó¹¸§1'¶ª=¼))ŸY‰4¥I™mZ…**t:]zzúî€/¶¹ÌÙ6àg·b€Yt;k¦øD¬ ŒhBÙKî:g›åø[ƒõúIDßç#î¿ç©™Yï|T™ÈNÂŒ|•H°‘_](Q‡so`–¤±d`ß8H5èRÕ%—äûæÈ‚uI.¾êyäùù”ò%dœIFk7$ƒ¦¿D(Û¶ßJ¨¦¦¦¬¬¬   55566vëÖ­¡Ÿ„-öÏëâÕš÷×D,]kzÝ_‚">ßñï$ÏÈÜ)çÒ„nÖüÀI?õú‰Òeå,5¼˜ÎÕ¹jÊÊOoÜ%ß/j…ù æ$Ñɧ?'Tƒ{ÕD–¯iÇÙ¿&ß4)ž~yþaòeà ­5 ÑEI2²¤2|üÏiös5U4%«„¬²ž´{íjlâââ¤õ¿yóæ 5êÉÜÉ<ƾÿñöïO¼½»æ¹¨_ü»óï²ænØy)-‡­ß½&Û®ü@M„·¢¹¦ÏYU–r„šÀ Èw´Þ¥&AªÁ½¥Üiñäšp 0ò¢üØ/ßåÜRÍ]Í)<¾r“r fÙ\r_´¨±¾AZäòY.ÏÎï`lþW°IOOß·o_TT”²[C@˜¹y‘9’ù’¹“y”9Ý3þMMoÇí÷zpRÒ îÞõ¢RÙÄÅ;x)§qôLšrèu Õàž'™Ò¤Ìc‹7(—ÝÜ?ö ùþ8“LÙ8ÖÅ„ôLßvÛzH;õÈ‚u§äS,¹÷ ûü»ÜàN—œœ-`»¹y‘9’ù’¹“y”9-:zbGÿvXN(X§‘ÏÚÑ…ŸòAë.•§Šr—ËRN¾ª:œÏn$ÛIéÔTƒ{âRZNž˜r’Œ´{ä;C² 
W¡1Ú¶éé»”ûÙIø<¹&œ³–qG—3òbmÜ;ë{T‚MEEEqqqAA´ûSSS%|e.d^dŽd¾dîdeNe~s—ÿMcñÜ‘ëjÊÊås'@Î!ì^’g¤i(ku¼ƒ—¬ØlaB¤•%u Õ Ó(§’§xú)7{ΚPŸF’1æ¯pùæ>àøfô ×4¯¥gÃ÷rL?Ú¨X{(ÆÚí|LrçŽVÉ6eeeÒô×étæBæEæHæKÉ3ÊÌ6Ö7ìzì_ýì¿•~I5 Þbƒi ÊRŽÈ÷—¬á²y,ŠLd7ŒŸr–2u Õà®H;XZÃòYR.﫜JÎÏŒ™r4yœýkÊq/òýÍw6ÚûõkãÎyÐzþúèÎ.êCiIË“£s„l%·':ùÈWÛÉ5áüèc&M¯Ô‹¨©QqB'[yÙÜ+×.“V—÷5rõ•7t¡Ú$ßèA®™¾]ÎÈ£&è€c‹7HË›+|tI2Mt’íª´¡)¯Q)ÏÎOóZ*_vz 0¥I™Ò$£¤´Uc}ÃÅ„tåzGò½+-cyÈà&±Ô”¯då&tì™A‡×%Y‘&¼Å΄Îý¹Ao¯W~ &ÆÚ`cldµ?ºðÓèA®)ž~\ÆF¹cu Õà”Û•(§’Kƒ&Ï?Œ¯[SÙÆY°.ÖÆ]–Zá†>»Œ4²ö'~Ü‹Úê=#Xùä²¥5B ÕµŸìÐÚMÛ?ösšýüH#Q¥»°ÛÖƒ:jÐ2i»œ ß{Ð}¾r'féçlSI¡ùšx/ÙÀå. æx tV¤Irñ¥ ×eŠ" 6ÆL9åF–§ÜÀHh,ž£¤´f”ë˜É_éç§YS! å² Ü0i¸g‘¦{IsyÿØ7XÆO¶À…vJ wð:¾—Ã5ÑŤ1@¨&ÕôP5eå'ׄï}j¦tÒÃ93¦E¾/%‚Êw§4w¸¬î‘<ÿ0ÚÓÆ Ó÷#Nj2!ÅÚC²¼ØyŽ.öõ« ‹"©©¦g)‰OK±(ÊÊù°÷rî£grªkó5»m=’'ÏåWÜ;ÊÙêûd $ÏÈç=ÃÇŸR˜å”ùª•PÊ)7貉(ܰ“:jziäù‡IkxÿØ7d½ç÷WSÌ3¹Ë‚£¹¦y-•ïK ‚{çRZNŒµ«™ñ-vœýk4YLñ›W9åFÙ÷”´¤£¤3w5§P¹HK¦ïG4SLTI|šÖnZêŒEœ ˆ{MÖ±X÷bí!Ja„ËåbB:¥09uå׎.ü4ÊÊ9k^Ç{ã)ܰ3}Î*ê@ª1[¥I™\ËÔ]/*•0#‘†Ö º@}å½OÍ,ød¥0B²à°@ÓUyªH¹kBî²`—@§+Ö:è>Ÿ:jÌMc}Ãù˜äýc߈³Kc™ôrÌÔH(•¯@"ºF†š×Rê`´dkÀ•L=šî}j¦lØ >ÙÁrD'ºœ‘' ?ê@ª1«v°l(µvÓ&¼%Á†‚˜®Ki9²y’æ ‡b£ËE&ÊÖƒ_‘|#/›Žž7õ…X¸agô Wù¸Óì§ è׋J%-SR™£l"ºÏçÊf&­®üZ†¬;ßvèâoDif]ÎÈ£F®ºä’lØÎ›Á¦þÈ‚u÷=ïàÅ-Ñ)4ÏQRÉ“ âþ±oHÇ–ÑÔkI{%k^çA¡+5Ö7$LxëäšpJaŠ"ãì_ãÀT3PqB—5“Dm¢â¼8üØü\Í)Lrñ01?PÃgmWðÉIÅÔTc2ëkô דkÂÙÌ™‡ÊSEûǾÁMÜÑ]ªkwÛzpƒWÓU𔩵›Æ7‚Y’¯Y¸\9Smt>&™q’jL@•îB¢“»hÌÉåŒ<ùÆ*K9B)Ð]Ž-Þ:cu0iÉ“ç|²ƒ:˜%É«'ׄGY9gøøsÀ9î¨<;_b0u Õµbí!vј™«9…1ÖnDt£ê’KÒZâØ}3hÇÈÆ„ g™±š²òô9«d)sÜqU‘­:u Õ/ 3±6î\íלÔWÞà–ÒèvG~š5/€:˜O?®÷`öÊRŽ(w3ã÷M ©F² u ÕÙreøøï}j&?¦š™Ô‹dÉRto´ŽäÊýÚÌCyv¾´wiìš½ºòk’`÷}ƒûu¢5²zp"©ÆÛÉ“çtŸ/=&¹l,Ú±tRSSGŽÙÆ—´kÌF¨pÃÎx¯.b´3nêK¤gÊÔ|ýêB–µÙHrñÕ…jù÷ò%=Èõlø^¶ÿæÜÀíha{/—5„’jŒH•îBœýk™¾™îooê²-ŸÌ &ìܹ³Gh´Ê³óåÛ¨c?³‘j:qªŒjš»~bdâµ›ÖãZÛ;©wž&Qg)MÊ”o.[ Y²ÝþU²÷©™i^K»ýwOÖcû^ÈÔph1©Æˆ\Jˉµq7†kÚ¬^½ºOŸ>ò·S>>¢>ø`MM©7î郞üÚn[;Þ\¢%žñï¾ûnðàÁMËëéé©öWWW?òÈ#2ÌÝT»—ˆ2³½zõ’UÂÁÁáÝwß50mÆð­Ó|1™Y &¼ÕÏ{ÏcÇÞΤ#ëÝL|k—‰gÉšú’m‘ä™ ÿŽý<щmÖcK5Ò›oÞAªéÊoùÆp“›7o><((hĈÒOSM»>½¦›jRg,j×/(zsÚá÷ððؾ}»ÒùòåÞ½{—••)·nÝ*ÿ½ËjwnªQzªªª²²²~ÿûß?öØc:Î̾uºÝÇ7ÏÇ$wàóN 
Ƙ۾ºPmó[U°dÍ5Õ¨¿PDY9ß‹kϰæ˜è÷‹ÄÝÈ9ËŽTÓý*NèbmÜäFÑZ­öÙgŸ•žqãÆíÙ³çŽMíÚÚÚ·ß~û¡‡2dÈêÕ«õ3ð{¶Þ¿òóó§OŸ.ãyðÁ_yåµýÝü´„„„1cÆXZZ6,88Øøój{¯¾Ú¼Ôëׯ—™•YvttÌÉÉQ¿{V¬Xakk;pàÀY³fIÐOtt´»»»ÒÿÿÙ;¸¨Ê´ÿW¦a«-$)¾=ò‡3gÎÜ羯ûº¯ß9÷KJJ ]'))‰ù×ÅÅ…>U•í½K­««kãÆcÇŽ577 Q/at~hh¨µµõ£>Ú§§Ÿóòòâ¾ ªTqØ ûi{{ûÚµkŸyí°/µTÝŸVGÕwé
#Û¶mëmÒjqM1ÅÆg}ç¨×{öì¡ gذa/¾øbnnnddääÉ“™”_¼x‘ç­qSoóS°žFËQÄJípâĉåååÌ~TT³CGè8·ƒê}ƒ½Ãíµø'^)ä^-]f—AÉAÉò‰*‚£u˜ å¨jeúe3ª.ÂÑPªê¤Ûy‘Ÿ‡H€ªLdu©¶žú3Æëí·ß>pàíc¢ý>UM`` ³³óõë×¥Réܹs•Fl|Â÷—^z)++ëÞ½{wîÜññññööVugŸ}6>>žüѵk×>øà=/ßlgŸšýIyTCÿzxxH$òz›7ož5ks|ÇŽNNNW®\¹}û6 €õëÖvvvR^Õ×÷Ä:~~~¤dþò—¿Ð>–••}Ê'Û¶nÝêèèX]]M¿µnÝ:õFç»»»×ÖÖòÑ T¸”Hî ªJ‡°ŸRCv+}ÙígŸ}Æ}S„JAiQ¯ˆûEù¦ˆs~»yÖwŽz½páBÊsJjPPÐÈ‘#-ZTSSÃü;gΞ·Æ³˜”ZO£å(b¥vHg=OÁ¥Rº¯»w{–>ܽ{÷ÇÌý£Ü7ÈÓkñL¼*Š|¶Sù¢d¯dûŒ(-kð =,§Ï¶¯¿6£ô" ¥Ú¡Y¦z‡ªLºZî‘Kº°õ€ž¤‡<ÑèÑ£e2Ù/ÿzAUŽ[ÕLœ8ñÂ… Ì~YY™Úªæ?A57[[[«ºÎ /¼þÓO?éùÖg‰'-îo{Ó;«ëêê˜ý¶¶¶áÇ3û¶¶¶—.]böoÞ¼9nܸޗZ»ví×_M;o¼ñF^^ãd¿øâ :Î3Û&MšÔû­EFç_½z•§%è:t(÷U¥ŠÃNØO'L˜ o·ÌS[Ž›RõüUáñ¡ÒïÒo±,éG•Öµ‹˜?äj–å¨ïõúÆlRU¥¼Ï[ãYL}Z‡Ñr±Ò«%%%-[¶Œv‚ƒƒ)+˜‡£žžžÉÉÉÜ?Ê}ƒ<½ÏÄ«âNYµü‹8”¬Ñ”,Ÿ(Bƒó¦ÂrPÚÊô×f”^„£¡T;Ô¹&ÌÈõØ‘U38P°›9{^ÍYáëë«®mذ[Õ˜™™1q‚j«šÂÂÂyóæYXX0¿;dÈU×)**òðð5jÔäÉ“SSSõ¹ˆ©|Õ˜…“{\ û/yLù’zì±Çz_ª¤¤ÄÎή££cêÔ©ôïÿ÷···O™2…ŽóÌöÞ¥¬vÂhÿÁƒ£@ª´‰‰‰ííí555K–,áp…Ë—/'ÇÔÑÑAUý¹çžÓÛ"&I£0ß”fUMXXe8å-eÅùóç™.½ùúë¯Gõý÷ßÿòp¤ìˆ#˜>iÜÙ®PjAAAÐ9JG°ðLUCʹ¸¸xíÚµòs ©º wª”Ú û©¿¿?ÛÁzÞ¼yÚS5ô[츚ùóçóoæy1r¬U˜„»¾÷Y¯5Á(-&óS¸O£åYÄò?ÿüóááá´¿sçNkkëmÛ¶ñtPÜ~¯O¯Å3ñ°óº¢d¬dùsM˜‘`î¤ÐÑTS1€iZŽÒV¦¿6£ô"}6”ê…:ZU3ÔŠO§Ø¸w6ÝÕ«TÙÛÛ÷~Ë™œœ<}útæÁ†ƒƒ3Çž={ä'cÆŒé=Ú/‡Ä™››÷;vlÊ”)C‡;v,µ=®ððáÃt&¥ä•W^ÉÊÊÒÏ"&=£öÌñ<‰û÷ïSFÙÚÚ>ñÄÓ¦M‰DJ¯öóÏ?S®’§}Ò ´/¿ŒªlW(µÎÎÎO?ý”B*èÐÐPõÆ­j˜õjHt½üòËùË_˜I¸/È*¥vÂ~*“É>ùäf2Úa»ðT5ªÆÕ(ý.]üøSG¶lÙÂŽêó‹<‹¸OºZîQ¬£Ðíž»¾÷Y¯5Á(-&óS¸O£åYÄò”””<þøãL! 
¤ýÒÒRžŠÛïõéµx&žƒnY•25+(Y#+Ù~A’Fd媞°åôFi+Ó_›Qz‘>JõB ­ªÑ5mÒzµ0È­ôwê3` œ;wnüøñº×Ø9 Ö"óžÌÙkв€`è`h TNyÐÕíì£?“žmÐÙt·÷£q`ʬ[·®®®îêÕ«NNNþóŸuüëçö)Lû Œ’|¯@I”ù lL™÷ ¯P5ý "8Z߆ÓS³?éÌR?ä` ³¶¶3fÌš5kØY7tÆ)·õê ñ†Å…­HÁ"Q—ž?16Àp¡ðCÉWTM¿¹UX!²rUX$\Dý!ÙÚ­Urù`ôP(“ïˆ| Òø,¤2L¨WK†ãT5]-÷RlÜÕvƒ‚‚.\ˆ²×‘ýkÄž««ë—_~Éÿ‹Ì ݲ]&’Ïñ¼¼¼É“'s ‹Tã‡ë:gEƒÓÒÄ3yp Bg¿N틉¼%6‘Õˆ°àtÏ(bƒ£V|)¡j´N¾W`‘Ïvõ¾ÛÚÚjiiYUUw£û{¼xñâèÑ£ïÝã;H¦"8ºÀ;hp BéñÙ³g=zT½„ªÑÍýj#Ø ¡j=…ÚN†$J¬°*j–A¨) ±Ú´\–Š'-†T€ªÑ" ¹¥)6îj?¿?xðà¢E‹äë›Ò‰e!c´tË®®®‡æùÅ û•:{ãß/UóÔSO±+ÜkÕúµÚ¦™å;wüüü&Ož<|øp ww÷ÌÌ̦P·`ô½ø¬)4ð|þꫯ† B5’B-•»«Ó,P ‘•«¬®El¬E,OW˽øs  j´Åƒ®î´©+–Àëï¾û.³Šâ`Õ.W5‘‘‘+V¬àó­Î¦»äMt6D¿TÍ@ ª†˜?þš5kªªª:::~þùçØØØ9sæè¿ªÉ÷ 4îIÆu jîß¿?~üøððð &оÞFHå›"Œu²;Ó,P r)$&ÝÎKŸgæDká#¯A*@Õh‹ª]qÙÎ>¹UÎêêjîÚ%ø÷„ÑÿûßéHjïêÚ¸qãØ±cÍÍÍCBBØ ÿùçŸÛØØXXX¼ÿþû­­­ì•·mÛ6f̘§Ÿ~úã?¦èí—ÿ|5¤€öööµk×2kZÑû*€NØ»wï¸qãÌÌÌÊÊÊz'XÕw•&XéAŽÅ¼BCC­­­}ôÑÞÿªºwºÙ>úˆnœÒ#¿ÄXO VUMœ8‘OIÕ¥çó,k¥wÄ‘Ÿªî´ÏijgÊ¢ü”–TŸ$*++—,YBç<õÔSï¼óNCCƒRkQšrU¥ ôšDffæ+¯¼Â,Ñçõ×2U¥GžáÇ߹sGiiöYpJKPK5K,GA}vÑÀýXï"à°LU'÷v/ÜîH=ãTš±j—;!‹_}õUÚ™>}ú±cÇúŒÃT¥PU¹kг«¶Tï;ªvLÉÇOöË hgÏž=Ô~ 6ìÅ_ÌÍÍŒŒœ>ž‚ûk×®}ðÁ}þP-SUzäquuõôô<}útïAV}\Ÿ%¨Áš¥@Š»F&@ãS쿪NVê^8Ü‘ÚÆÙ;aj—;ñöÛo3ÏŒ((§ý>#$U)Ôvgý¬SÁÓOöË hgáÂ…äå(Ûƒ‚‚F޹hÑ¢ššæ_öU'ŸR0ÍÕ,¤gHÕ¶1”w5(bµ ªªF[x•úîàEÌÌÌTÒq57oÞ?~¼ŸŸý¥}æà¤I“z?еµµ½téû­qãÆ±W¾pá³_^^ξ På&L˜ÀžO¿"~]]³O)W* T}Wi‚•ähJ¯^½*\þ_U÷N Ozª&ÛÙ§.=ŸÏ™Jïˆ#?¹cbŽÄ÷©j”–Ÿ ÊÓÜÜlmmÍ_Õ¨*U×|á…ÂÃÃúé'à™“Jï—gzHl¼üòËTéú¾¾¾ì«›> ®¿ªf 5Køs4Òí„O°ÿª:Y©{ápGjgï„©]î…=Z&“Ñ>ý¥}Ѝ¸o_U µ!‘vY¹$¦äã'ûe´sãÆ 6ÛR &X Ú€\Aº×¥ƒP5(bµ]AŠ;¤Tæa¨élº;ÀëJ¹|ù2Ÿ`î³Ï>£OÙMŒ"bê¶<ä äEÑc=Æ^™=™vè»ÜŽ@þâ|ÎWjJ¿«4ÁJr4¥EÀþ«êd¥7ÅáŽÔ6NîŒêW¹“|UxŠ´aÃõR¨ÕI%Î[î?˜’Ÿì—h°L°@µ„¬®‘Â’ ôÕ™ªA«ÿžðª¦œpX­‘^ß}÷ݨ¨¨>C™ÒÒRkkë£G>ÿüó¬ ¢¶§÷;)S¦H$¥n…}\A;ìÃT¦§u¿ž(÷{©ú®Ò+=(ÿ «®®ŽgSªêÞåÕP̪0[ÀòåËû,¦¦’Ê´©+x–©Ò;R•'}Þ)GâÕS5|.HçDGGߺu«»»›þ²ç(X‹Òës”‚Òk²rB,[YYõùCýµLUéá*‘#Gòü9U%¨š¥Õ‡v E ê¾”ž¬Ê½p¸#µSUÆö·Ü™½òç\¹r…} 
¬êöU¥OQ›,GÁ@BUž~²_fÀ3NåSûL°@µ5U$lômuN±¦¨ŽÆ*œP5š‡R5¹ÔÁƒÝÜܸC™ÖÖÖiÓ¦åäô¬ò™œœüÊ+¯0½ÿƒ‚‚kjjäÇo„……9;;WTTtttœ?~Ù²eì•]\\˜Žï´Ãöæ'@'÷N€¿¿?ÛûÞ¼yüc/UßUš`¥gÍšµyófºqòY‹-âÙ”ªº÷7²÷N'ÈkáÂ…?üðCŸÅTµ+Ž—e¥w¤*Oú¼SŽÄ«§jø\bšÄÄÄöövº‹%K–°ç(X‹Òë«*U×$UIML±ÔsÏ=×çõ×2U¥Gž9sæÄÆÆÞ¼y³³³“ âÃ?tuuåùsªJP5KžÆü2My!¥E ê¾”ž¬Ê½p¸#µSUÆö·Ü###å}/•;ó˜IÕí«J!Ÿ:¢vœJòu ÁyúÉ~™OUçö™Zêàq~²µ[›´^’„"Öàž=áTM?H·óÒ”aµµµYZZVVV²Õ¬÷¸šßÿþ÷ß}÷û•ÐÐÐ÷ߟv(ûôÓO­­­-,,è óéýû÷ÃÃÃmmmŸxâ ÒB"‘ˆ½23IüÇ?þ‘yiÇŽæææ½ë¹L&ûä“O˜™šh‡}Ë'öRõ]¥ Vz°¬¬ÌÁÁ™gÏž=<›RU÷N7+èútûòS \ºt‰ç*œÞAü' RzGªò¤Ï;U•xµU Ÿ ;vlÊ”)C‡;v,e©üLòÖ¢ôúªJAÕ5>LÇ)H®geeõùCýµLUé‘çäÉ“ï¼óå %cüøñüñ­[·xþœªÔFÍ’§V|ú”Ûzx!¥E ê¾”ž¬Ê½p¸#µSUÆö·ÜíííÙ~V,ÉÉÉÓ§Oç¸}U)äSGÔ#ß+p€ƒ%xúÉ~™OUçö™Zê2˜ û•ú3%ŠX#ÈêÌÔ^@Õ(çº(‡T/øÅ_ÈÏã¡­œ5½uiú„²ýË/¿äsf¶³ÏÍÌäÐôjYF¸­Æ1À L\ E>ۑƉÕï äT†!ICÂÆðraÇOZÜrYŠ|P5p/º„Šøœßnäè/]-÷¨ÙÒÈè_ <èê¦mÌ/CV@Õh¿¨AØaÄ>>C?8P5p/ÆJ}vQŠ»>¯ôfæ€;eÕÈ ãpøYŽäT†9á°Ú_Ô€Ð&­O¶vC>¨ 3ºe©¶žµâÓÈ 0/AValÞ ÅÆ]ߦ¶¯j곋øOï Œr%™³× T жÈõØ€|¤P<ÅŽ€>p>` ªFód9 (’@!™WcÒò½‘@¯¨ŽÆˆ cåVaE¢¥‹^MÎ ”nYG†ýÊÊ0!²Â@¹SV oU£•ff€‹åÂÖçö!€^Q¾)‚6äƒñ!«kL¶vCWg )Z%7(,¦YaptµÜK›º³>@ÕhžBApEp4JÈ)^Š]ªè&ˆÉ°_‰¶hi|Ø"gWmAOc¨ÍÓÙt7~ÄY]#JÈ4Ý z}ƒ”6Ö£0>ò–û#ˆÚ ß+ð À#mê hQ¨ÍSµ+Cµ jЫᯑQ¾)"söôsÚ ³énŠ;fÕ3î”Ucbn¨mAr¹>»Åcšäzl@w oHã³Î,õC> ׄt¢GÐÆ$[»µ74!+ôf8 §BÕh…Æü²T[O”É’å(€¦z `Q6£Z<—: Ôw'ž†è?…‚`¼Š‡ªÑù^—BbP6P5èU|Âa5òÁ`ÖùE×  tu§Ûyá%€>sM˜© j´E{CS‚¹SgÓ]”ÉBm@SI%òè­’)6îÈC‡bò0xpt³þ 9d…Ò[*²rEÈU£-jö'aF‡bG4@ß`ð# \ ÞAÈ KHE£ÿªÞJú‹¬€ªÑTó1Rªªè!ÂG^C&4çöQƒIÏ€Ž!“;á°ºf²B¯$M‚¹º»CÕh‘6i}üˆ9ݲŒ)“6uFñ=DdåJ> ù` ”úîLµõÄ„T`Ph*©L´tÁœ{zBóEI²µ›4> YU£E*Äè~0[ÐO2ìW6æ—! 
ŽnYÇ™¥~™³×`Ä&DÎìÃ|h4À„T µ:73 P*P5P5@¡ˆ ¡ÁÑÞÐD-K¾W :žAWש¶žècILBÕ´Ië-]Ð𨠟¯ ÅÜYÁPy>`²è̺œxg8X´Jnˆ¬\«÷EV@ÕhÊ0!¦¦P5@o!ICÂù`(4ä–R‰!Ú@¯(ùlï}#еMc~9 5‘P5ºÝÏúùý„ÌÝâ ¨°DV®uéùÈ  Wt6ݥغ÷lÂX2E«\IK´tC€ªÑ²ºÆøsÐý gWm‘_‰Vô„Æü² û•Èý§2LH#ÂD ·’;mê …¦­"89£ (ŸÏùíOZÜ|Q‚Ü€ªÑ ÓkR‹‡ €¡x]¨ü;b¼ÁzBgÓÝøs8üЇ†Hª­'æàúL¶³OÕ®8ù#X¦StµÜ;å¶žò¾ªF»ÔŠOËG«ù^ ½Ÿñ„Þd)ßA+w1c ÄYáˆÈÊ•]"–Œoô-‚ÉõØ@ñ"í 7€>Ó|Q’hé†ÚMwÌ-š…|uÚÔ…‚`Ä“P5Z‡*óÑQo±+RQõf­ÑNEp4FΙ$håç„¡`ñìª-´s§¬:q´ , d–¶7Μk™žÙyçÇÌGéùe©¶žä:Áƒ Ôw';CY¯ð‘×ÐØiúì"LwU£SDV Ž¿ú{j¨>“žþåá“QÒ3dˆ-—¥(ÓáVa…è™å[ÿÎü{]”“뱡«å^òØEÙÎ>È0ˆ4ä–’q²o’™î‘dœ‰£\`œz•KŠ;Þ뢳é.E;̾|“ªa߃@‘dù¦Ê[t_‡ªÑ)§ÜÖ }­ì³oÉþÎùí&û;6y Uì«?¤£`L J¨èÅ“©Ï.Êrœ^üW:òÓ‘“È0¸œûÛn2ů¿OÆIÁG¡ ˜ôŒðÑ×± Š>@î‚ôÌÙU[°Ðÿh[ŸE¡û:ñjLÚ ‡Õ´“ÿ^ 9,i0pšJ*Óí¼(¼d»¨Q;Äᨅ“xÒ⬹LõVìã3þï÷›P*&HÏ›™çÆpŒþæOÿ£ÎG†Î${Ày ¤Oûí‘a³ŽŽzëÔ"ß䱋È8“Çþ¦V|93¸N£Èg{²µ =RžYPàÄÈ›ÌÙkjö'¥Ú¾KáÐ5aòg ¢ñÂÖ‰–.òs¨¨Jê¸áoÄ ›%|ä5‘ÕÛ±9ˆ'/Ak²\åP€BÆpÔÒEøÈë=OÇgz#[€>ÐÞÐ?ü á‡t;/Æ8ãFÌÁËA¯h€áòóÅGÍç‘I97gáŸEV ŽP ôèëX Rmš/JN8¬>å¶“BÕ ¦°Ž:ƒ*6³Å?5ÃiLœ\ L/Df“LEž=¡.-/vˆkœÇþë]äÉ @2¦À;ˆ$ :ÍÃ¥½¡)Ã~%¹”ØÇ^éÈx•’u_#gÔˆ$/…Ä$Zº(Ì”  jñ¤ÅLeNxjÞ½‚6iýQ ç¸'f“I ûÐ7 þð㯒_ø 3MÐ1µâÓÉÖnE>Û1w30Îým÷‘‡ÝU˜íôâ¿"OúESIå ‡ÕYŽL´ôBÕä-èéqôëyg?؊¿<\ «P5mÒzjÑ볋¾]å6ëjLz¿¾E¿Ž9ûtŒ ½Ïíò¾£qO¾Ywü¬n~ö+í×vÞoÜ“sôÄ8ÛDY=CQq{›¼ßmŒ¡_ŽEÝ‹$Jœlí–·Ü]ÎÀ€TÍÍ̳Å붉'-b;€êÖj»¸Ô7cFµelñº¯Ä“ÜõªÐÕÞÞ)õÝNu¶¡OVª® .q€t6ÝeÖуžhÑþòpÚà û•'V3ë– ¦ª©ÏþG–ã=3™šÍ<åöAYà–êˆo¤‰‘7³„ƒµÑ¯_Þ»“R’³à}JÕÃÞ¨+°ª´fÝG–ã½*t=³·å×E?ÂNÛJõÎ5Á%jJÏ$Zºœ]µz MD Ø[šë±!ÅÆsJªšó{z&ü±ž%rw×½‚Šõm£TQÚÒíz¦ÎYð§ö†&ê9°WÏ ]oìíØÛàYéX©‘¹Dy=ƒYþÚD´€73 ²âI‹«÷Åèk0 Uó «ûÌR_2ÜB¯ATãË{wÆ™Í=çz5¯¸¹¹¹½½¥Û_ú_ ¨ÐÛÞfŠž[{ +õ…•“K¤x¥2L=Ð&¢d¸.ÊI·ó¢M%¦R€)‚ªšs~áT“/}b@ÕìvqRâ3Îñ–Îe'rkkkkö—s~» ®Ð×ÞDV°7Ý[i8¬Ôh\â²ê"Ÿí¤gò–ûCÏ´‰&Þ’€!“jë™a¿à €ÆTM­8—jrñ:?ƒ«fw*RŽqŽ¿¨øl¡D"ihh@ É“Zñi-ôÁµ7Š&ÀÞtf¥¹°R#p‰Ý²Š]N8¬N¶v+ßÑ&­‡m´‰FÙòœ|¹½¡éRHŒxÒâ,GÖ¤V5évËÅ“Þïü‡!V³i1䉒V}ZPPPUU…@“'év¿5ÜB‡½™Œ•.‡•´‰6•T2/gN¹­§¨}KÚD#v/ ¹¥WcÒ¸ó¹1¿,o¹‚¹ÓÙU[nVÀð€†UÍ5áq²Qib¤áV³ „CgÆçåå!ÐäÃ5a†¡ú ng–}; ö¦+=+5P—ØÕr¯zßÑ û•x9Ð&šH H’Fdåªjå\:^µ+.mêŠT[ÏK!1XOhKÕä{ýMdåÌaÁôBþÓÐP_3³aôWá+ ;Ü”?Ÿaøð'Æ{ÖÝÝñС/ïß/êó­(ù£C«þšœœŒ@“ù^ŸqºLöAA>ÿó?ž|ÒÌÂâ)*ˆ'öq—)»ïÞÿmÞüGúîO {ê©_9;¿žœ¼“ϧ| C 
›½¬•þMKVÊýE²ÏÉ“ÇúûÐÖ–ÏǦO±»ûªÜ é˜è­ÂŠBAp‚¹Ó™¥~uéùx9Œ£Mì³iã‡8œÃ@ÚDî†XÇîåFVaÜð7Î,÷çö èl´«j¨ÉI0Ÿ[ཞ[Õp|jg7åÀM/¿ü_ýR5}þV{ûÙk×Òbc·98L›?E!ÜÕ,qŠû“:t(55µ  @"‘477£¼•ò°Ðçqú{ï-\¶ÌåÂ…Êùúú“Bá¶9sþ—g¼8sæËï¿¿¨¼<ž ñöíœcǾquÅçS>†¡†ÍhÃÞÄSß½ißJçjÉJù|‘>**:äèhÿÑGž| éw¿{{ï^n7¨KÕ±Kl*©,ß‘nç•bã^•g€‘µ‰Üw8Äá4Û&ê,âRhËâÒI´Ô‘ïQÖ&­'WŸtªjZ%7È«#¾QOÕž=ÛŽ©Ì´¯AUÃn…o½åäÓÇ+QŸ¿ ‡ÎˆŠŠ …Ç/))©­­Åãs¥ð)ôáß ÷ª†3¥’òð˜«ê²ÜŸPÕ¨²mØ[é_?ƒ½®•òÿâO?¥=óÌ(>†tãÆ‰ñãŸÿùç,>nP&ª—H±`CniñºPñ¤Å´Ñý ÓFém¸/îpˆÃ9h¶MÔYÄ%ßÛñÝ‘s„9ˆÿké/§¨ÙŸ”9{M¢¥Kw|ЩªiÌ/£Ê|#-F=U³víoøá Ú ÿ+íkCÕô4Ïg¢¦M›Ä]ÇÎmÿŠn$꛽LHHÈÉÉ©ªªÂãs¥ð)trÁ§OGªáL©¤¨¼T]–ûSM©›Ñ†½U~ó5ìÍp­”ÿIÕŒó4OC ÛðÁ<Ý ¶MT{.±[Öq]”Cñ E-év^¶h*©„Åãö6ÜWŸá*ç Ù6QgÛÆlšÍ¦ýË·N{}–ë±!~Äœ|¯@rè} AÕÔg‘9Þ̪1®¦££`êÔIíígiÿÖ­_xá:2ðq5½ÏomÍ>ü î:v5þÝHÌÖÐÈÈHæ­hIIICCм7| ýèÑ––æï¾ûùbòq]]ÿàé7Í̆‘1¨º,÷§j«á¶mØ›41öf¸VÊç‹äÙJJ„óæM–ò4$ºÎ+¯ØæåEóqƒÚ6Q»ÄΦ»WcÒÎ,õK0wÊrT† 10oÃÝxõ©ri52®f -àüM8l&í0[Êä%ä"TÍ€©U!ŧŸ~Àþ»bÅ:¢w5TÇž|ÒŒ»ŽÑ-ô _ øŠšðï¿ÿ>111//¯¶¶E®ž§­¡!;*j‹Ï2òÅãÇ?ŸŸPªFSÉÛ ì VÚû dž :q¢µŸßûdü éôéÈW_µedRŸ¡ƒþ›èƒ®îÆü²ŠàèlgŸøsr=6H¢Äí M°OUÓßpH©sÐl›¨ãˆK8Ä•4q£ßÙ,:á°“5CU5îîŽ êŸŽhCÕ/P~§²Ž}BM8ÛãB"‘ ÈÕŽå·~øâÅ'2û#F<ÙÔtê?–¾C! ^õ@“·Ø¬TÁJ9¾8p#|ÿýEááíÓ ê­‰vË:(óË7E0J&ÝΫx]h­ø4‡Y“õ6ÜŸpH©sÐÃh|ÜKåÎ=îeèÌÿvÒö«7…CfôôC³pŽ3›](ÆÄÍÀÀTM}ýÉ_ÿzÄ;¹ÿžìïN.¡ãšU5]]ÿ˜?Æ—_þ‰O¸#***::úÈ‘#'Ož¬ªªB‘k$^¼uëG ‹§˜}{ûÿ9~|ü§ii»Ùn¸[·~Ì1ö‘ûSM©›½ÁJ¬”ã‹7BòãÇ?óf&‡Ô7e•L–£€”Ì ‡Õçüv×¥ç#4ð6}6^<Ã!¥ÎA³m¢®#®?~vð/[cÖü-vùú$µióév¿™;Ä!ÑÒ¥fL Œª õýÝïÞV8HG˜™Ú®j:: ¤Òô¸¸í³fÙñ™gQ¦f=øœ9ÿ+n««;AQY)Z±bÁÊ•¿a>¢ã“'MOß}ûvm´3q¢õ›Ø©÷_}êx”—ÇÓwé„ÔÔ]ì<•ÜŸ0 Te3°7X©‚•r|Q#ÒzÏžOÿ{·ÞnP¯L´MZ_+>Ý[É [<€·QºXªÆ‹g8¤Ô9h¶MÔŸˆ«½¡éNY5fs£j^~ù¿22ö*¤#Ì‹KŽAÞ<…›™ ;ÖjÑ¢9?üðEŸkB!ÊÔ¸ÏÌüöwæZX<5|øþío«ÙÑ´ÅLJ88L£Oi›1ã%¶1³µµå~hkk3lØÐ‘#õÖ[));ù|:ÀU8UÙ ì Vª`¥_äŽxºíîþÇôé/ꛉf%â \ögÔ1žä,ø“¡:ìͬô#X)L´‰p/ðU5U»b…¼v»8ÉpëXiàç=]¸ö–ÿûµ°·ÁµRƒpMp‰Mw«vÅeدL±q¯Ž–Õ5¢MD ŒJÕ477~w$vøì#OÎ. 
ú¢ë^Ö.JÕ…¯wÄ™Ïe'd+XLLŒüËЪª*º#97ò…^²y«~úàÚ[å7_Ç[ÀÞôÅJõÖ5Á%`4æ—]µ%ÁÜ)o¹]z>ÚD´€À8UM{{{mmmÁ±Ì„™+{Þ3ŸuÂCP±;ìú±ïof w»&Š* Ípõ&_ÓóÀ`Ú»±Û¾Q¨`Q¡ýãÇ—””нСÈP·yÀ4#IDAT¹é]èÇ}XþÍ×RñÁA/ôAÜ®§D_ØzÒm 5l°7=´RýqMp‰ÌË™´©+Rm=/…Ä´74¡MD ŒYÕ0O)$IAAAJèw‡½x³Ç õf‹å$|[»5,V¶‚±oB)ýtxlÀÿA¸>:ì ÀJa¢¨M}vQÞræå íÃÛÀ½SQ5$µªªªòòò’““Éd£BwEÇ||(à+ág!±;ÔØzª‡Z_ü÷öexì·bÿª]”B¦g'Õ1Ú§4SÊ)ýtxlÀÿA¸6 ]mí¡ù\Ýýì Vªt³[Ö3"Vì& €ÚN£¡éRHLª­gÚÔ•aBU/gôºM„{` ªF¡>“ ';>xð`俈R‹Ã–NÑ!ßDi6U”BJ'¥là!£¦ ] ÈHÈT¢ôØ›)Xiô¶Â‘s¢÷|e€ÀDøEîåÌÙU[óË ·M„{` ªF¾>?~ ·€¡X‚ÄL¶³Ï5aÉä P5ºæjLE È}Uô“¦’J‘•+‚$ô®–{’(ñ ‡ÕÉÖnçüv·Jn Oƒ¦jóË2ìW"÷T ÐOr=6T† ‘è· + Á æN§ÜÖ׊O?èêFžYÕ´74%Zº ÷7wʪӦ®@>ÝËiñ¤ÅxQ€žÐÙt·jW5)6îÁѲºFä @_T ‘`î4ÀÉãÑÓ*¹Amòè˜,G$JŒ|`ÐaФ¿´ 裪I·óÒà,òªp3³@ ¥§ׄÙÎ>‰–.E>Ûñø`lªæÌR?i|Êp9{MCn)òhY]#[xȀƹUXA2&ÁÜ)gÁZ6˜`œªæÂÖ¥¾;Q€jÑåh›âu¡´!Ðí MU»âÒí¼RlÜ©­o“Ö#OƬjjŧ³}P€ƒ³«¶`•w UZ.K-]п€ó «›Zö3KýÌò½볋'“P5F`ÂÀM©ïÎK!1È =ò–û_ØzùÀ@h*©dœÉœ½¦fRWË=ä À„T ‘l톾쀃òM´!€–hÌ/Y¹¢¯?êÑÞÐT&L·ó¢Öü|À>4èÓU5¹0aà@%>»j òh‰«ÑÅ€þò «ûº(‡Zðs'rÑ73 'SW5ÁѤ 8hÈ-Åj­@K\I˰_‰|€?· +˜žfYŽI”=ÍP5ÿö©¶ž( Š6i½ÈÊù4N·¬#ÙÚ ó†ÀY]㥘´©+RlÜË7E`1T-]0ç#à Îl6ƒe7à#þ¥ñY§ÜÖ'˜;xaN3T X _;à mê ,D 4 ³ì&ž7 ŠÆü²"ŸíTM²}®Æ¤aF TMߤÁSÀ¦”çìª-çö!P MZ_jë)ž´«g júíC-]P@¥¾;©•E>Mq«°BdåŠn°Pu¸“–íìCÍqwÆ› jÔ$ÕÖ³1¿ …”R½ï(µ²È )2g¯A¯W~y8As]z~¾W`üˆ9¹® 3ÐÓ U3 J}wbmo Š›™YŽäÐWcÒN8¬F>‡™ YdåJ"¿jW\{CòU£êÒóu¶>òÈ#(~ƒ ³énüˆ90 [J¶v£xÖL“VÉ [0ÃfÊ7E´\–‹ j4I·¬C÷ó;÷×÷y>|ºö 6X«Ó ÁL„BA0m°`‚z¾fRæì5ÔÔùlׯ°Ô TÍ?)ðÒàˆð¯¾újÈ!ôW<¦z?§Wn]ãήÚBM2ŒÆ0˜I(¼ƒµÀZL„nYÇuQN®Ç†s§¼åþ´ÿ «^õ¨íÒ[š6u…F.uÿþýñãLJ‡‡O˜0öá‚ÀWíŠSoÂÚc–«¹µ1¬Öb4P“J>3ÑÒ%ËQ@f?ÿP/P/€ªé7âI‹ØßýŸ×‹_}õUÚ™>}ú±cÇØã}ôÑÓO?ýÌ3Ï|õÕW¬ƒ ={ö×6lØ‹/¾˜››9yòd333‡‹/*8ÚÙ»wï¸qã˜ÊÊÊNÈÌÌ|å•WèS:'""‚ùˆ…=944ÔÚÚúÑG¥+++—,YBi{ê©§Þy熆¥ßjoo_»ví3¡úWéÕX&NœX^^ÎìGEE1;t„Ž«úQU7Ø;1½oSÁÿòL<šJ*Õ½0ã3õ À®ÏI`-°C§å²ô|À¾wr˜ÁÑé×zzTM¿)ßQ¼.tà×yûí·è™Q<)í³Ç¯_¿.•JçÎ+ï .\X]]ÝÚÚ4räÈE‹ÕÔÔ0ÿΙ3§· öððH$tÂæÍ›gÍš¥p³Ï>O.æÚµk|ðÒ§,ô¯»»{mm-óïK/½”••uïÞ½;wîøøøx{{+ý֯餡[øì³Ï”^…šj]h‡Î§ûº{·§ïÍîÝ»?þøcîå¾AŽÛäx°¤*ñ|xÐÕ?bŽa Æg jÐÙtWdåz§¬Ök1JÚš*Äö+“­ÝJ}wjv"êêP5ý¦Ur#ÑÒE½^¿ÿ~"[S3zôh™LFûô—ö¯\¹Â>e¹pá³_VV&ïRoܸÁì·µµÑ¿uuuì¿Ã‡ï킹Oxá…ÂÃÃúé'Dÿ^½zUé-477[[[+ýÖ„ äoy>Äqµ¤¤¤eË–ÑNpp0eóøÇÓÓ399™ûG¹oã69nYUây’9{ÍÍÌŒA 
¼ƒú|bkµìÒ™ æNù^ä!Ø€¢^ ^U£1²×E9¹‚¯¯ï#ÿɆ ˜ÌÌÌ×Ìxgy—Êí+Uí¨:^TTäáá1jԨɓ'§¦¦ªúÖƒØ çÍ›gaaÁ¤yÈ!J¿¥p ô¯Ò«±´´´Œ;–v¦M›&‹gΜIût¤µµ•ÿªºq¥·É‘“ªÏ55‚1«1ô‹Æü²>' €µÀZ ˆnYÇ5a3ý%a£½¥3Q/P/€ªQ÷±Ðþ¤3KýÔþ:ó$I"‘°G®\¹Â>g’°T^^®=Ì@>‘¼ž••ó¯B÷\…“)mÑÑÑ·nÝêîî¦¿ì§ ßâx°¤*Occc_{í5Ú§¿"‘hîܹÜ?ªê£ô6åá™xžÔŠOg;ûÀ` ýÿRm=¥ñY°X‹¡ó «ûº('ß+Ä 9CI”x sÀ‹¢^U£]ÈG“¿–Õ5ª÷õÈÈH777…ƒ®®®Ì¨¾7º¸¸\ˆ³³³ö\ðòåËÉ×tttozî¹ç˜ƒÔTTT¨úra‰‰‰ííí555K–,a?Uø–¿¿?ÛvÞ¼y}ºààààçŸ><<œöwîÜimm½mÛ6îUuƒ ‰Qz›òðL<ÿð”lƒÿØ0#6þ”oŠàó ÖkÑg1s3³€™Ð,söšê}Gù»Á‚zzTÍ€(^ªöœööö½ßÛ&''OŸ>ý—‡† ‹1cÆ(LØ¢Y|øðá)S¦˜™™½òÊ+YYÿ|H¼cÇsssU9vì}eèСcÇŽ%wÉ~ªð-™LöÉ'Ÿ0sžÐûö™Ã—””<þøã?ÿü3í×××Ó~ii)÷ªºA…Ä(½Myx&ž?L_ Œ'M%•<—÷…µÀZô†ÜRj EV®ö++Ä:^¨õõ¨š"«kÈë`Ĥ!aƒ||xÐÕ}Âaµ$JŒ¬†©ñóû’­ÝÒ¦®¸°õ@Ëe)ò RÕü2°×5Àˆioh"Å«½q±À˜¸“å(@>CÔKù¦ñ¤Å´‘ªÑììÌU38àu PE¶³Ïgɦ3M<ýEV=§MZ_nç•líV¼.´!·yÆ£j~Áë ‚Ê0awòp“å(¸ƒ|z‹¬®‘¼Yæì5$¿É§Õg!OÀ8UÎ^×p Ôñé %:»Yíýz›´~àKµÂŒÃTQ½ïè ‡ÕZ2X‹‘Y‹Ž!Fb†T7³nf]z¾¶ ÕX õ˜êF®j~ÑÕëU~Aãþ.XÕ„0ý…V ` 0¥0}Ïš/Jà:`-ú&f˜73$f®‹rŒu| êêP5J`^×h»g¼ÆÝAyyù„ ´á‚ÕH*÷ ›jðÕW_ 2„þj$«ÕK$Jœ³`-ŒÆÐfÞ³ª]qƒ½ÁZ ÅZt@Ëeé…­Ønf73 ŒìÍ êêP5|ÑÁ,F÷_ýµ@ 0J|ÿþýñãLJ‡‡SCûƒå‚»e"+Wm<Œ‡1œ1(@¤–/¬Åø¬EÛb&Ý΋<•‰ˆÔ Ô  jú€ç“×ÊÊÊ%K–<ýôÓO=õÔ;ï¼ÓÐÐÀÖðÐÐPkkëG}”þíêêÚ¸qãØ±cÍÍÍCBBØsöîÝ;nÜ8333‡²²2öø­[·,--oß¾Íþ3f !ôùçŸÛØØXXX¼ÿþû­­­ì9o¿ývBBíttt|ôÑG”ªgžyF~…2Ußå>á9¸¯#ïྡྷêfû¼ƒX,~õÕWigúôéÇŽëÓÑ«Êf§wòøs>`_‘ÏvŒAž[…Dök¥BX‹ÉZ‹6h*©,ßa³™¡^ ^U£š/Júœ¡õ¥—^ÊÊʺwïÞ;w|||¼½½Ùªîîî^[[Ëü»uëVGGÇêêjò¡ëÖ­cÏñððH$äw6oÞ—bZ—ÐNdd$í÷é‚UåÀÀ{ËêÉ*:›îÂ` ]-÷Rm=¥ñYýú¬Å4­Eãbæ|À>ñ¤ÅF353êêP5ÚâRH ÿ^%ÍÍÍÖÖÖl ¿zõ*ûѤI“ØG)òþ¢®®ŽÙokk>|¸¼w¸xñâØ±c»ºº˜çR666×®]£}[[ÛK—.1gÞ¼ysܸqÌþ?þøúë¯3û'N¼pá³O¿ËºUßå÷iÜ.XéÍò¹TMMÍèÑ£e2Y¨ÉhŸ\6· V•qÁù^}NÝ c0c Š|¶Ÿ]µe@P`-&c-Ô #fŒ{ÑLÔ Ô  j4 ÓM%VuBaaá¼yó,,,˜w¯C† akøƒØÓÌÌÌÂí¤v\]]…B!í>|ø·¿ý-sœ—üÛÞÇ{Œ9îïïÐûçh‡½ ªïò?û4>wÇóåñõõ}ä?Ù°a÷eUå€F\ð­ÂŠ÷Þ½Õa &h µâÓd ]-÷úûEX‹ ZËÀÅLñºÐdk7#3¨¨@ÕhóqÑE Gù‰'FGGߺu«»»›þªªá“'OVú`‰Û?~|ÆŒ´ãààP\\Ìœ2eŠD"é’×^{-''§÷c•òòrö‚ª¾Ûç LWfU§q|EÕÍöy)æI’ü9W®\aŸ3‘«mkkcŽ×ÕÕ)}°$ŸìŽBòú iÝ뢃‰ƒ¬®‘BLõºýÀZLÓuô—nYÇÍÌ‚"Ÿídiév^¶ÐÁÔáƒêêP5څ釦t&++«ÄÄÄöööššš%K–¨rÁAAAŽŽŽtŽB'`nL¼øâ‹ß|ó³³3{$,,Œþ­¨¨èèè8þü²eËè ]ö׿þugg'sÎÆ]\\®?„Nf/¨ô»ò¿¨êryt; 
(|EÕÍöy©ÈÈH777…ƒ®®®QQQ´3k֬͛7·¶¶’_^´h{YU9Àî($¯¿¤I›ºBÁ$` &e TúÙÎ>å›"Ô3!X‹iºž´74]I;³Ô/~ÄœÌÙk*‚£[.KM!,@½@½ªF»Pø’å(P:óÕ±cǦL™2tèбcdž‡‡«rÁä?ýôSkkk ‹ÐÐPþ.8""â±Ç;~ü8{äþýûôC¶¶¶O<ñÄ´iÓD"Œ_¸pá¿[Äöv@@¿5fÌ… [z÷—ÿœ°Eé ;vì077ïó4y¾¢êfû¼”½½}jjªÂÁäääéÓ§ÿò°ƒ¯ƒƒƒ™™Ù¸qãöìÙÃ^VUÈâ”OžPD[½ï(ŒÁdáœßnU;øk1Y×ÁI—K!1$cÌr=6H¢Ä$oL*,@½@½ªFëtµÜ;á°Zíç²ÚæÃ? ƒIéfŠ<=Œ9` : V|:ÙÚÍ"NX‹><5kÈ-%‘ÌLeVàT—žß-ë@Π^ j´…¬®1ÕÖS7k‡÷— &°}^Î(^J!ŒÁÔh•Ü A{«Ðº[ÀZ‹®–{Òø¬³«¶-¥Ûy•oŠ0‹2P/F®j˜h&ÙÚ­¿ Sc¥³é®ÈʱˆIÑ-ë T?Ÿný§MZ_½ïh΂µ æNL/Ö~-Þ ªFc4_”PkTŸ]„òÄÕ˜´«‘¦CwPÞräèM%•å›"2ìW&Zºä{Jã³Ô˜ T†iÈ-ÅzÀ’9{MÍþ$äƒ) ‰§M]xð¡[ÖQ—ž_(fV˜)õÝIm‡ÚÓK€ªÑ Òø,j¨nf é[Ž€‘ô²jdà å²´zßÑSnëÌLjRf€ª1THÒ°ÑÛYÑ€.¡Àå„Ãj<…5b˜7{/½ À/‡þ׊O ‚™yÌήÚrM˜ÑÙt9P5èd9 r¬5µ%@oÎ,õ£˜ù`¬1kºiWdçVaYµñ#æd;û\ ‰Á«<€ª1TtuŸóÛlí†a6|Sm=%Qbd…‘Au<×cƒNá Y]ãÕ˜´|¯@‘•+Uùâu¡X^€ñ¬A[+>héR&D¡š2wʪ)Ði*©DVE>Û³è^hâʶ>»èœßî û• æNg–úÕìOj•Ü@Î06UóËÃ¥l¨ÁË[î¾Ô¦Œ4>+ÅÆ6`4\Øz ÝÎ “ž™&ì¸ÿøsN8¬.ßIÌ¿ªùåá<ž¥¾;-].…Ä C‚ÉB6³`-òÁ ˆ6ÕÖSV׈¬00îTÍ?i¹,=³ÔZDj QÆ&ȃ®î,GAñºPd…AÃÌÞÞ|Q‚¬0…:[Ÿ]T¾)ãþ@Õ(Bmd†ýÊ«óËPÒ¦FW˽ÌÙk l ZÒˆ¬\!iŒ[És®ŽÎY°–é`vÎo7Æý€ªQÎÕ˜´÷¼åþX…Í…Mº„!Ò[šhéB‘ÆGSIee˜*ÃÔÐë¢t0UÓ7Ý²Ž [ˆ¬\K}w¢í4)du©¶žXäÄà$ ÕVHc¢ù¢„”L®Ç†s'ª’…‚` •U£~€[¼.”ÚTjPÑ]ÛÔ„ ¦ü†¤ºW2ÕûŽæ-÷§M±qg” &~U£Úš*‚£©•ÍY°¶V|a i€¦h•ÜD‰Ï®Ú’líFJ†vè_¬*ªF[<èê¾&Ì8á°Z‹"cH=§©¤²jW3N&ÙÚvè_:ˆœU£_tË:jö'eÎ^üºAŒe”d-òÙžjë‰Wsú#i(DFp¬Ÿ.‘Ü`Epô)·õìÜe'ªÆ`h“Ö_Øz ÝÎKéÆâu¡È{æ… 3kerwPÍþ$ôUă®î†ÜÒóû(¼ÀªÛˆ"+W*>t)8≋“­ÝHÕý“\¬ŽFžh„¦’Jò*ìXÿœk™YËHá s@ÕÍ#«k¼&Ì(SðA±23< GÿKJJMb;ÛÙ'~Äò'$iÈ™Ü)«FæP5P8@%uéù)6îTL(¥YŽñ„wU£°Å>>#söšŠàhØ¿*˜2ä" ¼ƒÈc’!=Cª†´ ^ÈP5úB›´žY,‚âfVá౫¾Ñ-ë8ç·;ÑÒ…J¹Ñ/("ž¯(f†8õ½K;~ÀœÎJi•Ü`VÃ$ÉÇŒa^È`òeªÆ0BùåðØ'²ˆüôR›'VÓÙÉŸ|¯À¤çöˆ™G_{òÍì·|¤ Y˜úL¦_Ù…­N¹­'ñœlí–ë±áRH ¦, jŒ!Ê)ß‘³`-)ñ¤ÅV† oV "\˜5m ¼ƒtÜ!­MZßTRYŸ]d@›4!ûȰ™GG9ŸYæ»è"Œ‡¥[Öј_F5š™y™éWvÎo÷uQ4ò€ª1Nš/J$Qâ"Ÿíév^pX]¼.TŸ…EôKsRJj“d§VÇ6ÜÌ<[¼n›xÒ"¥ƒR kKµ]\êbÊó.Ü)«fjq†ýJf)LÚ§#xõ@Õ˜"]-÷rK/…ÄäzlY¹ÒvÊmýù€}×E9-—¥ÈÑ&­?»jK²µ[Õ®8¿@«ÏþG–ã$âÌfžrû ,pKuÄ7ÒÄÈ›YBÃÚ(Í—÷î¤ôç,xŸî…î(ÝnÙª)Xˆ¬®‘Y@&ÛÙ‡yãš·Ü¿2Lؘ_†é¨ [SäT¾)‚DEØ»(Ý΋6î·̬Ü'œYêKA¡ÀטôŒÂvyïÎ8³Y¢ç\¯æ777···‡Œa&ù(õ݉á1T Ð$ݲŽ[…$iØÞü©¶žg–ú•oŠÆg¡C¿f¡X–2™Ä¤$J¬ê-EÀùeª®pÎ/œ$Í¥¯CŒUϰÛíâ¤Ägœã-ËNäÖÖÖжaŸP%OZÌʪMèÿ @ÕÝAJæš0ãœßî\ 
¤p˜QËyËý+‚£)(Çbˆ§>»¨gÝÉI‹•j›VÉÄÑó•NóP+Î%IS¼ÎÏè% ³Ý©H=:Æ9~ü¢â³…‰¤¡¡A…‚Œ‰1'söšâu¡T‰ c¨ /tË:šJ*)D+õÝyÊm}Š;tަ´ å§Èʵ2L¨04<íÅå)ãÜ{÷L·[.ž´ð~ç?LDÕÐv#-†„\ÒªO ªªªôAØ0³q0s.§M]Áʘ«1ix· @Õƒ¢º[…’(qos>`éúCŸyBŠ1×cC¢¥ e&û~æjô1 å3ìWʿɹ&_¤ô>‡Ï¯¨J¡ª-qŠû“:t(55µ  @"‘477³‚°À;h€ï™ÙÏI““qÊÏT&ÏBKª€ÿ€BsŠ)|¼CÁzæì5AÆ>>ƒâøœk‹|¶3oušJ*M|‚éªðØÄÑóIÕô›G_¯Øz€bwÚ¯Žø¦_£³³ð­·‚‚|ôMÕ¨J¡ªíŒÏß„CgDEE …ÂãÇ—””ÔÖÖ¶··W†ÇЬ\ÕèÖrYJŠ…t ^¢¥ ³n ³R-”6U@¿é–uÔ©Ÿ®ÚǼÕa†.P´zÂau¾W Åš’(qCn©¬®Ñ¸³‚´Üå=ñ≋éöIóOaó˜Ã…ÏÿN;7Òbú%0zôÀ™¨iÓ&é­ªQH¡ªíÜö¯èö£¾Ù{ðàÁ„„„œœœòì¼ôé+{ñXÛ”ä4Iå«1id`YŽ‚s'’Ó¹˜å/o­O@Õ}4 )ŠDIÕ¶!…C:‡YN'gÁÚï IéS:Ç®ß*¬(Ç=ù†¼˜‘ßâG:Òß›YÂþªšÖÖ¼áßÐ츚>UM¿~E>…ª¶«ñèöc¶†FFF:tHôÑæØ‡ÂïœßnUú ƒt2ÙIº™ Iå¼åþ—Bbnf`* ª€Á„¢Õæ‹’ºôüê}G)¢¥8•éÆFnŠ{–£àìª-¤‚jö'QðÚrYj4»ë³‹ÔV5O>i¦ÏïjäS¨j£ï™0 ૨íá‡'/þêMᣯ§þï{lù2ãû+‚£Ï,õcYʰ_IB‘„ )FïШ`PtK†BI”¸|Sif™ËØÇgˆ¬\)ÀÍõØPä³¢Þ«1itZ«ä†¾ …%k4¥jNŸŽ|ùåÿÒgU#ŸBnU#tÍf Ÿ˜ÕÓ+oÄ›…;¢HÙžr[ŸhéÂŒïÇ4e€ªÆI›´¾1¿LŸU&,õÝɼÞI±q§ÈX•àáZ¢½¡‰ô˜ªÎQ꩚®®ÌŸ?ãË/ÿ¤·ªF!…*{ ú®GÕÈõÐ#U“æüÇ [\åPÃÎUL]ðTíŠ;ç·;ß+0ËQ@‚'öñ æNiSWd;û0]Úè:ã½÷týÄÑóI\ PÕttH¥éqqÛgͲÓÏ™U¥PéöSÜßã†÷¼Ÿ›ùoU3t¦hòâ[…0c@Õ œö†¦;eÕ73 $Qâ [ùl?³Ô}É“h颠y® 3Hx´\–pÇyÿ½tý û• ËJòT5 ffÃÆŽµZ´hÎ?|ÁÎ~­§©ölÜ)TµI#{Ä̼M]"üÕÂÇg0³{½`Íý¨ú¬®±©¤².=ŸÆÃhæ=Oüˆ9qf³i‡$P®Ç†ï :¡zßÑë¢æUOŸÝÛNÎ^#|ôõ#Ãf•ü9Œ}/ÄGÕñÆŒ«‰ Ü}äïÑ©A»l=ñúê£N‰£ÞªÙŸ³T ƒt ©Ò0Òø,Ò3¤jHÛÂa^õæadÏ ‡Õ§ÜÖ3o{.…Ä@º™Y@bévñ¥DKác¤mŽš;Õ&å@Õ(ªš#GNž>#~ä\¨¨@Õ`’æè¨·Ž>íÜ3ñô³oÇ=ùæÉ9‚šýI×E9P5P5€ªÀ$MϨ›áoÄ 3ó?˜iohb>¸¨@Õ`’&á×ó2g®‘3,P5P5€ª@¯éj¹wM˜Ñ[̰ԦäRX)4„â{ÜèÆ{T_0T  j0T*¿2“˜ò»ê¯P5€ªÀP©Ÿ¦°¾òëíõÙBÜèÆñ®P56Wƒq5€ªªªª¨¨@Õ j jUT T T  j€ªªT ªªP5¨¨@ÕUUUTžo×DQ=ªæËp¨@Õ`¨4ä–RX_wâiªšò°ÐUóí¨@Õ`¨´JnPX%r·iªšü?û ‡8ÄÆÆBÕªC¥[Ög6«Ô×ß4UMÊÿ¾+œð¨@Õ`Øä,ø(Õv‘ JÙϧ„C„Ëþ U¨ ›ª]±ÂG^»]œdjª¦4ðóžA5;¿…ªT †MgÓÝó¹ù^Ÿ˜”¤¹wãÇ##ߌuðŠ}T  j0lÊ7}û¸CcA¢é¨šìw>ì™'àየ@Õ`ðt6ÝM±Y”bãÚqû´ õ={o#i„BaddäÁƒrrr$ LP5· +bw81kEçÝ|ã–4ç>ê‘4Ž«cÿÅ¡C‡HÕ|ÿý÷‰‰‰yyyµµµ°@Õ`TD& ‡8mYëO'RÏtÜ>ûÛOHÒßX{è0û¢†ô ©Ò6©©©%%% 0@Õ`477~w$vøì#OÎ. 
ú¢ë^Ñ躗 _ïˆ3ŸËNåÌJš˜˜ùîgUUU”0@Õ`´··×ÖÖËL˜¹²§ÖðY'<»Ã®ûþf–зk¢¨ò°Ð Wo’j=¯h¦½»íIõÚ?~üxII ååŒP5*ÍÍ͉¤   %ô»ÃÎÞ‡G¼Ù# |‹å$|[»5,VVÒ°}Ïè®éÞñ¢P56ííí UUUyyyÉÉÉîG…ù4øPÀWÂÏBbwhvëQš¾æ¿·/Ãc¿=ûŸž¡ûbÆÒª¡}ºSº_ºkºw¼¨T F%lRSSIŠ’f¨pσþÜLU¬1¿?ù«¨jî}øñß?5@ÕkÌ—Ï¿,üqT5_¿úÊ@U¬=ŸÕþb¨pÏŸ0 @ÕkÒ?æÿúÉwöyó& j€5fqq1‹>|¸²²ò§ßûAUUÕÑ£G¯^½úì™÷ÕªXÝR©TCCCyyykkëøøøÓ§Oѹ¹¹{÷î555•••577§ÓiƒT °=xð tK,{ùòå<þ<ÔNEEÅìì¬qªX]ÆÇÇC®LOO‡íl6›N§S©ÔüüüÌÌL2™œ\æòåË¥¥¥CCCaAX› j€<[\\,++› ‰’Éd–––BÏzl€ªT ³³³eeeñx<ÄIH”èaÍ]]]'Ožƒwd4ñFB`³öaà‘Ñ¥sÏÖ K-(Bf½²­¶ö.Åù§à3¬4W¬@S…%/«ÓÚõÐýÀ$(Ì–p;¤?î{ßÉqÇÚ>·„¸½5Múäºï ût"ÆõÓ/q½ˆwòÖ€IDATxÚìÝ\UõýøqóOb_×0ÿQ1ÿ;YÓ²¦¥-R2)&¤fZ,5µ/k4ÉŸ.––fºÌÐiÚì+ Œ.*‚€ˆJ¸ RT”Ñ«Áð‚ˆ \¸@¿·ÞuÇ€{¹ Üûz>΃ǹÿÎ=çsîÅûòÜ{éôhQˆmˆmˆm@l@l@lb¸Ët:]llìüùóÝ6~üxooïÇ38±  É¢££:!ùÀ(Ä6³ètºeË–ujL×®]7nÜÈpÄ6€Æ-^¼¸“Ù–,YræÌ•J¥Ñh´Z-£ÛêR(š¢K—.ëÖ­KLL”äV«ÕÅÅÅ$7@løòòòAƒuj¢áÇGFF*•JCo3’± àß;5ËÒ¥KÃÃà ½ÍÁm€ØðoîîîÍ‹íI“&íØ±ÃÐÛ|~ ¶ü›½½}óbûç?ÿy@@@pppdddbb¢J¥âÍä± àö+ìæêÛ·oPPP`` B¡8pà€þà6ã ÛšÛ<ð@pp°áàvrr²Z­f<bÀvvvÍ‹íáÇ+ ‰ííÛ·‡……)•J•JÅxÄ6€œ›Û&L Ú¹sç×_™™ÉxÄ6€¶lÙÒ¼Ø~ï½÷ˆm€ØЀ¢¢¢>}ú4µ´~øáb ¶4lãÆMíwß}7ô6b ¶4ÌÃÃÃüÒž:uª¾´ù‚4€Ø`TII‰‹‹‹9¥ý»ßýNÿr)mþô@l0åÌ™3üãò“ŸËì¾}û.\¸0ôG’ÜAAARÝ›k4† ¶ü‡J¥JLLüꫯÞxãGyä§?ý©¾±{÷î=vìØ·Þz+88Øðîq}ikË åæÅÅÅ #@løFsæÌ¥R¾cÇ iÉiE§_KZ‡Ö¢÷¸¾´åjre¹‰þ°¶V«ebÀH*«ÕjCoKQßz‹x§_KTÖp›ÌÈ ¥-7¤´b@Š‹‹ ½©~½}ûö Zä¤d¶\"W0”6o ˆm Ójµ†ÞNLLøàƒÒ®2ïãã#ýç?ÿù‡ÛGíììäR™ôÑGãããËÊÊ®_¿îåå5þü×aÕªUNNNYYYr_ÞÞÞÍ[1¹¾›››Z­6½É ƶ‰õlpZpÝb°ÌØÎÍÍÕÏß¼y³Gúy‡sçÎéçóòòXQ .üì³Ïdæé§ŸNLLÔ7ç_ÿúW9¿þ•‹‹‹ííí\‡aÆ7{Åäú—.]2½É’»3okp5\Ïס× ¶ËŒíOJ@Ö~¿wçÎë/*55uôèÑ#GŽ”“¿øÅ/´Zíˆ#ä|ýRRR&L˜Ð«W/ýBºtéÒàÚØØ”——›¹žÆVLækjjŒm²^¿~ý^{í5Ã7·–iæzN¶àºÄ6`E±-ͬR©]šÄö_|1oÞ<™Ÿ3gޝ¯ïc=f¸tèСAAA………UUUòÓ°ð:]>|xƒG¶›´b&¾íÌØE†ó­gSc»ëÛ€Ŷ¿¿¿³³szzzEEÅéÓ§_yå•—öÙgŸõîÝ{ÇŽ2سgOýËõìììÂÃõZí… ¦M›fXxß¾}eɆ«­^½ÚÉÉI®Sç3ÛMZ±;‰mcëÙÔØnƺÄ6`E±]]]½aÇîÝ»5*""¢Á¥]½zµ[·nÙÙÙ2¯R©d¾öŸ×Þ·o߈#äÌÈÒ _·n­­­ádeeå{ï½gooß«W/??¿æ­ØÄ¶±õljl7cÝb°„Ø@l ¶Û± €ØˆmÄ6KˆmKýÚ0¾ Ä6@l[c”¶ê]Û ¶b»íêÑô_k†µk×véÒE~Þ•îô£þýû¿þúëÄ6ˆm€Øîر]]]=xðà 6 2DæïJlëgrrr^~ùåW_}•ر Xflëtº÷ßÀ€¶¶¶¾¾¾†üóóó³··¿çž{ô™úÑG 4¨W¯^sçÎ---Õ_-##cÚ´i<ðÀý÷ßÿÒK/i4šj¿54¤±›ÇÅÅ=þøã666ܺuký4­³™Ù¼y³\Ynâè蘖–fzùuDGG?ñÄ23vìØ}ûöί? 
Þµ~F«Õ.\¸°ÿm2#'M¯›±€ÏÏÏïÝ»wóO3Û@;ŠíU«V999eee]»vÍÛÛÛPwnnnjµZrݺu'N¼xñ¢\ÇÃÃcÑ¢Eúó}ôÑøøø²²²ëׯ{yyÍŸ?¿~Uš¸ùƒ>¸{÷néÕË—/Ï›7ÏtêOº»»«T*Éé?üpܸq¦—_Ç /¼°mÛ6™ yÃùÆF Á5‘,wvvξíÙgŸýàƒL¯[“bÛÄx6ºp€ØÚQl6¬þ‘R©»K—.N:88œ;wN?Ÿ——7pàÀúK+..¶··o°TÝüg?ûÙ† ¾ÿþ{£…P¯xsssõó7oÞìÑ£‡ù«wáÂ…¾}û–——˼ü”y‰sýEÆF Á“C† 9{ö¬~^n5tèPÓëÖàBÔjõÌÛ¼¯dzхÄ6ÐŽbÛÆÆFŸ u²°¦¦ÆpRê®ö;«;wî¬??%%e„ ½zõҟߥK—ëÑØÍ?îîîÞ»wïáÇÇÄĘÛ ž4¶üÚ/^Üé¿-Y²D‘±hðdí+ËŒœ4gU gêõë×ïµ×^»zõj+›9ž|ÆÄ6ÐÞc[B·Ñãº#FŒP©Tõ—0tèР  Âªª*ùi¸•þ“ÞÞ\Oª>::ÚÎήþEu–c¬9M/ÿ‡e×¾ÎÅ‹ ºcwmâȶ9±Ýpýx¾±ñ$¶Al,¶W¯^íäätáŸXö÷÷wvvNOO¯¨¨8}úô+¯¼¢?_ 9<<\«ÕÊͧM›f¸•t¬\¹Ñ›Ïœ9SÂUΔØ~衇ê¯aåkNcË7puu­s¦‹‹K`` ±0v×K—.5|f{„ Ë–-kÁØ66žÄ6ˆm ƒÅveeå{ï½gooß«W/??¿s®ººzÆ Ý»w5jTDD„þü}ûö1¢[·n +nµnÝ:[[ÛÚßFÞàÍ¿úê+¹¹Íã?_ ë,ÇXs[¾Á˜1cê¿M=22rìØ±ÆFÀØ]———¿ýöÛúo#—Ã[Ê[$¶'± bè`± €Ø@l ¶b± ÛˆmÄ6b ¶Û± €Ø@l ¶b± ÛˆmÄ6@l ¶Û± €ØˆmÄ6b± Ûˆm€Ø@l ¶Û± €ØˆmÄ6b ¶€Ø@lF©êŠ‰Ø®s)b@ãήÚVüªÁØÖ$œÌÚ²‡!ˆmMSr>;ÂÎÅÐۆؾúÏr~eÑ † ¶4Yìã¿7ô¶>¶óÛe3þ„·ƒۚ㻵;¤±#í]¥·eFJ;´ë“¡]KÎg38±  ™vÚ<5ÈMz[b{ç½ãö>ôB‚û† ¶4_òüÕ’ÙqOΓŸ2í}øEMÂI† ¶4Ÿ®¤,´³£¾´£»3›1ˆmwê°³—>¶c^¾¬8È€Ä6€;U’qIqÏoöôrŽ|øÅ]ÛZÀþG^ÙyïSç|ƒ €Ø:€›ÙùE©ù‡·çé»5ÛwÙŒWG%´ç•”a,Ï-àÛ°RyqÇNx=lŠþ³ÐL-;Å8L=¹Ø7/.™G@lÀ*äþ6ÞéÖŸÑÚeóÔ7®óÒ–¯ÌÚú·ìð€¼xÓN2Œç7¯—!UNž+Ã+ƒ;zVN„’G@lÀ’^ö¹`¤ýó6éÊ’kjN0µÒ$Ã+ƒ;zš ¸ròŸ´š"~± KS£«::}±„_Šçb2»-§ó›×ï²ñË¥ÄÅÅÅZ­–G#@lÀBœòÙ ¥}î3_ê·í§k'ö†÷wÞÝÇ9íP‚Z­&¹b–@ ¥}ÂÛ‡î½[Óõô˜=ýœwžrâXŠJ¥Òh4ô6@l c‹=3z؋Օß½wqº²?XÑé×{ç¼—œœœ™™IoÄ6:°ËŠÒxÙáäî]Ÿþ9ÕSÑíÉ}ŠÝ‰‰‰ô6@l KòøK„³‰ìô£îÝï:ÔþÝwç””­im†KËÊþõá‡xä‘!rÛûïÿgçßDF®7\ZTôÍ;ï¼6`€]·n]åç¢Eׯ'Ô^²£ã¨úë#gîÂÌë[C™;ö—UUßÖÙÞ:7ìÑ£ûÀº¹9…„|\]}Üœmoô¶ÆÞL®èôë9ïFFFÒÛ± €ŽªFWfûlòüE¦c[?£Õ;}zçóÏ?ùæ›Së_Z*/ÿ×SO=6wî”3gvËm¯]SîÛ÷7—qúKKK{ìçriFF„\*?ßxÃíñÇnÞL2,ÙÁaÐáÃ[k/3>þï¿øÅàÚMkÎuLlÚk¯½°yóRc±mØðË—÷‡†~" /›/ÛeÎ’MßÖØ>Âíç„„„ÄÄÄ$''«Tªââb¨± €Ž¤TuEÑé×Y[ÿfNlë§œœýû÷6'¶W¯örwÖØ¥+Vx¾øâÓuΔsV®|˰äü㯆8×O“'?òqí6ç:&6íÊ•Cƒ?|õj¼‰Ø6L••)Ï=ç(Ûe~l»­±é¨×_Ýž T(HMMU«Õ܈mt$IiÛWö7)¶{ö¼ÏœØ5jØÑ£Æ.9rØ7ßl«s¦Rùr+Ã’«ª¾}ä‘!'O†êÏIMUüò—CåÌÚ=lÎuLoš¿ÿ’yóÜ͉í[1|4°öšÛunkl:õéZÙ#Û¼}ûö°°0¥R™™™ÉÁm€Ø@G’ø¸¤]^¼¢Io#Ÿ:u‚9Ÿ[¶±¹·°ðŸÆ+—®s¦œÓ£G÷Ú÷ûÕWkfÍš¬?gæÌçŠOê÷p£×1ñÉjù©Ó}ûøã‰‰AæÄviibí54½d·56]Ú½MöHð*¿€€ý›ÉSSS5 U€Ø€¥Åvmýû÷ÎË‹3çÈv‹ÄvUÕ·=öó ¢d’ýwŒÕiZs®cúÿŽ xâ 
©nsbû¾ûlšwd»ömM²/n}GÚ²µÛ;vìOLLT«Õüðmó6rýLhè'o½5ãxyçεÆ>›sÓIÿü½×_w­CY½ììØ]»>7ntSÿô—±ÛÛ± bû¿Î‰ÝôÄ~I˜LRÎË—ÿ¯ƒÃ {ïíö“ŸüÏsÏ9FE­7\*ùííýª½}ÿ®]»üìgýßyçµë×ÌyƒzScÛ̯1«ªúvìØ_Ö¿¡Í½ØM™òÌ?þñWÑóF—lú¶Ä6@lÀªc›©-'b ¶@l3Û± ÛÄ6@lÄ6±MlÄ6ˆm&b ¶@lÛÄ6@lÄ6± Û@;¥ŽJ´;çç+ÇÔ&Ù·bÛg ± Ûè¨2>»u•©½M¡sÞ%¶b•:úˆ¤]ÆgŸæV0µ‡IöG¶bŒVS”øx†¿"ÅsÍÁ1³#ú>Ïg¶ùÌ6@lMPU^Q˜’® Œ>áí§œ¼0ÂÎ%ÌvbÜøǽ>Íܸ« )Mÿ™mb›ØˆmÀ(]IYnlÒ™[N÷‰q˜±Ëf|ìh$åç|ƒÕÑGnfç×¹>ßFNlÄ6ЀòÜ‚œå o?éêÝ=Ÿ‰wò<å³){wüõ´¬]•éÛÛÄ6@lÿVªº¢ ŒNñ\ã0#Ìvâ7®‹Ò×$¥U•W4i9Ä6± Û°jE©Y[ö$Î\iïaç"3™wÉ™w²Lb›ØˆmXªòм¸ääù«%°cfÈŒ*0ºTu¥¥–OlÛ± k¡+)»¼?ÉcùîžÏÄ_pÎ7¸ä|vkܱMlÄ6,ÜÍìü _î=ìì¥ÿ¶Ìk5E­zÄ6± Û°L%ç³Ó×Å_ ä±<{w¼®¤¬mîšØnoÓåˆÀ[±ýñb ¶Ð7³óϬØã0#ÒÞ5ÅsM^\r£©«ÅiNJÚå ¡rÛÉtÆßïVl±Øˆm4µ:úÈ7®‹ÂûL:îõiARÚ]\™RÕI»‹›¨Üv2%½³TÑÅ144”؈m˜E(;ÒÞõãªÀè¦þMìÖ ë°ËfÜÉÅK©Üv2EýêeÅßÛ± €ÆåÆ&î£?”}=-«]­›rò[1S¨Üö0•_ýFÑÅQñÊ;Ä6@lÀ¨òÜ‚ô5AQƒÜŽ™µeO›}íY“dn Utúõµ{iÝ»>\þÑ­l¯ÿ‚؈m4@(;ÌvbŠçšÂ”ôö¼ª•E7ÂlŸMòx›Ö½»SÙ•îüÉoC=Bo#¶bÿ‘¡ŒíÑže×wfÅßC»:$‡S¼wq:üÒÿÞúj´Û‡µ‰m€ØÀ¿åÆ&IfË$½Ý±Ö¼²èFÔ )Qƒ\*®!zïæÈ¿D_Ú …" `ûöíaaaJ¥R¥Rñüˆm«“øx¼“gGÌlƒÂ”ôÐ®Ž‡ÆÍª¼‘Dú¶ñtê£Õ·JÛéÐ…„„HlïØ±#<<<11Q­Vó,ˆm+"*™=lª*0ºFWÕ¡·%=`¯¢‹ã_¿Rúý×pÛL׎$¼ú¶”¶âé9¡!_kKfKlKrÇÄĤ¦¦j4žk± `ŠR3Ü—D r³€ÌÖ+..NùûÎÐãwÞ7>mõ_ueÉÄpëM2¼g?[·ËöYÃßú2”vpppí÷gffÊ®áÛ®ä|öÑé>v.þŠªò ‹Ù.­V«V«“÷Å…=5ûÖ»š{Œ;äÉ?gߎ¼x…¥N¹‡BÚòî.Gžñ÷;è2?´Çø[´G½úÉßê”vàm2àÀÔÔTÙ)²kxÞÄ6€%gö±9+ÃûL²°Ì6(..V©TÉÉÉQ~ÿÊyþW={+™Zz í=Qñ‚gè*ÿÐZ ¥mx¹ìÙÖˆm‹U£«:ç,™}fÅÖÊ¢–º™Z­V£Ñdff&&&FFFJòúm Z¶&ø½5!ËÖ*>ð ]¾®ÝN·"¶7tzC1dJèûmµioýb[è“Ì–¡ÖN[b[æeðeÈŽÝÁam€Ø°LÅß©9¾ïäYªºbñ[»·cbb¤·oßð£ÀvLb»97û¿m!¿œìþÇ»¸æ†á•¡–—a§´bÀ’hgnÜe=[mèíäää„……é»J µcÛÍ»áö ›LØñ—ïÖšËÀÊðÊ ËPˀ˰SÚ± `±¬ê€vƒ½­R©RSS•JeLLLxx¸¤àÎvLb»ù7^º6´÷ÄÿtWÖ\V†WY†Z\†Òˆm d´ë÷vqq±Z­ÎÌÌ”LLL”üº“ؾ“›GÎúóž±weÍe`exee¨eÀeØ)m€Ø°4Ö|@ÛXrk4‰@•J•ÙŽIlßÑí¿;õ+o¯kû5—•á•A&³bÀq@»C“ؾÃ%ÜÌΰs)HJc0b-C@û°³´­6¶EN„2j›ÿu7€Ø@ÛQFGعÈO†ÂÊc[œðöKp_ÂxÄ6îÈ™[£‡M-þNÅPÛ?Üþ4ÁÁ1³3ü )@l ™Y•ä±üã幌±mPªºaçR”šÁ¨Ä6šFWRïäytºÌ0Äv—£‡Må±Ûh‚RÕ•‡'¯¯ÑU1ÄvƒR<×$Î\ÊÀÄ6ÌR˜’iïÊß÷"¶M«*¯ˆ푵ec ÛhDN„2ÂÎE}„¡ ¶UüJ-×Ó²^€Ø€Q™wEÚ»¦¤3Ķ™TÑûGÎâÃÛ± €ÔèªN.^ã0£Tu…Ñ ¶›äØœ•21± €ÿ¢+);:Ý'ÞÉ“ã“Ävó?ûGÎRF3ȱ €«ÑU%¸/‘Øæ‹Ç‰íf»ž–ÞgRÉùlÆ ¶pËq¯O;{QÚÄöÊÚ²'v´GUyC 
ÛÖ.s㮇¼{œØn‰3—¦x®a¨bÀª©£DÚ»òhÄvKÑ•”E›zYqшm+U”šaçR”ÆPÛ-û¸ ï3‰ÿÁˆmkTž[iïš¡d(ˆí—á¯88f6ßÛÖEÿ‡šÎù3Äv+Ip_rÂÛ1ˆmkQ£«RN^È·XÛ­ª²èFÔ 7Þ:ÛÖB2[b›·øÛ­­0%=ÂÎåfv>#Ûîœoðþ‘³øC_ÄvÛH_7~ÿ³Û–,'BiïZž[ÀPÛmF9yá)ŸM >@lX¦’óÙv.E© ±Ý–´š¢H{×ÜØ$Æ ¶,ròB¾~œØ¾+÷«I8açÂ[*bÀÒ\VÜ?rVS?:Û©“e¾`k›íj‡£w·b[œY±õ°³W‹x»Ý>D-õ¹ƒŽòháñÐt%e‘ö®Ii­ý°Ñë·“!¶Ûžd¶Ä¶$7±Ýnת]­³Uýo± ÐQðöKž¿ºÎ™k×®íÒ¥‹ü´°` ¶Ûgl‹òÜ‚H{×¼¸äæ f}Íä«W¯öë×O«ÕÎqwwÿÏz–—÷íÛW®s'{¶÷¾~cï¹çžûï¿ôèÑï¾û®‰ukOÏú» Ä6€*J͈°sÑjŠjŸY]]=xðà 6 2Dæ‰mb»mHiKo×y4Þɨ6{ÝÜÜvîÜ©Ÿ/,,ìܹ³F£ÑŸ ‘Kïp϶llëgJKKOœ8ñ§?ý顇R©TÖÓÄ6± ÐÅ_pá˽uÎŒŽŽ~â‰'dfìØ±ûöí3œ_QQñÖ[o=ðÀýû÷_»v­á% Ì|þùçÒç÷Þ{ï/ùË„„„€€€áÃ‡ÛØØ8::~÷Ýwu^2ÊÌæÍ›¨¿BZZZ+ÄÅÅ=þøãr©\gëÖ­†Kýüüìííï¹ç9™‘‘1mÚ4Y™ûï¿ÿ¥—^Ò·Pý#fZ­váÂ…ýo“ÃáÊ:Kkð¥mƒwa¢èŒm—N§{ÿý÷ `kkëëëkúÊÕÕÕ}ôÑ Aƒzõê5wî\)(cba±-NùlRN^Ø‚±Ý¤A6wuuÕÏGEEÉröîý÷ÓdÒ¤Ir©ùÀÙûæ<\ äî<<øàîÝ»åuöåË—çÍ›g¸ÔÍÍM­VëO>úè£ñññeeeׯ_÷òòš?~ƒ¯•¥(d³o“uþàƒ\Zƒ/¯Í¼‹F·kÕªUNNN2>×®]óöö6}åuëÖMœ8ñâÅ‹reI¦E‹™Ë‹í]UÜøék‚Z*¶›4È•••2àùùù2ïãã#ýç?ÿYæåÁogg'—šÿðh‘½oÎÃÕ@!²’¦hl­L?}N5¸QfÆvƒ·]¶l™‹‹‹ú6Ù Ævóvq£›YÿŽô¿ý„̬X±¢I»¬Áû2v[c¿i[ð—± к’<–×ÿs_É}ûö-//ÿáǧÊk;ýEC‡={ö¬~>--­öËÐ+W®èçoÞ¼)'sss '{ôèQÿe«é+üìg?Û°aÃ÷ß_çUï¥K—Üââb{{û_+2¤ö:Ë&4º´{ÀÄ]4º]Æ «”ÏØ•Î;§ŸÏËË8p ‰1±¼Ø¾5Ùùv.š„“-ÛMäÚ.\øÙgŸÉÌÓO?˜˜¨¯¸¿þõ¯r~“-²÷›ôp­¬¬ìÖ­›é[+ÓOÀFŸS nT…4ø™mc·•û2¼5Fî´ÁØnö.6½™õïȰÕgΜ1lµ™»¬Áû2v[c¿i[ð—± Њ´š¢0Û‰•E7ꜿxñâ:¯†—,Y¢¿ÈÆÆFáú¯ÿ·ÑW«f^áøñãîîî½{÷>|xLLŒáÒššíRRR&L˜Ð«W/ýJvéÒ¥Á…×Yg9ÙàÒ\3ï¢Ñíª½Ž’ÔBíÁïܹ³‰1±Èع±IÍûð¶™Ecƒ\[jjêèÑ£+**FŽ)'ñ‹_hµÚ#FÈùÍ~6{ÅÌy¸\¾|ùÁ4½@Ókeì ØèsªÑÿ´2qdÛØsÇð6õçÜÉ.6½™õï¨Á­6s—5x_æìÚ[Ý‚¿ˆm€VtÎ7øØœ•uÎÔÊ®ýíJ/^4è®}¼åÌ™3­ÛzòR5::Úð†Ø:—ÊÊVUUÉOÃ¥u>Ôjâ(\£Áfì.ä¥ðÍ›7ÿ…¹¹n—¼6q±ÎIÉ9ßnUgL,5¶ÅÉÅëÜ—´Rl›d‰í/¾øBÿ¾Ü9sæøúú>öØcM}¶ÈÞ7çájðþûïÿþ÷¿7½@sÖÊØÐÌçT‹Ä¶9G¶ïd×ßLÏnÃVËŒa«›´ËêÜ—±ÛûMÛ‚¿ˆm€V5È­0%½Î™†ï…2pqq Ô¿ˆŸ4i’áS‹­Û3gΔךòÚñ¡‡jðVòš2<<\«Õ^¸paÚ´i†Kûö훞þŸíZºt©áó¥&LX¶l™ùõbì.Æ÷ᇖ––^¼xqÊ”)n×êÕ«œœd! 
~>¶ÎIYaÙÙüÓ§O¿òÊ+&ÆÄ‚c»FWupÌì EkͱA®ã³Ï>ëÝ»÷Ž;d^ž={öÔ¿±¼IÀÙûæ<\%Oœ8±páÂÚßFnl¦×ÊôÐÌçT‹Ä¶Ü—á3ÛÏ?ÿ¼ù¿LÌÙÅ n¦‰g·á·ŸÌ>©næ.kð¾ŒÝÖØoÚüå@l´uô)™úç3¦þ»###ÇŽûÃß Ü«W¯~ýúÕù6ò–í¯¾újĈ666?þx|||ƒ·Ú·oŸ\§[·n ذaCíï²µµ5œ,//ûí·õßœ,3†7gšS/Æî"--ÍÑÑQÿÝ¿Ÿþy£ÛUYYùÞ{ïÙÛÛËÐùùù™¾ruuµÜ—ƒƒC÷îÝGabL,8¶E©êJxŸIõÿKèÎcÛØ ×qõêUÙû’”2/ù*óµÿ„µ™ÀÙû¦®ú¿³Ý³gÏÇ{ìÏþ³þ{ÝL/ÐôZ™~šùœ2ÛÆ>³Ýàmeáo¾ù¦þwÎÊ•+ ŸEoô†æìâ7Óij[ÿmä²2øÃ on7s—5x_Ænkì7m þr ¶ZËÑé>ªÀhÆí9¶EN„2j[ýo€:uêÔàÁƒïÖ½[Ø_'¶ZK˜íÄòÜÆí<¶Å o¿£Ó}Ø;VËÛÛ;77÷Ò¥K'N|çwˆmb ý*LIoðÏkƒØn‡kU£«Š푵e;È:ùûûÛÛÛ÷ë×oÁ‚†¯.#¶‰m€ö(}MÐ o?Æ"¶EÉùìð>“ŠR3ØG± Ð~)'/̉P¶õk»N–ùêÎ’¶«Ýƶ¸¬8=lª®¤Œç/O"ÛíQ®jwÏgÚþ§îâëéV½kb»Í¤x®Iœ¹”§0O"Ûí‘&áä!Ç7ÚþÕù¿ž^»vm—.]äç]yínøKEýû÷ýõ× ˆí6VU^±ä,¾EŸ'ˆm€ö(}MÐÉÅë;\lWWWžD ¶ZQUyEh×'ÛèÅœÉÏlNÊëæÚïYíܹsýE-^¼¸Ó[²d‰þ"}?˜s_µ¯,3rÒœU5œ©×¯_¿×^{íêÕ«u®œ’’2a„^½zé¯Ö¥KóNl7UQjFxŸI%ç³yR›‰'ˆm€VTü*ÆaFÛÜ×=÷ÜcÎk÷#F¨T*ËÑ…«}‹/ŽÑ >¼þA9cwmâ œ9`úÿdiAAA………UUUò³þ‡]‰í–•µeOìhªò ž×âIb u©£|㺨mîK^ʧ§§7ÛþþþÎÎÎrÍŠŠŠÓ§O¿òÊ+u–àêêZçL—ÀÀ@™Y½zµ““Ó… jÜÔØ]/]ºÔðqÓ &,[¶¬;ÁÎÎ.<<\«ÕÊÊL›6Øn‰3—¦x®áyÝ(žD ¶ZWæÆ]ǽ>m›ûZ·n­­m£/—«««7lØàààн{÷Q£FEDDÔYΘ1cbbbêœ9vìX™©¬¬|ï½÷ìíí{õêåççgú®ËËËß~ûmý)ËŒáݰ-Ò ûöí1bD·nÝ  [Dl·]IYô°©Ù»ãyj›Æ“Ä6@ë:³b«LŒ,#¶EaJzxŸI¥ª+ì>€Ø ¶Al·˜ ÅÁ1³ktUìA€Ø¸;N/ÛrvÕ6Æ–Û"Á}É o?ö @lÜÇæ¬TF3°°Ø®,º5È-'BÉNˆmbÄv‹)HJ‹°s¹™Ï~ˆmbÄv‹I_7~Þˆm€¶–ä±üRð~ÆÛB9yá)ŸMìJ€ØhSG§ûðG‰aÁ±­ÕEÚ»æÆ&±7b íð6rXvl MÂÉ;—òÜv(@l´‘ã^ŸfnÜÅ8À‚c[œ]µí°³Þˆm€6rfÅV™XvlKfKlóPˆm€6rvÕ¶Ó˶0°ìØå¹‘ö®yqÉìV€ØhuªÀècsV2°øØRÚÒÛZM{ ¶ˆmÛ-æô²-ÊÉ Ù³± кòâ’;{1°’Ø®ÑUÅ_¾&ˆ Û­èzZVŒÃ ÆVÛâfv~„‹&á$û ¶Z‹®¤l—ÍxÆÖÛ"76‰oÄ6@ë ï3©<·€q€õĶ8å³é×Eìb€Øh-‡ß(HJc`U±]£«:8fv†¿‚½ Û­"qæÒËŠƒŒ¬*¶E©êJxŸI…)éìh€Øhy§|6ñåÌ°ÂØ9ʨAn•E7Ø× ¶в¶ìIž¿šq€ƶ8áí—྄} b-¬ )íà˜ÙµÏáûÒ`=±]£«Š푵e»Ä6î´.jŸÔÿõ/ÙRE©Œ¬$¶EÉùìð>“x؃ØÀÑjŠÎ¬ØZûsªûGκž–%3E'3÷ôšÈÁªb[\VŒ6UWRÆN± €æË‹Kްs¹ðå^ýÉ£Ó}.ï—ÒØûЋ‡ç1>°¶Ø)žkg.e§ƒØÀ9³òKI©C¿™[˜’~Î7ø„·_â+KåœÓ˶08°ÂØ®*¯ˆíaø(€Ø@3úÍ;ï}jÏÎÊßÙ7bFØO'ìîùŒ:ú#+Œmq=-+ÂÎEÿ‘ €Ø@3i5E{l'HPE }IqÏodfg÷ñüÍaXml U`ôþ‘³øð6ˆmÜMÂÉ6㥩ôSDÆÖÛâØœ•2±÷AlàŽ¤-ÿ»¡·•.Þ ¬<¶u%eûGÎRFó± €;÷ÔüЮOJYe®e4`å±ýÃío‡÷™Tr>›Çˆm4ß­o÷šÚű(5ƒÑ±-²¶ì‰íQU^ÁÃÄ6€µ»™/µœøx3¦Ì ;wÚ<ݼÛ6{’µ-Ï-`ÇÛíSâÌ¥)žkx€Ø°:5º*uô?S.Ù“¯03¶µÚc©©Š ÆzzN7ç3ÛòS§ûöñǃ̉íÒÒÄ=º7xMcˑ뗕ý«©± ¼Ê/ 
@ÿfòÔÔTFÃC‚ؾ»Ês "í]óâ’ ÛVÛz÷ÞÛmèP{Ÿ¹RÅfÙ–éÈ‘€'žpZ6'¶ï»ÏÆX–7¸›{ ßÖVgmMl‘l²lxȲµÛ;vìOLLT«Õ<$ˆí»NJ[z[ª›¡± `ù±}‡Í;eÆwmÉéÇû¹‰càõ—óè£Ã“’¶7iÅ ±­øÀWbÛðNr•JÅC‚ØnάØzØÙ‹oƒØ ¶¹(?ÿëÁƒÎË‹3Û:Ý·Ï?ÿäÇÿÉDl×_Κ5 ÝÜœšÛ¡Ë×íܹó믿ÎÌÌä!Al·’ÙñNžék‚ ÛÄv#}þù{¯¿îZ?¡+*’³³cwíútÜ¸ÑÆþô—‰åhµÇœœÆ¼ðÂøøø¿_¿žPY™").K#¶‰íífv~„‹&á$CbÀJcÛô¤¦ªªoÇŽýeíKõllî0ÀnÊ”gþñ¿VWoô{Ëë,GŸëëÖý¿_ýê÷ÝgÓ­[WYÚœ9S¾ý6„Ø&¶;´ÜؤH{W­¦ˆ¡± `™±my±Mlw§|6)'/d@lÛÄ6ˆíS£«Š¿àœo0Cb€Ø&¶Al·˜RÕ•ð>“ SÒ ÛÄ6± b»ÅäD(£¹UÝ`(@lÛÄ6ˆísÂÛ/Á} ãb€Ø&¶Al·˜]ÕÁ1³3ü ˆmb›Ø±ÝbôÞ.JÍ`(@l´wê¨Éžs~¾ŸV5É&ߊmŸ5Ä6±Ý\VŒ6UWRÆP€Øh×2>»u€×j§Ð9ïÛÄvÇ’â¹&qæRÆÄ6@»¦Ž>"Ù“ñÙ§ù‡V5É&sd›ØîˆªÊ+bG{dmÙÃP€Øh¿øÌ6±Mlw8%ç³ÃûLºž–ÅP€Ø¸ûjtUò]êZ}vÕ¶äù«•“F v'¶‰mb»Ã‘gñþ‘³øð6ˆm€¶SYt£(5#'B™á¯8áíwtºÏÁ1³#ì\B»>5È-ÞÉóØœ•§—mÉÚ²'76éâ¶(b›Ø&¶;"y"ËÄ8€ØhݨNp_;Ú#ÌvâîžÏì9ë×Eǽ>=眽;¾ )­<· Á…ð6rb›Øî t%eòLWF3 ¶š©Tu%/.ù—{OùlJœ¹ôà˜Ùá}&ITK]KcKiKoKT¦¤k5EMZ2±MlÛ×õ´¬;—âïT ˆmSªÊ+äÕsN„òœopŠçåä…Ñæ†v}2ÒÞ5ÞÉ3yþ곫¶]V,HJkjTÛÄ6±m‘²¶ì‰í!¿: Û·èJÊŠR3²wǧ¯ ’®>ìì%E½Ëfüþ‘³Ü—œ\¼>sã®ÜؤâïT5ºªÖ[ b›Ø&¶;ºÄ™KåwãbX©eýñjéêäù«ãÆ/ˆ°s©ÝÕY[öäÅ%ßÌÎoûu#¶‰mb»£Ó•”E›zYq¡± ,Yyn¬ôó o¿o\Å8Ìíú¤ü¼ë]MlÛĶ¥*JÍï3©Tu…¡± ,AUy…þ­àgWmKòX~pÌì0Û‰ò’7nü‚äù«ÏùçD([û}àÄv󦜨 [±ýñb›Ø¶ þ ùÔžÛ€Øh˜®¤¬0%ýRðþS>›Ü—èYï9ëèt9G]”VYt£cm”¬³dÏ•ýÁÖÛY[ÿ&®øüKb›Ø¶ú¿MÀ8€Øíšd³„¨áÝàQƒÜvÙŒí‘8siúš œåõ´, 8ˆt3;_²GÊÓÚb;mùÊ[G¶CC‰mbÛ’~kÉo*ùíÄP€ØíEUyEaJº*0ZÒZ9ya„ËîžÏ3ûØœ•ç|ƒÕÑGJÎg[궇÷y.Éãmk‹í#Ó›Û åI?gïŽ?³bk‚û’èaS£¹}ãºèäâõ—‹R3ªÊ+¢ÚŽy=j‹®,ÙJbûÀ“3v“BC¾"¶‰m‹¤ÕEÚ»æÆ&1 ¶À©,º‘øx†¿"ÅsÍÁ1³ÃûLŠí‘<µœ£I8Ùá¾!¼íéÿØ)Ÿ¬ç°vè[„ÞFlÛI~õEعXÕ'b@l€;U£«ºž–uYqðäâõ߸.Š´w•ºVN^('/ï·Œ/ o{ǽ>µ†7“—\<´ó'Ï(†üNX[¡Plß¾=,,L©TªT* ĶÅ8»jÛag/~‚ئHBKHK3{—Íxý'®õßΡ›–ú/ŒØ§„vu´àÞ.»òϽC]ÿóÛÐ[õ‡µCBB$¶wìØž˜˜¨V«y$Û–ô¤–Ø>³b+CbüGÉùììÝñ'¼ýâÆ/³;Ú#Éc¹þmẒ2Ƨ5ä]ÊŽüÍlýûÉ-ïóÛ×NE…~Qa3.ôC?}i+ Él‰mI˜ÔÔTFÃÀض$幑ö®yqÉ ˆm¬×Íìüœå)ŸM‡½ ºÎܸ«0%ï3kÅÅřߋšúŽ„PÄÏ&«vl¶Œ¿VqíÈ‘?þEÑÅQaûlè*Ci×~yff¦ŒbÛÂHiKoó ÛX­¦H}äÌŠ­ß¸.аs¡®ïþÑjÕjujjê¾O¿P ùäÐ.['匷¾û|ý•ýÁyñŠŽ5] 8éë»oÂìÐ{Ÿºõ‡¾&Ì ý2°viÞ&ó­–m—àa@l[žÓ˶('/d@l`±*‹näÅ%§¯ Jp_iïºä¬csVR×íJqq±J¥JNNމ‰ yoMðog+~òÌ­Rí°Sh¿{+ÔwSè ¥mx¹l¯l5‡µ‰mKU£«Š¿@~÷2 ¶°º’2MÂÉ ÅÑé>ÑæJ`KfË >In>wÝ>iµZF“™™™˜˜)-zëà¯ßÆà埆,[«øÀ7tùº–nõpK/óßÓÇB¿ØZ‹d¶l‘þsÚ²Y2/Û([*Û+[ÍambÛ‚ÝÌΰs‘_È 
ˆm:¤]UaJzæÆ]IË÷œµ»ç3‡½N.^Ÿ¡”—zŒO‡ë혘 ÔíÛ·ü(°¥It¶ÃVÈÉvÉÖQÚĶõÈMŠ´wÕjŠ Ût º’2y wfÅÖx'O©ëØÑǽ>½ðåÞëiY NGïíäää„……éK£µ4‰® ¶"ë/[!Û"[$Û%[GiÛVåäâõ îKÛ´_å¹ú¿Ë%iÞg’rò³«¶ñæpËëm•J•ššªT*cbbÂÃÃ¥Qw¶4‰®mEÖ_¶B¶E¶H¶K¶ŽÒ&¶­J®êà˜Ùþ †Ä6íHñwª _î=6geô°©13d&kË_[vo«ÕêÌÌLIÓÄÄDiÔ¯[šD××mEÖ_¶B¶E¶H¶K¶ŽÒ&¶­M©êJxŸI…)é ˆmîš]UARZ†¿BÿåáqãœòÙ”¡ä#V˜ÜFêT¥Re¶4‰®Ì¶"ë/[!ÛBfÛÖL~G r«,ºÁP€Ø íÔþ¶¼“Ì–Ø–ä–ðfp@týnŽ{}š8s)ãb€ÖUûØ‘ö®ò ,kËžâïTŒ ˆ.°ß-RUy…ü—_õ ˆmZXí`G r“U`tÉùlFDØïÖ@~á‡÷™T”šÁP€ØàN¦¤ë?€-/°bf¤x®¹¼¿Tu…‘Ñö»º¬8=l*EÄ6Íq3;_8s©>°OxûÉ««òÜFDØïHñ\slÎJÆÄ6fÑÉ™tµÔµþ-â—‚÷Ø ºÀ~Gý/öœ¥ Œf(@l`TaJzúš x'Ï;¾ä DØï0Çõ´,ùWC~2 ¶øûģ¹)'/”ØæÛn@týŽ&‘GöœÅ‡·Al¬á]âòÚ(nü‚3+¶æ>ÎßÁÑö;š-ÉcyŠçÆÄ6ÀÞ%®ÿ"quôŽB€èû-BþA‰6õ²â Cb` ï´w=ììuÎ7˜aƒèû­¡(5#¼Ï¤’óÙµÏämS ¶–£öw‰G›zÜëS9YU^ÁÈ€èû­*s㮃cf×켸d†Ä6 c+JÍпK<¼Ï$åä…òЧÎá€èû­-Á}É o?ÃÉäù«Û€I“pR^ÖDÚ»F›zrñú¼¸dbƒèûwKeѨAn9JýÉ=<Ç;ÉAl: Éi‰êäù«#ì\âÆ/H_ÄAl]`¿£(LIï3©TuE¿§.D2& ¶íš®¤,'B™ä±\;Á}‰*0Z«)bX`åB»>É»9ˆm´7ç|ƒãÆ/¨ÑUÉžúúé7Û€öHŠúRð~©ë¨AnÉóW«£€“9¾‘â¹&Ã_ñýÎC{|áVlß÷[¾“Ä6àî+9Ÿ-¯Q⌠Û ¶;Š‚£§÷ýüeÙ;a÷?óÈ+»{:ɼLÿz}ƒbpw¥fœ]µíà˜Ù‡½.|¹—cÄ6ˆí*Ãÿ«]÷ýVŸÙú)ìþ ¼“Ä6 MiNž^¶%Æa Û ¶-†¤uÊÿ~ÚÅQÛ‘~Ç;ÉAlÚâ%ˆ¼æ8îõ©D Û ¶-•Vs]þ™SÜó›]÷ý6yþjÄ6 UT•WäD(õ›ÆˆmÛíÓÍìü¢ÔŒüÃÇ[jºø‘уÝ÷<àœ—Ò‚‹mËI¤Îw­ƒØ´ òïô±9+Ãl'ÒØ± b»Ê‹;vÂû“èaSjК©þã0õäb_ÞOlî2 €3+¶J ÄŽöÈðWðŸâ± b»½É?üm¼Ó¼Û û©o\ç¥-_™µõoÙáyñ &Ã$r~ózåä¹2P2\±£gåD(yüÛ€6¥+)»ðåÞx'Ïð>“Nxû¥f0&± b»:½ìó[ß^fÿüÅ€Mº²äššLN2P2\±£§ÉÐ)'ÿ‰w«Û€¶—œä±|wÏgÜ—äD(ù3'± b»}’¡ŽN_,Ø⹘ÌnÞt~óú]6ã"r¹”x¢¸¸X«Õò¸"¶-¬ä|öée["í]Ž™¹qÿÉ Û ¶Û¹S>d Ï}æK3ßÉtíÄÞðþλû8§JP«Õ$7± h•E7²¶ì‰¿ ÂÎåäâõ×Ó²€Ø±Ýþ©£dOxûPËw>]OÙÓÏy÷à)'Ž¥¨T*FCoÛ€fªÑUåÆ&%Î\f;ñètuôÞ.Û ¶;ØÑ3£‡½X]ù-©Ü"Ó•ýÁò€Ü;ç½äääÌÌLz›Ø4YñwªS>›"í]9¾‘µeoˆmÛÎeŽìð"¹§NõTt{rŸbwbb"½MlÌUU^qYqP[2[b[’›1ˆmÛT’Ç_"ìœMtcyù¿V¯özä‘!÷ÝgÓ«×ýnnN‡mÑ_Ô©S§ú×7œÙéGÝ»ß;|ø€¥Kçݼ™Tç¢ÚêßjèPûwßSRrÔÌ•it™=zt8ðA¹UHÈÇÕÕÇk¯s³okìÍäò˜ ™óndd$½Mlw3;_ê:ÂÎE9y!ß.Û ¶;:ù‡,ÌöÙäù‹Ltãïÿâ+¯L:{6LB7?ÿk…â“gžù•™±m(äãÇCœœÆ¼õÖ 7¬s‘V{ìôéÏ?ÿä›oNmöÊÔ_æåËûCC?qt%K–…ÜùmMá#Üþ1|JHHHLLLrr²J¥*..æ!GlêÊ‹K>:ÝGÿ‡²9” Û ¶-ƒ ³˜8s©&á$ùÔ!b»½­^Û¬±MlƒØn¶üÃÇeèòâ&¢qÏžu}úؾüòsþþK$2uºo›ÛZí±ÔTÅ„ c==§›óé:o#Ÿ:u‚™+Óè2kO¥¥‰=zt¿óÛ›.íÞ&c¼Ê/ 
@ÿfòÔÔTFÃ£ŽØ+U£«ÊÞïäaçrfÅÖòÜ‚ÖΤúšOW¯^íׯ_í·¨¹»»æËËËûöí+×¹“fkñ®[»vm—.]ä'µÙìííè›ßâëOlÛhÙØ–I£9¸ÒËë•Çw<øá¤¤íMú‚´{ïí6t¨½Ï\iTsŽ$×Ö¿ï¼¼¸f¯Œé`¾ï>›;¿­±IFõÖw¤-[+±½cÇŽðððÄÄDµZÍ£ŽØ«#]}vÕ6il)méí6þò³–Ê'77·;wêç ;wîløOô¹ô›§e»¨ººzðàÁ6l2dˆÌÛÄ6± b»}ÆvíéÿŸ½wkâÊûÿë­Å}¬ õF[ЍðH·jÑ•gi±eÅRÅK½´üëýY¶¥?XØe½_`—*P¤ÞZº\*Õ ¢Ò°ˆÂ EP Ñ È% úÿêlg³! ᢄäó~Í+¯“ÉÌ™s¾sæÌ¼3·ïþúÆÓ˜ô˜1¿‹/)þúða&©uß Vé§¶¶^½z|æLË={þ Maz»¸Ë—£Þ|ó¿û?¯fÙæì &Ùf¯$ …hum0 ê² ³WoK4vÊó j,*Ó}:zôèäÉ“ŒŒìì슊ŠX;Ý·oŸ………‰‰Éúõë[ZZ”òIJJrssû—rœ=Kùœ9s†ùêââB¿RB ,_¾ü¥—^;vìÒ¥KW:µNtttìØ±ÃÜÜÜØØ888¸o£éCCCÍÌ̆ Ö½Ö<oΜ9”°µµMIIQ¬þçŸ>qâD*ä§Ÿ~*“Éz¯”èCy”~R—ƒÊ°hX\CCÃøñã>|È.ˆÆPhŒº¹ÒÓÓgÏžM¦8GDDt/§ÒšêskѾÖêO>ù„Â>iÒ¤ýû÷«»n¿Çøw¯Z÷ê@¶d[Çe»¡á&&c™ôܹ¿:þˆâ¯çÎîóÐ**+;;nÜ/›š²z,L¯×ÑñÃï~÷Ößþöÿú?¯fÙŽÛsêÔ©ï¿ÿ¾´´­² ú\"+;v:ÍÆ#Õze顸vñ£ÁÜst“www¡PH–²gÏ{{{f|HHˆ““SEE©š‡‡Ç–-[”òiooùå—kkk)íïïO‚ý§?ý‰Ò•••¦¦¦ô+¥gÍšÅçóÛÚÚ½¼¼6mÚ¤² eee´,Ÿ¾Œ¦_²d‰º«æ-ZI‰¨¨(J+VßÙÙ¹ò ”ؽ{wã•}(ÒOêrP ‹£OOOÏ   vAûÛߘÕÍEk0!!A*•Þ»woãÆO¯µh_ë]»v1‘‰D ,èQ¶{U5œÙm—íùóÍá|^]}Q&˸kÖ,\»ö}æ'oeež–vøáÃL(1mšYdäî”mV¬p>zt[…Ñ&OšK$J‹?`ooÓÛW©›² Ùðo$Õõ×ý'wÉr÷«NËщ=G7i©®®fÒ­­­£GfÒÖÖÖ·oßfÒ555“'Oîž•··÷_|A‰wÞy';;›Q¯¿þõ¯4¾ûÄMMMfff*Ë`iiÉž#ísÁhú»w窱ryyù„ $ÉO?ßONnÆÎuóæM&]\\}:g&ÈËËstt411a21b„Ê…1&ÜŸ‚=9XéRYe___¥Ã???v.vÑ” ’ô8¾ÿåQúI]*âaqLÂÕÕ•ÃáPâäÉ“~ø¡æ¹òóóÝÝÝÇgee•ššúT[KjM‰e»WUƒlȶŽË6†¾ mÈ6ÐñD–»×Ôõf@äà^1ÞÙ&gÖæÙ*$Û_}õs™îºu낃ƒß|óMö×iÓ¦Ñ^¿¡¡A.—Ó'›¹Òm̤C*Ïl÷ª`ê<Š9•­8KEE{¢[ñ 6%TžÙVßÏòtÿI]*ÃÒãâΟ?ÿÖ[oQÂÎÎîÚµkÚ¬Mr`gjjÚý'¥5ÕŸÖ¢e­Ïl³s‘„·¶¶2éêêj-­T5•÷óC¶d² ÙmÐiº:ä•ÜÌ‹vx–ËÊŽ–Kd:ºçÐN¶ÃœKJJd2Ù7V­Z¥2·/¾øbܸqǧ4íãÇŒÃ\XÎ@’“””$•JËËË—/_ÎfN®K9³“:88Ð4J÷l÷ª`êä6**Š}‹««+•–™ËÅÅ…¹7›;wîdsS7¾Ÿåéþ“ºT†E›Å½ñÆ_~ù%MÖã"V¯^MfK#ÉH_yå•îEUZSýi-ZÖzÇŽläiv.{{û={ö´´´TTT,^¼¸ÇE«¬šRu Û² Ù†lÈ6è4äÕ¥‡âɱɴɷu}Ï¡lwvv†‡‡[[[¿ð 3gÎär¹*s{ðàÁ¨Q£D"¥…B!¥_¯’’2}útinnN¹)>ÔÊØØ˜ýÚÞÞ¾uëV333“ÐÐоLÜÎ;·ûÒÉÉɶ¶¶?)Ÿß½¨Jkª?­EËZS¨===©Ê|ŧ‘ÙÙÙ1O?räH‹VY5¥ê@¶d² ÙmÐQ¤uââÝ\S×,w¿º¬BdèíGÕx—¡½O{È­ ² ÙmÈ6dÀÓ|GtÍ'4ÑØ)wS`Ó-!ÙmÙmÈ6d@ß®¬ðOïrcû1Iu=ÙmÙmÈ6d@ß©â]æ;xÒáµ ŒÓÑ܆€`à@¶!Û ûгYºÛ¡Ád†v ¨>–mÿ È6d†]ra4/ÍÆƒ†»±çè+b¢»ÀgrγKÑ׳â8ÛÙíþ øâñÙW Ooˆ[÷gÈ6d† ìcÆùž5é¹d[Ç¥ô©.² ÙíþPÅ»L¡|q 6ƒƒa`Š*ÎlC¶`(i¶ ŒÃ%Ïý̤I“>þøãúúzÈ6d@¶ûîÙÆ=Ûm€fC³!Ûƒ)ÛS¦L Ÿ:u*¥E¶™Deeå|ðá‡B¶!Û² 
Ù†lC¶@³‡¤i³0c¤R©··÷¤'P‚¾jpB@°|ùò—^ziìØ±K—.­««ÓìÀ”8zôèäÉ“ŒŒìììŠŠŠ˜ñ;vì077766Ö<±†Â«›…Ìyß¾}&&&ëׯoiiQ 7gÎJØÚÚ¦¤¤°ã»Oå¢5°Çê(Å­¶¶vܸqÚ¼ÇÌ!Û² Ù†C¶!Û`@´‹ïŽH6s»²Âš=X¾­ø•¬ÒÙÙYô„ ìܹSÃ,³fÍâóùmmm^^^›6mêQ¶ÝÝÝ…B!éîž={ìíí™ñeee>ôññÑ<±†Â«›%$$ÄÉÉ©¢¢‚ò÷ððزe‹ÊP,Z´(22’QQQ”fÇ«+žÊ’¨ `o«£R¶5¼ÇÌ!Û² Ù†C¶!Û`@š4Þå꺽ÍwDˆŽÈöÔ©So޼ɤ‹ŠŠ¦M›Öã, MMMfff=Êvuu5“nmm=z4“¶´´TyâZåÄše[å,ÖÖÖ·oßfÒ555“'OîžUyyù„ $ ¥é“Ò$çš‹§ò«ºöª:UUU«Ÿ }À{̲ Ûm¸1d² Ðl0h²mddÄ'ãœôUÃ,yyyŽŽŽ&&&ÌÕ#FŒèQ¶UŽW\h6«ý4ìW’OÅ ¿‡Þ=+__ßçþ??¿>O]µ¬Ãĉ?úè£ô3àmÙmÈ6d Ù`p6l˜â×^Ù¦_騢¡¡A.—Ó§¢p¶¶¶2éêêêeÛÊÊJûSÇ ¯n–éÓ§ …B A`Ne+NSQQÁžèVY ÷¸ÑeûoámÈ6<#˜{³¹¦®Ðld@¶—º¬B ]õÅpãŠÃBËöW‘mÈ6ød² L%73ÍÆãÂܵµùˆ² Û:K»øQ¢ñ‚Ï`È5´Ýÿǩ߳óˆ{d² Ù5ßÁ“L›|Ñ@¶d[÷)ÞýuÜH»úÜ$xò€ KÿøÑhONkC¶!Û04ä•fó,— £yˆ² ÛC…vñ£³‹ÏZ¸Ê^†*ÌäÿŸcÚ'**êÛo¿MLLÌÌÌ …homèâA–»×Ô•4»«CŽ€ Û²=´hÈ+‰iwÑ~Mû£sŸ‡ëû›¶Ã†¸Ÿ9qâÉöñãÇ“’’²³³«ªªÐØ Û ÍwDW×í%Í„qä€lÈö¥$ê g„ÝùÿYÕòã÷ÐæÞ²‡—³>üŒÚ!çuq'N²§µI³I¶I¹SSS êêêÐÒ ÛÐíâG×|BI³‹wGt4·! È6€lišššò¾>7zÞ©_Ì+ ükG[.Z›uó‹xã컾XÓŽU¼†¼´´”‚Œ–ÙµÈ%²’ ˜¤ñ.ù^$Õõ² Ûz€T*­ªªÊMIO|{íãk¡GÛ_t÷,9V™r¼†ÏÁ 4ÜãF‡…^pÝ7zÞãÚ3?ˆûüK%ÓŽ~¥ÏŸ?_PP@ᥠ£¥A¶@5Âh^²™Û•þÍwDˆ² ÛúDSS“P(ÌÍÍ=úõIçM'ǼûX#1¨âÆ9qyÆ„Å)Àš6{9…”‹ÓÚmPMMznšGú¼ÍuY…ˆ² Ûú‡T*­««+--ÍÎÎNNN&QŒ=³=(vkЉíû9;ƒãv…èæÀyeaÜZÿgºÐ¿…Ç}÷ŸfSИû´I¶)Ma¤`RH)°8­ ÙeÄ‚ g/žå2¼:ÙmÃñíÔÔT²Ço¿ý6êg¢u•X÷O¿sýßA/( …ŽÓ†l€jZEµÌÃÆËŽÆ;½mÙ6(ßÎÍÍ=þ|bb"s¶–2FW9¾/ää¤ßz1(D( BG„iC¶@™vñ£ëþ‡ŠwGP@¶dÛÐ|[(dff¦¦¦&%%‘@žÒaâÆ9 ÿzpË@!¢@Q¸(h: L² ÿF.‘ Â8Iã]r7âaãÈ6€l¬o755UUU•––’7fgg“@~¯Ãpßÿììæƒ[ ŠÂEA£ÐQaÚmø÷8x–Ë.¹miº%D4mÙ†r“1ÖÕÕ‘: …ÂR&?&éì¯=· " … š Ù€S›‘aîZ(h Û² †r‰,ÑØIZ'F( Û +4ß]Yáϳ\vsÑ@¶d QèxFÍC Û0ø0OA㚺–ÅÈ%2Ùm0t!Ó&ßF Û0˜tuÈË¿9“l憧 ô ®©+d² i8ÑØ ç Û0hÔfä§Ùxð<ÅD ÒÕÑ܆8@¶a’>osMz.âÙ€g {{¶(h ]ëè%A1yžAˆdž¸=éXï@ïi,*;k±q€lÀ³·g ]ëtÌÓ|G„8@¶àé‚Û³.€õ ŠÜM¥‡âÈ6<-p{6Ò°ÞBG>™ ½È6 <¸=éXïÀ`éhnK3‡@m`îÆž#ÍÆíÙHÀzKú¼ÍÕi9ˆd´Eó›cÅÚµ\´ÛPŸS„X ]ë,7" }"mÐ a4¯Ex_åOíâGyžA\S×òoÎ PH‚€õ œ†¼’T땈dz¦±¨Œ\ºûxæµ^ôÉ6)7.¬w::Rw–@¶à_t4·¥X-O›õ‘Òøúœ¢ sצÏÛL*Ž(¤ ë–«ëö–;8@¶@ãÞâã=q£Þ¦};FZ'ÎÝÈ5u½{ñHÀzJÜã\¸ä¶q€l€Z„Ñ<îÄ…¼iKoÇþôäºqA'i¼Ëuÿøn ]ë¨DZ'N3Ÿœ È6¨ ±¨ìô¸ßư;÷«UU¼ËuY…çf¬Épöjº%DpÒ°Þ.Úm¨ÍÈG Û LGs[ê¯Vyy@œ6qÎrÿÓY‹%¢>".€õzäÆöc4 mPæêº½©¯¯¤£f8óÊ{×¼CH¶›ïˆ€t¬w 
™ÚŒü‹vÈ6üoÕž´5m¥!ÑØé’Û–ÒCñÒ:1b.€õº#—ÈÆÌïhnC( Û€~Ñ*ªj3ò‡â@%—T׳ui,*Kø¯wã†ÿæÔ ö¤Ü©o¬Êø­Wî¦À[ŸK7ÝâÑhéXï GøžÕi9ˆdÐ;º:äU¼äyî>kñžº3ÀCn87cåíGk.^…N¤ `½ƒ~R¼;âºÿaIJ è…f £¹<ËÅ´gMóΕžE»ö–E|)JŠªás†â@%¿sôàí»3œ×Æ´£ze¹ÿŸ¸@€u ½¢]üÒÙ€¥6#ÿÂܵˆd -Âûæ~DûÔôykÈQ;Ûè꺦OCû£œÛ_';Ä|«ìØi¬qèÕ>â¬ÅIJ CW‡¡ˆd i˜kú;2í¶ûÿ0Óf†KKþ—3Âd¡PXWWßÈ6€lí©Ï)J³ñ@ Ûµän ˆi'¾qÖ L›¹ž<á—‰o¯ÍÍÍ---…od@¶ö0·mKëÄd ‚Æ¢2Ú‰^÷ßih¦Í ·¿¦êŸ ý:;;¾ mÙ½"ËÝïçâÙ¨ Ð÷`¼‘½ìáeÍR*_úã?2775j$}nÙâÑØ˜ÅþúÜsÏÙÚ¾!—ÿÇÓËi$›¦Ÿ¾þzǼy6¿üåÊáÕW'®\ùÛôô¯´Ï Ôýùäq#íN.ý,99¾ mÙ½BÆÉó B Û$›½—åþ¿šM»¥%ûÍ7ÿ{ýúÅW*½JŸ6,™=Ûºµ5‡ÕÝ>Ztôè6•ÜÞž÷þûï.^<ÿÒ¥ÈG.Ëd¹BaÊñãóçÿZûüdA*¾ÓÚ“f®'NœHMMÍÍ¥Y„MMMhÙmÐ#âAªõJIJ PFR]O{Ð;Gj–íÝ»=ß{陋4fïÞOXݽÿâ”)¯>xÀïîÀ¡¡¾Ë–9ö3ÿYÊáÆöÝ„èèh‡sþüù‚‚‚ªª*œÜÈ6€lmH4v¢CJIJ øòJhZy6F³ŽÎ˜ayéR¤ÒÈÌ̿Ϝi©¨»aa~7ºww`›éW¯ïþý_ÊáÎуe;ìð·ß~›˜˜˜™™YZZŠ“ÛÙ}“mÚ/#hOE„t5°¸m² PAmF>íAkøÍ:jdô|}}†ÒH3zô ŠºÛÑñÃìÙÖÙÙ1JL“µµý³ÿù÷A*QR!6 4**й˜¼   ®®Í ÛàÈvÓÃÞu<·ƒcñ¶mÈ6àéÊ6 —/GÍ™cM2¬8’fgï¾f§gè›l÷yA*ª>áÄöý$ÛÇOJJÊÎήªªBóÈ6P’íýû÷1‚>uÇ û¶¸¡(ð:[溬 s×bKlú"ÛÚ_æMÃúõ‹ÃÃÿ¬8rÖ,«œœo»gËNÐÛËÈû¼ ²ÍÙL²Í^I. Ñ<² e»³³sÊ”)áááS§N¥4d²Í —ÈÆÌ§Ol,m@¯e[û˜ÑP[ûý”)¯ÖÔ¤³#ƒ‚¼—,qÐàÀ½z@Z¤A¶ãv…DGGÇÄÄœ:uêûï¿/--EóÈ6P”m7gÎJØÚÚ¦¤¤°Èd²O>ù䥗^š4iÒþýûY'¤Ä‘#GÈÏŸþù7Þx#+++**ÊÊÊÊÈÈÈÎÎîÖ­[JI‰£GNž<™™ ¨¨Hi‚ôôôÙ³gÓ¯4MDDó ;qhh¨™™Ù°aÃè«@ X¾|9•mìØ±K—.enë>—T*õööžôJ°ÏIUÊM‘ŽŽŽ;v˜››3#U.NCÕTfÒÙÙ¹oß> “õë×·´´ô‡Aç¢Ý:ªÄÆÙôZ¶››¯ÌšeµaÃ’ÒÒ32Y.}nÜè®òÕ\ÌpäÈÖ?vcGJ¥Wæ.Z4Ïÿº±1«½= 9>þ;öù÷sAmèÏ.ƒŽ§C–íE‹EFFR‚œ™Òì»vírvv®¬¬‰D ,PôÞ÷Þ{¯¬¬Œt100ðÅ_\¼xqyy9óuþüùÝeÛÝÝ](Ò{öì±··Wšàå—_NHH ¾wïÞÆ•~e¿.Y²„½lÖ¬Y|>¿­­­±±ÑËËkÓ¦M*ç"ã¥*ˆž@Uعs§ÊÜ ppp Ú=|øÐÇǧÇÅ©¬šÊLBBBœœœ***h¤‡‡Ç–-[´‰ÃàrÍ'´$( dÐkÙ¦ááÃLŸÍÌ&9âµ×&ýñ‘ͪ;u,—ÿ`kû†âHRèÿûõ¯_ÿÅ/ŒFinnºnÝâ~8ÑÛüû¿ È6ôy—ÁwðD V¶I’'L˜ ‘H(MŸ”&d&˜6mÚÍ›7™tQQ‘¢<ß¿ŸI·¶¶Ò×êêjöëèÑ£»Ë¶æ ^{íµðððüQÉ®•¾Þ½{WeEšššÌÌÌTÎ5uêTÅ*PzÌÍÒÒ’=A­ÍâTVMe&ÖÖÖ·oßfÒ555“'OÖ&ƒ‹(Ÿå è‹lëñÙÈ6èQ¶}}}ŸûOüüþ%WFFFŒ„3®(Ïš­X]BÝøüü|ww÷qãÆYYY¥¦¦ª›«««‹ýš——çèèhbb”yĈ*çRª}U™›ºYz»8ö«ÊLHÅã<|øpmâ0¸Hªë“Æ»`cl Ûm€lƒ^È6s*[ñé¡ì‰nÅ3ÛÅÅÅOO¶È~y<ž©©)óUénj¥‰©l´sohhËåôÉþª4—†3ÛêÂB¢Ûý¤´ºÅ©«šÊL¦OŸ®òA­šã0蜵XÒtKˆí² €lC¶² ´•í¨¨(777¥ñ®®®´ëüéÉ Ï...•Opvv~z²½zõjRb™LF’ùÊ+¯0#ÉùKJJÔ-…\4))I*•–——/_¾œýUi®mÛ¶±÷l;::nß¾½GÙ tpp 
lo·V·8uUS™IXX†ŠG5½qãÆªU«´‰Ã “½z›0š‡í² €lC¶² ´•í¹sçv¿\999ÙÖÖö§'òöôô411™8q¢ÒÓÈV¶Ož<9}út##£Ù³góù|fdHHˆ±±±ºLRRRh–Q£F™››‡‡‡³¿*Í%‘H>ûì3æiä”`/íÖ Ûííí[·n533£Š‡††j^œºª©Ì¤³³“æµ¶¶~á…fΜÉårµ‰Ã #ãäya{l Ûm€lmeA=".¤Ùx md² mÙFW‡©‹á±l¯û3d Û²­ÖCžƒ‰ô@%73s¡7âÙ@Ÿi?jÞèÀ¨ŠwYÍ+;vºxw W×í¥!ÃÙ‹˜R­Wr'-¤=¨à‹µƒ¨ú8³ =R–Ù†l²C¶{DZ'N4vB Û 1äùscQù³(Oþ\Còœ»)䙎~.Úm8k±$i¼ í©›£tšÏ\èMÐdŒlÓŒ4Ô¤çR>M·„÷N^À=Û¸g´v9»Èö3`ÿþý#FŒ ÏAàç~fÒ¤Iüq}}=d[{’ÍÜšïˆÈ6ºkÑ÷8˜SÐù^…&a&mŽùV¼ÑˆdûiÓÙÙ9eÊ”ððð©S§RzPd›ITVV~ðÁ~ø!d[{è0µ’›‰8@¶x´‹5ß1"]z(ž¹û’Û–ôy›Ižh–læ–j½’,:{õ6æ´ ŒÃ(tC^ )´\"{JŃlC¶² tJ¶y<Þœ9s(akk›’’ÂŽïèèØ±c‡¹¹¹±±qppðO § Y fR©ÔÛÛ{Ò(A_Ù Ž=:yòd###;;»¢¢"Í_[[;nÜ8¥ñ`ùòå/½ôÒØ±c—.]ZWW§}æzÏíÇhÀVÙ`èhnSéÒÌEÝq#ßJ3Ÿ$ÒWVøçy1ç¢+¹™4 Yô ¿Š² ÙÈ6Ð)Ù^´hQdd$%¢¢¢(ÍŽppp(++{øð¡Ow1VüJZîìì,z‚ vîÜÉNàîî. [ZZöìÙcooßÙž5kŸÏokkkllôòòÚ´i“ö™ë=t”KGÂØj ÛhK«¨¶>§ˆú޲c§ol?Æ<`,Õze¢±suww—f.ê~zg¤!Ûm€lý“íòòò &H$JÓ'¥+**˜Ÿ,--»Ÿ+V'ÛS§N½yó&“¦¹¦M›ÆNP]]ý¯¼ÖÖÑ£GkíªªªÕOP¹,†¦¦&333í3×{è8ÙÌ [ d€ÓÕ!gNPÓ‘ÄÍ€È|¯Yî~í6PgA;®©ë…¹k/¹matšyÀXcQÙ Ÿ—†lC¶² ôI¶}}}ŸûOüüü˜ŸŒŒŒ ×F¶'¦}Õ<½ÒH†‰'~ôÑGosî¦À’ QŸ¤Zn¥†l?Ë¡òlÌcÙþ[8d Û`eèEeçf¬A Û@wiÕV§åÂ8yžA™ ½y–˨£§OJç{¸[Éͤ-Y÷÷­ãÔçQ`5dÙ.‹ø’‚À9ò d4P¼;¢Ð÷ âÙ@3]òx£yÍmdèM·„$Ï%A19».Ì]›0f>×Ô•ñêÒCñdÝÍwD´Ý"POãO Úƒ’m²líÚûøÌv\d4Ë6 ˆd€¡ãùúœ"IJ ž5r‰L\ ¸Ç¹pcû±++üÏÍX7ò­T땗ܶúFóhËÄuàÏ’¤ñ¿ÍñøÌeûòrϸ‰Îm€lÈ6ò<ƒJÅ#mðÔiº%%ð¯û&æY.‹7šG‚MšM;lRno\ >¸än LïÔÙþƒaš6U<þ¿æq~»² mÙ™6a"m0Àt4·5ä•Жïuà¢Ý†„1óI°³ÜýnDVr3I¼"]£&=—v¢ÂãG S¶EIQoØþóß ÛÙm0 \˜»q€lƒþ"­Wñ.ߎe® 7šG›Vî¦@òíº¬B\>$¸0÷ã³®m¹(ÛçßZÍ1u‰;q² mÙBGsIž¸Ù½¦Ex_”À/ô=xÉmKÒx2œ½èëÝØsEeب†"Ì À®ûï4ÌÓÚqŸìŒ{d Û² „T땸¦² z¦£¹­.«ðvpl–»×Ô•æ²ð*ÞåVQ-â£ä{0´‹É›+.žzq>gêûÌim‡õí·ß&&&fff …Ø=d@¶A¹ä¶¥’›‰8@¶ šïˆîÆž#K³ñH3ÿ¢Ý†k>¡¢>ìZ_éê§½³9n¤øvÛýœ™æÆù¯wãE0§µOœ8A²}üøñ¤¤¤ìì쪪*´ P„ŽnÇ"mÐc@¶Aïèhn«IϽÉ\~ÖbIöêm‚0NC^ žn ÔÜ%ÿf-s=¹~ß¿ýðúÙ¤)ïqŒìãö„2¦ÍápH³I¶I¹SSS êêêÐ$@‘«ëö £yˆdm(=Ÿç„8@¶ —vñ£{œ ´0§¯ùž×ýWr3%ÕõŽÒÔÔTzëöÙe¤}*÷µ…ÂãGõï}`²‡—/úÎ;Žñ‚¸€0Ö´cc¾U¼†¼´´”¢&mÙ}£&=—äq€lÍmÕi9…¾I°²Üýaq6R©´ªªª   åÀWœ©ïÓž5ÞØ!så'·Ž¼.¶†ÏéíP}ñDæzCEbTappŠãÚ¸çß~ü¢/ÇqßD³¦ýÝ×?9é·Ç?ÜÂ9þÝùóç)Šš@¶dôVQ-×Ôq€lë?r‰¬6#¿xwDú¼Í cæg8{•ÅÔçA°MMMB¡077755õÄÖ Øw×r^œÿØNõbˆç÷þ'qÁ‡ã~æñ9íØØÇ!99wÍ©I.ü]_ ËÊqZ Û² ú 
yGGsâÙÖOòJHª3z';]´Û@²Mʰ¤Ri]]]iiivvvrrò‰'HD£CÅî:pbû~ÎÎà¸]!Ú6«â6ôbú§:ü-<î«È8H³©vÌ}ÚTEJS}3ŽÆ¦üf-ïõ•Âøt4€lÈ6èi6ââÙÖ#Yª—s&ËÝ›Ú÷5ŸÐê´ü¥úæÛ©©©$¥ß~ûø~f†híˆùü çÅù1G¾ŠÖUØQí¨ŽTSª/Õšê~7)ƒ¶‹vj3òѲ Û o\Yásq€lyšïˆnǦÏÛLŽã±‹š5Y7ÂúéÛ¹¹¹çÏŸOLLdN“—ÆhÇ Û5±+½ctª ÕˆêEµ£:RMÓfoÕ¾{Žg¹,s¡7þ‘È6dA}àºÿᛑˆd{¨Bpcû±4d3·|¯5鹸   o …‚‚‚ÌÌÌÔÔÔ¤¤$òÒSÚ°ÿÇxÁ©ïNžÒa¨.T#ªÕŽêH5U4mÚšJÅsM]s¡$Ø©Ö+I¶ësŠð4|»©©©ªªª´´”t4;;›¼ô{-8m÷1ïÓ}ßë6TªÕ‹jGu¤šª{ü¸\"»™4Þ%Ï3oÅ2|OÜ_Ù@{HR.Ì]‹8@¶‡tÄ_Éͼºn/ô§ÏÛ\ƒSmà™)w]]©P(,í‰üãg’&¿_zëv©nCu¡Q½4h¶"íâG…¾®û¦4² ÛôpY'¦c'IJ­ÓÔçån ¤–šáìUþÍœ[:~(¦ÇÒÖGcÒx—’ <ØÙm4CGMx’d[¡vy;86Õzå¹k(f tŸšô\j±zÿà€æ;¢++ü¹¦®eÇNã) È6€l Ž‹vpÇ+d[‡ c÷ê´:”O4vÊó BëCˆ4Jn¦TV\ ÈpöâY.»{«Ùmº“㱫ü›3ˆd{ðiÞ¿±ýX²™[ú¼ÍÂh^Ž †¤Ù$Û†Vk:î¼h·*^Å»Œ6€lÈ6Š”Å\÷?Œ8@¶ ¹Dvs!ÃÙ‹kêJm±é–« E ê´¶Tñs3Öà0 ß\´ÛP—Uˆ8@¶ÐÒœìÕÛÈö Ð.~T¼;"i¼Ë%·-U¼Ë¸ù iÛ4ÀÓÚŠÐö+Œæñ,—e¹û‰ hýã¬Å’á}IJ €öÔfäó<ÈöàhöÕu{ñ/ \˜»Ö`Ok+"—Èaœd37lÚÈ6€l@‡CÔu mh6}¤6#ßÀOk«Û̯ù„â]}È6€lƒ¥£¹-aÌ|IJýÔ‘Ö‰éø›kêŠão gð§« è=Yî~¥‡â‡>ÓÕ!Fóx–Ë(’â€q¬z ÐÑÎÕu{Èv¿¸Ç¹À5u-Þ}Àhº%¤Ë7úÅPÆ¡`Ò~ÏêÀ¸V=Ð3j3òùžˆd»Hªë/¹mI³ñÀ¹)`8Þ ˆDŠŽæ¶âÝIã]ò½àùCÀªzCcQYªõJIJÝGÓ¦ÖsÝÿ0NhÚ}¢±S»øB1°°/ ¼±ý Ð)äY¼Ñ<IJ @o‘Ö‰éØq€l÷E9ÎÍXƒ§1Cƒ„Äáéu,W×íMïRƒ õ:B‹ðþY‹%ˆd€>7ò-œ˜„l÷ަ[B2m¼Ü íâG‰ÆN¸ÔùiÓ|Gte…?×ÔµìØiìŸmÙCê=¨A Û½0í4úÄ*†ÆÍ€HÈÝXƒ>H7‘T×çy%w¡=™\"ÓµâiÓc<ãmÄp6dÈ xJ²í² d²=tuÈ3œ½Tj†Rg×ç¾oÉ’%§NbÒ Ã‡¯««c¾ž8q‚~íg‡;°rggç”)SÂÃçNJiȶ!ïJëĉÆNô‰>H—i¾#ÊñØÅ5u„qtG¹µì1pPÙCH¶±]C¶èåßœÉ݈8@¶U@šM{•WÏv—í£GNž<ÙÈÈÈÎή¨¨ˆÝ'íÛ·ÏÂÂÂÄÄdýúõ---Jù$%%¹¹¹1é³gÏR>gΜa¾º¸¸Ð¯”Ë—/饗Ǝ»téRÆÆ•N­;vì077766î[ÁhúÐÐP33³aÆu¯5Ç›3g%lmmSRR«ÿùçŸOœ8‘ ùé§ŸÊd²Ç+%úP•³xzzFFF²Óüýï§1òW´îëº{ÀÕåIÕüä“O¨Ê“&MÚ¿ÿP?Þ¥­]äPA\ Èr÷ãY.Fótá²u=†ºm„GŽ¡ãøçŸþ7ÞÈÊÊŠŠŠ²²²bº¯[·nuï@Töoìééé³gϦ_išˆˆ•²R'£e+•J½½½'=ìJº,•½ÊÅi¨šÊL4ôŸêâ@¯¸Ç¹pe…?¶kÃÜ®!Û ŸÐ1ÉÕu{ȶ2 y%\S×VQ­6ÿ,ÒWwww¡PH½áž={ìíí™ñ!!!NNN>ôððزe‹R>ííí/¿ürmíã¥øûû“`ÿéO¢tee¥©©)ýJéY³fñùü¶¶¶ÆÆF//¯M›6©,C@@€ƒƒCYY-ËÇǧo£é—,YRUU¥²Ö‹-bT–ö””V¬¾³³så(±{÷îÇ+%úP•³ÐÎþÝwß‹‹{¼{àp(ÍÄP]þꂦr]+W—ç®]»˜Z‹D¢  iÙ&aK6s˦†Ì9¨s3ÖTr3·$êz uÛ%Þ{ï=Ú©¿ |ñÅ/^\^^Î|?~÷MReÿÆN@½kBB1ß»woãÆê6pÅNFËþ–ŽŒ© ¢'PvîÜÙc—¥²üI{ €IDAT·Ñ°8•US™‰†þSCèÛ±2¶kCÛ®!Û² 
Ù~*\˜»VÃã—»wëÕÕÕLºµµuôèÑLÚÚÚúöíÛLº¦¦fòäÉݳòööþâ‹/(ñÎ;ïdgg3ï_ÿúWß}⦦&333•e°´´dÿ%ísÁhú»w窱2í'L˜ ‘H~úù~rÚ°sݼy“IO›6­ÇñJ‰>”GÝ,”ž2eŠ¿¿?}RZóÄꂦl«Ë“ªÉÖš2Ò²-Jàãu/C”*Þå4‹vȽ¥z uÛ%îß¿ÏöWêº/Åé5OðÚk¯…‡‡ÿøãš7puŒ†þvêÔ©ŠUPìÜÔ妲·Ñ°8•US™‰†þSCèñ2¶kÜ®!Û ŸTr3³Üýȶ²cÐqª¦Òh¼g›ýJ=©âuJÇïžUAAL&›1c}}ýõ×¥RéôéÓi<3A^^ž£££‰‰ “Ɉ#T.ÔÈȈÙÿõ§`”îêêRYe___¥'Ãùùù±s±‹¦•¤Çñý/†Øîܹ“ư×bi˜¸WAS¯Mž”Ò²Mª6ègGA¸{Žg¹Œïàùì/OÐÐc¨ÛF´ìW{œžMäçç»»»7ÎÊÊ*55UÝ\ŠLú[¥ÎM]—¥²·ÑrqìW•™hè?5Ä€>È6¶kÜ®!Û Ÿôáu@Ïe»«CN‡§šOi¹ó g …=.‘dû«¯¾b®Z·nYâ›o¾Éþ:mÚ´˜˜˜††¹\NŸlæJ÷Qo«ýIZuSg†ÌØŠ³TTT°o+žÁ¦„Ê3ÛJãûY ³š™™>}úÕW_½sçŽæ‰Uv{­­­LºººZ]ÀÕ婸ï~qqñЕ톼’³KðÆ¯¡­ÁòoÎpM]³ÜýšïˆžÍB5÷ê¶‘?(ÿWººx<ž©©©Ê Yib-û[ gÀÔÅDeo£nqꪦ2-ûO¥8Ð[ÙÆvm˜Û5d@¶!ÛLé¡øÌ…Þ=”F»GXX˜³³sII‰L&»qãÆªU«TæöÅ_Œ7îøñã”ŽŽŽ3f sa9õ¡IIIR©´¼¼|ùòålæ´‡£œÙÉhmn?VW0u{”¨¨(öAn,®®®TZf.æÞlJ(Þæ¤n|?Ë£n––––™3gff>>›œœ<{öì¶¶6 ù« š½½ýž={(+:ŒX¼x±º€«ËsÇŽl­i‚¡+Û9»nÇ¢ëÑä­Í¤ñ.tĬîQˆæCÝ62àå«W¯¦CgÚBé`ô•W^Q¹!+e¢e»mÛ6öÞNGGÇíÛ·÷Øe©ìmÔ-N]ÕTfÒcÿ©2hõ…¾±]æv ÙmÈö@Ò.~¤Í¡´Üytvv†‡‡[[[¿ð $\.Wen<5juî” …”V|½vJJÊôéÓi¤¹¹9妸P1cccök{{ûÖ­[ÍÌÌLLLBCCûV0u{”¹sçv¿H‰lÖÖÖö'…§ŽÓ¢ÿð‡?(>ÀSÝø~–GÝ,üñ×_ÍNCqX¿~½†üU­¨¨ÈÎÎŽyÂç‘#GÔ\]žTMOOOÊ*>tŸFμñ‹6t=ú­ÐâÝ´f¯ù„JªëŸÞ‚4÷ê¶‘?(?yò$už´-Ïž=›Ïç«Ü•2Ѳ¿•H$Ÿ}öóÔbJ°—€jØÞUö6ê§®j*3é±ÿT´‡yM.¶kÃÜ®!Û Ÿ´X‚8@¶ÿ½GÁóz½nÔì‡ð–סËíàXlú i6É6×Ô•º;üŸÐR¶Ã² Ûí<MïBmá†l8©Ö+ësŠ=¦ùŽè꺽¤Ü‚0Ž\"C@mÙmÈöS$ßë@¡ïAIJmàÔf䟛±q0‹Ê²Üý’ÍÜ„Ñ<< Ùm0°´‹%;!í5i±NŽÇ.Aq0êsŠøž<Ëe¢ÜÐ P&ßë@é¡xIJ Zd»ïÐŽ${õ¶)«žžÑÕï3ÕÙÙÙVVVšë¨î1'úþu2Xj3òÓl¥ßîg&MšôñÇ×××Â>fìØ±ìËÉ Y¶ð_'0DéêÓu²™[–»_ïAÔÐa ²íšЊ€þÈvžgPIPŒŽÈvggç”)SÂÃçNJéAÙÇ0‰ÊÊÊ>øàÃ?4„}Œ6µÓ{ÙnÈ+Á#ƒ\"„qH¹é8»ùŽHgˉý“ml×Ð$€VôG¶;šÛ$Õõ}î‘YØ1G{ölš€&‹ˆˆÐüoE÷*«©.¶*×}¤R$Õ•G)ñÎ;ïœôððزe‹ÊÜ-ZI‰¨¨(J³ãÊÊÊhvÍšJ{#gggÑ,X°sçNÍeëÕ>fÖ¬Y|>¿­­ÄÒËËkÓ¦MÚgN:š@û].—¯Y³†~Hocí6Tñ.£¯=ö™WVøsM]ËŽîênaô¾ÃÀeÛ54 @¶þÈ6/^]··Ÿ™ 6L›~Ÿ¬L(jȇùWqšŠŠ öÿ]2Ãîèª[´†?tµÙÇhþOr‹‰‰ihh ɤOu Ó°éêêâñx¦¦¦ì‘='\]]ÍΨ²Ê*GjŽ­ÒâTŽT,­ºòtOP&3gΤ㠙L6t702(Ò§Aw'0T2œ½x–ËîÆž¬2N‡ Àà’fãÑW‚íÚ0·kh@+‚l÷‹,w¿{œ ýÌ„v%%%=ÊvXX˜³³3MIVvãÆU«V)å忦|šÝÕÕ5::šåå劷*©[ô¶mÛØ[•·oß>€ûÔ¤¤$©TJ…Y¾|y¯ö1«W¯¦E€D÷•W^aFÚÛÛïÙ³§¥¥…ö©‹/fgTYe•#ÕÅVåâTŽT,­ºò¨¬)‡Ã¡¯ß|óÍÞÀnD^ó EGzEmFþE» t >(×DB‡ €.pÖbI‹ð>¶kÃÜ®¡I­²Ýwº:ä 
cæKëÄýÌ'$$ÄØØ¸Ç®¶³³3<<ÜÚÚú…^˜9s&—ËUÊgîܹìUÍ,ÉÉɶ¶¶”hooߺu«™™™‰‰Ihh¨æEK$’Ï>ûŒy'%Ø+©d“’’2}úôQ£F™››Szµ9yò$Íkdd4{öl>ŸÏŒ,**²³³c~äÈvF•UV9R]lU.NåHÅÒª+ÊšÆÇÇ[YYutt é Œg¹¬.« è•ÜÌs3Ö<ûG(B‡ €¡É6¶kh€lý‘m²‹4ô‡÷ßÿĉCº âÉ6V%è3]ra4ZÑ%·-Ôœ ÛšZ‘¡Ëvñîˆëþ‡\Ð7:;;¿úê«™3g²oÿ¢ú¼±ýV(è¿r Â8\S×]ÍwDúA²™[«¨q€&€VÙîéó6פç"¸ ò¹ç,,,òòòôà@ª±¨ + r‰¬xwDÒx—<Ï ¼¶(¬}€V„ ¢lw4·%Œ™¯#o‹`°ÀÍàiÐ.~Tè{0ÑØéºÿaJ# à@`í´"`@²]ÉÍÌ\èÈ'wSàÍ€HÄ< $ÕõÔÀ’Æ»PÃ?›à@`í´"`(²}ÝÿpñîD2]r!ÜŒž*ÍwD9»¸¦®‚0Þå”Ö>@+ú/ÛYî~¢>" ™*Þåôy›ð .¹máY.Fó Ü $Õõ\SWÄšZd»w¤Z¯ÄC¡€“㱫ôP<âžµù|Ï4Jn&¢€îÓ"¼Öb âM­²Ý äYÜÈ·pv2´$;IëÄxÆi“o§ÏÛLîhÙÐ$€VôJ¶Å‚s3Ö ¬À%ð3œ½0X£y<Ëe™ ½©CF4€lh@+z"Û÷8®¬ðGX!suÝÞ²c§0ˆtuÈKÅsM]³Wok¾#B@Ð5ê² ùžˆ4 ´"Èv/À£ÈÀsÈŽ —ÈnDRƒÌÝ(©®G@Иç, Ð$Њ Û½"N]Vašât‡vñ£ëþ‡ }R²  I­ IÙÆ£Èscû1 kHªëó<ƒ’Æ»ïŽKdƒKMznæBoÄšZd»$Œ™ßÑ܆°ƒåÜŒ5õ9EˆÐMšïˆrwS`Òx—›‘r‰ `¹ºn¯0š‡8@“@+‚lcÏ@Â8yžAˆÐ?šïˆ²Wo㚺–;—r€C&MhE² À3…lè1âA†³ÏrÙÝØs*' ¯Ë*D Ð’++üñªHhhEmÈ6=Ã5uÅ Û@ï©ÍÈ¿h·!ÍÆ£Šw¹û¯7"ësŠ%´ïà‰—ÛC“@+‚lC¶è¦[³K` Tr3ÏÍXCª t*»Ex?iâïšnV D@¶4  È65ûìÕÛ`8tuȩٟµX’åî×XTÆŽÏ\è}ÚĹ£¹ !@3éó6ãJhhEmÈ6=·ü²c§`hÈ%2A‡kêJ›@ó%ðißϳ\ŽG© ™³Kpó4 ´"È6d€ž™šn `˜´‹ïŽ å¾æÚ&ª=mâ7ì7™‹| ÛšЊ Ûmúޤº>ÑØ qØH¶I¹ÓßÞ7ò­S£Þ¾þ—à mMhEmÈ6}äçB–»â!:ýdóÅœa¿¡#€SÏ¿ýc\:b€JâF¾… @“@+‚l÷‚ëþ‡K‚bS`Päy Â8ˆ0d:šÛnîû{òknñFó8Û63Äÿâ]< %4€VÙŠwGЀ˜ƒ"ÍÆ£!¯q† 5þ<Ï øÑï°‚­4$þÒ‘½V¶UT+.ÔfäÔ@U–T×£©%4€VÙîÂhÞÕu{S`8´‹%Œ™/ Žšô«×|>çY.Vgã†3¤Z/+ô ®IÏE«Ò:1×Ôq€&€VÙî÷8ð¶a`PÔç]˜»q ;µ?ð6>¾˜ÜèíKn‹ví-‹øR”UÃçÔ@U¾sô U?sáz $ÍfM%7-ÄiÞ?k±q€&€VÙîÕ¡U>ßÁ1†CÙ±Ó¸š€îÜØ~„’Í~Wu¸£-·«ë(4›åœÌ…ÿOZ'FSlhhEm­Òl<S`8\ó ½‹8ÀÒÕ!¿²Â—Žò<}¡Ùê†;GÆÙs_q½›}­©©I*•¢åt¼„«¢ I@+‚l÷üS Ì…ÞU¼Ëˆ,×ýÃi÷û‹`µæááµ3I“œÆ;]̪ªª‚r¸@“Zd»×È%²x£yˆ)0¸¦®xÎ0,U¼,Ú÷_óñ‡Kk34–¤žžèœ0eñµ«yB¡°®®¾ ÙÐ$Њ ÛXë<~y¢±âKšÍjžå{í?@¤µ¥ýæ™u[sssKKKáÛ( pÀ Њ Û}á¬Åö•ªè7õ9Eí6 üìçiÇ/JŠ‚B÷jøÇ2OΨ·R8 ÙÙÙðm¯JÐ$€VÙî Î^uY…+0JÅçy!0äxü…kê¬Y,ÛÚþ¹gÏ~õ«©/¼ðüرÿåìü›ä䃊„†úÒOô©8ò¹çž³µ}C.ÿAi¤6Ù>§Šî9¨Ì¶ûÈKB?}ýõŽyól~ùË1£F|õÕ‰+Wþ6=ý+Í“ÓÓ‰uNNN†oC¶4 ´"ȶZò½Ð.a†µvAqà§'!O4^»i‹«”HþùöÛo®_¿¸¸8A*½úðafJÊ—®®öŠÓØØLÿæ›]o¾ùßJ6ûÑG‹ŽݦRq5g«Ržû#ÛJÒÞž÷þûï.^<ÿÒ¥ÈG.Ëd¹BaÊñãóçÿZóIÓ—|gµøÄ‰©©©¹¹4—°©© J¹Y¼;q€&€VÙâ±ÿßÁ³:-qà§'o£ ½~YÄ—”20ÐËÝ}† ®];iooC‰Y³¬(­h³÷ï_œ2åÕøÝWs¶.ÛJê»l™c®$¿âõΨ·¢££9Îùóç 
ªªªpr[¡#%,A“€VÙî5ä9»V` é'<½Ì³³³­¬¬´\DoK2àÓ?ÕPõ9E´×¿.VƒRΜiyåJ´† ¼½?üRâàÁ?QZÉfÃÂü6nt﮸š³pÙÖP›éW¯ïƒl_?°Ÿ¢ýåÑo¿ý611133³´´'·õ˜ÛÝ ˆD I A¶{‡¤º>}Þf„q#ß’Kdz&ÛbžóæÍ;}ú´–,Ùîsôø_’þP›‘O{ý>GƒR=ßÐðu¿Êd¹3fXJ¥W)]_ŸñÚk“hŒ¢Ívtü0{¶uvvŒ’âjÎvÀïÙÖP’Ñ£_hkûgdûnB$E/6 4**й˜¼   ®®ݬ¾ruÝ^ÜsMBZd»/¤Z¯DXÞÓÏ÷~566úûû[YY=ÚÄÄdÉ’%éééÏRÒžª³;Vû+`qf{Hoeûôé­[7²_׬YHc”löòå¨9s¬Éu{%Û~f[CIZ[sTª¾fÙ¦¸=~FÚöý$ÛÇOJJÊÎήªªBO ÙÐ$Њ ÛÿÁ…¹k‡îé>´¤é–°?ÿ+ýîw¿Û¼ysii©L&{ðàA\\ÜüùóõF¶{•9dÛpd[óõÞK–8(¦1ÝxýúÅááÜËÈÕ•dÖ,«œœo{UEÙæì &Ùf¯$ …èiõ•ìÕÛD |ÄšZd»×\]··±¨ ‘ú ©ßÁ³Ï³=º±±QåOR©ÔÛÛ{Ò(Áž"¦ãõ#GŽL™2åùçŸã7²²²è¸ÜÊÊÊÈÈÈÎÎîÖ­[Ìdûöí³°°011Y¿~}KK 3/ ›ÛÑ£G'OžÌÌ^TT¤¤vê& òxzz¾ôÒKT¼Ï?ÿ¼» *-K ,_¾œ¦;vìÒ¥KÙ+c»_Å­²ä„L&ûä“O˜%îß¿¿û§M›V\\̤£££™¡ñZÖTsQ9 $2òß·_þýï§12ìèèØ±c‡¹¹¹±±qpp°†CT®¸~® A”퀀OÕ=ɬ¶öû_þrLccÖ¿_ˆÕ˜Ech¼’¬Ò˜)S^­©IgGjÈö©Êv÷’y³ôA¶ãv…Pމ‰9uêÔ÷ß_ZZŠžV_¡Ým2ˆ4 ´"Èv¯) Š©â]Fd~ÓÏ·¤ººº®\¹òòåËmmmJ?‘†9;;‹ž°`Á‚;w²rõÞ{ï•••‘³¾øâ‹‹/.//g¾²'ÆCBBœœœ***>|èáá±eËu.çîî. iö={öØÛÛww9•lß¾ _õ•.§8rÖ¬Y|>ŸªÙØØèååµiÓ&u®«®ä»ví¢€TVV2é¾DRñ#GŽP‚& °¯ Á’mZ[svíú½µµÅóÏzñÅÿúíoíΞ=Hãß|ó¿/\8ª41a^¸­$«rù¶¶o(ŽT—mHë>^ÝH•²Ý½$dû!!ÿ÷ë_¿þ‹_5ÒÜÜtݺÅ?üp² I³ñÀ5€Ð$ A¶ûd·„WVø#²@¿¹æ*ãô?Ÿ®®®âââµk×¾÷Þ{ÌÒo‰D¤)A_5K—ÒW’.EO>|x¯fïÑñ¨<ì•íT¼e;//ÏÑÑÑÄÄ„)ψ#Ô-H]É•Ò}‰ÍÍÍæææ”˜9s&Ç{ûí·)McØK赬©º(¢aš;wÒö²p-kÔÙîó DÙÆÙ? 
ñ7Gh@+ƒ)ÛĹkY ßd¯Þv7öÜ@å&‹_|ñE&­á̶6n6}út•ÏU6lØ€ÈvoÏlSùIär9}jXº’+žÙ...V¹D‡¸¸¸ÿùŸÇ{úär¹ ,èmMÕ@uÓš™™>}úÕW_½sçŽæ‰­¬¬ºŸÙ&InmmeÒÕÕÕl©Ô­¸þ¬ È6d .Éfn’êzÄšZd»²ÝÑ܆à=¦ŸwÜÍŸ?Ÿä°¦¦¦½½½¢¢â÷¿ÿ½««+óÓ¶mÛØ{¶·oßÞ+Ù £ÙKJJd2Ùÿ?{çÖÄ•÷ë»Ö…ªHw©¢ÕWºÕÖvu+]Q(JK/µÚ²õ¾K[\¨/î²J½UZTD¤ÞZý *¥‰€ ."…" +T@A£‹$áêÿ³Í› IÈrù~žyx“ÉÌ™ß9sæ|23g®1÷WcÆŒ¡™šË6eOüHðüùó{•m+++‡#‰ÊËË—,Y¢`CòrþÅ_8;;ß놹ÅÀÀ@²ÜÐÐPJ8p€¼w÷îݪ H"s™¦¦¦iÓ¦eddtõ¡ãâ^ýuæQ|y+ ppp €H>³={öì;vЪ¨>,\¸Pœ+y§IA¶!Û]d€:P‹€¡Êö¥¥~µÙ….0bâmÜoU¨ýuêI/Z´ÈÂÂÂÌÌl„ Ÿ}öY]]ó‘P(ܰa39%Ä÷+)Û䜶¶¶Ã† #är¹Ìü}ûö™››+s²¼(?ùË_(ç–––;wî2dˆbÙNHH˜2e -6nÜ8ʘ‚ ÉË93¼6³E™£‘ùùùƒ~ðàA—õÕÔPº  @Õ=•—Id.óñÇ÷Ýwâe‚ƒƒW¯^­`…­­­›7o¶¶¶¦¢…™™………vvvTÆøðaÉaÛdœ&Ù†lt‘ê@-†*ÛWý•ŒBpqË6ž¸»zõê„ PP@mÈ6P‘ žkå‚8@“€ZÙV“»¬”+^{\`ÄpF;S‡É4÷ÝÇǧªªêÎ;ŽŽŽŸþ9* ² ÙÊÓÄ¿oã†8@“€ZÙV“ºÜâ”+\€Î( ±¶¶¶´´\¿~½xL/`âÙ†l%i¸ÁÇ8²šP‹ ÛêÓÖØ=bNg[;â ÐÀ` TÆgÒAq38ˆ¼“ªÅ­K¶ý!Û¦@Mú•4OĽ A¶Õ'yºG}~ â L³ë³! ûº½èïýÒÛÀêgÆú2W%û»®ÍbÒdb¯úd² Læ¢â ÛmõÉó . a!¾À(yRQgíªŸê¥ÓMke嚬²­^®ú {•¼‹tÖ/Ù¿·&…IÕ‰â†+Û¦Ã]VJ¶Ç6ÄÁ”Ásû² ÙÖ”{ÜŒ¬å[_`"§IÅïåRƒ={ö 4ˆþö‹æ ø…±cÇ~üñǵµµý¥”Z ìƒ,--E"‘xŽ»»»8- ÇŒü6Lík=2ÊW=ù Ïlã™m $üpÞåU;ô"Ù†l«Okýc¼Ù@¶Õ£££c„ ¡¡¡'N¤t¿È6“¸wïÞûï¿ÿᇴlnnn$0Lº®®nàÀ€ù722’>Õ0ÂÚŒJ² Ù†EéÁ¨<Ÿ`ÄÁ”©Ï/Ižî8È6d[#ÎÛ­Á‹ˆ±ž&%Ç’ xΑ#GÆofffggWXX(–¨/¿üÒÆÆÆÂÂbõêÕMMM2×ÏãñÞxã JÌœ93!!A<¿­­í‹/¾7nœ¹¹yPP¼M3 ‘Häíí=¶Jˆ¯ëÊË›ŸdfÇŽ³gÏfæïÛ·ÏÑÑñöíÛ>ôððظq£Ìõ¿óÎ;'Nœ DXX¥ÅówíÚåààPVVF_÷ññ‘·i&ABèääTÑÍܹs·nݪ8o*Éö«¯¾š––ÖÜÜüèÑ#//¯uëÖ©±rUs¢I`[[[_xáÚJûùù‘`ÿýïÚ}éÞÊÊŠ>U¼S’«’W *eŒ–wss«¬¬T©з¨LïuC‰íÛ·÷:_*¡^~ Ûm ]Ù.<‰8 8È6d[#ª’³s=b`š²]UUŤŸ½¥¥eêÔ©ôïË/¿,‰¦L™BóUÚ)•JA^Æ(ÝÙÙ)³v)¨”–W¦ŠËZ“ü@¶!Û@ë²}'" q0e0H€lC¶µÃy»5"A=¢ Œ^¶¥žt•§^¤v|>_Áš™+™’Ëܾ}[|sòäÉ=¯©ÊÛ´‚+Ûª*nÏù´6R‚ºººöövú«ž?+³°¶Ë@²ýí·ß®]»–Ò«V­ zíµ×ÄŸÊÛ)©<È,U3&/Š+€älJȼ²-5_Ãü@¶!Û@ë¤;yÑñ‚8@¶Ù†lkJÑöcwY)ˆ202nðm—IÎ!*..îU½BBBœœœhÉ–––k×®}ðÁRk ã%ÆÅÅ…ºà”ppp(//—|ZXÞ¦·lÙ"~f{Þ¼yþþþZ”m+++‡#‰(3K–,Ñlk+° û÷ï5jÔéÓ§)M!1bsc¹â’ʃÌRP5cò"¬¸зœ™g³)!ù¾¼ùæ² ÙZ'ÍÁ²mâž¼êwqmȶ¦Ðé?Ýã£ç«¿öíÛgnnÞ«Fvtt„††ÚÚÚ6lÚ´i\.WjÍ3fÌHLL”š7sæÌ§Ý£|mÞ¼ÙÚÚÚÂÂ"88Xñ¦…Bᆠ˜ÑÈ)!¾ÍX+²0eÊ”!C†Œ7ŽöHw²­­À2ŸOiÉ×kËÛ)©<È,U3&/Š+€xÔqÚô'Ÿ|"9¼¼ùæ² ÙZ‡7i1ÞÕbâ\ñÚ[ÂBd²­)íÂ)'À(e€¾h¯{û¤lk2Ýå†wÉöסmS€Nmç‚ëÆJÞEÄ@¶!ÛZ ÍÁóQa Û±l 2 
è¬_u>æ¬ÆTÜ%Ûßž€l›Qföm툃)“h» }cÙ†lk¼O@¶0zÙ¦ƒ‚Îú·ÃÁœÕ˜²?ßÂdÇf³!ÛèùÁ¥­±qhL ÛZ@Yîä…@È6FL»°%ÊlvSüïßgM|²m ˆõ\+ÄÁ”VÕ¢È6d[kt¶µsF;ã<`LP}Ž1wD$cÁ§‰¶ aΪNÂXƒìX|Ù6ð[-dœ·[ƒ8È6d[‹=0ïªälÄ ÀÈVÕ–½´Ô/ÎÚµô ›Ž‹‡y±ðg•¦‚m_v=°}à[ȶ)PŸ_’2c%â`Ê܉HÊö؆8ôE!ÛZ£8ð䯽ˆ5@€qÐx«âfPDªýúès¨×H}G‘ ¾µþqŒùÜl ðgå§æû?yîOl;v7m£§&ýJšƒ'â`Êm?vÍÿ(âÐ…lk³[gíŠXc"ÞÆMXU‹8Ssìë»N$O÷àZ¹ä¬ ¨NÍ‘T¹hûwìÁvµ9X´’Sú¢¿v Ö}Y²m Tò.^Zê‡8˜2Tî²RÙ†lkêœ 2 n`4œ·[ƒ¥Ssì8k×<Ÿ`ykýãx›…ñ6.-/B¤•½üÏ›Óf±Xaaa§NЉ‰ÉÈÈàóù¨{Æ?œwyÕNÄÁ”‰·qk¸£@¶!ÛZ…:j¸g\7Ö¤_A[ŠºÜbö`»ó³W´>ΆN+˜®~ÐeÚkØ¿I²}úôi‡“••UYY‰Jh|Ü Š¸êwq0Y„Uµ`@¶!Û:é´ñ&-F¸ÑpyÕN ûŒ’úü’kþG“¦®Pɱ%)‹e ²;÷‡šþý#¤ºçÔòðb懨“Äzk;òñemÒl’mRîÄÄÄüü|@€Úh|m?Fâ`²TD§eºoBd²­}’§{à¶`L&~8qÆA»°¥*9;×3›7iñU¿Cš<øÓÐÐûÝöpû3ÏÚ|ÕÖœÁf& Åõýû¢ÌçŠßõ%6툈É{ÈKKK)Œ¨™ÆeåÇc“%Ï'¸8ð$â Ûmís}× šq`”=[z0 q†Kg[{mv!uûÒ¼¢GÌIsð¤´V~‰D•••9 ©1o®ìºSzøìóîžÅ‡Bî%œ®Nc™àt—^œâ²Ž=ܾë‚ö´÷Ù»¿‘2íðn(}îܹüü| …µÔø¸¼jçˆ$ÄÁd9o·Ï È6d['4ñï'O÷@ÄqP†[ud€·*¨/Âç•„°(ÈWýQ÷”&²AyÓ¥¥~Ì2y>ÁÌ]šôõ»¬ZO}~ 5>íÂÄ–xTXF}ŠóÖ.j“)}›ÑÖØ¬Ý 544ðùüœœœøàï~pZ÷È?uI¦iOìQެw<Ù»B؈M[|9B‡ËÚÆJº“\Ëd¡35¼Zood@¶ÿuïžTÔ èÀ ÞÞ¯9ªÚJÞÅë»Nd-ßrÞnMœµ+{ð¬x7òg±9ž$s¦‰b.oªˆNc–aüœ&ú:­“ÖC>I+Œ2³§‰Ô eºoÊY@Ë”=KžY›]hÄËKÙ5oÒâKKýnET§æ´Ö?ÖÝvE"‘@ (--ÍÊÊŠ‹‹# >xÒ?0bs`¤ÿÖÖ ö¶}º>Ùʲ|[ç[Qrú:”ýí öCšMaažÓ&Ù¦4ŠÂEA£Ðá²¶±Bµ9ˆƒiBEËN² ÙÖ!ÔÆäÀ8 =Ãë[Ô ­±™ìúšÿÑ ®9£i¢DﻬAfNŒk¶P©ÑVH°ËÇ’lçz’x“ä“„Ó‹kåBÝ ŒÞT²†¨â$Ïu¹ÅÑiÅ'i×úØ®ûvbb"¹å©S§Â~!\ÇœúߑӖ†ë1âPPX(8"˜¶)@­ ^i²P㌟éd²­C¨'Ÿô€q@æ–îä…8( )ëõ]'Ò5ˆƒi’îäu—•‚8È6d[ç¤Ú¯d ôÀÐÉZ¾ÏD0«j‹¶ãZ¹\pÝXšƒ€€~‡”¦ñVâôŠºÜbê!&H[c3^ú Ûí>¢$„•ëˆÐCçŠ×^ŒtB>“³. 
ÆÜ‘j\Êú{ð,< ôêÔœŒÞˆƒ r'")Ó}â Ûí¾àIE g´sg[;¢ æ‘Z“Ý}aUí¯½t,S0¸.зÊɵrA€¾Áçᑦ ™6•>â Ûí>"ÍÁ³": цÞmºêwÈw¼µþ1 vŒ¹cžO0žZzžŒúII«À÷â`j0÷ãWiÙ†l÷wY)\7"úÀ ©JÎ6µ‘E;ÛÚKFq­\.¯Úi:cŒƒ£&ýJšƒ'âô¢íÇ®ï:8˜eGÏârÙ†l÷)íÂꯣ³ Sê†ö7yºGº“×£Â2”>ÐgªSs¨¢"@ßÈY€{‰M¤©+ª’³Ù†l÷)¾ðû.0hLçVÕ¶Æf:`¹V.è&ƒOÆý$kù–{Ü ÄÁ¤d$Ú.Cd²Ý×4Üàã™:`è˜ÂˆÇÕ©9t¨f{lÃófÀP(;zï¼zHšƒgMúÄÁ¤¸´Ô¯ô`â Ûí~ c7Î:À IžîaÄ÷T·56ç¬ ÓÆýoÀ°(Ú~Œ&Ä艶Ëðúw“BXUcDd²Ý?TD§I/…KgÀ°  l¬ãê×ç—$M]qyÕNRn40, |žD€¾gíŠ~ŽIQ´ýî²mÈv¿ÑÙÖoã&ùƒ_ùñX 0 nEåÐeGÏrF;߉HBCäòª_ g úêår­\0¤(@“ÙîO®ú"]ÿ‹ñcaQÉ»˜µ|‹1íQ[c3íQòtÜë —L÷MÆzË 0\Zë“z!¦?œ‡n-€lƒ~–í&þ}Þ¤ÅLZ$¨gž…"„°ª–¼Ôhv§>¿„ŽÇ\Ï@£õ 7† è!·*’¦®@L„ζv:Ÿ¢!m0 ßs±À›~©üx,* 08â¬]cG*y¹V.wY)(SÙ@ëP¤š‰8˜Ô§EqÈ6Ð Ù®ˆN»àº‘鎟¡ÒCìÖ7ñïú^Ü Šˆ³v­Ë-FÈ6ºàND^ÿn"´ [¸V.8¥È6èÙ®ä]¼ËJß§ÚÙÖN½ü‡y7£~5•y>ÁT¥ 7ÿtæz&M]ñ¤¢¥ Û舒VïÄÁDÊ:Ó}â Ûài]ÙîëØr~†‹w]NÑÓîaÒ2\|b_|•åÇc%ù3,Ú›Ó¼.¸nÄû½drÍÿ(ÞHg 0#áÕç— ² žöãmäUÉÙQÃߢZ»àÊßöú&{* 08êr‹ ô-šÂªÚDÛeW¼öv¶µ£d‚7Ò™EÛáyÙý/ÛÄ£Â2®ÕÒlª+¬gþÐõ•mÍ)3V\¶nðãmÜJBX(AÙ ÈXà]šƒ87"A=g´3Þ Û@/dû)óæ¤×>Š1wdL•"ÉÓ= ëâ0™vœµ+L@¶è3Rf¬|TX†87—–úåù#² ôE¶Ÿv_Ìx÷s±o£H€!ž\ èé,Æ´+¢ÓPp² @Ÿoã&¬ªEŒ:±Òéc È6Ð/Ù~Ú=rž÷¾˜çæ¢ÒC¤hû±ò㱑UAf×ʦ Œ›óvkðÒ€n1èKžTÔĘ;Þãf ­ ÐT¶©A©Ï/©I¿¢Ý)Ïg?k ÖW«Ý‰v¿L)èäzÅk¯A˜6g´3L=‰¶Ënð ?ˆõqÖ®ˆƒsÁu#^÷ Û@}Ù®N½œç³›7i¡øájŸmøa°@4ÞªÐÿ1ÒêóK¸V.äÛ(/`ôà6r ‡-°!¥ ”¤üxlôˆ9O*j Ù*ËvMúÏik©8£Ì޼ຶpÛβcßTpªÓX&8ÑŽß:r€‚±`5„Â’<}îœÑÎú)þ+ Àëw¿›øì³f#ÝÜΟ?Ê|4`À€žËKÎlnþ׎ŸÐw‡ :r䯜œþw@™OÈBù†6~ü ”ÕÈȯ;:®HæMíïÊ»™œª~äªÄÅÅÁ·MŠâÀ“úsçjκý|†€> &ýJšƒ'âôÒl’mÄÁhh¶$O÷¸PÈ6PY¶»ï†›³n£ŸüóŸßýàçë×cȺkj~d±vÏ™ó{ed›–óÍ×V¯^XT-]~ø0#!á—ÙÊ|*sÍÊD+¼{7‰ÍÞmg7mþüY´-Í¿+oâLqû~òÂÈÈÈÄÄÄœœ>ŸßÐЀªfôÔf¦;yéCNêr‹¹V.¸· @¶ÐoU$M]8 ¹ž ¼ÙêÈvÿ>•\Ù±oÈäðáÃÈ„•÷^ñÌ€/w÷¹òV«øS e[<µ¶æ¾ý¶mKóïÊ›.yý“5dVxx8‹Å:wî\~~~ee%.n=mí¤¸úð¦ÍTûõxï0q·áMZŒ8=¡:5nf4”„°m—‰õ€lud»6»Jî~R„™œ0á·/†©!ÛÓ¦Mºt)\Þjª-Ùî’áKá´-Í¿+oººwÅ0ü›#§NЉ‰ÉÈÈ(--ÅÅmS€ºSýþ²Í»¬=]ýjÀÝ­<++kòäÉJnBÕœh}y†B‹%¥ùìlkgž¥á/_zpý̘ž×O­×[•–/;zÏõüpg´sà >B Û@MÙ®I¿B%WÆR “gÏî=Úüý÷ß ÙDòÙÖö³’²mf6´®î'y«Uü©ÚÏl÷\USSÖðáÃ4ÿ®¼éNô ŠaĮర0æfòüü|@€Úfôž, aõcÚ…-ñ6nÆ4¹¼­VzöööögÏžÕ‡¾¸2{jp²Ý¿N´]¦a‡²m¸¿¶è›l_õ;„ç{€{ÜŒ(3{¼ã@¶Îe›& 
=<|§—ׯ¿n;aÂo³³Oõlkë#ægŸ5Óü»ò&Š^×iþ{H¶OŸ>Íáp²²²*++QÛŒžÚìÂKKýú1×wÐî;?=zäçç7yòäáÇ[XX¸¹¹¥¦¦ö¥Hè´ã>räHåŸïÀ•mÃr­ ®+y•ÌyOÔÞ‘XZZJÖ+wwwqZ(Ž3†–Ѥ”µá={ö 4ˆþåoú ÛÔ,WD§áiÐcG˜s'" ¡mв-9}ÿýW¯¼ò“1âÙúú ’Ÿ>|˜1t轺üâŰ×^ûÍ¿«X¶Y[ƒH¶Åw’óù|Ô6£§³­½Ÿ}RQÃíÜÄ¿¯ÅuΟ?ýúõ¥¥¥---dl6{Μ9F#Û*­²mX²ç¬êm&Úª{nnngΜaÒuuußÙIŸjXÊÚpGGÇ„ BCC'NœHiȶ.ö÷¼Ý¼#Ê i¸Á1wÔŸŽÙ6-Ù®«ûÉÂb$“ž1ãwçΖü4)éø!ç]»>S0šâOµ%Ûmm?ÏŸ?ëë¯ÿ¦ùwË6{Û¾ððð“'OR¯ëÇ$]Am32x÷×Ó\ÙÛ®ùÕî:‡þèÑ#™‰D"ooï±ÝPB|)œÃ‡S÷}èС¯¼òJfffXXØäÉ“ÍÌÌìììnܸ!îâùå—666«W¯njjzúß×Åk;räÈøñ㙯öì"Ë\€òãééùüóÏSövïÞݳg,µ­’’’%K–Ðò#GŽ\´h‘ØŽzöÅeæœhiiùôÓO™-îÙ³§ç_z饢¢"&M“ 94_É=UœIÚÚÚ¾øâ‹qãÆ™››13ì£ÌMË\‰¼­+¸\í2Ò„ÒƒQª>%ÛS¶ef»×øs8WWW&Oë‰ýÏ€…ÎÎÎô©¼²èYÿeª£åƒƒƒ­­­Ÿy晞{ÍãñÞxã JÌœ93!!Ar÷©P,--)“Ÿ}öUï^ç+YQäGê­Ú¸ðzƒ¦‰ŸJP»wÙ†l+’í9s~Ïbí®ª:ßÒ’SRÂ]±bÁÊ•ï1ÑüÉ“Ç%'zø0ƒ&J¼ô’õ‰ÛůÑþã§®]ë^TMߥŠ_î¥øS e›VXQ‘µwöì骾úKÞw!Û 'Å'ËŽžíûí2¯ûjklÖîj]\\–-[vñâÅæfé5SwÖÉÉ©¢›¹sçnݺUÜg}÷ÝwËÊʨ“ðÜsÏ-\¸°¼¼œùW|a|ß¾}ŽŽŽ·oß~øð¡‡‡ÇÆå »»;ŸÏ§¯ïرcöìÙ=»È2ð÷÷§ÌWvC†#³g,9óÕW_MKK£Ý|ôè‘——׺uëäõÅåå|Û¶m{÷î1é¹Eê©>|˜´…åñã®Î÷¡C‡ÈX”ÜSÅd×®]T ´ŒO¯û(sÓ2WÒkÁÉL¨]FjS•œ­êøÏJÖ½^ãßÚÚú /ÔÔÔPÚÏÏvíïÿ;¥©nXYYѧÊÔ7E jÆhy777y2½óÎ;'Nœ DXX¥%wŸ©Ï%¶oßÞë|%+ª‚üèC½Uû—G»°/Ù6\„Uµ‰¶ËRí×S9"² úH¶SS¿]´h®…ÅÈáÇ‘ZÿóŸkšš²ÄŸFGÙÙM£Oiš5ëÕ³g÷I~÷É“ìmÛþjkk3tèçžûÕÛoÛÅÇPæSµHc03:nœÕÂ…s¾ÿþ«ŽŽ+*»&ï»mГÚìÂL÷M}¿Ý¬å[JFi}µ $Õ¯½öš™™Ùĉ}}}źéßëׯ3éÂÂBæò,Óg½ÿ?·²?yò„þ­ªªÿ;|øp&mkk{óæM&]]]=~üxyÂ#óë’]d™ PöÄWÑ)Ÿ½Ê¶Ô^[[[Ëë‹ËË9E@2 =WûÁP"00p̘1ÇŽuÝ‘¸lÙ²¸¸8%÷Tq$™4i’øBŸ2û(sÓ2WÒkÁÉL¨]Fj£ÆÛ¿”¬{ÊÄßÛÛ{ÿþý”xë­·²²²üꫯh¾’õMA¨qLݹsGæ.———SU …Oyžœ$Sü-q}ß¡x¾’UA~ô¡Þª}€ËãQaYòtœ ‘¶ÆæóvkÈ´µþC6mȶ ·‘c‚l1míqÖ®}ü¶m‘ >ÆÜQ§·)vvvRÇzåÊ•ï¾û.3‡ô›é£3ÝtúWž±Èü—zÆ’¿p 8P¥¯Ë{ÒRü/åG|g;e¯WÙÎÍÍ7ož……“ŸAƒÉÛ¼œK¤çÇG‰iÓ¦ñx¼7ß|“Ò4G| ½’{*/’HfFù}ìÞ+éµà´[Fš êÛ¿4¬º’äççOŸ>½¥¥eêÔ©ôïË/¿L{:eÊš¯RYÈ,5Ž):~e¯¯ÔoÍ›6mKÞ®øÀ×$?úPoÕ>ÀåQÉ»Ø/¿ÀÍÏæiž0mÙmÈ6Ð/2x×föåoE\^µ³6T__ÿÜsÏ1iW¶•™£J=Æ©¶l«ze›òOGk]]]{{;ýU°!y9—¼ðUTT$s‹l6ûè:;Ò_.—;wî\U÷T^$™Ú²e‹ø™íyóæùûû«d,!!!ôõâââ–––k×®1÷WÔ×§™šË6eOü<ðüùó{•m+++‡#‰ÊËË—,Y¢`CòrþÅ_8;;‹Ÿe•¹ÅÀÀÀßþö·¡¡¡”>pà€µµõîÝ»UÝSy$ €ÄžöEò±Õ^÷Qê_™+‘·u5d[™2Òåßþ¥•ª+ÅþýûGuúôé§Ýãá1‚¹±\qYHÕ™E jÆä6,,L<›*fô>ú–¸>SBrPyó5ÌžÔ[Mp™\ñÚÛ/yMÈó †iÈ6€lC¶>BÞ›æàÙg›£c6iê ­œªî¢E‹,,,ÌÌÌ&L˜ðÙgŸÕÕÕ1 … 
60£‘SB|ƒ¥’ÆÒÑÑAÎikk;lذiÓ¦q¹\fþ¾}ûÌÍÍ{íO÷ºåç/ù åÜÒÒrçÎC† Q,W S¦L¡ÅÆGS°!y9gÆÖf¶(o°âüüüÁƒ3¯Y®©©¡tAAª{*/’´¶¶nÞ¼™džò¬ä>Jý+s%ò¶®†l+SFšP´ýØU¿CZ—meâÿ´û…Û´G”æóù”–|½¶¼²ªÿ2‹@ÕŒÉÓÂ3f$&&JÍŒ‹‹›9sæS‰QÇiÓŸ|ò‰äëäÍ×0?zRo59Àe’±À›Ziœ j:`Ú² Ûm §t¶µÇ˜;öÙS^:͘¸zõê„ S+#:·[ƒØªÙ9#“ú>í~„kåm3 ®ù…iÈ6€lC¶^“±À»*9»6ÔC£.>>>UUUwîÜqttüüóÏS+#æ•Kxad»iklÆ{¿ j+²–oIžîÓmÙ†l½¦8ðdžOpl¨Ï†F3DBBB¬­­---ׯ_ÿäÉÄËè¼ÝÜÁ ÙîGêr‹Sf¬Dô‘ >Õ~=Æm sÙ®ŒÏ¤’»DƈI½‰¢×%Û~mSî`%Ú.ëƒ énh4Œ€«~‡úx´B$¹ËJÁï¡úOà >L/-õëã×vÙ6EÙ.ÙßuU“æ{Õ? Û¦Lœµkã­ ]+=oÒbM›#½NÕïû¥·ÕÏŒé(WUÉÙZ­‹!’••5yòdÅû(oÐ5͹æ´8ð$NˆúŒ ³€3Ú9×3¦ Û /d»’w‘J®dÿÞšt&õ&Š®lƒ+^{KBX:Ý„VnVïÇ~¶N7­••k²ȶz¹ÒnöÚ›£GÌÑÖcÛ8X {{û³gÏöKõ#.-õ»ÇÍÀ Qo¹‘Ä<ëšÿQ„@¶AÉ6žÙÆ3Û@+T§æ¤;yét´~•^#,³7©yçrÏž=ƒ ¢¿ýÒ§ð cÇŽýøãkkkûË´ØXZZŠ_§D¸»»‹ÓB¡p̘1’oR#ÂZŒò _|O“Ƕq°#GŽ”< úX¶“¦®xTX†¢~r}× 2m¼Î@¶d² ζv®•‹î†Zi¶D˜£Æúµë&L 8q"¥ûŘĽ{÷Þÿý?üРe›pss£FƒI×ÕÕ 8P 0ÿFFFÒ§FX»‘Q©ô‹lkòØ6cèå¨rPèâÆ œ õó}yÕÎsÇêÔD@¶d² ’lmwY):Z9uRí׫Ñï#žsäÈ‘ñãÇ›™™ÙÙÙŠÅàË/¿´±±±°°X½zuSS“Ìòx¼7Þxƒ3gÎLHHø¿^f[Û_|1nÜ8ssó   y›f"‘ÈÛÛ{l7”_†’—7y=éšššQ£FIÍ/))Y²dÉóÏ??räÈE‹‰ÅU¥•÷º°Ëáp\]]™t||<­'66–ù×ÙÙ™>•·S=óгÔÈ-lmmýÌ3Ï(_è[»wï¶´´¤L~öÙg---½Î—J¨—Ÿž¨ýØ6•–ÔÔÔ×_ ÅŽ;¦ø× ™5SæLy±•¹¹ž3¥")/?R‰·Þzë‡~/v÷îÝ^xáÑ£GªV¡ºÜb¼é]i­LmBÒÔºWÈ6€lC¶¹ÇÍÐÝ8´j_¯ëÙÝtwwçóùԋݱcÇìÙ³™ùûöístt¼}ûöÇ=<<6nÜ(smï¼óΉ'(Fiñü]»v988”••Ñ×}||÷¼©‡íääTÑÍܹs·nݪ8o*ù믾š––ÖÜÜL}e//¯uëÖ©±rUs¢I`[[[©gOûBi???ì¿ÿýïO»¯FZYYѧŠwJrUòJA¥ŒÑònnn•••*Uú•é½n(±}ûö^çK%ÔËOO˜‹õ8Xtz°P¥ŽŽ&ù'5]»v­â½¹Ë2gÊ‹­ÌÍõš%e;99ùå—_ß}@« T£þ”Œºâµ§B½¢‰?ÑvÙ×xÅ€lÈ6d6t.³vÕÑÊSf¬Tï¥_=»›UUULúÉ“'ÇgÒ¶¶¶7oÞdÒÕÕÕãÇ﹪òòò1cÆ…§¿½¥¥eêÔ©ôïË/¿,‰¦L™BóUÚ)•JA^Æ(ÝÙÙ)³.)¨”–W¦ŠËZ“üÈ£:5'eÆJ,:=X®\¹âîî>jԨɓ''&&*¿Š««¼ØÊÜ\¯yP^¶¹\.qííí+V¬ Wû'Ñúüœõ~8L[wÏvÙmÈ6èkÊÇêâGtê.Æ«÷]©']åõ>©£Éç󬇹:'¹ÌíÛ·Å×Ûó"•¼M+¸X§ªâöœOk£Ã°®®ŽúÍôW=Vfam–dûÛo¿en…]µjUPPÐk¯½&þTÞNIåAf)¨š1yV\$¯`SBæ•m©ùæG1qÖ®jhåWÎÐÙÙÉãñ¬¬¬Äþ,¾&\UU%þ¢Ì]–9Sql¥6's¦dnåå§g‚V2mÚ´M›6ÙØØˆGP‰Î¶v-¾vhBkýãKKým—á·ÙmÈ60*D‚z:Ák}µ9ëÔ~[ uñ‹‹‹{õ‡'''Z’:š×®]ûàƒ¤Ö&ÇKŒ‹‹ U{J888”——K>~)oÓ[¶l?†:oÞ<-úõ¹9ŽH$¢Ì,Y²Dw²­­À2ì߿ԨQ§OŸ¦4…tĈÌåŠwJ*2KAՌɋ°â @ßrvvfžÍ¦„ä£Åòæk˜Åm?¦ÆKéq°(¿òåË—_¿~"@¢û›ßü†™9{öì;v455ݾ}{áÂ…â/ÊÜe™3åÅVæædΔ̭¼üÈÜSÉ 8~ü¸z--y]òtœûêÔœ8k×Ë«vâ!mÙmÈ60BRí×ký×têé½Î}ûö™››÷Úîèè 
µµµ6lØ´iÓ¸\®Ôzf̘!¾QSL\\ÜÌ™3Ÿvòµyófkkk ‹àà`Å› …6l`X¦„øVR­øCBB”)S† 2nÜ8Ú#Ýɶ¶ËðàÁÊ3 ¥ù|>¥%_¯-o§¤ò ³T͘¼+®âQÇiÓŸ|ò‰ä Ùòæk˜Å4ñïsF;«z™‹ò+ÿá‡è»fff¯¿þzZZ3³°°ÐÎÎŽüðáÊk¦Ì™òb+ss2gJæV^~dîiTTÔäÉ“ÛÚÚÔkiKBX¹ž8ö#t¼çùG˜ƒ[Çd@¶jºË ï’í¯C!Û€ériýNò(3{ÜôúÔÒ›×õ=éN^Ñi( $ï½÷^dd¤Ú_ÏZ¾Ž×Ôç—$M]‘<Ýï÷m _²-È, ’«: gV{* î’íoO@¶AgzÞ¤ÅZ\aà ¾vW€)Èöˆ¤ ®Q4 W:::¾ýöÛiÓ¦‰ßþ¥\+aU-‚Ù÷t¶µß Š`ž•ç¬Þ0¢@¶e»‰ŸJîvØ!8³ÚSöç[XƒìØl6d0h÷Çõ{Ü 8€l«J»°…3ÚùIE JôZ{mllrssÕ^Cà ¾.Fë½BØTûõt¤Wò."² ôQ¶©;e6»Àw œYí)þ÷ï³&¾Ùb®ù½¾ë„¶ÖVx²À÷¢ €ªäzÒáƒ8]Svôlκġá‡ó¢G̹´ÔO$¨G4dè©l >M´]gVo>¸ÀdÇúàsÈ6£Ýai³–o¡.¢ €G"×Êã]“í± ­t_BvMŽÍíŒqd€l—dSá=Ì‹…9«1lû²ëíßB¶$qÖ®Úº“œ¼½.·!@ ²–o)Ú~ qº£³­=ÆÜl÷UÉÙt†ÅmÙ#Û­õcÌçf{l€9«:5ßÿéÌsbÛy°»l1y>ÁÚº“<ÊÌï @=˜w€A„€î¨NÍIµ_8ôt*¼âµ´d˜lEÛ¿c¶«ÍáÀŸUšÒýµkh´îËÚm ‰ ³@+w’7ÜàÇÛ¸!ž¨ÍU¿Cxžèª]%!,ÄA×܉HгvÍö؆ Ú² O¶[ëÇÛ,Œ·qiyx ­Ú äÞĘ6‹Å ;uêTLLLFFŸÏGm3qx“×ç—h¸ E€†Ð Žú蚌ȃÞ뚺ÜâTûõ‰¶Ë™ˆ€lƒ”m¦-c¶;?{Eëãlˆt¯ÓÕ/ºLÛa û"##I¶OŸ>Íáp²²²*++QÛLœ¢íÇ®úÒp%¥£r=L4¡ìèÙt'/Ähšô+çíÖ :BXU›³. zÄœâÀ“x‡6mÖm¢8,–5ÈîÜ>hú÷ÐiySËË™n êÎzk;òñemÒl’mRîÄÄÄüü|@€Úfâ4ñïÇY»jnìÞ ¡nz¢í2¼‰hL÷MeGÏ"º8fI°cÌ/-õÃ@¶D¶r¿;ÃnæYû€¯Úšs Ö’äúþ}QæsÅïú›vDD„ä=䥥¥LÔ6j¿^ÃÛÞrÖ”E$Ъälòm\Z¤áï–ÓÑÑÊ›´˜&J @¶G¶E"QeeeNBjÌ›+»î‘>û¼»gñ¡{ §«ÓXú6Uì› Ý内§¸¬c·ïº =í}öîo¤L;¼JŸ;w.??ŸÂHÁDmäÉŽÌti©†]@+¤;yá"$Ð"—WíÔÖ['€ø÷‹ŒÞ1æŽXüŠdÛØdûi÷Åm>ŸŸ““üÝNë~ñ§.½Ä4àìQެw<Ù»B؈M[|9…ŽˆËÚ€™I“CšƒgMúDÍ©Ï/áZ¹à–T --¤êDŸŸŸŸŸ‘‘‘˜˜ÈápH,Ïèì÷>e/ñî—MS((  …ˆÓòÈó VïnCêÒiþò0€u¹Å$KMüûPRD¼‘QjÒ¯¤;yq­\JBX ȶ‰Ê6ãÛ •••¥¥¥ä“YYY$–?êçŽ5æíSÎ÷ý¦) ‡BD‚iyÔç—¤ÌX©Þ“¦®@Ð:¥£’§{ £Ô«9¸\mÍNsð„fÙ†lK+·@ ¥äóù¥zFüôs¿;Ó÷Û¥PP@(,Ðl  äÌdÎêõK=tAÖò-¹žˆPæž Z©¶fã^@¶!Û?œwÁu#Êè97ƒ"ò|‚!Ûèm͉¶ËJBXP<í¯±”+I³‹¶ÛҀlC¶ ¯·D-8^Éôœ'5\+U·l SèÜÁ›´¾ ”ãØ«¡ÙÉÓ=èÜͲ Ù6`r=‹O¢hž“îäUÙ@¯h¸Á³vUõئFïjñ¨¶Jšoã†g³€lC¶ žúüê*áôœ;Iª>òÙ¾ ú2ÆDÛe¸6Û+Ôã‡óx““iS3 Ûm#„ý$ ç´ [¸V.O*j”ÿ Õê¬å[:úÀ·cÌ+y г޳vU©é6AZë—Œ"ÍNµ_ÇÚ€lC¶ jÙÏÛ­Aé=§À÷€JüñÃy—WíDÜè™\+ú‹PÉZÁíü¨° ¡'×30ÆÜñÒR?>@¶qÊvg[;oÒâÚìB0Ðgšø÷©7¯üumúÞ·«’³ ÀÔrÈšô+…Ì~WEtZªýz Qï:»!&@¶ÑÊöÓîGªpÃ-Ð.¸nTþ‘È6}ïWqÖ®eGÏ"¨ ¤‘xB­'ªڢíǸV.‰¶ËJFaü3 ÛÀ$d»µþ1g´3žªzN%ïbÆoÈ6zKã­ ²ˆßÞÉ”M›d×´¥ €\ZêÇ<ë‚ëFÜd˜–ly>Áx&Ðâmܨ7Ù@oi­œæàI^ÑÖØŒh˜¦iãñc1t”=›4ug´sï%Ï_È6dÛØhâß§3îhzÎÍ 
ê¯(³dEtu÷Åÿâ:}n—WíÚ›³–oIµ_/ï&#£‡N1×üÆY»ò&-¾¾ëÆš² Ûr!Ók úÓ™“šC}wæ±ÀâÀ“Ôz2 „sÇ.(<%îâSׇ>JøŸ÷Óæ~Š0Ð_t¶µø ÁÕFFÿ>õ.¯Úi‚¿f’T—„°h÷£GÌÉY€'Õ€lȶRàâ6Ð7„Uµ×w¼+Îc?‹¿õÌÄÅåùScšôÊroêÆÛ¸e´æÜeGè_ªSsÈ·¯úÂh…–¼´©@¹V.¦6ÔK[cóˆ¤t'¯(3{úKiÜ6d@¶U·~vøŠ¶ãŽ]ð/­¤ÖÔ¹¡NõrxQë)üŒºA±V.ÔŠ1wÌþhëÙçß>3t6{ð,t†ÐD‚ú ®Sf¬Ä …ë»NÈ|ûfP„Iݪ@ç ªäìlmÑ#æ¤Ú¯/ aávq Û²­&¸¸ ô’jògj.ã'¸§;}–ð?K“^YNÿòÃyâe¨êž}Þ‰fžûýǬ¤Äs>AèÐHT8£Éâð+˜žÓx«‚JJj¦HPŸé¾)eÆJ±ÍºÜâ<Ÿ`®• u¨Òâw" Û²­)¸¸ ôaUíùYk©Åd=Ó%ÒÌ$5 yÆ;>Qfo12»"&q@¯ O#aK´]†wòé3iŸ$Ú¾/9GYgíZà{À蟠=½êwˆ7i1M×üâ v@¶d[›àâ6Ð[˜Á–Î ž%–íœuRýøègÿôÙf«gè'•¼‹]+xlVÕ"úƈ$jB/-ý§¸á-Ú~ŒL»*9ÛXw™N´wtBáZ¹0Ž]Ÿ_‚šd@¶µ™¶øÅèeý)jØlƨ3xK}zc÷é3C»>ýÉùoˆú¬7Wý1ïâÆÀiúCký㳎g†Í¾Aÿ6ÜàŸ·[“îäe”?‹0cže-ß=bs¯8®cÙm“h» ¯iúLÓ*®å‚®áǧ®úˆzíq/¾Çd‡4ÐÈåRíדçण'd{l‹þVì ïTò.–Œ2ÊQÇŸTÔ”=›îäÅ<‹ªžÇ² Û} ?œ‡‹Û@zEõù%Ô—šªÎ]N|yY̯çõüèÖá˜3fö´@ÏÔ˜hë¸ÓRÆ›´˜Î;}y﮼¶Åø&å1AfAôð®a/¢GÌ9o·†&™’î/;äÕ´SQfö\7–Ÿâ@¶d»èlk§~®3€þª~•¼Ÿr=·ÇÛ¼+~6[¦¤©Ë®ù©Í.D ‹Ÿä‡kåri©Ÿî.3V§^ÎóÙÍ›´P¯Ú–>›møÉš®ë¶ ßºŠæZ.¸¼r{EtšA_õ¥xFµ+g]À=n†ó² ÛýÌ]V †%}ß+â‡s™~pôˆ·.-õ,ܶ³ìØ7œ°ê4VM´õ[G\óßžî´’=ØŽò–éþ¿;]@Tx’3Ú™¤¨×‹±"A½òk®Iÿ9Í¡ëuQfo^p]«mK7b´Ë VÓîS’§¯èù ÍÕÍ‡Ï yS¦¥Gœ—龩üx¬J1ïGoU”=KyŽ1'eÆÊkþGñ;)mÙÖ/Rí×K¾ÄÒÄ¿Ÿ2ã#jSíWP×°£õçÎÎ<}›ZgßÜcîÀ<‹zr(5tAkýc²£sÇ<Ÿ`7úVò.6Üà+³Âkþ‡©m‰³ž;ìP[s޶-}6ÑîS’§/é`òoby&;e¶c²;kîÈ›´$mî§?{Þ:U•œý¨°ŒJÄ ªMEtZ®g`¼g´s¶Ç6êÃà! 
d²­§Ôfr­\p·è™ù1æsIbI³õ¿·Úòðb¦û_º^ó×€††‘H„@ë&ø kº¼j§¼1¢3Ý7)öíζöKK}éhÍõô5qÍ–šn9e6›û—;YyÔŽ5V ¨{–¯ ³ hû1æIlúKi\Ä@¶!Û†AÖò-tÞBñ]ÿ¬=âO‰¶ oŸ7 ®jž5ßY;VVVB¹Ðd€Å'¹V.äÕ¤URŸÖåÇY»*ðí«~¡tœÞÜ»î9=Ì‹åŒuŠíTx>Ó°Ú1*qñ]â¼I‹s=ï²R ñÇd˜´l7ñïsF;ã., ;D‚z®Õ|2íæû?\Wõ‚Û_XƒìþÅŠãóù¾ €Žh¶\1#–K=lL¢óëy2}»’—I}¬<?xµ¼éQqâYK§è ó.çêy;F]2êœuñ6n\+—¬å[øáý´Ø“5dV+ZÚ1êoQ_Zêǵr 6:!È6dÛh);z6ÝÉ •hß®zZ^TÐ ÐÉOƒƒ}‡ J¥¾"•P¼BÉå†6~ü nn‘‘_wt\Q<>9{°Ý‹6ÄÅÅÁ·èKD‚úâÀ“ñ6nçíÖœõv·oÏyTôŸËžÙÿäZ9)–ÌææíØñÉï~7‘š‘‘#åäôǸ¸Ê´03g¾ÒÞþ³ÌfGñj·?2Û+]7bÌÍä½ÈUÿè—v¬]ØR“~…Š2cwôˆ9Ì-âÔñPr¨y€lC¶ ›Î¶öDÛe•¼‹¨@»ÄY¿›éþŽ@™}Mñ4}ú”ãÇ·½öÚÿ¨$Û½nK$º|÷n›½ÛÎnÚüù³„Â)ÈCšãʬ]"##srrø|~CC €>;CÝ‹½û[W6óFèáo‘oÓÌó¹9ë6*8ré¸~óÍ×V¯^XTM‡üÇ ߸¸ÌV¦…ùè£wŽÙ"³õP¼ZeÚõfjÒˆÑÄ™âöýä…}ÖŽµÖ?¦NÅU¿CižQföÉÓ=r=ïD$áld²mŠT%g“oS÷Uh aU-5·ŽP[¶óò~˜={:%^}u2¥µ(Ûÿwáº5÷í·í¼äðšÿvÚ‘ððp‹uîܹüüüÊÊJ\Ü NOçþû›wcFÎeuË6ãÛUç²)QvìG.×îîs½q@~ sÿþù ~ûàAZÏÖCñjûL¶UjÄhºäõOÖY Ú1 ßÚÖØ,È,`îçMZ=bNº“×5ÿ£Ô»À(âÈ6€Ðy±ô`”~•Ê”‹S—[LÍß½ø“j˶·÷‡ßÿ%ø;¥u!Û]}ÐKáÓ¦MRüºÚ.Ù9têÔ©˜˜˜ŒŒŒÒÒR\Ü@×4\/?7ÝCìØäÛÑ#è/{ð›är4ç~R„‚#—Žk:º, ¸… Ù´v­{ÏÖCñjû^¶•iÄhººwOW;öÍ‘žíØ“Š²b²e•J§³­½>¿¤üxlκ€äéìÁ³è/¥iF8@¶d»G·æŸ3Ú™Nº†+Û½.{ïKjÒ¯PóWÆêU¶e>šØÒ’3uê$‘è2¥kkÓ_|q,ÍÑü™ížË75e >LA+8a´#»‚Ø›0óóóŠA"Wx2êÙ?ÑÄ›´$ÝÉëòªEÛ‘ÈU%g“ËUÆgöÚ¼˜™ ­«“ûÆÁ^[˜¶¶Ÿ_Ý6+ë¤Të¡xµ}ð̶Mw¢OôlÇTUçÿïîØJêqã­Š»¬”<ŸàTûõQföqÖ®™î›nEPk¯ª«d²mr\ßuâ‚ëÆ¾ÙÖž={ DõÇÕÛ^sÙ–9ÿìÙ}›7¯ÿ»bÅš£‹+ÛÔO}öY39¤]è[ÈuROŸ>Íáp²²²*++QÄèˆ&þ}Ò?·+Ó¼(¶beZ˜‹ÃÞxÖ¬[%Ùîû+Û½6b2Û±ôÃgGw ;ÇçÉûɃJáNDRïŒÞ1掌]SW¡’wQ$¨GE@¶d[èÌš<Ýã.+E×êèè˜0aBhhèĉ) Ù†l÷œÜܤ.ìÐ]È6õ§¥†G’ÙIem ¢NªøL>Ÿ"@Ÿ›Å÷{+ÓÂдzõÂÐÐèùmä½6bÒíØwÿï̬?³ÚòæåU;ÅQm­L- aÑLê D™Ùó&-¾´Ôv €lȶv¨Í.äZ¹èúœÊãñÞxã JÌœ93!!A<¿¥¥åÓO?}þùçÇŽ»gϱÇRâðáÃäçC‡}å•W233©»0yòd333;;»7nHy/%Ž92~üxfÂÂB©RSS_ýuú”–9vìó‘ñÂÁÁÁÖÖÖÏ<ó ý[RR²dÉÊÛÈ‘#-ZÄÜEÜó["‘ÈÛÛ{l7”?#µ6)]—ü¨££ãË/¿´±±±°°X½zuSS“JA虺ººÑ£G?|øP¼EšciiIsäm«g|úX¶kj~üõ¯Gñg ›Mé3#æpƽw'"ùšÿÑL÷Mq̰֮ádÚäÛ‚Ì l€lȶö)ð= ùS·.xçwNœ8A ÒEJ‹çoÛ¶ÍÉÉéÞ½{sçΕôÞwß}·¬¬Œl0 à¹çž[¸payy9óïœ9szʶ»»;ŸÏ§vìØ1{öl©^xá…èèhÑ»wï®]»VêSñ¿nnnâ»…_}õÕ´´´æææGyyy­[·Næ·¾øâ Ú…Šnh¶nÝ*sm 6´oß>GGÇÛ·o“ {xxlܸQ¥ ÈÌ€§§g`` 
x‹_ýµ‚mÉŒO_Êvp°ïG½#5“æ0¯ÃÕ\¶[Zr**’£¢öΞ=½×·æ@¶0DÙnnþ×ÿ8uíZ÷¢¢h:ä>ÌHL<ȼ£KɆ™ÞüñǮ♠VÛ—²­R#öW¶_|‡5hÖ/cÎý1z”Ó×WýUD§=*,ÃImÙÖ9mÍñ6nUÉÙ:Z?ùá˜1c„B!¥é/¥Iö˜^zé¥ëׯ3éÂÂBIy¾ÿ?/ç|òä ý[UU%þwøðá=e[ñ/¾øbhhè¿ÿýo)é•ú÷Î;2w¡¡¡ÁÚÚZæ·&Nœ(¹ ´G½®Mê#[[Û›7o2éêêêñãÇ«™¸qãÆ¸qãÚÚںʷ­ÍÆÆ†,ZÁ¶dƧ/eûµ×þ'%åˆÔLšÃÜ*))Û=ÇR<¶ƒ™ÙÐqã¬.œóý÷_ut\QòŠd’mšž<ÉÞ¶í¯¶¶6C‡yî¹_½ý¶]|üå[fjoÿyæÌW$gÊ[­’íâöJM7ƒƒÄãº3ÓËù1£ßn¸ÁGu@¶d»O©NÍ!ßÖÑࢾ¾¾R}ˆM›61™™™1Îx¸¤<+¶by yó¯\¹âîî>jԨɓ''&&ÊûVgg§øßÜÜÜyóæYXX0y4hÌoIíý+sm 6DÚ,œªypqqa±X”øá‡>üðCÅÛ’õ†õ‚l` ²IúʶÓúÈß.` }“õkÖÀ?²‡Ì:kîß@¶d»¯¹¼jgžO°ÖWË\Ê–\êöíÛâ Ý’W¶‹ŠŠt'Û ¤¸<ÏÊÊŠùWêij©…)o$ZuuuíííôWü©Ô·\Ù–[ùþû£)S¦È|KÉ ÈËÀ¹sçfÍšE ;;»¼¼<ÅÛ’È6dȶ1´cû±WùEÿniÔˆ91#¢GÌd]C¥@¶d»ïh­̵r©Í.ÔîjÃÂÂ\]]¥fº¸¸Ðéÿi÷óÆÎÎÎ÷ºqrrÒl/_¾œŒ´¥¥…dò7¿ù 3“œ¿¸¸XÞVÈ99ŽH$*//_²d‰øS©omÙ²EüÈô¼yóüýýU•íZ­“²wíÚµ>ø@¥ ÈËñÊ+¯|óÍ7ôi¯Û’È6dȶ1µcEþUÂJµ_O§{\ß@¶d»O©ˆNKšºB»#¦Ì˜1£çmÉqqq3gÎ|Ú=’¶§§§………¥¥¥ÔhäÚ•í~øaÊ”)fff¯¿þzZZ3sß¾}æææòV’@_2dȸqãBCCÅŸJ}K(nذ œâ;º•—펎Z¿­­í°aæM›ÆårU ‚¼ ÇŽ8pà¹sçzÝ–Ìø@¶!Û@¶²{RQs—•‚ÒmÙîS˜qJÙ†l€æí@¶d[kˆõqÖ®º™ 7ŒN*²v ÛmSDYÀµryRQƒPÈ6:© yA;mÙÖÅ'Sí×ãi. •ñ™ÔüÝ ¢NžAOÌûiÙ~褀æí@¶d[Ëd,ðÆÃÛ:©yŒ¶î•ìï~­«±LìUÿ@'4/hÇ@hklŽ1q€l<¦üð¶N}؈e»’w‘úv%û÷Ö¤³ z¢]À!м }£‰?ÞÆ q€l†þðöž={ DûE€üÂØ±c?þøãÚÚZ£—m<³ @ó‚v tGà ~¢í2IJm$îÃÛ&L 8q"¥ûE¶™Ä½{÷Þÿý?ü²N*²v 49¤9x"mãÁ@Þæñxo¼ñ%fΜ™ žßÖÖöÅ_Œ7ÎÜÜ<((è©Ä%h±‹"‘ÈÛÛ{l7” Å 9rdüøñfffvvv………fÔ¨QRóKJJ–,YòüóÏ9rÑ¢E@ù•£7ŒN*mLhǦFujNº“âÙ6ZëÇÛ¸ÜÃÛï¼óΉ'(Fiñü]»v988”••=|øÐÇǧ§KþKZîääTÑÍܹs·nÝ*^ÀÝÝÏç755íØ±cöìÙjÈö«¯¾š––ÖÜÜüèÑ#//¯uëÖ)¿rô†ÑI² íÀÔà‡ó.¯Ú‰8@¶ŠºÜbÎhç†|CÉpyyù˜1c„B!¥é/¥oß¾Í|4iÒ¤ž×ŠåÉöĉ¯_¿Î¤é[/½ô’xªª*&ýäÉ“áÇ+íÊÊÊåÝÈÜCCCƒµµµò+GoT ۘЎLÒƒQW¼ö"m#ü‰7i±°ªÖ rëëë;à¿Ù´ió‘™™#áÊȶä” //5“ÁÒÒò£>zðàÔ¹¹¹óæÍ³°°`4hò+GoT ۘЎL\Ï@òmIJm„žLžîÑÖØ¬çùd.eóù|ñœÛ·o‹/tOž<¹ç•ígžyF¦ß*¸²­ŒlË®y¿Ì§µQ¨®®®½½þö|b²N*²v ĤÌXY›]ˆ8@¶“<Ÿà4O=œ<,,ÌÕÕUj¦‹‹ u8(àààP^^.ùÌ6©xqqqO¿Ý²e‹ø™íyóæùûûkQ¶­¬¬8ŽH$¢Ì,Y²²N*²v =bNkýcIJmœf_ZêG“>grÆŒ‰‰‰R3ãââfΜI‰ÖÖÖÍ›7[[[[XX3ŸîÛ·ÏÜܼ§î … 60£‘SB|K¹Vd;!!aÊ”)C† 7n\hh(dTdíÈ£ñVE¼âÙ6rßNµ_ŸçŒPcí ß‹?ÙÕIý:Tмèt—Žv `dÜãf\p݈8@¶œ¶Ææ¤©+nE @’ÚìBêÛÝOŠ0ôNjÙ±ohGX‡£“ €ž È, £²ê|$,ZÉ©($¸K¶¿=v `4ø¸¾ëâÙ6~„UµqÖ®w"’ æIE õíÈT ½“Z¸mgW'•ÍF'=¡‰ŸŽÊÛa‡`ÑJNÙŸoa ²C;0&x“?*,C Û&Aà 
>×Ê¥*9¡b8£ßÎöØ`èÔ‹K<Ù–N褠?´ [¢ÌfønE+9Åÿþ}ÖÄ÷ÐŽŒ<° Ù69™äÛô¡ 9ë8£;Z6Ü*e>êWö¬·×£“ €^‘±àÓDÛ…°he&რ¬Av¬>G;0nE\ñÚ‹8@¶M‹{Ü Îhçšô+ ªSsXþÀ?}Äp;©œ°®¶ÿñ5:©è¥Ùtl>Ì‹…K÷:lû²ëY˜ߢ iž¸£²mŠ2 bÌ+¢Ó @¤Ìø8ÞÆ¥­9Ç@;©çf-gY9³#@'½¢µþqŒù\#xPE×SóýŸÎ<÷'¶»´c#@XUËíÜ.lA( Û¦óüvI ¡Ìz®úm5ÜËÚìO·¢“ €R´ý;ö`»ÚŒZÁ”¾è¯]C£u_ÖF;0®ï:{È!Û&°ª6ÑvYï„Pkhˆ7“7Þ>æ¹9]C u_Öf±Xaaa§NЉ‰ÉÈÈàóù(Yú—ÖúÇñ6 ãm\Z^„T+ºüÏ›ÓF;0:ÛÚ¹V. 7ЂA¶M¾”j¿>Ûcˆ†‰·‰Éo­g¶3 ßn¾ÿSìK®¬_ý‰}ðÓIŒŒ¤NêéÓ§9NVVVee%J€~§.·˜Ú–ó³W´>ΆZKMW¿ è2m‡5ì_@;0î²R2x#mð´­±ùÒR¿t'/J ¦LõЏ?®dî'×ÿç·^çLx—e6›½#X|9ˆº§ÔI¥®jbbb~~¾@ @± ‡Å²ÙûÃMÿþ‚ÍL-/f~¸¡kpÇ·V1÷æ  çíÖTò."mÐEg[{®g`ÊŒ•ªZDÃdihh(½q3~ñçÔù㾸€úˆ~¾Œz¨?ûg×;rÌç²w…ˆ{¨’÷^–––Ò¡XГæ%÷»3ìáögžµ/ øÊp‡cÔÊD»}ÿ¾(ó¹âw}¡u¹Åx½6dHS´ýX¢í2<\a²ˆD¢ÊÊÊüüü„½ß²&¾GÊeî±ìÓ‡ÜOЍNcõït;&¬ ((aÞJöÐ7»®Í[Ë>.ÙC ÿÿíÝ \ç½ÿq“[M0Ò„öÄsÂIMkSm0!‘¨¡bh$Æx+ž[þ9ä/¼Â‰V…#Q¤`4!G*`¶º+ஸr1ˆ²'KApµ¸"] â 4ÿN²¥ˆw.»Ëçýš¯ÙÝa™y˜}æùî3óÌŸ×'$$È|ff¦l…l‹lÿVÀvªãöì”ÚNŸÑ xq‡_`ÙÚèãÛ?¿öÃ^»cc¯W8Ý1Ó%”FGeùhx¶UbϾ¥ùèãI;á*ê1vm‡Ç;¦=å@ØFGÕq[u®>ܯÏjll4™LF£1--m㪗g«mkṲ́2^ó«w5‘k;¶P?þt“Ëø!á²æ²þ²t¶Y½l‹úŸM6 |٦ꖞ¬ÄÔ“­gåtHÚÖÈ©ÇØ©ªØ-#ý аÎYòŠSÝ|‹Äð!郚››-Keee~~~jjª´ùÚ:Y¢Ö¨–¬Ü¸h…zq¤fɪ^›–¯Ö|¶¾CóTÖP¹¾QVsÓ’HÍïnŸr¨ä lÝA€­W/QkE¨>ˆø¦zù¯4#§«~Uó÷fUÓƒ•صõ˜ÌKáHIAQ°;Mµõ)ÎãOf) Â6®ß$²4äù…dš}Ît‚ÒèË â´´4inذ!þ[ ¶ÄºV²†²ž²¶{³vfN Ò˜ñ÷Â2þ•€U/‰®ÜôÈ«;'!v]B_rm=FÒ`¿ þK„œ°[R­Ö¹úSgQ}¶Al4333SRR”Ži &ÚYY+Y7YCYOY[¥…Z›"»nUìþ•€]T/ñ_ 4öów%ö=׫ÇHÚìÎÉl£æ¾1§Kª( Â6nICQ…~ø”‚Àî Ö7Ä&“©¨¨(777--M«ÕJ[p³-‘õ‘µ’u“5”õ”µµ¶PÏ®ÉéŸçÒlià¿ ØlõòÅöŒä1oo>9ùÓõ›û¤Ôc`GZ›.JjØEQ¶q$f9gYúˆ™|MÕÄf³¹²²Rš€ùùùÒÜiKd}d­dÝd e=emÛ·P¯´´/ˆIuóåÂ!À6«—rmö׉™¿þ¯ü¼=¶V½ØH=öâÀ¢Ø4÷i¹) Â6n›)A¯uñæ¼Ü>Û&¶X,Ò 4™L•¶DÖGÖJÖíÍSIÚ’·÷‡®eÀ?ÀvÈç±,"QûÈÄê-;m³z±©z l\c¹)ÉÉóT#æ¶q§”ór÷N ½Ôp†Ò€=}_`iØí;?kÔlÙ‡)  ×5ÕÖïš”ãx¾¦ŽÒí9·té:ʰ»ÒÚtq_pÔ¶¡“ë %”ìKåš$­‹·)AOQ½¨6Ãêæ+m2N6ÇP·5kÔljuÂ6ºÆq]®ÎÕ§,"‘¢€}9]R•1Ò?ÆBÎÎzž´Ãö‡®•¤]·«ÒÇ`É+–Š“ ÛèJçkê²=çæNœ×T[OiÀŽ´6], Z¹mèd96P@9g:±Ãã9jpƒpå&«#ѶÑõ®´´–.]ÇPϰGfý96XË)O@8®ËÕºxWD«) pMµõÒšª\“DQ¶Ñ]êv2Ô3ìô‘;qÞwΙNP@7imºX±mèd†¨Gr©áLšû4c@8EAØF÷j¶4Z`§*¢ÕZïcê,Šèrå&e”„–³( pR«g{ÎÍñ ¤³° -5É9ìKCQEúˆ™ÿ%ä  ™ô:WŸªØ-8 Ø{§†¦¹Oc¸YÂ6zÔ©‚2ýð)ù32þ ì‹õLWîiܽ–³ þKÒGÌ<]REi€ƒ‘&“$mÆH&l£wBËþеÜÍöH¹§ÝÁ°õœܱ†¢ ýð)ÒãTp<¥Kב´ 
ÛèýÆÖwr¼ËM”ìÈùšº]‚²=ç2pXUì’6a6áJKkåš$iuÑO»s(RE`nË¥†3y~!Y£fóE8$S‚>ÕÍ÷|MEA؆­änßùiîÓ¸ö¥¡¨Bö[FMn…%¯XZ`û‚£øjÔíûC×Ò§M؆ªIΑvXaÐJ-„‘˜Í¨iÀM[`e‰:WŸÚ ¥ÙÚí;?Ûs.Ý„mØ.‰Ù’[$rso0ØFM®§©¶~ׄ ™8«’Tïé#fîÚÚt‘Ò lÃÖÕJÒܧåù…Ð2ƒ}i5 è 6àsõ)]ºŽï¡ÀQÛíRÏs‰aöD>®Ò8ÓºxW®I⣠;¨i€µ/^“êækÉ+¦4À!U¥'+Š‚° ûÓXnÊñ ÜáñÎé’*Jö‚QÓ€s¦Y£fçù…4[( pHÅj]¼Of) Â6ìXuÜVù$ï]Ëu °Œš†¾ì˜:K*íŠh5EŽÚÎQîãÈ%Ÿ„m8‚¦Úúü õçðå죦¡6¿ #¤®n(ª 4À!IÀÎéo §Œ° ‡bÖïÙ6tò—s–q^"ìè€Ä¨iè#N—T¥˜Éàð­ñ긭aHÚpÅ bt®>¦=½…°Œš‡W»Evr©™) pH—Î|9gYÆHÎ]aÛÁ*(Ëñ ”O{m†Ò€]`Ô48pókïÔP©Ï®¡4À!×妺ù/ˆáÔq¶ûÐÇ^Ò‹¤n¾`ƒ]`Ô48Ù™e—ÞEó R³¥aïÔPIÚu» ) ¶û–+-­U±[t®>_ÎYÆ5±° Œš‡Q‘(;³Z3c -úIDATìÒ8¤šä­‹wþŒ…—ÎP l÷Q-g/”.]§ÜŒº¶Ï.FMë׊×ÕT[¯ìÃÜô’µC[ò6¥Â6ÚÆ€påæ®ôÂöÙø¨i·¶oº<éÝaÔft®>¥K×QÓ€C2%襞/ ZI'Ûø'å¦Ý¾óõç0ò3l_oš¶bÅŠþýûËOÛÉÆwöçð=LÒuñ‚˜T7_K^1¥Žç|M4¤ÓGÌd|¶q]u» ³FÍ–‰±`ã$f­ìÉQÓ._¾üÄO¬^½zذa2OØÆ-:g:!•jž_H³¥ÒÇS·UçêS‘ȉK lãæŽªÒõçHÓ°±ÜDiÀ–™õ{zlÔ4½^ÿÜsÏÉÌèÑ£·oßn}þâÅ‹ï¾ûîC=ôÈ#¬X±Âšceæ“O>‘|þï|çÇ?þq^^^||üSO=åäääááQ^^Þ!÷Ȩ̂Ÿ~úøã+ ”””tX ;;ûg?û™¼*ˬ[·NyÉʺpTT”››Û=÷Ü#+**Þ|óMY·xà7Þ°X,þVssó¼yó¹Jfäa§ïÖ!®·éòåËüã‡:xðàÿ÷?wîÜmµ+pêÔ)—¯¾úÊú噇~Xž¹Þߺ¶|lÁ1u–ÖÅ»rMŸVp<µ†Œ‘þ{§†2Þ0Û¸ ­M•+c #šjë)Ø,Ù?s'ÎÛáñNwç&Mš´~ýz™‘¸(óÖç—,Y2a„ãÇ×ÔÔ¼òÊ+ísïk¯½VUU%i0<<|РA¯¿þzuuµòpìØ±×†m???“É$ |øá‡/¾øb‡~øÃ&''K=vìØoûÛ¯ZNž<Ùl6+ò“Ÿäää\¸páôéÓAAAþÖþðÙ„š«d/^Üé»Ýà­ZµjüøñGŽ‘0ìïï?þüÛ*„NW 000""Âú—/_|ƒ¿Õiùô¢–³¾œ³,Í}7YÇsª ,Ç+P’6gƒ‚°;t©áLñ‚‰Ü¥K×q3XزŠhµì¨GUéÝôþ’ðƒ455µÅû¦&™—°§¼ôä“OÅvÕ.ïëëÛáIŸ„„„¯¯^oìíí}üª &t_Øž1c†$Ò‹/J˜üÑ~¤<)™¿¬¬ìzE2§V«mnn®®®~óÍ7­¯vø­… Z/™7nÜ¢E‹n7lGGGË;È{Êê8p`úôé·U×[ñãÿøã?–Woú·:-Ÿžt2Û˜êæ»?t-u#8ŒÖ¦‹ÊùJÿ%g×P l£ÛIÒÎñ ÔŸÂÅ*°M];jÚ¨Q£®=-955uôèÑ__I;00pðàÁ?üp‡ÑÈ»6loÚ´éé§ŸvrrúÙÏ~–““£<¹jÕ*ggçë½ÉöíÛåWî¿ÿþÇ{lõêÕÖW;üVSSÓ{ï½§ .3Ö3ºo=l_¾|YÞßÝÝý»ßýî³Ï>«Óén«®·bݺu÷Þ{offæMÿV§åÓ3¤”Œ-I[ò6Ÿ>pÒÐUnÐÃP— l£§Õí*”ȽmèäŠh5çÁu÷¨iÀ×WGµØáñÎnßùÜFÆq]nÆH©Û‰Ù l£7I”ç¢sõ‘`ÓSw¹vߨi€rm©ý( p­M«b·(½ÙÜÓ „mØP¤1ø/QnFª­8»cÔ4ôqÜFISmýE±:WiÐR±ƒ° [töp´>%rï Ž’:‹í0ë÷¤ºùvá¨iè˸68ŒÓ%URŸ§8/ ZÉh lÃÖIÌ–°-u–üäî°ÖÛ2q(½ÛCE¿>}°8©â6ÚàNfs'Γ&ëþеô° ;‹ÜÅJ“ôË9ËÈ6°•k’d·¬ŽÛJ&lßAµ&ͲlϹ|öëJK«)AŸ1Ò_?| C° 
;&õWéÒu:WîšÛqº¤J±{§†ÚþñuÅŠýû÷—Ÿ½€û}ë‘Gyûí·ëëëûrØ®Í0tíýÛ=ß.-‹H”ÊýôÓÇÜÉÉÉÃ㤤䯾®®nÈ!ž¯¨¨xóÍ7zè¡xà7Þ°X,·þæö¢±Üd/çA®m|Sgíš´mèäC‘*jr¶áà”ëdÒGÌLsŸV¹&‰±|Ñ»”QÓ²FͶÁ‘&Mš´~ýz™‰—yëóaaa^^^UUU_}õUppðµÁ¸ýC‰å&L¨¹ê•W^Y¼x±u???“Étîܹ?üðÅ_¼ƒ°ý“Ÿü$''çÂ… §OŸ ¸õ7· U±[t®>ò“O Ø—SeZï½SCk3 Ûè[,yÅù3*7Zà|ô.5­ººú?øASS“ÌËO™?räˆòÒðáïí+¾^Ø6lØÁƒ•yù­'Ÿ|Òº@mm­2þüùÜ l›ÍæWuú·nnn·þæ6îRÙ<¿Œ‘þå&> `/š- ‡"UJ§NYD"cŒƒ°>M*Aeµ]‚j’sy½ÅÖFM[°`A¿¢¼ää䤄ð[ Ûí–yxãå;<©xøá‡ýë_ÿýïï°pAAÁ¸qã¬,Ö¿ÿ[s[V·«0ÕÍ·xA 5Ø©®Íú=y~!ÉÇæÏXh˲€° ôByL•í9W¸| ‰Þb;£¦)]Ù&“ÉúÌ‘#G¬ÝO=õÔµ=Û÷ÜsO§ùö=Û·¶;?T|û¼¼[bbâ©S§Z[[åçµWŒÛ]Ø–ºhèZÙ8çìBc¹©xAŒÎÕ'kÔìŠhu³¥2aè\CQEA`DŠóxƒÿ’zC ‚žg £¦ÅÇÇûúúvxÒÇÇ'!!AfÂÃý¼¼ª««Û_³-Q¼¬¬ìÚ|»páBë5ÛãÆ[´hQ†mWWW­VÛÜÜ,+óæ›oÚ{Ø>{¸FÚj»}çÓV×röBUì–ï(‡l.Ha¸U—ÎTD«õçH÷:nkkÓEÊ=©×GM5jTZZZ‡'SSSGÝö¹téƒ>pss%}ÄÌC‘*ªct7Iey~!½8jºJuÜVÆBÑröÂQUºr‹ì]‚ÈØ l6Ä’Wl OqŸ;qžTÖ\x‰nU»EçêèivªÙÒÀXh`S[špòÓ” çT#¶%û˜:Kò¶TÙ’½%S&è&å&FM³G'³©n¾Å b¸zñz(RµkBÎÕÇ࿤&9‡qÅAØìFSm½Tâ’…¶ \ºtgü¢;0jš}imº¸/8Šÿô ‰ÓªáÒ6KsŸ&P©ùÞ„mÀŽ5UHóZëâí9·:n+èrŒšf/UAúˆ™œ‰=ìtIUYDbŽW r1våš$º@@ØФ ³~´³SœÇçÏXX›a ¡ 1jšüEª´.ÞGUé”ôk'vª›¯ÎÕGfŽër9Q lÃÁ]j8S»e‡Ç;Rõ/ˆi(ª LÐUd×’DǨi6åœéDŽW L2Ci@·:UPv0l}¶ç\¥»,"‘q(Â6ú¢³‡k,ŠMuóÍé_­fèKt iU0jší0%èµ.Þ‡"Ut9ÞSg}9g™ÎÕG¹»6ÃÀMaÂ6ЦnW¡!RœÇïöT•Î9N¸KŒÂe ¬7÷âì莀}\—+;©fµ.Þù3šôÜ lk›GIÝÕq[éëÆÝ¨Í0HÞfÔ´^aÖïáæ^Ð};yàXi2UD«ùB l·º•¡ÔvM"uãŽÉž³Ûw>£¦õðç· 0bÛÐÉœVw¯©¶¾}ÀÎ8¯,"±ÞPÂW™a¸+­Måcð_"G—lϹÑjΨ\“Ĩi=CÚ³á\ w°©³ #Òܧ%9yæx–.]W·«Ë°Â6Ð-©Û¬ß#-xåf݇"UtTâ¶0jZw»ÒÒZ¼ Fçê#UJî&`§8§ l½Ð ?™mTR·d§ƒaëIݸEʨiÛ†N¶äS]«¡¨B>y~!\ñ·îtIUuÜViÕHÀÖ¹úìÊ5Øa°‰Ô-‘IqZZùÅrp­¨Í0HƒFvú ºê“x(R¥uñ6%è) ¸±fKƒY¿GŽA»&%›>b¦$m©?ËM@Øl‘’ºõçÈDêÆM5ÕÖçNœ·Ããs¦”ÆÝ”bÌñ ¤$ SWZZO”UD« þK¤•"[b¶´Uj3 \Ö¶{"1[`r0Kuó- Z)G2†ÁõHÓGëâ}T•NQÜeØ9)FŠÚ;_SW“œ³/8*Ûsn’“§4K$iKIg@ØÁé’ª²ˆÄ¯@å&r„ãÒn\KÚ=é#fJˆ¡³o·©ÜPóàë«·<¬ÛUx(R•ç¢sõѺxK%YºtY¿‡îk€° 8òñ¯&9Çžêæ«>…înt ;ƒrSèzC ¥q+ä%íHiDrÑ;€¾|ì8UPV¹&IŽ Êí¯³=çî Ž’’/÷Â6ÐÑÝë9®ËÕ¹ú [m€ìk!\JàzŸ‹K göN MsŸÆ™ú©Of•K¯ÓGÌLròÌ5»0heuÜV©ùò løÝݸÖùšº]‚²=çÊLûçÍú=}êºîƒaë;íLÊA>/Å bø¤è šjë¥Þ“*1Ï/Dš ¤k€° à¶*(“C©D,kw7ã*÷eÊ]¬j’s¬Ï4[¶<ôj¹èîìáíïO¶œ½ íËmC'×í*dàÀ TþûC×îö/åÌpÒ5@ØÐ$MSg}9g™ÎÕ'Í}Zñ‚˜“ÙF:ñú 
SeúáSŒáÖQÓ¶<4!ú¾°íéÏÎÊúùÛퟩ7”HÌ–ÏcÈp$ž%BU¥+c†§8Ouó•˜}`Q,×]„mÝ·”în9úæNœW‘(‘ƒoµû –¶­'ï|ù÷ê~¿°ä;öVWÿV6Srµµ%Z¼ FçêcÖïa—`ïËM’¢K—®Û;5T¹è:c¤¿ÁÉ¡HÕÉlc³¥"ÛzÔ¥†3Çu¹û‚£”±F Þ}Ê1u–ÖÅ[Úa}*)4õÑ×øÿ.»zÒ÷^Ö>2Q¶÷ë«7E“}^š¤4@Ø£s¦rø–CvþŒ…R›I´Þ6tònßùûC×JÝ~º¤Šã8@Ø`ëÁûTA%ãØÍµlϹ™?[sÿ ɼRº,ÎQ·4ïõê~¿Hû·iÇ·îV.\7%èÙØe03©»¾œ³,kÔl9F§8Ïñ , ZY¹&©ÞPÂÍ®¶û ÞiîÓ䈞çR­æ~H³¿iº½ý¡îõ=ÏK•œC^È÷÷/ömþÎ ²²3gþì7ÒBí0$;ØŽfKƒr®‚ÀˆïHÅ¥uñ–y(OÖf${SJÛ€#ƒú1u–ã ÞöîJKëq]®ü%TKòlK×ßy«ëĤ/)“¼¼sl ãmµî‘‰ÊÊ´å¡ »}çWDmMÂå%S‚¾nW¡ 8ýÛîÖùš:‰Ù¶óüBRÝ|SœÇïš$Qü¨*]g´·nJÊ*ÉéÅ‹_í¹A.í÷­¾ûøã?œ<ÙkãÆå—/^»@{Ö—nð†–ïw}7ý+7^ÃNÇ'×Üç±é÷RSSÉÛ@ïºÒÒzÎtBrò1uÖ¡H•ÔKù3f{ÎÝ6t²æ¾1J¢Îñ ”˜-µ}Uì ÞRÃsî7¶ôi{I#¬,"Qj#ý•»žJߺ¶&9çtI•4é(¥öRÝ^ËóûÝ;­™¶¹ùËcÇÒ5š<<žýå/Ç45ýï õÃöM{ž¯]æVþÊõÖ°Ó)güìMn>7nLKK3&“©±±‘]è¾úYrm†Aé 6„çNœ'µÖÅ[ÝïòSæÛ÷QË’Risâ7¶Ø"‰ÖÒV“˜-M·½SCÓGÌLrò”Ÿ2/Ï×åöñ»|7ÕÖK÷ð§1·›{/]*xõUðð [ Û×[ÃN§‹–Êæ'$$¨ÕêÌÌÌ¢¢"³ÙLç6pgZ›.ž3°äK•[»E‰Ó»}çïðxçÚêý¡k+¢Õ²¤,/¿Å× @ØàÍÁ†¢Šcê,iêåù…HËOâwÆHÿü Ë"%~K³¯ï”Æ©‚2I›Ç·%Þnî•iïÞ„gŸn³a»Ãv:þ4¦-lG¯Ý°aCJJJnnnee%ÛÀõœ¯©“J£6Ã`JÐ [_´RjN ÏúáS$H+c’IuºkBr¾wåš$©lëvJ½Ê}€°  Ï‘& 4¥íX¼ f·ïümC'K«1kÔliDJkRZŠòª£žÇ(`iŸÌQßAØ>w.À€ïví5Û7 Û·õWÚ¯a§S6^6_¯œL^TTd±XøP oÖ„J¿´’¥Ë"÷‡®U>b¦2¾wŠóxÉÕ’®¥z”¤­œìmÖï‘J²O}M „m¸C­ë %GUéÅîš1Ò_â·´5³=çÂEªŽërO—T9Àèkw¶¿÷='[îÙn¿†N²á²ù­°ýùçŸkµÚüü|³ÙÌGޤ%+ÒÅ b” 5j¶rŽ·|RÝ|¥ºSNóÞeÍÒRr¦7¶ »4ÕÖŸÌ6VÅn‘Fªrþ¹4OõçäNœW´²rMRm†Áîî=v7a{ÏžøŸþô_m9l·_ÄmõâH ÛÖ3ÉM&{;ì…2ˆ÷é’*ù,×$çH6–„,9¹Ó >b¦Ò)]!‹Im¦œã-Ãzal‹4RÍú=ÑjÉÛ’º¥9« À¶Ûw¾<#Ï×å6UØìYèw¶[ZþúË_ŽY¾üÿÚlØî°†7Ûš%«7oÞ¼sçÎÊÊJvlôº¦Úz©^äªôEŠT)÷—V®‘ÎéoMÑZo™—'¥ÚQ;¶^é‘V.–f4o l€#hmºxº¤J2¶$í}ÁQy~!ÊYè)ÎãefïÔÐâ1•k’¤,‹õÀˆAòWªã¶^ïŒ÷Û Û/kj2’’V¾øâHÛ¼õ×õÖ°^'¡W¢oCQ…µZ‰Ð’%-+£vK]!»e’“§Ì+'uKÀ–dIY¾}Ц;@Ø€¶Ìž*(“¶4—•npe t«´°¥1½?tmUì–¬üQÝ#%öŸ;Z{Ça[áäôÇs}ýõ±ùË_¾\x‹C—]û|w vã5$l£Ë)§p+ýÏÊ bÊý®”qÅöN •œ¬œÈ­äg™d^é…VÆëVNçn¡åÝÎ×ÔQ¶Â6Ü­¦ÚúzCÉ1uÖÁ°õÆ€pi‚K—¦¹rCZët¤/mñ†¢Š;ëË’¼­éçùž¿“7¹Ý°í¨aVq%9Ÿ*(³ž¹]·U’°r³hùîöo Ï©n¾JxÖ¹úÈÃïXûŸ%i˯T®I²æçÆr£‹Û`+ZÎ^6úÉl£2â‘4÷s'ÎËé¯uñNròTî¾£´ì¥Y/Yú¦w²•eä5÷>¯¹ÿ…ÌçÞ–@ؾqؾÔp¦*v 74²#­MåÿeÍÌ2ÉÇG¦ŠhuûØœçb=g[&%6kî£<Ìöœ+¯îj‘»,"QÞä¨*]yOåO8ÀÍ„m@ÇD¡œ¹ªÜ§ 0B9aUr‚ÄiåRO%-('©VÇmUnÞSõIJʃ¯(Ñ"y×¶¡~Eó£ Û׆íÓ¥Gr'kþ¥%¯˜ý­ç£rc¹I‰µÊ—Mí;™ 
ƒVJV®p¶ö3+{¾²c§8·žª-“äjeyå×%u·Íò9bü0apK”ê*#0U®IRF0Þí;_éÇSßû¼’IÚ¦oçEE¶•°¾V÷äê{~‘4à%’ö­Çc™ê %rû.eå®ÎÖÁ´Ûw,ë\}¬û¤2°¶L’¢•Å”s³•‹)ä}”q¶e:®ËUþœr’6×9@Ø€^ËE{g,Ü|õ^AÊI³iÿ6í¯A+éÙn Û^¯úõ|õƒ^³5ý=’z9FÒ¶&aë­˜¯ÍÃ2•E$*‘Øz‹fe2ø/Qoû>dë§:ô$[;“­ç`Ëd ·¾³õÏÕf”ÕURÖ¾eÛ`—ήÑ?9¥-XÞû¼~ø›å+UÊ@k\³ÝV&£g©ïAbvÛüw_Lrw+I[ °}”í0É;X“m‡é¨*½}ÐU&k"µNʈÖ&kšU¦ô3­AW&­‹÷?N[øçŽâaØzOæk#qûTÜ>[¯U–©©¶žÏ„mè뎩³’z釽Qö烙÷ñ°]öQD[(íÿ¼ú»/vˆ©7’ŽmŸc;LÊvNÖó¢ÛOÊÕõ¦ªØ-2¹2ºuû©CÈgÔ.@Ø€nw¥¥µ&9§:nëõnfÞ–§\³-y»N²ám½Ù?Ÿ¹iÀK›†ýJ=d¼r‚ýßóŠØyap‡*þ¤¾Ý]Ç›4sÞOüä3Õ´yjçW’ŸœœòïÍ÷©ÛõWv@ØÜ ³~¤ÍŠ?­¬Û¥îƒ“lx[Øøf4òMê´Vmý··ÚÎÿþØ“ÙFö@ØÜ6Hëô>ÛR,y~!:WŸÆr; lÛ]¶•Â9{¸æ`Øús¦ì'€° lwMØ lÛ„mÂ6¶ Û„m l¶ÛÂ6a¶„mÂ6aÛ„mÂ6€°ýÍt|[b[Ø^¾š° Û€.So(‘´y"]Õ7ÃvÕºeó՟ĶaÐeÎ×ÔIÚ”ÌÙ7ÃvÉ’em=Û a¶]IëòªÁÿ½¾¶÷¼¨yxa¶]Ì®uùÒ_ûZÒ–MNú¾§úÕ¹„m@Øt±“ÙFu¿_˜>ÿ´¯…ím|ÛÛï/'lÂ6 ëez{ÛPŸ– Æ>¶3ÇÌP»zk6n"lÂ6 ë)7Ûº¸¯ukkÞ]¬¹Š° Û€®W´²ïœL~öȎ̓ƪ‡ýJéÖV«Õñññ6lHIIÉÍÍ5™L쀰 èWZZ3^š«¹ÏÃáóö…_l}ÒWýý—5kÖ)ÝÚ7n”°ýùçŸkµÚüü|³ÙÌþÛ€®qòhMêó³•óÉõúí¯öoÓ>ñšÚéE͇QJÒV«Õ³%lKäNKK+**²X,ì €° è•凶Mù’·uN4}þ©#ÝìâW{öüÇ©û{¨_Ñ„E[“¶J¥jyee¥”; lºFss³Ùl.**Ú¾ò3õ°_IäNröÊönù'1'ÒU'sÔö8I‰/ŽŒÜ>n¶æ;/´ÝèkÜo5q í“vÂU2Ÿ™™)Û.% åÀÎÛ€.ÓØØh2™ŒFcZZÚÆ"T/ÏVÛ–Qí|Ò ¯ùÕ»šÈµšoY“¶õrÙjÙvºµaÐÅš››-Keee~~~jjª¤Ð¶nߨ5ª%+7.Z¡^©Y²ªË§¶0Ü oûÍ´|µæ³õšv$fËv)×iËÆÉ¼l©l¯lµl;ÝÚ€° èÞ¼––&ÑtÆ ñßJè¶zu[d»dëdIÚ€° è¹¼m4333SRR”®`I§‰Ý@Âvb’­m‘-’í’­“m$iÂ6 çò¶Éd***ÊÍÍMKKÓjµ’N7w Û›{l…l‹l‘l—ll#I¶=—·Ífsee¥„Òüü|I§;»„í=H¶B¶E¶H¶K¶N¶‘¤ Û€^ˆÜ‹Er©Édªì¶+{l…l‹l1¶KÂ6…@ضÛÂ6a@Øa@Ø lÛ„ma„ma€° l¶„m¶„mÂ6€° @ض@ØÂ6ÛÂ6a@Ø lÛ lÛ„ma€° l€° l¶„mÂ6€° Â6€° @ضÛÂ6a@Øa@Ø lÛ„ma„ma€° l¶„m¶„mÂ6€° @ض@ضÛÂ6a@Ø lÛ lÛ„ma€° p$çL'n¶;¼ Â6àæ†­o,7u¶-yÅU±[("Â6àöœ=\£sõ±æmkØ–¤-Ï_j8C¶·-óço[ó¶¶ÿþž$'Ï}ÁQap'J&;ÕÍWò¶ÌHÒÖÜÿ¢¦¿ÇÙÃ5ap'®´´n¾ÿ…ôg¦KÞ–°ää©òjž_%@ØÜ9ìE³wûΗŸ2é\},yÅ apç.ZÔ÷<ÿMÒ~xbÖÏߦLÛ€»•í9W ÛúáSŽ©³(Â6àn5R÷{>yàØ­?zíJK+@Øtý°76ßÿ¡HE@Øt¿mÚ‘ô½—/5œ¡(Û€»røðá°°°‰'º»»{?ò¯#FŒðõõŽŽ®©á>Û„mÀm2™Lþþþ®®®ÁÁÁz½¾¼¼\ž)))Ñét...AAA‹…‚ lnIFF†Äé°°°¦¦¦NhhhîææVPP@q¶7¡×ë%E ™onn¶X,&“©´´Ôh4æææîl'""bÈ!FÅdaJ€° èèðáÃ...’œÍf³ÄlÉÕ«V­  ‰ßü­”””Å‹Ëòiii²˜,,¿Bä lþ‰2þ™Ì(I»°°pÆŒýÚéß¿¿ÏgŸ}–˜˜¸aÕJ5iÒ¤×^{-77ך·)FÂ6àƒaøðá---ÍÍÍ›¿øâ‹'žx¢_gÜÝÝÿüç?Ç_õÉ'Ÿ 4(..Κ·éÜ 
l¾&3‹Eb³——W¿ëûÍo~£V«U*UbbâË/¿ìïï¯Õj•¼ÍõÛ„mÀ7FŒQTTôõÕû~IŠîwCýû÷—e$ooܸqÞ¼y?ýéOåajjj~~¾ü:'“¶mxöìY™)--ýÝï~×ïf–/_®¹ê£>rssKHHì™™©tnSž„mÀ×’Ÿ•£Ñ8sæÌ›†íßÿþ÷JØþä“Ozè¡øøx¥s[~Ýl6Sž„mÀ×NNNMMM2³sçÎ÷Þ{ï¦a{Þ¼yJØŽŠŠúÑ~$a{Æ )))¹¹¹&“‰ò l¾vww/))‘™ÌÌÌ•+WÞ4löÙgJØ~ÿý÷GŽ™˜˜¸yófÉê•••”'aðu@@€r“m ÛjµzôèÑ7HÚ^^^šoMš4iúôé„mÂ6 £ììì‘#GZÃöÇüàƒvš´ÿå_þ%..NIÚ*•ÊÙÙ9&&†° @ØtÂÓÓS³¶%<¯X±â™gžé´'Nœ(¡ÚÚ­=}úô—^zIfÛ„m@' \\\6lØ ™Y’³r7¯°°°Y³f½õÖ[óçω‰Ñ´#/ 4hÍš5²¤¶‹‹‹:tègŸ}¦R©”»yIÖtfùòåC† ùàƒdëÂÜú €° èIJeË|ðÁ   åÌð7vˆÙòÌìÙ³ ôþûï+­Ýà™™™¥¥¥‹…b lþÁd2­_¿þ™gžyüñÇgÍš.YZ‚´üü裦OŸîêêúÜsÏÅÄÄÈ“JÒ¶vkççç˯766RŒ„mÀ?X,–ÒÒÒÜÜÜ?üÐËËë‡?üáý÷ß߯_?''§G}tÒ¤IaaaF9{\IÚŸþ¹V«•_Qºµ›››)FÂ6à$*›Íf%oK„–DUB;ÖgdkÒ–_$i¶hll´æíÔÔT¥{Æ ‰íÈCeø4YÀš´9€° è\ss³5oçççgffJ¢NIIÙÜŽ<”'å%YÀš´éÖ ln¹-‹Éd’,m4sssw¶#åIyIà:mÂ6à6òvcc£di³Ù,¡º²y(OÊKth¶ l@Ø„mÛ8„ÿ|.ŠBNF ÍIEND®B`‚python-watcher-4.0.0/doc/source/images/sequence_launch_action_plan_in_applier.png0000664000175000017500000012231613656752270030466 0ustar zuulzuul00000000000000‰PNG  IHDR#k®Ý5tEXtcopyleftGenerated by http://plantuml.sourceforge.net:gU3zTXtplantumlxœ¥TKoÚ@¾[â?ŒèÅH12iHTª@y”WKhm…{ˆ·2»ÖzÖIþ}w\µD.–íï1³3s‘S¤WqÅ©v†_&p©Ó*xŸ úQ¡‚N’ÄUZ3-‚hÎâRÌ“˜ ·ô^ך‡5Ãó²L×cÄ,EËW„íñèj|3ºú/Éw‹Xã/õ1Ú!‰/Ÿ *°TSAv‘mqt´N/æ)í—ÞGÏÛ•_Zqb)XJÈ‚híU."\-cùPq…Ö{'ÿ·Åøó¡¡çÂd*ˆo ¿1 xà¹X“(L¥V `"4¸D$L±ªô5ŒÅ<4"ó ÄÝZˆÔøóÁ·¸ß¾(% )Bžûpg%F|Ä@Z.ÓÚO¡ºâ÷Êš{®}€ÕŠS lKdÆ 3¹±)‘î:rÎÃ#1¥y$Í#讀7PkbœbY9ä)[ÄÑS‚*ã©TÛµ­‚§{in¡v+Æ¡°^Ú¤4¾¦/ÂÃ+¹cŠIŸÎºÝ~¿×ï½¹KJL[<ï¿Þîê¯ T¢»0Þò5ëLL͆ÈÌ YÈÿý‰;e·êŸCÃoùg­æ9Ìîºpì7NkŽ{5@1ǦHñ…¶r5ç–e̽Ö`Ú‡¯ÚlƒB_d\I±BAù9\Kš&’ò¸Óï’LM™ä¾ Fý¬îϛޢÑt\èÇ?%ÇôtEvŸ€IDATxÚìÝ\\õïÿ¬ã8EÊRºtJéܹS¬M³Y,Í^ŒÑE«i]6›¥H¹—›uùåòh—˯½m³ts½1õOdSDÊÚÔˆc$Ä+êyD]¢³?Óú‡¦iLX$I ±tä:"¿÷ÛÓ™sÎ`ø33¯çc<Μ9sÎ÷œù~¿ó}sæÌ,›€ÙZÆ!°˜‰â·ïüæì ¯rûí¯ƒÔ§$A§}Q'¹Q™úOúœX& )kÛ²nç^|•f™$¨ó´/ê$7*3@ÿIŸûDñfã]g_hKÎۛ߿‹7‰$ì}’¹ÎÓ¾¨“ܨÌý'}Nì…”ûý÷_KÎÛÐóm¼I$aï“ÌužöEäFeè?ésH±Ážýþ}@àõ×_?yòä™3gFGGi¥ô>Üh_ÔInTf€þ3áûEÌ^°ö­ÛŸ|òÉC‡9rD^³ááaZ)½7Úu’• ÿLø>‡D³ìáÚ¶gÏžÈk&APR ­”Þ‡í‹:ÉÊ Ð&|ŸC¢ˆÙ ¶ë»w<øàƒòšI §N¢•Òûp£}Q'¹Q™úÏ„ïsH1{ÁöÞr—¼`=ôP{{»¤À“'OÒJé}¸Ñ¾¨“ܨÌýgÂ÷9$ 
Þ$@§}Q'¹Q™úOúoH¦ÞgÙ²eK¼Q¨ν¨´¯x釗~µ\ôS™úOÅì_‰{î©ËÈøCùÖ•_}õŸF.\Xø}/ÿë_þö·ÿÖíþ¸ÓyqVÖÉ´ÌѯDãp\¤=úÿïÿgø¶¡hëùÚ×n<{öoˆm_¾Ü÷‹_<®î>öØ]Rå|»šóæ›.¿Ü;÷!Î<„ ›*‰"Þßã·Zª~ÛåºDÞJK¯øáÛcXëþSz‰‚‚?Yø®Ãläl6è5+R¢% y1}´~ÕªaÇeåÊË^yåaýÌŸüä‘ÜÜϪã"GJžUSóßOŸ~VîÊßÚÚòüüÏAyS\¿¾°²ò¯£¾mœ9óœ¼ «Wçò&ØÖùo|£âî»7©»R]óò>'ÕœûîÛ"Ë,Ù‘aS]ôÿþÒ¾æØÇoµÔ¯ð7¿yIRPaáÖ®½âÂ…WH&QTTI'¹À]‡ÙÈÙlЛ‰âç?ìÚkÿL;:?ûY»þ¸<öØ]ÅÅ×ê–»{÷þ³:.·Üòõ þ2l…røn»­ÆðþúׇSR\6ß6\®KÌUsä[·îÏeNçÅ’d¤l¼IÀ¢Î?ó̤…«»Rg¤’¿jNyù ²ŒKK¯×ªVQÑUÚ³e¿O{ÊO4Êd1¯7ëþû·ª*úÈ#Û¤¯‘ù²’²²/éϹÉSdȨý[wóæÿ©¯Ø÷ÜS—•õGÇE3jª²—_î•ú|Ù=t›Í‡"Û×ì Fûšc?¿ÕÒ°ë.)ùâ÷¾W­MÛ,°ábÖØì)o f;Heâ:QHó—^âW¿zѰk’>$'çÓÒ!Èßþðfm,š‘ñ‡¿ýíOÔ22žþQYƒYÇbÿíØlЛ‰bÓ¦¿Óú\9Ð2v\rs?«NÇË„ÜÕ¹ûÚk†­ð7öJ¯=—D!¯è·¿ý·×]÷ߢ&Š+>#uE«RÉ©¼IÀ¢ÎKU‘øÞ{?•éwß H"ii©¿ùÍKÚ2ÔÐ]¹ò²—_~P¦å)uu•fÿfxê©{dÄ&KjçÖ6n,V‹É¸M;Å'+ÿÚ×n”NJ{èÙg[V¯ÎÕÎÛowJJß²åkêY2ÓnÐfS•ÎTëÔä¯L«ÌX?61ë‚ѾæØÇoµ4|”•H9µi›¶X̬›=ÅìMÁb©Ì@\' í3HÕÕe‘]Ó£Ö«>Dš¿„ í“™Ò<òÈ6µ¼Ì”^΢c™ÑÈ9I…5ýkïUòãñ|R›V;/ÝwUU‰6GÞ~{ì.ýq‘ g¸NÃ#xòäå%4ûÔS˜ììO¼õ–?j¢p¹.~Ù°_çׯ/”±…ö˲²/É„ ª´aŠ„aý¿ŠõUZ†w†µ±°ð Ï=w¯a•>uêiu÷Â…WÔ97yŠþCóÒãdeý‘zÖüG‡a¶nª/¼pŸZòßþí~u”õCa³+í+&ÿÙ‰Ójiø)3{k0+°ÅbfØì)fo ;Heâ=QÈMÞ"Õÿ¸ÕÌÕ«sõ}È¡C;´‹.¤§½á†+Õ|™–9ËŒFΆƒÞÄOÏ<óýÿidÏŸzêžÈI÷r‹<ó oê8êµþKê" ßþöß¾ûnÀÎ9ŠŠŠ"ýÿÕÌ–ß¼ùj—qK Õ®å`Äë:Ï=uÚGÒ¥6j×TÉ_í3ëwß½é?ø®¶˜ ¼d'ÙXU`ÃÚhØ¢Æ`u“§[¬Ü~SÕ—!,Ò[<“‚Ѿb’(â´Zš% õ`³À‹™U`³§˜½)Xì •H€D!qB}XÔl˜ªú™p»?~æÌsÚ‰\uجc±ÿvl6èMüDQVö¥°“ÚyýÎ?þøv9(r‹ü¼„á§ždΊŸ™ãåw.¼’ššõ]Pûž¼ãJ‘ q×]ßbÄë:ÿæ›´ï­^+•Gß Iå—GÕEW·ÝV£.05ûf$Ìtè&þã›6/&³nª1I³+í+&‰"N«eÔO=Ù,°ÅbfØì)fo ;He QÈ­ªªä_ÿu³D¡}%ÆwüƒLÜyçÿ«¾ âc™ÑÈÙ~Gš ‰âW¿z1--Uÿ¥ÒáÊíêýÎË!Ö9—zHŽûM7ýUØjeÎÍ7WÍ1QHªSÿå’ }!ßzËo¸Ú·ßîTOáM‚ÞÇbô–“óéS§žÖ>­®Ý´j¿|¹OÍÑÿgâ'?yD?ð û(…öaûC7yŠþS;=NÔ¦«O=Í´`´¯XÕÉx¬–Wf«/ç°Y`³Å,*°ÙSÌÞ,vÊ $F¢~A:Lù«fÊØU߇ȴͪ¯\y™ô!6;û#çäJ÷ÜS§>Y¤n2G;çnç¸È1•צ¦æ¿«3G’ó¼Þ,uÅýì…d†ŠŠ"•U$JîÔ.R”xÕªjùõë å=FB§ÜäÝBýcŒ7 z‹ÑÛ×¾v£ z´O«k·ÒÒë7mú;©ÉjŽt4Ú—BÈ O_å²³?ñïÿ¾ËðØÓ§ŸUW†Y ÝÚ‘•õG=v—Vonúï¨Mõò˽Ú?¶ö³véOîÌ6{hî£}Å0QÄ]µ {èÝwR»®½öÏn¸áJu6Àfͳ¨ÀfO1{S°ØA*3‰Bûf'éñÔÌGÙ–“ói­‘¿>_¶þ‚léî¾{“v~غc‰áÈ91…„éyÃfÊíëumè×ÕUÊÛƒ;í',´t1ÓDö³ ÿëýúðÙÙ³‡ÊËop¹.‘‡òó?ÿè£õúo§‘70™Ÿ’â’w_³Ëy“ ÷ÑßdP"UH;7ªÝ¤OIOÿ¨þÂ,îH/#UKFf?øÁwU•Û»÷Ÿ¥¶ë¿¦SÖ–—÷9mIý×tZ´Í\wÝÓ>Ò-êßÉfí%jS•P:M)ƒü 
ûŠX‹‡æ^0ÚW EÜUK}×ít^œ™ù1é„õïÖö l¶˜E6{ŠÅ›‚ÙR™„IÚgG#¿=VæÈß°Ÿ­¸ãŽ>ö_þå›Q;–ØŽœùîbu{ûíN úÿ<%áœ#.Fo‰t›EFû¢Nr£2ôŸIÕç,‹¯WâС’õ¿3›è}H Qðî€þ“DÁ zŸ%z3ûBë‡h_ÔInTf€þ“DÁ+Á›¨ó´/ê$7*3@ÿIŸC¢àMô>´/ê$7*3‰‚ ô>´/Úu’Ê €þ“DÁ›è}¸Ñ¾¨“Tf*3@ÿI¢àŒ7 zFo´/ê$7*3@ÿIŸC¢àMÔyÚu’• ÿ$Q,¡Dñæ÷ï’r'çMö7‰$ì}’¹ÎÓ¾¨“ܨÌý'}Nì7Þ$’­÷áFû¢Nr£2ôŸô9±I¿ýuðÜ‹¯j·g¿_ûÖíÿÓ¶]ß½Cv ¹nìâM"IPçi_ÔInTf€þ“>'–‰B/<ùä“{öìy0YɾËãpêÔ)Zi2 ÎÓ¾¨“ 2ôŸIÞçÄ8Q¼þúë’~8 å~(ùÈ^˾ËãpæÌZf2Hò:Oû¢N‚Ê ÐÒçÄ8Qœ¼0Ë–-2â”ù2”õX/lȬ$fk6,Þ¦M›Š‹‹G§•””ÈÝY”D–窕DnÔâ nE†ÝI²§1I²!}©Â–—¤±-ˆ£ŽèÙHóÂ쫸 ?Ö"£Ã´´´¨Ã>õŸlY^•³³³#¯<6[8*}IÌÖlx7++K-,n·{%‘…ûûûÕJdöšáV,6—`{“D!ÇA_ª…I|' cz6Åâ$Š@ PXX˜’’¢}´ÆápÌq8hgLiÈfIfT 5ÒQI¢®d¦Íçóé?õ”À{“Da±r9Œóô©'†hà}èÙH³L§££# É´üUóeì899©MƒÁ¨£½ýÝYIfúŸ{ý¿·õÿ¹ŸÑ8Ûú?÷fE5ÛJyyy{{{2쩞YŠZµ,ÎQ´µµÍÓ•Ù ÑÀû.г‘(f™(222:;;eâܹs6lPóW­ZÕÔÔ$#?™_QQu(©] Û¹ÀYI לšš:44¹fõ|ªë¯.˜Ñ8[VœfxuYQͶ²gÏYO2ì©~Ú¬ E­ZÚ®i¥’][˜oeˆÞw€žDa,ê•Ù~¿?''Çápx½Þ––5¿»»;??_›ï½÷ÚJÊ V†¿aßõdtkVÃ5766º\.Ão@ª­­uM«©©Q¿†6ÓqvCCƒ ¦e%UUU‘+1+ªÙVB¡ÛíîííMø=ÕO›U¡¨UKŠ!…q:R0ýw=Íë/Üq©+ cz6–L ˜‡_c¨¯¯_‚?¤°¤~w"*9€0fô”±Óg{ï{büíj5 Q€q6{:ƒþ@Û²‚}—þùÙ^¥nE"pYR%Y¬ò°§óäÔ®g$TÈí€û†cw>taøš! 
Q˜Á§_Þë\óÁÉŠ\Ýî\óoùMNY…-\À„… íöØ¥…úS´б=‰Â˜?qãÆ-ìöøÇ×jýûñuœˆ¯Žƒ€žDÁË,´_l{ í®±÷â+ŸÍû›ßWžºìÆ_Üù vŽ‚–:v g#QðÆD‰g¬}nÕMûÓ¯{ù«ßê|åýÐ{´б=‰‚7 ŠŸm¾Wût“þ¤-tì@ÏF¢°… øä>ü=Š\vR‚–:v g#QˆbìôÙã »ß=5È¡$ $ $ HŠDÁ|-TW g#QÌ_2ÐR@uz6/@K¨®èÙH¼<- º‰‚— #[BÖ2¾L‚ê ôl$ŠùÄ|Àbµ”…ë“(¨®@ÏF¢(}Çb îço£6×Ã=ztݺu)ÓŠŠŠä®~+òŦ‡††jkk=ÓéLMM•…Ÿ ¸ wÇpý‘‹‰Í›7gddÈÚ***‚Á ÍC-;ÕÔÔ¤Ÿ#we¦Ú YÕÊ•+UÐï»U98³^óÜ7‰€1_ÉÉÉP($B C·lÙ"£ê‘‘‘‰‰ ™6ú »»iÓ¦âââÑi%%%rW- cnY‰¬_F«Ö‰Bž«V¹Q³¢ÚÙJfffgg§LHN¨­­5Ü‹õëÛºu«ì¬lN«ªªÒ¯Íz¢„óçÏëW%{*™M¿)€¾üêéRþ°çêÙYó7@"' .àbÕRd0––¦Mggg÷õõEú ïfee©…eÂív«Ô©Y¹Óé´Nýýýj%²NÃFÕÎV¼^ossóàà ÅîX¬_ÿÇãQåS÷è=¯ÑæTiÕ£’å$´„Í Û)³“EÖkžË&èØ€ž-‘_2Ì¥¥””møèp8,©f‰"l~äX6êÞÎJlÕp+ÝÝÝeee2ú_¾|¹ßï7\ÒþúõÔbQÉÖGGGõs‚Á`ä™ 3GŽÑÏŒ< ¡Ôæšç² :v g#QtdÆ-Åãñttt„B!™–¿j9ÓsúÓ ús3JÖç(ÌŠ:£­tuu™ý°¹~YLµ‰YÒˆü¿Í«¦¦¯´ÎËËS37nÜØÐÐ`örØ¿ŽbÖ› cz6@GfÜR222Ô6lÐ0F»:YEjjêÐÐPäpSÐ.ƒÅÅÅúë(f”(d%Ái†×Q˜Õb+ú1·$QÈz wÇlýa‹Õ×ׯ_¿~``@¦{zzÔÀ=*ÄKNkmm•¸299)r7ò™4;v쨮®V3%kIÚ¶m›VyúÁƒÕ£6×<—Mб=‰ #3n)~¿?''Çápx½Þ––ýR‚ ¦Õw=‰ÆÆF—Ëeø]Oµµµ®i555Bf—(´ïPªªªŠ\‰YQí$Šööö+V8ÎÜÜ\õ©§°Ý1[ØbZ¨ðù|²d^^ž¬Ùþ«pìØ1ÉiÚZ»v­á¯F(’[ô3%ÃÈaq»Ý²]yQJKK%ÍhÍsÜ;г%l¢à> 1Z ¿ ÆNŸí½ï‰ñ·GèØðFL¢fcÐh[VðXjáÙ^åh‰fÌeD›ÏÁI¿¼ÿÇ*äöcϺ㠻/ ¿Ã130pà…½Ž+%T<ž±¶ÝuÕK%›8e$ f*äÖ~ñšÇÓ¯ã”(ðvhã6nÜ–ò­ý’5ÚÄKeÿD›`¸2{©“¨£-qç?~¸¿í®Ð"Ä“ÿeý?½®ã3_ùÅ^~‡ê €7b/@K¢Ç‰öK®zzyÙci_|ù«ßê|åýÐ{TW¼“(xyZ Åñú‡´Sê¤ÕoÄ$ ^€–Ø¢ýž\­?)AuÀ1‰b1qe6@KA¼;}öxÃîwO R]ðFL¢¼HHHH‹† øZ ¨®@ÏF¢˜=¾d ¥€ê ôl$ ^€–P]г‘(xyZ @u/}]]]gÙ²ø>¤ñ^~0DÕèÙH³·(—¹¨!øêÕ«_|ñÅxô“(’—º‚ê ôl$ŠE=d&cn—Ë•£üùÛ–Í5/ÀÎ=ztݺu)ÓŠŠŠä®~ëòE‘†††jkk=ÓéLMM•…>^]]:M&ä®¶°¢ êç_{íµíííjýƒƒƒÙÙÙÁ`P¿ÑÒÒÒ””ÙhQQѹsçÔCuuuéééiiiÍÍÍ[4+¡Å…íõöíÛeÈër¹d€«®u í¬\Á¹¹¹²fŸÏ'ký0\äbbóæÍ²¶ŠŠŠ°Ã8 ²³MMMú9rWfª½“M¬\¹R%ý1‘ƒ,mÖkžû&HK4QÈPudddrrRs^+³iÓ¦âââÑi%%%r7lð9RÔ&ºººd<­¨©©ihhÛ¨ (€l1 I„PãÎ-[¶ÈðZ 311!ó­·hQBÃ= [‰h¸ÑŒŒ u¥Á† ôŸ„Ñ.SÖ_Ga¶E³ÚLòÜà4Ãë(ÌJhgå2ŒÖr‘$ Yá^˜­?l±úúúõë× ÈtOO Ïš â%¶µ¶¶JŒ‘°'r7ò™4;v쨮®V3%ƒI@Ú¶m›VByúÁƒÕ£6×<—MБ÷] g#Q,òË3<<õðÃüµ“(ÆÇÇkkk]ÓjjjÔµ2'r€¶žh|2ä÷ûsrr‡×ëmiiÑ?W‚ŒªÕw=YlѬ„6ECCƒöJUUU‘Ï5+¡•···Ë¾;ÎÜÜ\õ©§°½0[ØbZ¨ðù|²d^^žþK´fíØ±cÛ´ã¶víZÃ_P$ÏègJ¶‘Ãåv»¥<ò•––JjšÑšç¸ :2ð¾ ôl$Šydq™ËáÃSŸú” æ¦þs|;ïd ¸ÿþ¥ûÂóÛØIŒK]Auz6…]ÃÃS SÏYBn<°Pѳ­Mÿ²$ $ª„V>`.-Åb¼¨çp8ì'Š@ 
PXX˜’’bç¹6²jl¶¼ÍDÑÝÝ]VV&iùòå~¿ßp%>ŸO9–àÄfQçRòÈù+—Ã8»O=ѱHž7bÅ’À— si)úsýýýúsú lyÕs;::B¡LË_ý?³gôO}ý¾õÿÔŸK¢PºººÌþÇ_^^ÞÞÞ¾4wÇú…YQílEâÇää¤6 Õ2fó휣hkk›Ý•Ùtì’ç˜DÁËÄ}K©««+-- N+++SÃÁúúúõë×ka£§§G}"?55uhhÈz[êZ… 6¨ujÈLý…a+Œüt¾¬¸¸XáÁ\…ìˆ$QH9 ˰gÏÙôÒÜí¢ax…YQ-¶¢¦W­ZÕÔÔ$áAž[QQu~Ø®i¥’]‹É·ÇÒ± Q(xy€¸i)ããã7nt:2mhhÐÂ^B…Ïçs8yyyêö.—Ëzàë÷ûsrrä‰^¯·¥¥E¿°Œ¤eÌ­¾)r…ú/Gª­­uM«©©Q?”6ÇD!;²bÅ ÙÍÜÜ\õ©§°2„B!·ÛÝÛÛ»wG^#í ¸ªªª"WbVT;‰¢»»;??_{î½÷Þu¾þ»ž¤0ª YÿÂÝØé³½÷=1þö;ÞˆI¼<@¶J„àXiij þÂ’ú݉¨äJÀˆœ?èHm|ì£×œ}áU:v¼“(–.àæØR6mÚ ÏŸ?¯ÿ4–bß½,AzïŸm¾WÞYåöÔgo<Þ°ûÂð;tì’ù˜D îµ´´¸Ýî””ý¦Ùá2²(»°tJ2¯»“HßÄú‹m´ýÁìºçþ¬ò±ÔÂ@ùÿ¶8e Q`*Ú–ì½øÊƒùÛáýk³S€Å§}È„·¥|{üãkµ‰þ}‡h³@¢ÀÊàÓ/ïu®Qqbߥ…Ü7üü{÷qŽH øZ â7NìûÈÕí®«_üò? u¾ò~è=ª+ÞˆI‹€/h)ˆ/§v=£—0;)AuÀ1‰‚— ¥Æ´ß£ØçºJR‚ê €7b/@K¢;}öxÃîwO R]ðFL¢àåh)ÕH øZ ¨®@ÏF¢@¢@¢E4\ÀÐR@uz6Åìñ%ƒ-TW g#Qðò´€ê €žDÁËÐRTG³ŒK¶@Ç$ ^€–B„;г%v¢à>`‰´èØ yz6ÞõØ200PZZš’’ât:‹ŠŠÎ;§ª««KOOOKKknnÖâ„0ÆÇÇ«««S§É„ÜU ìܹÓëõÊÊóóó»»»µù~¿?77×årù|¾ÖÖV^HâÕÊ•+Àääd(’QYY©Íß²e‹Œ‘‘‘‰‰ ™!ÂînÚ´©¸¸xtZII‰ÜU TTTÈJdýÛ·o/(øðtpfffgg§LH€©­­åU€D ȸ?--M›ÎÎÎîëë ïYLEVV–ZX&Ün·Z@¯•;NmÚëõ677rÌ Qˆo@ °°0%%Eû8“Ãá0 Ö‰"l¾JfËwww—••¥§§/_¾Üï÷ó*@¢˜_\ÀÌ_Kñx<¡PH¦å¯ôÏôE¿6vŽÂâ颫«KžËkGuz6ÅüâKùk)ê’† 6¨A¿v…ÌÔ_G‘šš:44™d’’’ÑÑÑ`0X\\¬¿ŽÂ0QTVVjqE…€×Žê ôl$ ^ ZÊ…áwú~Fþêgúýþœœ‡Ãáõz[ZZô@‚Dõ]O¢±±Ñår~×Smm­kZMM„ëDÑÞÞ¾bÅ §Ó™››Ë§ž¨®@ÏF¢àå–zK>üÆ“ž¿’ýèØ€žDÀVK¹0üΉÆGŸþ\™<$·_>ÐÁQ;г%c¢à>`¦-eøð¯Üô½}©×üøS¹÷¢Õí®«ÎtæŽèÙ’4Q°I”xüãkϸ^;5±ïÒBâ QˆbÐÐ"DÛE«?œ˜¾ýäïëÞò#™ÐþwÂ4ÓKa@¢°ýæD_×_}«ý’«Tœxñ˵û?v=ç(‰À ¼y×#¾b-T<›÷7„ Ô‰‚SäÀìZJØ) ¾ë tì@Ï–¤‰‚/æØR޼둙_â÷(@Çôl$ ³o)†ß9ÙÜ>vú,Ç tì@ÏF¢@KÕèÙH¼ñ´P]€žD1߸€ ¥€ê ôl$ $ $ Q QDÃ|-TW g[Љ¢¦¦æ¿Æƒ?^–6÷•äåå]$´¯.û4TW g³¯©©i®‰BÆÙË$¥k¯½6f‰¢.¡iûèóù®ÚW—}šƒª+гÙñ±},–‰BÆÜçš*n¹å>Þ‡ÄÆƒê ôl6]sÍ5$ ø„ê ôlóš(þôOÿ4¼þúë'OžyèС#GŽH¨&QÄ8Q¨»ÙÙÙrw```q_ø˜(ÃV8ßV‹Ã¸À%™ïí†íi¬VK¢±M+V¬Ø³gÏ$T¼þúëgΜ!Q«®®žc¢X"#¹ùN‹Xþ%’(–xP!Q€Ø&ŠÏþó>ø „Š'Ÿ|2œ:ujÅÿù?ÿÇãñ8Îììl™Ö/iö¶†–––Ï~ö³.—K½ãŽ;æ/N¼ñÆÚg(¾cK挌ŒTTT¸¦•••ÉV"— [¡,SYY™šš*O¹á†ôÿ§×–—•ŸŸÿâ‹/j3ƒÁ`MMMZZšYIÒÓÓwíÚeQø¨+ŒÜ/ý¶ìì£Y Ÿ¹¹°G–$r+‡qéÙ­ÖívgddÈDcc£,,eZÖŸ™™I¢ó”(V®\)‰â¡‡joo?tèÐÉ“':QHNéo}ë[2ýï|G¦Ur°xH[CqqñéÓ§Ïœ9sã7ÊÝy 
?ÿùϽ^¯¬_²MX䲟("ïʈPîvtt<ýôÓ2Q^^®_L^ÈnܸQ:räHOOL¬^½:lÁÁAí+nõËßzë­‘¥’©­DÆ »`…†w­÷Ѻ ÖϵÀ#·õ0.‘#0‹Õ–””È݉‰‰ÜÜ\™¿ããã2QZZj?Qp©+âÕ=‰âÚ‡ÅùË_Êô[o½%Ó2'êCÚŽ?®Ý=qâ„ܽì²Ëæ/NȘrFq"j¢p¹\rwršLÈݨ>í)ŠÃáÐæûý~;:ΰùj‘¥Òfšmk+4¼k½Öe˜Åñ1,IäVÌãR;³XíÍ7ß,w·nÝ*kjjäïí·ß.e¾ýDÁ×q"ŽP]г‘( >¥}Îd¦…=Ã;©8ÑÓÓ3ÓCýöÛoõ«_•»wÞygÌãÄL¯°Ÿ(d+êâZ™‰:à ƒµµµa—&Ë‹§¥¯°ùÚòÚàØpüj¶­­°±±1%%Åðb;ûhV†Y‹’DîEäa\jGÀ¢þ˜­VHI´“u2=::*Ó2Çbµ ÑÀû.гÅ}¢Ð®ºÖFo‘ßõdöú®§Ï}îs‡C–‰aœ˜õ¥Øa\:4,:›’K]G¨®èÙ’7QÌÝüý¢Å\.Å’‰"ö—b$ŠY& -T,Ç´Ø®SûUlâ°8‰"1ÌñÚ –ˆ±¾ÁmqtëâU’4Qäååiÿ׿ei«¾f½ÅˆHr\ê ªkÂ1zÛ²‚8ºlz”W HÒ+³µ]{íµK|‡ù’A€–ªk&Š7ï:ûBÛ¿½ùý»H@<öl$ ´P]?QÈxýý÷_[â·¡çÛH‰‚DБTWÅœÅʪï¿ÿ~‘tvv¾öÚk¼ˆ g#QðòtdÕ•Da7QüËW6ÞqÇMMM»ví’PqâÄ ^Dг‘(–.àh)ñEºVÊFuË»Dq×ÿøú–-[n½õV 2.yíµ×èÀ1‰£ÛaÏk‘ÌV¾Ça)”†–<‰â_¿þ·Ýv›„Š;î¸ãþûï?|ø0U Q`x´l!Ÿ>ëÍmß¾=%%Eþ.©TÛmÅöµ°³¶…9VI’m’$QìÝr×Ýwßý½ï}ï–[nijjêìì¤HHñ‘(–/_¾cÇŽË/¿œDA¢ Q((Ìï€F&vîÜéõzNg~~~ww·6ßï÷çææº\.ŸÏ×ÚÚª-©hË ”––¦¤¤Ès‹ŠŠÎ;g±Îȧ[¯'ry±yóæŒŒ )UEEE04ܵçŸ~Íš52±zõê°L]]]zzzZZZss³u‘ÆÇÇ«««S§É„ܵ>\zÏݾ}»Ûí–òWVVêçGÝÁ¨%×§©ãÇkÓmmmÚ„ÌÑò•¶¼áŽ[ï׌^})dVV–ÃỲ³²G²ã Ö;k±_‘k6;\‘U×úE1kaÛ { lVHµB‹ã°D…‰D€+³Ì ¥èGN2™œœ”ñnAÁ‡_I‘™™©½ýËH±¶¶Öpø%=N '†B!oÉ€Ézf£7‹õè—ߺukqq±¬V«ªªR¥ ³~ýú]»vÉÄîÝ»×­[§æoÙ²E¾òtÛÉV¬‹´iÓ&ÙÖè´’’¹k½kzÏ•»j~dÌvÐNÉ•o|ãÚ0zppP†ûò™–´ ­-2;Ùß/û¯þM7ݤ†×òì¬Ü•½–b½³R]-†Úak6;\†U×âE1kúm¾v*¤Z¡vTæ)QÜsO]FÆÊßYDÀuŠo0£–¢9©ÿ×ÊèÐétjÓ^¯·¹¹YF¥cJ=ynZZšõ:í|Ò&l=ú‡<O¿6=66–žžùtY@“•h«ÊÊÊêëëÓÊÎÎVÓfePwõO” ·Ûm½kzÏUå—ù²XØFÍvÐNÉ•ŽŽÉ2ÑØØ˜““£¥‹òòr™o(¢î—ýW_Ùú£¡~…@&¬wVª«u¢Ð¯ÙìpV]‹Ŭ]è·eøØ©j…²ý«?O‰¢ àO}´~Õª R‚D$Æ•D`–‰Âp~wwwYY™ ’–/_î÷û ………)))ÚÇTÔçC,Fi†óí¯GO-¦wóÍ7‡-¶yóf‹±Í¢Î(Ù|nä|³´³% ÉHW&òòòdwõÕWkc_™oçµ°¿Å™¾úvv6j¢°SìTݨ/¨ÍµBÎh…sL?ÿùc×^ûgZ®øÙÏÚõ=ñDc~þç΋½Þ¬ûïߪн°€qß}[rr>-ËËßþðf}yä‘m’Xä¡”WYٗΞ=D¢H$ €De ÓÕÕùßtŒS;::´¡ªüºN³ù6×#‹©ë› ¦¥¨ê?ÁSÓ§,Ün·vÊb¦ç(ôÿQŽú/m=‹çF=Ga¸ƒ3:G!n¸á†§Ÿ~Z»˜DþʨZ}ú+†‰Â櫦?G!»o½³3JQ냾ꚽ(´ê!‚Á Ù^˜£°.ÀÔž£Ø´éïöîýg™ ÓjþSOÝ#Aâå—”é3gžÛ¸±ØúSO>Zùå^-“¼öÚ£*~øvµŒÄ‰W^yX¦ó›—¾öµKK¯'Q$ @¢0JVVVjC(–eddh3SSS‡††ÔÂ2_}`}Æ Q×öt5ßl=aË××ׯ_¿^žöôô¨î+»wï.-- ›YTT$ÝâÔ~^6¡ÿ(¼Y‘Ô§Þe”Y\\õc÷zÏÕ®(†×Q˜í ’ëmß¾ýòË/¿÷Þ{eZþæää466Ú|-,ökv¯¾X^m¯ËÊʬwVª«Å~…­ÙìpV]³eÕªU2Ò•P!{QQQa¶†/AÔ Y£´ã ˆy¢x⦅æå}Nþjc}ç“Ú´Ü ¿ðÜs÷Ú¿ŽbõêÜ^¸OÍ?thGAÁŸ¨eNzZ=táÂ+.×%$ 
€Dlj‚+³¹´”¨£éDV¬Xát:sssÕGGd`êr¹Ô22_F«‡Ãëõ¶´´D]gØÓ£®'lym çóùdɼ¼<)aØN­Y³æÅ_ ›)ƒ˜Õ«WkÓ2Ž”«þëzÌŠ4>>^[[ëšVSS£]ßlsämñ܆†í«ªªª ×i¶ƒQK®wìØ1yáΟ?/ÓòWÖ&sl¾û5»W_ŽÆÆ¥<²ã²ûú+4"wVª«Å~EÎ4<\†U×ìEéîîÎÏÏ×öBÒ—Ù^¾Q+äÔïד¼èê8ÄýÆ{eZþú|Ù2‡D(H$l¢((ø“û·ûÃfÊõÃ?¾=/ïsÇEêÛcå¶wï?K¨°øöX™#eÚ"?(E|' ®Ì–HK™Ý5¦{¬Å¼‘(€8íÙøöX‹ÓRfôûk;‰ g#Qðòtd‹â.¢©èØI$ €DA¢èÈH$ :v‰ Q(:²hJKKSRRœN§öÃÀjL¼sçN¯×+óóóó»»»µù•••.—Ëív›ýV—þ— W"ó·oß.kõè¿Èð§Öôß²o¸-ÃUYìWsssVV–öC‹µ´´h%—¾õ¥—^jmmÍÉÉÑvDýfœØ¼y³ö»uÁ`ЬÀ‘‹EÆï÷çææÊ2>ŸO6Gu‰ g#QÌ#.àbÕR¤ã“““¡P¨®®Nåj°+cß‘‘yH†ìö‰²LIIÉè4™ˆš( W2ý \¿[‰¬Ó"QLYþËßlUûuÓM7©1½ÅbåååCCCò †$±èï®Y³F[lëÖ­ÅÅŲƒòôªªªÚÚZÃ[,¦/Lff¦6ê’`£–¡º‚Dг‘(Ä .§¥¥©Á®ú¿Ìw:Útvvv¿6Ý××5Q®DæëW¢~åmv‰ÂpUû%#{;»¯ú2?ì®ÚÇ£¶>66–žžnX`‹Åô…ñz½ÍÍ̓ƒƒTE((Ä@ PXX˜’’¢}JGûøýÁ}ÔDag¾>iÌ"Q®Êæ~Ít÷# ¦gñt;‹uww—••IÞX¾|¹ßï§r‚D€D x<žŽŽŽP($Óò7j˜é9 ³ù†'d¨=99©MƒÁ¹œ£°¹_3Ýý°»òtué…Åò6Sººº O¶€DA¢@¢°äddd¨Ïîoذ!êZ»Ž"8­¸¸xÖ‰B­DñêU«dä!¡B SQQ¡–OMM2K†«²¹_3Ýý°»õõõëׯéžžuFXÍ [­Ì—P¤% )•‘‰âÍïß%ãõ%~“B’(Wf ÂNKñûý999‡Ãëõ¶´´DROLLTUU9NòÚù®'³ùò\í»dm²Nm~www~~¾V˜{ï½W-ߨØ(KšmËpU6÷k¦»yWÒ‚Ïç“5äååIçkV`ÃÅÂV+óW¬X!Ç6777 ?õDÇn'QÄÑDLqeößa¾dˆë–ßkà—¨®ñnìôÙÞûž{$ÊbýC'›Õn?(«þ—¯l¼ë|ý_¿þ2p_¢·v‘(¾=–— ##Q€êºýÙý})WŸ}áU;ËßÿýwÜqÇ–-[n»í¶»c¡éÖ;ïžR<)¤U |øða*9èÙH¼<Y̸\®%¸*P]Ñ[>§}Xèñ¯ýÙÍ?¼0üŽÅÂò6ßÔÔtë­·Êxý{±pûº¿ýÞ<âI!¥¨Rà×^{Jz6/@GP]çÑPç+í—\õÁ]±÷â+]óu³S»ví’‘úwÜqK,lùã/Ý2¤xRH)ªøÄ‰Trг‘(–.àh) º&p¨ÐníÎ5†§,^{í5£Ë›ýý÷ßß ·}¼ iHñ¤RT)°ö]g=‰@üýk„7nq}{ê3ÅÚDÿ¾CªiËèüĉ2L?|øpg,49¿Ð9¤xRH)ªxtt”> Q€ùõƦ¦¶eWL_Mñ¥¾Xýôå¥O}öÆ_Üù õesÇ "$ $N<éýëç¯ùûýé׿üÕïu¾ò~è=Ž ˆ'š?ü˜Ó‚œ”çgZ ¨®‰ãÃߣøÈÕœ”èÙH ‡/h) º&†±Óg7ì~÷Ô u g#Qðò´€ê—8Ÿ г‘(ÐR@u q‘(èûZ ¨®àà4.ÅÂàü,@KÕ$ €žDHH žq‚‰‰‰bap~X°–"}€µÛo¿½¬¬L›–‰mÛ¶Q]€žD1︆ X°–¢ƒYr QÌÅÄÄDfff__Ÿv···×ívËLª+г‘(xy€DKó·@܉áíÚµ«¼¼\?§´´t÷îÝTW,$Nôl$ $ŠxM'öï߯Ÿ#=xXÆ º‚ƒиHô}ÀRi)Á`033sttT?ÇívËœÒÒÒ””§ÓYTTtîܹ°Ñ³š˜˜˜¨¬¬t¹\òƆ5ßp ËtÔF7oÞœ‘‘!k¨¨¨XÝ·oß.’…e£ãããÛÒ–onnÎÊÊr8Ö‹µ´´x½^™/]çK/½ÔÚÚš““#wóóó;fQTû{V¿ßŸ››+Ëø|>Ùœ¶Œ”appP¿ËCCC2“ê >@ã"QÌ/Îϳn)ßøÆ7$¨»2ýío[&¤ “““¡P¨®®N†ïf‰B-))&j~Ô5h¶nÝZ\\<22"‹UUUÕÖÖZ' ý¶dµQ·uÓM7©1½Åbååå2v—‡ššš$±èï®Y³Æº¨6÷(¬0’å:;;eB‚ZF2Lä^Kê º‚A@ÏF¢°Dõôôèÿž““300¶Œ ¬ÓÒÒÌEvvv¿6Ý××gø 
Ã5h<zúØØXzzºu¢Ðo++++ê¶ddo¸ª°ÅÔ@_æ‡ÝU£|³¢ÚÜ£°ÂÈaonn;#A¢‰‰@üY·nÝdBþnܸQ› SRR´ÏóhÔ1Laãiu7êÔ]=µ˜Y¢0ÛÜ–ý"Þ5+ªÍ= [¬»»»¬¬LòÆòåËý~¿6Óçó…e ¹›œŸzÂ"’éêêºúê«eBþªk<OGGG(’iùk$ÌÎQD]ƒZL]Ï•Ù9 ûÛ²³˜Ù]³¢ÚÜ#³ ¸åø«)//—.[ÿh[[[r^™ Qˆ'yyy;wî¼á†ÔœŒŒ õ)ÿ 6X$ í:Šà´ââb5ßl ©©©CCCjCõõõëׯ×>jÕÓÓ£®m0Kj[úë(¢–vF‹™Ý5+ªÍ= [­Ì×~wB…L›¹gÏÙ/ýbIûí±Å‚âü,0Ç–"qÂårÉÐVÍñûý999‡Ãëõ¶´´X$Љ‰‰ªª*§Ó)Ãbýw=™­¡±±Q¶¥^ËÜçóÉ’lÂþC™(dÚ×(ÉFÕO¿E-팳¸kXT›{¶Z™¿bÅ 9n¹¹¹êSO¡PÈív÷ööjw“ùîèØг‘(×IÒR’á¹%¨9É„þ›¸¨®˜‹±Óg{ï{büí@ÏF¢à Q€êŠÙôäÀîKùó³/¼j±'ˆz6€¸o).#S|*ÕsöÖ£Ïɱ•ÛÌ/Ýzß…áw8ø=‰‚¾ ¥€êŠê|eߥ…{/ºb¯ãʽÎ+_¸¾&ì” g›âÊl´P]=T8Vkç+ö¹®ÖŸ² QôlS|{,CÚÐ7nÜ,nýû‘((€•¡ÎWÚ/¹J‹í®«ÿøÚ'ÿËú_Üù vŽ‚DH zœ8ùå}—^ÿm™ó~è=Ž ˆB}דþ¤(8? ÐR@uEtþ…ë*NJôl$Šp\CÐR@u…µ±Óg7ì~÷Ô ‡ g#QðÆÐR@uÅ<â@ÏF¢@KÕ|€ÆE¢ ïh) º‚ƒиH ƒó³-TW(z6 Q Q€xÆ "$ $ $Š…ÁùY€–ª+г‘(fkÈZ ¨®@ÏF¢àX¢-åèÑ£ëÖ­K™VTT$wו,û]grìØ±ìì즦&5èØã'ˆz6€X¶”'Ndeeíܹ34­µµÕív÷ôô„%Š#GŽÈbÒÝpœAÇÎÁ@¢ Qtd¿SYYvÚAîÊL}¢èêêòx<Ï?ÿ<tì|$ЏLœŸ毥dddœ?^?gtt4==]%ŠŽŽ¯×ÛÝÝÍ;ƒ Ó³ñí±bF¥„ât:Õ£‡Ãï÷s €DB¢3éé飣£ú9Á`PŽbÏž=™™™@€c$N Qˆ%;×QùA–Û/èà(Žœ èÙH¢·”áÃo¼ø¥Ú½Î5{«÷§_·ïÒÂ3‡9D c q‘(XµuRB²„v^B¦ûÃ/'@Ç>@ãJêDÁùYÀŽÀßlÑRDØí'_/H&´¦Ä4ÓKaúÐ5ÏqX¬iÀuŠo`F;Gq óË{/¾’s ‘((Dwö…W_¸¾æƒ\ñWìÿØu„ gþ(Ø¥NYð]O€D`öξðªö²ü˜âÊl³k)†ß9ÙÜ>vú,Ç tìä=ß €–ª+г‘(xãh) ºbÁq‚ g#Q ¥€ê >@ã"QÐ÷´P]ÁÁh\$Š…ÁùY€–ª+H=‰((@<ãEüèêêòx>^]]:M&ä®¶°¢hêçË’c­Ö?88˜ õ(--MII‘;wN=TWW—žžž––ÖÜÜl±E³Z쑞 ‚sss].—Ï瓵ᆠ¹˜Ø¼ysFF†¬­¢¢"lOgAFóMMMú9rWfª½“MHµVI@Lä8lß¾}Ökžû&èȪ+‰"…ŒƒGFF&''e¤XPðáJ6mÚT\\<:­¤¤Dî†,#‡¡ÚDWW— ÖÕ555 a•W%ÈC¡D5¨Ý²e‹ŒÝ¥02ßz‹%4Ü#½ÌÌÌÎÎN™œP[[k¸!³B†-¶uëV)†lN«ªªRk +³!ÃWDÂÉùóçõsd%eé·.Óï—Z•ìWØsgºæ9n‚Ž  ºrð(â8QÌîü¬ìšúフžN§6••Õ××§MË„Ûí¶™(¦¦¯Ònkk“‰ÞÞ^¯×+ñÀ¢²Ñ´´4m:;;[m4raw-Jh¸GzRªæææÁÁA‹ ™2l1ÇÓß߯M©ú¬Cí…zTÒ—„™°™a;kvzÇzÍsÙD\à“  º‚Dг‘(bÉNB0wZ,ðàÁ+VLMÌFûðR˜@ PXX˜’’¢ F‡Å7V%Ôëîî.++“ÑÿòåËý~¿á’6 vÚA-6kRªÑÑQýœ`0y&ABNAAÁ‘#Gô3#ÏBèµ¹æ¹lH ‰bN‰"++Kýë}¦ç(D~~þ­·ÞêóùdT¹QÇÓÑÑ …dZþªçÎôÅŒJh¨««KÖc¸¤Y!#Ó_bvíêÉæÕSÓWZçåå©™7nŒüŒ™zÔþu³Þ$ N QÌ5QÔÕÕ•””ŒŽŽƒÁââbu•BjjêÐÐPÔD±ÿ~™#‡Þp£ê2† 6è?f£]­¿ŽÂl‹f%´“(d­EIRà ™2l±úúúõë× ÈtOO Ïš â%Yµ¶¶JŒ‘<&r7ò™4;v쨮®V3%bI@Ú¶m›VByúÁƒÕ£6×<—M(9Q O=üðí$ŠñññÚÚZ×´ššu-Dcc£Ì‰}†­çÀÚŸ ùýþœœ‡Ãáõz[ZZôÏ•` 
Cvõ]O[4+¡D!µAŠçt:sssÕ§žÂ6dVȰŴPáóùdɼ¼<ý÷\ÍÚ±cÇ$Yi»¶víZÃ_P$ÏègJ¶©ªªr»ÝR9Œ¥¥¥’šf´æ9n€D¯‰ÂâüìáÃSŸú”Œ§þsð<ïd”¹ÿ~j!â«¥TW gKêDy ÙððTCÔÇóA–Û,TIÚÚô_ ,ñ–P]€žDþò>MÝH8? ÐR@uz6Åìq @KÕèÙH¼ñ´€êºø8Aг‘(ÐR@u q‘(èûZ ¨®àà4.ÅÂàü,@KÕ$ €žDHH žq‚‰@Ü“žg·xûí·—••iÓ2±mÛ6^‰‚D î…Y´ˆm䘘˜ÈÌÌìëëÓîöööºÝn™É  Qpe6€¸l)‘a^ÏZìÚµ«¼¼\?§´´t÷îÝÔª+$mÏÆ·Çˆï–²À‰BâÄþýûõs¤ß Ë º@Rõl$ 3h)ÒÌ·oßîv»].Weeåøø¸šßÜÜœ••åp8´9›7oÎÈÈÅ***‚Á ZC]]]zzzZZš,¯Í‘•TWW§N“ ý:wîÜéõzNg~~~ww·6bbB6-k–b444„}êi™NXÀ˜é†ü~nn®lÈçóµ¶¶j3e±ÁÁAý1’™Ô:öäÄ "€žD`Ɖ¢¤¤dtšLHÝì…~úS†ý¹œ£˜Ñ†”®®.µ§åååÒQþÞˆª­+³A¢ÌHf–(JJJ‚Ó®£Ð/V__¿~ýú™îé马¬Ôæk×QHØÐ_G¡®‹uë/o0èkËkeå#ƒDjjêÐÐÙíoHŠ­åIÚÌ={öÈJôËóí±Hfœ @¢0ãDÑÐР}‰SUU•úe·ÈÿëK¨ðù|‡#//OÿO}Çˈ?컞jkk]ÓjjjÌÖ©îʲi§Ó)ňü®'ÑØØ(«2ü®§mHнbÅ ÙPnn®úÔS(r»Ý½½½Ú]~á iìôÙÞûž{„C€D ö‰‚ƒ aI}ÌI&$ÕpLxý¶e}ôš³/¼ÊÑ@¢ø=œŸæÒRH cO§v=#¡BnŸùÊñ†Ý†ßá˜ôl$Šp 0—–Âפ‚Ž=© >ýò^çš½]ñÔg¾²/åÏåÿ›S=‰‚7€–ª+f*´“2¿ü„û/ô§,8Aг‘(˜¶nܸq3¼í½è m¢ß!ÞU†¬$ ´P]ae¨ó•}©×´_òáiнŽ+÷ìº×¾¹];GÁÁèÙ¦¸2-TWXlj½_ùA–¸øÊçþìïÎ<Ùõ~è=â@Ï–Ô‰ØñÖ£Ïiç%ô'%Â((€í÷(ÚkÂNJ„á7vúìñ†ÝïžäP Q QÄçgZ ¨®@ÏF¢˜=®!h) º=‰‚7€–P]'ˆz6Z ¨®àà4.}@KÕ|€ÆE¢XœŸh) º‚Dг‘(‰‰Ä3N Q QX2Ž=ºnݺ”iEEEr÷w]ɲßu&ÇŽËÎÎnjjâˆ@¢ˆ³DÁùY`þZʉ'²²²vîÜšÖÚÚêv»{zzÂÅ‘#Gd1én8Πc€èÙøöX1k)•••a§ä®ÌÔ'Š®®.ÇóüóÏsAljѳ‘(Ĭ¥dddœ?^?gtt4==]%ŠŽŽ¯×ÛÝÝÍ{bà@ÏF¢Ë–¢¿RBq:êQ‡Ãá÷û9¼ cçà Q(:2éé飣£ú9Á`PŽbÏž=™™™@€# :v>E¼& ÎÏó×Rì\GqðàÁŒŒ ùËA;ƒ‰Ñ³ñí±bæÄ‰ÙÙÙ­­­¡PhrrR&änäw=ÌÌÌ={öpÄ€@¢0†ßé{øù6ÿرcEEE®ik×®5û=ŠãÇKØhlläHqDHfløðOzþªmYÁ ŸË!‰€=†ß9ÑøèÓŸ+“,!·_>ÐÁ1S\™ jK>üÆ+7}o_ê5?þÔ_î½hu»ëª3‡9D cz¶$M\CØl)ê¤Äã_ûxÆõÚ©‰}—'@Çôl$ Ñ[ʇ·‹VÿnzYÁOþ¾þè-?’ í'L3½¦µÊÉqX”iN YIL[ÊoNôuýÕ·Ú/¹Jʼn¿\»ÿc×sŽtìàà4.€´”7ïz¤ÃW¬…Šgóþ†P:vpðWR' Îϳk)a§,ø®'бƒDг%i¢0GoÞõÈÌ/ñ{HHfïÂð;'›ÛÇNŸåPÉŒDHHH ƒó³-TW g#QÌ×´P]€žDÁ@K¨®‹D=‰-TWpð‰‚¾ ¥€ê >@ã"Q, ÎÏ´P]A¢èÙHîöÛo/++Ó¦ebÛ¶m€D€D¶LLLdfföõõiw{{{Ýn·ÌäÈóŠDH–:éXì,¶k×®òòrýœÒÒÒÝ»ws Q QD'qbÿþýú9Ò…e @¢ˆÎÏsl)›7oÎÈÈp¹\Á`PæÔÔÔèÏÈ´Ì1[XSWW—žžž––ÖÜÜl˜´»Ët, ¼^ïàà ~ CCC2“—2É«+г‘(bkÈ€¹´”­[·ŒŒ„B¡ªªªÚÚZ™999)mÿÇ?þ±L8p °°Ðba±eË–¢¢"™?11!ÑÂ"QDÎ7[§ÓéŒ,­¤^Êd®®@ÏF¢àå–\Kñx<ýýýÚôØØXzzº6}îܹœœœ[o½UþÊ´õÂÙÙÙêêÈa(ÌÖI¢ ºbQp‚ g#Q˜YKYöû‡zhÛ¶m2§©©)ꆗFØLfëôù|aŸz’»|ê‰÷]pð‰‚¾XZ-Åãñ¨SzÇŽ“áûÁƒå¯:ÿ`¶°á9 É“““Út0´8Ga¸Îòòré¿~oÚÚ¸2›÷]pð‰"ö8? 
Ì¥¥Ô×ׯ_¿~``@¦{zz*++eb||<777È´„Šüü|í‡ žúÏë($诣XµjUSS“„ ™_QQ¡‚DjjêÐÐuÄž={JJJôEåÛc©®`Ðг‘(,E2¦÷ù|‡#//O;-PUUµk×.µÀ½÷Þ+sÌÖlÚ´I¢‚þ»žº»»%ŠÈ’^¯WÖ Ecc£ËåÒŸ©0\g(r»Ý½½½Ú]~á Q QÀŒ£Žú˜“L444pL€Ù;}¶÷¾'Æß‰º$'ˆ(€A mYAû%kþã‡s4(ÀŒ½õès*>È®«^º±îݾ·9&Hàü,@KÕ6 u¾òØ~qú?ˆPÐñ™¯pÊ g#Qp @KÕ3O¸oÐÎWpÊ g#QðÆØm)ܸqãfxûñ§Š´‰þ}‡8A0d%Q ¥€ê +}»ým­–ƒüXjagÁÿóTNÉSŸ½ñw>xaø>@ÏF¢@KÕÑãÄŸü‹Î+6îO¿îå¯~w¨ó•÷Cïqðz¶¤NœŸh) ºÂŽÿøÑÚ§›ô'%ˆs=‰D§ýž\vR‚D€D¢;}öxÃîwO F]’DHHH ƒó³-TW g#QÌ×´P]€žDÁ@K¨®‹D=‰-TWpð‰‚¾ ¥€ê >@ã"Q, ÎÏ´P]A¢èÙHàC555ÿu‘üñ²´X­*//ïK[SS‰€$Ãñe0ÿ̆ú$ $QÔÅ'­ð>ŸïZàÿoï|€â¸î<¯5ǘ`²„#˜›âØudÖa ÖFÁ²}Dëå8B¸ÓqŠCé¨ÜR„Ëé(ЬޒµN¢•".ç³Û²‚e$L,bOl•,‘¹3±ƒ‰Bd¤SD$„%ÁÊ!X'B¸Ÿiç¹ÕÓÝ43̟ϧº¦º{ºß{ýú÷ûõï;¯»"•›o¾EãŠBRóKщ&*zè!N%@ÄbŸêód6à)€¹¢(>@1ˆpE¡¿ #ÄÞ K!a,g¥ÊäÚCis4¶<2#›–êò“Ÿôûý¢Î;799§Š‚·Rà)€¹¢(*ŠåQA+а$Ç+¢(´-Uo3ã Š ºöåLèM{5zE¤E6¥<(j¡¿¿_Ãøø8ŠðÀ\Qñ®(V„ Exk_Î,EKŠ"''§£££»»[ÃÀÀÀ¹sçP€§æ¿Šâ[ßú–ì•ðáxÓ¦M¿úÕ¯ *ÂðŽ—¥S_þò—CWÚ—Ë•ŸŸÿꫯj+§¦¦jkk“’’Ôö†ƒ2511QYYéž§¢¢BÚ¦/\#99yÏž=öMêêêÊÎΖ¾µJgs_}'///##C'==]’3Cù###’ºÞÕcµFß-= ‡YUU•˜˜(G}ÿý÷ŽŽÚUou`]VgʪÌÀ3hÓ`Å2(ŠÛn»M4ƒˆŠƒúýþÓ§O£(OÌ5~ÅÃ?ü³ŸýLfäò/»ñ‹_\‘1 ÉH´ZÂ2F166¦½6J[¥$‹r¤6ûêENÈ|OOÏ /¼ 36lÐo#YÔÉ“'e&--;I’K¾;88¸Ø#’Ÿ/77×årië´õn·[ggg* µ½ 3²¨ßF+ÇfØA­OOO—}³³³7oÞ,Y¯ó#ÒW¡Ík?ùÊ÷x<úvrý»Å°™vÔ µ™½¢ì Ó£^T“Ë4=ƒNŠ"2ŸÌFQ (Þç…^X·nÝ 7Ü îQ1UKª(”œÈÌÌt˜v[¥‰©©©²8<jxFB›Ww=ù|¾À»žœ+ íÆÓ[¤BT¥¥¥6w=-Ø-†ÐziË–-!* Ó£6ÔeÕ$«y›»žìŒ¢@QÀÒ*Š~ô£²Ëüc‘555V*âÆoÔ~QûèÄÇ?þqMN¨·O†¢($}IOO|xjjª®®NËbÕÊ––í7þÀL]Ú¦žÌ–éœà…ºgõêÕ"Û®(FFFrss­z^°[ = õRJJŠÍ¿78ÉþMÚP—U“¬æMÏ “£(P°´Š¢§§ç¶Ûn“ä/33sçÎVŠbÇŽ7ÝtSxG*Þzë-u³Ó¢žÐãÖϧþõ×_—žÌÈÈÀ 0ŲÂ|x `®(Š•BäÄ_þå_÷ì¦Ëçî»ïîëë£Cˆl(Še…— à)€¹¢(VJNh£ŸøÄ'ÑÙP\xðÀ\Q+Ì/~ñ ¯×«É‰àž ²¡(8=2Ìu …&*"õð÷bÿw€È†¢àôÈ0×åSrE#Š‚øðÀ\cŒ¼¼<-_hÙùüª[œoŒœˆÞȆ¢ˆeVðÌ@\ÅŠ"Ì0@€¢@Q (P( .ÁÎøú׿^QQ“‡&f°¤ÛGBÉ‹m€œëíÛ·cö(Š0Ãø,ž˜+Š"z“à™žžNMM‰Ä7¼‡ŠbÁ®8uêTZZšœw"Š"œð ž˜+Š"öp˜ÂîÙ³gÆ Ñ¥‚–®aÁˆ“½ÂÒE¡¢ß·¼¼|ïÞ½D6<sÄKpäüŒê0û9ÑÕÕ…¢ˆ+E!Ùmè2’Ȇ¢à€§æŠ¢XîΗ&µµµy½^—Ë%yF___{{{vv¶,æçç 2¿éé骪*·Û––¶cǵ^fZ[[ÓÓÓdqtt´¼¼ÜãñH9ÅÅÅ/^Ô¶Q¨lÙ²%%%E ¬¬¬œššÒVJ{ÆÆÆóN™Ù½{·ÖZiÞàà 
¶ÞçóåææJ!YYYÒ~ÓºL›dS¦ÐØØ˜œœœ””$‡fß`ûtÙªÓ¬°ÙÞ´v'‡oÕBÓc¿råJMMMâ<2#‹¡W´à¡?^ZBdCQpáÀS0×èS6ldnvvv×®]’çé×­[gÈ%É.++“\vrrRt}®ÿàƒªWR¿ß/…ÌÌÌÈ.’DÊÑØ¶m[iiéÄÄ„lV]]]WW§­—×4•ɤe{)yçÎk×¾\©©©‡’Ñ ªC]6M2-sëÖ­"333Ïœ9£Í_¾|999yAE¡~/×7Ïëõ¶¶¶ê‡5ì3]C“LËÌÈÈP†/Ø`û4ZŠR{I™ ¦àVÛ[Õ¾ØÃ×oczìr–ձˌèÌÐ+rÒѨ(x2P«œ,ÎØ¯÷ûý………G»F»Êtw=j³¬¬,«»žLk¬¨¨ {õêÕ>Ÿ/Ä&Ù$ÊV ¶ï[ûNv~R¬jwrø‹­H¿^)Ð+²é 9ãÑx×SdÆŠ"ÌØüŒºXE¡£8sæŒU¦˜™™ÙÓÓ333#óòi³™zžAφ $× "5ïí핆Ò$ý¯é¦c¦ ¶ïÛ0ŽQØ×nsø‹:ûRˆ¾jŒ"ôŠlºbß¾}Ñød6ŠâBQ‘SZ¥òåååSóTTTXåå)))ê†û7ªoÏŸ?¯6knn.))Ñ$ÊÉ“'Õ³ eeeÎ…ì¨eÿ’éJÕ¦uY5ɪLí9 ÙXÿ…UƒíûV=|"”––êk4MÇ­¶·ªÝÉá/êì«§´¨ç(B¯È¦+¢ôí±( @Q,NQ\¹reÓ¦M.—KÊ;v>h¡áóù²³³¼^o[[›ú¶¥¥Åívë7–,9++K¶ÌËËSã333iii§Nr¨(dÇœœiLnn®ºÇP—U“lz@2iI— ïz2m°}ߊ&©®®V¦Ö÷÷÷îhµ½UíNQg_Îr]]{žÚÚZõÇs¡WduhÑûw(Šˆ†øðÀ\QŽd’Ý.Qá’:Çü=0ëׯ÷ûý‡¹ùûÜD`DN{.Ÿ½pêñ\y{"ê"ŠâxÉ ž˜+Š"2ihh˜ššºté’þ~€cÌç—¨µßsÏ…#oDQdCQpáÀSsEQ, áýµ­­---ÍãñTUU©wÆ3n3â³…‘ß‹â×ϼ,K¦îÔÏÛöøÕñwP( R4Ì5Nç½¶ÿÆÂg¯ûô³ w>ëºóÈgj C( .<¤h˜+ŠˆŠ„m¼b¿ûnýŠ"¢á><0WESا3ûód6Ä‹¢ ÿ‚ãü¡×:¯¿K“ûÈúƒÿ²ä—ß|Òô± Ħ¢`€ D9ÑúÙý7-©—5˜ù}„ÇŠVõ®§H”@Q ( yÿÿ(ÜwEø ŠÂÆgðÀ\Q°"\>{áøŽ½ïž‹ºÈ†¢¸ž!ÀSsEQ‘ EÁ…OÀ\WþÌŠEx `®( :€È†¢ öà)€¹¢(PD6ÅòÀø,ž˜+ŠE@dCQŠPÍŠ‚"ŠE€¢@Qp ÅòÀø,ž˜+Šˆl(Šàá2<0WED$LÇŽ=ðÀžyŠ‹‹eñƒÌcÕ¹ÇÐÐPFFÆ®]»0u ²¡(8=2Ì5ÖEÐ?£§§§ïÞ½{fžööö´´´“'OE¿l&Ù vD6§€@€¹Æ ¢ºó«ªª ò(+õŠ¢··733ó•W^ÁÈȆ¢àôÈ0WÅ5¤¤¤\ºtI¿frr299Y)Šžž¯×;88ˆ…‘ Eqðž˜+ŠbÅ“ý“ —Ë¥¾MHHðù|˜7ÙP€¢0!99yrrR¿fjjJ?FÑÑÑ‘ššê÷û9¿( ˆYEôϨNž£x饗RRRä“S €¢€ØTA3<<œ‘‘ÑÞÞ>333;;+3²ø®'¿ßŸššÚÑÑÁY@Q@<*Š«ãïŒ<ý¢|~544T\\ìžgýúõVÿGqüøq---œhE¤À|x `®(Še`üè›Ïìoö­Z;æãY€¨l(Škà%ƒx `®(Š¥ãêø;Ã-Ïôd•Êy‘éWOô`¢1ÙP\xðÀ\QK‚þgÔñ£oþ¤bKç ÷¸é^9)ׯ;×sû@Q (d˜+ŠÂ®óÕ ÄÄ{;?t§64!º9€¢@QÈ0WÅÂo:ýôïš=ô¿dFÄ`žyæ5¯-¢("àÀSsEQ„QQhc3K$Þ»ïºÆ(b2²¡(PK¥(Ôü…#o¼÷Åõëößx¢ &ã ŠEfFUC¼ë E¢@QÏ…#oð( Š"$®Ž¿s¢µóòÙ œ&Eì( àÀSsEQ‘ E<¼dOÌED6<s½æ\YY¹üU3@'‘M" Š‚ ž˜kŒ+ ᡇ’Eù\¶ùϯºeEêežù˜Ÿ—ÈQíÑfP\xðÀ\cYQlÛ¶Î ²-(Šk`|OÌ5Å*!–oSÈ¢’ž°4&°¨ ‹µÚq±†ñ¸Â~‚B/vŎȆ¢€•TaÉPWDQh[Zm¢XŠ˜ö9ŠP(Š%ÌP—îgTçŠ"ì‰xÔ)Š%ís (P׬q¹\ùùù¯¾úª¶rjjª¶¶6))Im¿êZ EMLLTVVºç©¨¨¸té’¾päää={öØgÞ]]]ÙÙÙ 6i½!»‘|%ðV'Åš»¾()ùÔ©SÊVUU%&&ÊñÞÿý£££VÇe8ö»ï¾[;::dþù矗ùÂÂB›J­*Òo/ß®^½ZÕ+ógrrrÁprÖP€¢@Q8£“õYYYÚâ¦M›dñᇶÙW¿¨½A²§§ç…^™ 
6è·‘úäÉ“2“––f¯($ó–üxppÐùa–””è«6UöÅŽ]ÛQøÀ¨õO¿v\VÇe8vMEHú.óÕÕÕ2ÿÊ+¯ØTjU‘¶½äg2___/ó‡’y)Mæe“°?kZáV (Eaðà)€¹Æ¡¢ðù|¹¹¹.—K[Ÿ ­w»Ý²8;;ëPQ¨í™‘Eý6Z9Nî&JOO—}³³³7oÞ,9´“Ãôx<úªM…i±VǮڬ(å[¯Bíx\Ç.ÍJ§§§¥Uk×®µ¯Ôª"}###²¾¸¸XæåSæe“°?köÝŽ¢ ²¡(Là=wx `®q¨(RSSeqxxØŽ‡WQØç úõåååZ–Ÿ““.EaZ¬Õ±;WW®\q˜sëçwíÚ%óuuuòùüóÏ;Q:³¢¢BîP NzEAdCQpáÀS0×PErr²,ž9sfïÞ½wõîzÒPÃÝöÚ¼ºÆçóÉLñ'>œ¢ÐÐnC2½E*uדVµMu†b­Ž]Ý€¤hs×Ó–-[‚P—/_ÖPÉÍÍ5lX©UE†£óûý²˜žž®Ý"å° gÍp׊‚Ȇ¢à€§æŠ¢XXQÈ^KC 5OMMÕÕÕi9·ZÙÒÒ¢ >™-2C=ã+3¯º#8E¡îíY½zõ /¼àä0GFF$5·2Û´X«cׯ‘’MŸÌÖú'%%ÅæŸlŽ] %cØ8°R«Š+]³f¬Y·nóŽ5œµ‰‰ ‘ EÁ…OÀ\PntþŠ033SPPpë­·Fi‚¾<&(ЍT<À€§æŠ9 yVV–ßïLÓ鈺Ȇ¢@Q (E¦( @Q (P( ŠPËã³x `®( ²¡(‚‡gÈðÀ\Q@dCQpáÀS0וW6?£Fþ¿.„±…_ÿú×+**"äÀùCºÈA¬bûöí( )抢¦óãGQLOO§¦¦ŽŒŒ,ó[U´Ô @±8çÔ©Siiib!( )抢XE±¤Éë‚…;¬}Ïž=6lˆœg$¢_^^¾wï^ÅOfÄ!x `®( …ÃÚENtuu¡(P¦Hn­œÑÙP(ŠP»wïöz½.—+??ppP[åÊ•šššÄydFµõ>Ÿ/77×ívgeeµ··k%(œ$jQfvîÜ™––&¥UUU©*¦§§eQVÊW;vìPÛŽŽ–——{<ijqqñÅ‹­jß²eKJJŠ”PYY955¥­”c[Î7¯ÕqÙìØÖÖ¦µPRÀ¾¾>©7;;[kðÐÐM·ú9ðH÷›UGÙÔkº½ÐØØ˜œœœ””ÔÚÚj_é‚=ch¼ÌH™ééé 6&gSÝùóç¥Í1gP(Š%ÁþÉlI­&&&fgg%¿_»ö}íÑÐÐPZZ:9OYY™,jëSSS:$3’AÖÕÕ™jçŠBJVUHº©òNýzµ½¤A~¿_Ú933#ÛHvnZ¾ô°´\ŽH6«®®V”w™ܰÕqÙì¸aÃÉt¥…»ví¢_\·nÃn±:R‡ýfµ»M½¦ÛoݺU„‡¬e¥ÎµM¥¦ØÕƒ>¨WA¦§Àª: ‘( ŠbÑH“ÔÏð’ª´;==]=Ä,3’Îjó^¯·µµUÿc(ŠâÌ™3ª ©Q›ÏÈÈЯ7-\šš””dZ~ff¦ÚýòåËÉÉÉVŠb©ܰ“ã2ì¨Rdi¡aÑp86Ýbu¤ûÍjw›zM·—ÃW»`¥Î{U-ŠNXÐäì«CQ (PA* «t_¿^å…ƒƒƒ’Š­^½ÚçóYb¸#eQUXmï÷û =V¸v‹Mí†Í²²² w=…ýÀí{ØæVœEšÅv‹Ã#µê·ÐO‡ÍñZU´Ý.Ø«êÄ6¸ë)ðà)€¹¢(VPQ¤§§ëPW?Õ+z{{Õ¨‚}r,yÛìì¬6?55ôEfffOOÏÌÌŒÌ˧Uª*›©{úõlذAò§åõܱc‡öâêêjU…ÌÈ¢T!_é߉$Õeggkå´µµ©õµK☕•%[æåå©q‰™™™´´´S§N-ÛJ¶:®þþþ …U·,˜â;ì· ë \y&JÀð®'ÓJÄÐxC¥V&gUÿp‡¢ EÀ\QQÙùËÿ’MwgË’²~ýz¿ßOˆXAÄ*Dãé×\>{áÔã?¸òöŠE@Š€¹¢(PÁ0æó‹ãì÷ÜsáÈ(ŠhU<À€§æŠ¢ˆmE/ëÔN 8~Tóëg^ß‘©;õ³Ç¶=~uüÈl( E) €Èçü¡×ößXøìuŸ~6áÎg]wùL­ýŠbSQ0@ª¨H(ÐÆ+ö»ï¶²@Q@l* -bbb ïtfÿaÄ…¢€ 9èµÎëïÒ$D§û®ç>²þà¿,ùå7ŸdŒ‚'³b<0W,ƒœèNýìþ –ÔËš?Ìü>b#Šâx† OÌE+ˆzד͠oåÂ@Š€¹Æ…¢`€`±¼ÿî» ƒ( )抂ÎX—Ï^8¾cﻧǢιPÄ><0W@dCQ„ ÆgðÀ\Q( "ŠP€¢€hV  (P( ŠE(ŠåñY<0WÙPÁÃ3dx `®( ²¡(¸ðà)˜ëÊ+ ˆP( ÀSsEQÐùD6±OÌEmŠâرc<ð€gžââbYü óXõAî144”‘‘±k×.Lˆl(ŠHñY<0WÅŠ'=ÃÃÃééé»wïž™§½½=--íäÉ“EÑßß/›Iv‚‘ E(Ѝªª2 ;È¢¬Ô+ŠÞÞÞÌÌÌW^y…S €¢€ØTAÿŒš’’réÒ%ýšÉÉÉääd¥(zzz¼^ïàà çE1«(‚Fÿ¤„Âår©o|>'E( 
’““'''õk¦¦¦ôc©©©~¿Ÿó €¢ˆ8x€OÌE±â8yŽâ¥—^JII‘OŒˆl(ŠÈ‚— à)€¹¢(VœááጌŒööö™™™ÙÙY™‘ÅÀw=ùýþÔÔÔŽŽìˆl( N sAE±àϨWÇßyúEù üjhh¨¸¸Ø=Ïúõë­þâøñã"6ZZZ0u ²¡(8=2Ì5Ö…Mç}óùýl0æãYŠ€@€¹¢(œuþÕñw†[žéÉ*•¯dúÕ=˜(ŠbŽ'³â<0WÅb“žñ£oþ¤bKç ÷¸é^Yßyýºs=G±O€Øˆl( ÅR) 5(q ñÞÎÝ© Mˆ®@NÄ( Å’àÿ[5 a˜~úwÍÇú_2£ýÎÊ<óÌ/j>‡^Q(Š¥B£8˜Yr ñÞ}×0F“ (PKÎ…#o¼÷Åõëößx¢EÁ“Ùñž˜+Š"8Ôïzˆ¥È†¢¸^2€§æŠ¢X.yƒÿ£ˆ™È†¢à€§æŠ¢Xœügö‰ÖÎËg/`¨(  sEQÐù( ±OÌ5F…|>´ì|~Õ-@8—½fàÉlÀSsE°Ô (b“]»vÝ÷G>ùÉOÊE=''ç¶Ûn»}éùüª[n€¸¡ªª Eãøýþƒvtt<¹,ìý·µO@œ!FâŒD›Ó§O£(b¹wwwË%ÿ)€p#±E"ŒÄ‰6çÎCQÄr ïïï—kùÁƒ;Âĉ0g$ڌǩ¢à><0׿ܹsrðûý‡ j9øàßGfÃ$¶H„‘8#Ñfrr2N/ÀSsaä?>>.WúÓ§OŸ€¨E"[d6Lb‹D‰3m®^½Š¢<0W ˆˆl( ÀSs:çBQûðÀ\ÎÀ¹PËã³x `®€¢ ²¡(E( ˆf  (E±<0> °¤žrìØ±xÀ3Oqq±,~JV}L†††222víÚEW Ú#o€°yÊððpzzúîÝ»gæiooOKK;yò¤AQô÷÷Ëfnèg °Ä@dCQ@Ø<¥ªªÊ0ì ‹²R¯(z{{333_yå:ì1DD6„ÓSRRR.]º¤_399™œœ¬EOO×뤇ÀNçŠE@ 3¢RBár¹Ô· >Ÿî;(Š(VŒÏ,§$''ONNê×LMMéÇ(:::RSSý~?= v’ˆ™ÈÆÛc l8yŽâ¥—^JII‘Oº E±ŠÂÆððpFFF{{ûÌÌÌìì¬ÌÈbໞü~jjjGG=í0@( ’«ãïŒ<ý¢|Ö »çY¿~½ÕÿQ?~\ÄFKK = €¢@QÄãGß|þc³oÕÚ1COf€3O¹:þÎñ{fþ­h ™~õD½v"Ûo€=eüè›GK:Ýwï¿áîÎîÙã=çzŽÒE@` ²¡(ÀÎSÔ „Hˆ}×}ZÖ¸éÞI÷!'€À ˆˆl( °ôÓé§×, „ÌhióÌG¼fœôÊÌsU eEQ€¥§\çgÿõÛÏ%i2ýðÏʺ’‹£;Ðù8W\+ Ægë)c/ú}zÓ³óº¢û_#*€À( "[\+ ކ,x× (EA3ö¢ÿ½Ÿÿ£€9ˆEAsuü­—Ï^ +P( @Q8ƒñY<0W"Š"xx† ÒȆ¢à€§`®+DD6à)€¹€s¡(ˆ}x `®@çà\(ŠåñY<0W@QÙP€¢D3 Š– tŠE'!V®öLNNnÞ¼Ùëõº\®¤¤¤òòò¾¾>›*ô+‡‡‡ËÊÊeß‚‚‚îîî¿Z€“õ '//¯¡¡áüùóúƱŠâŸX6O‰UEQ\\\__?66&óÓÓÓ]]]*ôÙ+ŠS§N‰Ù½{·ì%‹••• ~eÕì×ÏÎÎ 555¥¥¥9s&”½ìD6Åð @(ž2::Z^^îñx\.—$Ö/^T_566&'''%%µ¶¶Î]û[¸!g½råJMMMâ<2#‹jI©µßþóóóàóùrssÝnwVVV{{»iE¦ ÜLزeKJJŠ”&éûÔÔ”ÃΑb%ívžå«•UUUmmm¦;Ú|´¢P477Kù¡ìE` ²¡(¸ð„ÇS$Xøý~ɧgffDB¨ŒsëÖ­’»OLLLOOËzÓ$U-644”––NÎSVV&‹jÉì¥)çÎkך´!55õСC2#:¡®®Î´"«F6Û¶m›4Cª“ͪ««õ¥™¢v,**Ú´i“VÅ¢…(.9dÓŽµù*tE!%‹Ò e/;XÁ‘ EÁ{ŠäÓ*ãÌÈȱORÕbzzºÚXfÒÒÒÔj¼B w¹\•z½ÞÖÖV펣skC# ›effª»z._¾,9½ÃιtéRSSSnn®Çã‘BêëëeEazD ~Äs6å·è|œ EAì§øýþÂÂBɤՃ¼N2iâa½>Ù]P' VTTHö¿zõjŸÏgº¥ÃFkµÙ¢ET[[[TTdÓfUòJQLLL(½Ü^v óp.Å0> ЧdfföôôÌÌÌȼ|ª4t±cjpÀ0Fá$cÖèíí•rL·´jdàfúç@¬”Fà]O(Q”••uüøqýW²èõzµùªª*í!“@l¾ ]Qlß¾=ˆç(ô{ØE@dCQ@xHIIQ1lܸQ¥¡Ús²RÿEbb¢á½¥ÚŒlPVV699955UZZªŽÂ*ÇÕ?ܬIQÒÓŠ¬iج¹¹¹¤¤dttTæOž<é<{.**êîîÖnК˜˜öª2 ^ýumq```ݺu²R[Ô^èôØcijçØ±cÒ¼¿ 
å]O¢gšššô.¸½P€¢€ðàóù²³³$ýmkkÓ§§’XKÊ®Þõ$´´´¸ÝnÓw=ÕÕչ穭­ÕÞ—êPQHÊÉÉq¹\¹¹¹ê®'CEV4l¦ €¬¬,Ù2//OJvØ }}}Gv”´»¦¦F?Ö!‡/msÍ#3†‘‡ááaí=T²ïš5k ÿGaúUpÿG!ÈÁæçç‹~Ó7/¸½VŠËg/œzüWÞžÀõ"ˆE͘ϿoÕÚ7Ý{áÈôŠ`Ñü|Ë÷DTÈôÃ?ÿÜñ{¯Ž¿CŸ (VÆgðˆ:~¹ý‰}òéçn.zù¯ª$ú7üwýæ \ˆQË Ïà)½¢B,óÙÝùRþz¼ÿF Y`®À…EÁéˆDOabŠð鹬WóøìJÁ)+ŠðˆÆ^øÉ³®uJB쿱°;íþ_üÃãŒQ+p.§Op*'ößpw§ûîW?ûåó‡^ûÃÌï1WbÎ…¢XŸÀS º8½çEm\B J`®$=\ˆQŽÐþb¿û.ý  (E°0—Ï^8¾cï»§Ç芈…"@QŠPËã³x `®D6Eð𠞘+‘ EÁ…OÀ\Wˆˆl( ÀSs:çBQûðÀ\ÎÀ¹PËã³x `®€¢ ²¡(¢•ÞÞÞÌÌLé(ºP (–%! ^}õÕå¬`é`€PËлÝîh—+ZêöLNNn÷ª¹û" IDATÞ¼Ùëõº\®¤¤¤òòò¾¾>›ªõ+‡‡‡ËÊÊe_ÑxÝÝÝ ~µ*'ë<O^^^CCÃùóçõ b/Šb óìxSÅÅÅõõõccc2?==ÝÕÕ¥lÏ^Qœ:uJtÈîÝ»e/Y¨¬¬\ð«ÅžPµ~vvvhh¨©©)--íÌ™3¡ì€¢ˆ‚Ÿ•C“DSûEÓ-6Ò@«Í‚(¥ÚÚÚ¢¢"›cQ5®ÔÅÄÄ„ÒQÁíE À\é|œ EƒŠ"==]ýô¾Ø1 !??ÿá‡ÎÊÊ2}$ 33³§§gffFæåSí»Ø1ŠEµÐ”ÞÞ^)ÇtK«Fn¦Ī“ßõˆKÒŸÇ×%‹^¯W›·Òoö_…®(¶oßÄsú½d˜+€s¡("‚ ŸÌ6]lll,++›œœœšš*--UO)$&&ÞjZNWW—¬‘®7­4%%E=ưqãFµ¯ö…¬Ô?GaU£U ( Ée5é"ŠBcZ‘U# ›577—””ŒŽŽÊüÉ“'CÏ’‹ŠŠº»»µ·&&&ä¸ U]¯¿þº¶800°nÝ:Y©-j÷˜=öØcš :vì˜4{Á¯Byד虦¦&½´ n¯¨ðÌ•¤â0²ñöX#ããsO?ýÞ§E!m]]{žÚÚZõ,DKK‹¬ †r$'Ön|2Åçóegg'$$HšÛÖÖ¦ßWhIÙÕ»žlj´j¡E!Ö Ís¹\¹¹¹ê®'CEV4l¦%úYYY²e^^žþ=WÁÑ××WQQáñx¤@I»kjjôc Ò-Òf×<2cyÖÞO%û®Y³Æð¦_÷‚tB~~¾è:}ó‚Û E(ŠHçèѹÌLÉêæþ˜ý dPQ¦(Ž{ðÁ¹ÄĹ}lîºëæÜžeªÚív{½^u¯?Š"jžÌVƒùÈ\JÊûC7Þ¸|r òáNÀ\ˆl( sþ~ÕW4 qÝuïk &&¦À‰§-!ŠÀ\€È†¢XîÓ3<<÷·;wýõ$OŸýìÜÍ73F@Š˜+,ˆˆlsqþw<2—•õ¾¨ÈËCT¢æ t>Î…¢Xüé1 Y,Û»žd˜+(ŠèS6ã³<2—šº¬ÿGž€¹Š€È׊bAÆÇçZ[çΞŒP€¢€%ƒ"@Q@$)Š›o¾ù^ˆD„SQ@E±k×®ûþÈwÜ‘››ûñ·EŸ_uËm€§æ @d +•••¢(:::<è÷ûOŸ>½hE¡g``@tIww·”øT„±oÕÚ§OÌÂÁÞ[K'Ù’ü‹! 
ràܹs!)Š'Nô÷÷KY"P:# 9=€§æ t>În$ù B@äÀøøxHŠB‰”"ÒÄï÷Ž0äô<0W óp®p#É¿H"&''CR²¿ˆ)èôéÓ'"Œ×5Ÿ<0W’ôÐ D6…$ÿ"Dˆ¸zõjHŠâþ3‚¢øÏl@QŠPËã³x `®D6Eð𠞘+‘ EÁéÀS,Ö*nÔÄ\aù`€€È†¢€åó”åÉõQ˜+Ðù8Š‚Ó#ž²"ÉýÒUê°äp5`rrróæÍ^¯×år%%%•——÷õõÙT¡_9<<\VV–˜˜(ûtww/øÕªœ¬OHHðx °lž‚¢…âââúúú±±1™Ÿžžîêêºï¾ûœ(ŠS§N‰Ù½{·ì%‹••• ~eÕì×ÏÎÎ 555¥¥¥9s&”½ì$=Ñ[à}FGGËËË=Ëå’”÷âÅ‹ê«ÆÆÆää䤤¤ÖÖÖ¹k¥6d“W®\©©©IœGfdQm É®ö«|~~þàà }â¾sçNÉGÝnwUU•¾û¦:©ÅçóåææJÉYYYííí¦‡cZ~àf–-[RRR¤4Iß§¦¦vµ+i·ó,_­”Þhkk3ÝÑæ« …¢¹¹YÊe/@Q@ ƒ¢€÷¹ýöÛý~¿dº333"!T.¸uëVɪ'&&¦§§e½iú¨JKK'ç)++“EµäÜRˆ”/jaíÚµöŠBöU…VjÕT'µ¤¦¦:tHfD'ÔÕÕ™ŽMùúͶmÛ&+ÕÉfÕÕÕúÒLQ;mÚ´I«bQŠBtô‰i§Ù|º¢’EO†²D# Š‚G2]• fddŒŒŒØ§j1==]m,3iiij5Ô …»\.{E¡î–‘B¤L›\VßT'µx½ÞÖÖV펣sëÀòõ_effªv^¾|Yrz‡Ý{éÒ¥¦¦¦ÜÜ\Ç#…Ô××Ë'ŠÂ¦ßl¾ â9 ›òƒÛ PûøýþÂÂBÉqÕ#¶Nr\âa½> u˜Á;)ÄaSMk¬¨¨ìõêÕ>ŸÏtKçåëQ›- ‘LµµµEEE6mV%¯ÔÅÄÄ„ÒKÁí(Šè€ñY€P<%33³§§gffFæåS%ˆ‹£Ð/èÇ(¥(ìÇ(¬šº¨Zz{{­F?–/›éŸ6±Rw=¢TSVVÖñãÇõ_É¢×ëÕæ«ªª´GY±ù*tE±}ûö ž£ÐïE`ˆáÈÆÛcâ+OIIIQlܸQ%ˆÚs²RÿEbb¢á¢ÚŒl =155UZZªŽbQŠB ™šÇô9 «¦ÚÔ¢¸YH¢(¤Óñ*ß°YsssIIÉèè¨ÌŸ|ß_ùöÕñwì$=\ˆQ‹àü¡×:o¸GÓÏ^÷é—ÿê‹6C€¢€¨˜×VC°œ0@( 0GåmLL‘9¸éÞ—ò¿ ÍŸÙŸ@QØñ¾ð¦|Ÿø}Ÿk|1çßÿðÏ?÷Ëo>ÉŠb¹a|O¨”òéåVö>°¹ëæÏüäó_=èµ?Ìüs.Ä(Š€gÈðˆ.êwi£Vƒ˜+p!FQpzðs´ÿ£ØÃÝúA Ì5r`€€ 1Šðˆ\.Ÿ½p|ÇÞwOa®Ä œ EÁéÀS0W:P1ã³x `®@Ò@dCQŠPÍ0@( @QŠby`|ÒȆ¢ž•}†¬··733sÕªèîÒho?D¾§`®@dCQpzÌSð‚‚‚W_}5Ú“~ s ˆˆl(ŠåιÝnw dùKW—Ã’—ú`'''7oÞìõz].WRRRyyy__ŸMÕú•ÃÃÃeee‰‰‰²¯Èîîî¿Z€“õ '//¯¡¡áüùóúƱ)ØÎÀ¹PÑ¡(V<GQ8¡¸¸¸¾¾~llLæ§§§»ººî»ï>'ŠâÔ©S¢CvïÞ-{ÉâÀÀ@eeå‚_-ÖZÔúÙÙÙ¡¡¡¦¦¦´´´3g΄²YØÎÀ¹PN n|V²1ɵ­óóóµõW®\©©©IœGfdqîÚ‰õÉœ~½d¨ª|I^322¦¦¦ô•ŽŽŽ–——{<©TrÜ‹/ª¯““““’’Z[[mj´j¡ÍŽzçÎ’wºÝ*ý¾ö-tR¸ÏçËÍÍ•’³²²ÚÛÛM´üÀÍ„-[¶¤¤¤Hi’¦º1¤:I»gùj¥ôR[[›éŽ6_­(ÍÍÍR~({…ËS¢(°I‘ E±¼]°j•¤ª’hJ’½víû´¡¡¡´´trž²²2Y4ÍÞýÞÞ^ɧÕµµµ;vì0Tzûí·ûý~©qffF$„Jþ¶nÝ*éµ4fzzZÖÛ×hÓBÓ#2"»¨}ë²j¡“ÂSSS:$3¢êêêL¦|ýfÛ¶m“c”êd³êêjUšáXL1=ÝEEE›6mÒª^”¢™'}eZ¦ÍW¡+ )Yäe({ (E±äŠBýB/Y¦ËåÒæÓÓÓGFF´y™IKKs¨(ææŸÒÞ·oßÜï‡Ñn†±B*UÉ_FF†ªÔ*_T‹6-4="C!ê®ÙWвÉYõ-tR¸rkk«vgÑ‚9t`ùú¯233U;/_¾,¹{ˆ§ûÒ¥KMMM"ù<^__/kœ( Ó#]ð« ž£°)?¸½‚ƒ"@Q,BQ,¨ Y݂ۿôÒK999só÷Ãh7/ðûý………’Ôªgj$µ!¶Ðfeà¾[hZøàà`EE…dÿ«W¯öù|¦[:/_Ú,,ˆ”ª­­-**²9UãJQLLL(Ü^(Š•Qéééú_ñ5F!äçç?üðÃYYY¦wígfföôôÌÌÌȼ|ª};F±¨VÚQXµÐIáŠÞÞ^«Ñ‡åËfú‡L¬Î 
ó»žQjJNÖñãÇõ_É¢×ëÕæ­Ä¡ýW¡+ŠíÛ·ñ…~/…S‚~2Ût±±±Q{Ò`jjª´´T=¥˜˜hx¡§i9]]]²æÉ'Ÿ4­4%%E=i°qãFµ¯ö…¬Ô?GaU£U * ÙwjÓç(¬Zè¤pÉe5]$ŠBÊ1= «ò ›577—””ŒŽŽÊüÉ“'CÏ’‹ŠŠº»»µ·&&&¤Ó U]¯¿þº¶800°nÝ:Y©-j7°=öØcš :vì˜4{Á¯Byד虦¦&½n n¯py @v"Š"H‚{†Ì*E–¤³®®Î=Omm­z¢¥¥EÖf؆r$mÕn|2Åçóegg'$$H&ÚÖÖ¦ßWr\ɪջžlj´j¡CE±cÇíJÕÕÕûZµÐIárì.—+77WÝõd8 «ò ›i‰~VV–l™——§‰VpôõõUTTx<)PÒîššýˆô¹´Ù5ÌF†‡‡µ÷SɾkÖ¬1ü…éWÁý… ŸŸ/JOß¼àö —§DQ` ²¡(–êôŒÏ=ýô{ŸË€ä—]]]‘{âùolæ ¶0@@dCQ\Ã3ÏÌ}ô£’FÏýñó%nɾ}úÈ¢(€@€¹Òù€¢ˆÊÓóöÛsUUs7Ýôž–é‰'–£n·ÛëõªÛñ#i$þL À\ÎÀ¹P× Ÿ}晹Üܹ?ù“÷„Dvö{¢¢§0z æ ( "ŠâÔ „¦%dúÔ§æ’“‘( @Q,„Ï÷¾Š`bb²™œÀÄ£¢˜››SKËœ×;÷áÏ}èCï%O×_?—˜ÈŠb192·aÜÛý¾´@T ( p|V Y,绞¢ÎS0W"Šâ=lž!;rdî–[–ïÿ(¢ÔS0W"ŠÂŽññ¹ÖÖ¹³g±d `®° ÙP€§æ t>Î…¢ öà)€¹€s¡(–ÆgðÀ\¤€È†¢ ( ša€P€¢ÅòÀø,ž˜+‘ E<)çL„ ßï?}ú4^Jôa¿°I¦%5æ¨Sø2?cÒéPa;aÏ>ôˆœ°§žzª³³STà‰'ðR¢þ…M2-©1G¢À—ø“N‡¢ J6a“L( |ˆŸÄQâ)ú¬Zµ*ÂBµ0ô¦â_ч#ß,W¼ÁQ§(ÂÛEø2?Q!‰GmLIù°|âÔÝw2pãÂÂ;ô!ì7¿9Z_ÿ…´´¸\JOÿS™—5úB4®Ó¾ýÿïÿ˜ÆD…VΗ¾ô¹ %!¼6¿zuÖ/ùœZ}úµxõêkjÌMvÑß4/'=ýOÕ^ÿ÷ÿö˜Ú°½«9ò¸ÚòÇ?þ'õ”ýW†™à†…%­ŒR³4½FJì. V ¶ÙÌÊ€­v±º(Øà * ¿¶9‰¦‹¢—ô}uøðcêær›óŽ/Cl( ™Ä•ÔoÜ ú…DÚûï¿S­—yYcX•9›&½±¯(^|ñ»úßiäÈøÃGO’$÷2Ž<ÈeCõ£¾¯õ7,©‡TD$Ô×áÝwýNÆ(*+‹­~’ѯٲå?iq‹ Õžå J‚½Í?úh£vKºX£öL•|j÷¬ç; ßýîWµÍä,iœhceÀ¦Öhê Ê`5Éî6…;wU} ’Þæ«°4 ÿ ‹¢ˆR³´Rêà°Á6›Y°Õ.V›\AEaã×O¢ÕåØê§½ÈIn«(DN¨›Eô ™IKûȹs/kc€jØ*°8¿[%½±¯(**þÚ08 ûèþ¹çvJ§Èx¿„é]O²&'ç_…øøÝÕ«¯%&zœDÃ7ß|V®¸¢@Ä yä¿%ÁÞæßz«[»y¯  WŒG†Äøå[õÐÕ×¾V«0µz3’d'‹MÝ$Ðèoßtø0™½«†EQ×0ü+,Š"JÍrÁ»ž6Øf3+¶ÚÅê¢`s€+¨(lüÚáIDQŠBÍWW—ýÿ±Å¡_|å+•ßøÆ—eæ›ßü/êM6eQ™³ó@#ŠâŸÿùÕ¤¤DýK9$àÊíéýÁKë_Î¥¾’~ðÁ¿5+kššªCT¢êÔ¯\2£oä¯í3-öí·1’ N²·ìì[NŸ~A»[]›4³_½:K­Ñÿ2ñÓŸ~_Ÿxn¥ÐnVqžºÉ.úLDœ]5\w=-¶aøW¸l2ÍÒæÉlõr‡ ¶ÚÌÆ€­v±º(ØàJ) {¿¶:‰†®S.×h}_ɼþ®'ă¢?"S>ø…úùûöÛÿLbˆÃÀâüXzúŸ8ðˆf·G¶/øóÆ‚®zë­^í‡íŸÿ¼Sâ©áÉl«¯BoþFEufiøêÝwýb]÷Ý÷W÷ß§ pØ`«Íl Øj«‹‚Í®”¢°÷k«“hè:uàßÿþv¥Z_ÉgVV†zðEq¢(´7;‰³8ñ MK|ç; Úø°}` c曊BÄ€D^ÃJY£½^×a¿H¢ßØX%1N„öšºX¬¢0ü-ÈæÍÿQÝ|váÂá îw»¯—¯òóo{æ™fýÛiä&ë=·\}­$J}ô“$%bBÚØ¨6ILIN¾Iÿ`–\³%ʈiÉEý»ßýª2¹gŸý–X»þ5RZ^Þǵ-õ¯é´ñÉfŠŠ>¥ÝÒ-3ê—H+YÐU%JД6ȧá±6_…Þ0ü+ŒŠ"êÌRº]®¥¦Þ,AXµvÞ`«Íl Øj›‹‚Õ®”¢°÷k«“hèºÀ·dÊùÔ¿žEñ£(´{Gø…LßøÆ—%Æþã?þ×Kx3çXþ‡»pMo¿}HÄ€þç“8ü“sˆŠì-–¦ Bþ…M2EÎ?Üq­ˆÛø‰¢°œ~LT þFˆ’@ôAQŠEAr@üDQp€è¡“Õ ì¿Â¿°I&¾ ÄOg‚( Ø<þ…M2¡(ðe 
~sPDI úà_Ø$Š_@Q (8a@ôÁ¿ð/lcFQ?Q\$€èÄa“3Š_â'Š‚+Q’èCö†a“L( |ˆŸ8Š‚( Ø<þ…M2¡(ðe ~¢("HQ¼õíG¤Ýñ9ɱ%ã0úijÍã_Ø$S„sz|ˆŸ1ìtaSLDÉx‹>Lø6É´âÆu§_âgL:]Åï~3uñÕ7´éGß~¼sÛΧÿ~ûž¯~C ¾¦'ö%ãlÿÂ&™"Ę£ïÔãË@üŒE§[Þâü~ÿÁƒ;::žŒW䨥¤NŸ>—ÆØ<þ…MB„sz|ˆŸ1ætaV¢~º»»¥ÝOÅrÔrìÒÒçÎÃ3ã8·yü ›„È1æh9õø2?cÏé¬(Nœ8Ñßß/-ÔÈQ˱KH?Œã™ñ@œÛ<þ…MBäs´œz|ˆŸ±çtaV¢{¤­"€ü~ÿáøCŽZŽ]z@úarrÏŒâÜæñ/l"ǘ£åÔãË@üŒ=§ ³¢VŠô‘æž>}úDü!G-Ç.= ýpõêU<3ˆs›Ç¿°IˆcŽ–S/ñ3öœnÖ( @QŠâ„ÿRSÏ„Œä„tIEND®B`‚python-watcher-4.0.0/doc/source/datasources/0000775000175000017500000000000013656752352021111 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/datasources/index.rst0000664000175000017500000000011013656752270022741 0ustar zuulzuul00000000000000Datasources =========== .. toctree:: :glob: :maxdepth: 1 ./* python-watcher-4.0.0/doc/source/datasources/grafana.rst0000664000175000017500000004215313656752270023246 0ustar zuulzuul00000000000000================== Grafana datasource ================== Synopsis -------- Grafana can interface with many different types of storage backends that Grafana calls datasources_. Since the term datasources causes significant confusion by overlapping definitions used in Watcher these **datasources are called projects instead**. Some examples of supported projects are InfluxDB or Elasticsearch while others might be more familiar such as Monasca or Gnocchi. The Grafana datasource provides the functionality to retrieve metrics from Grafana for different projects. This functionality is achieved by using the proxy interface exposed in Grafana to communicate with Grafana projects directly. Background ********** Since queries to retrieve metrics from Grafana are proxied to the project the format of these queries will change significantly depending on the type of project. The structure of the projects themselves will also change significantly as they are structured by users and administrators. 
For instance, some developers might decide to store metrics about compute_nodes in MySQL and use the UUID as primary key while others use InfluxDB and use the hostname as primary key. Furthermore, datasources in Watcher should return metrics in specific units strictly defined in the baseclass_ depending on how the units are stored in the projects they might require conversion before being returned. The flexible configuration parameters of the Grafana datasource allow to specify exactly how the deployment is configured and this will enable to correct retrieval of metrics and with the correct units. .. _datasources: https://grafana.com/plugins?type=datasource .. _baseclass: https://github.com/openstack/watcher/blob/584eeefdc8/watcher/datasources/base.py Requirements ------------ The use of the Grafana datasource requires a reachable Grafana endpoint and an authentication token for access to the desired projects. The projects behind Grafana will need to contain the metrics for compute_nodes_ or instances_ and these need to be identifiable by an attribute of the Watcher datamodel_ for instance hostname or UUID. .. _compute_nodes: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/element/node.py .. _instances: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/element/instance.py .. _datamodel: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/element Limitations *********** * Only the InfluxDB project is currently supported [#f1]_. * All metrics must be retrieved from the same Grafana endpoint (same URL). * All metrics must be retrieved with the same authentication token. .. [#f1] A base class for projects is available_ and easily extensible. .. 
_available: https://review.opendev.org/#/c/649341/24/watcher/datasources/grafana_translator/base.py

Configuration
-------------

Several steps are required in order to use the Grafana datasource. Most steps
are related to configuring Watcher to match the deployed Grafana setup, such
as queries proxied to the project or the type of project for any given
metric. Most of the configuration can either be supplied via the traditional
configuration file or in a `special yaml`_ file.

.. _special yaml: https://specs.openstack.org/openstack/watcher-specs/specs/train/approved/file-based-metricmap.html

token
*****

First step is to generate an access token with access to the required
projects. This can be done from the api_ or from the web interface_. Tokens
generated from the web interface will have the same access to projects as
the user that created them, while using the cli allows generating a key for
a specific role. The token will only be displayed once so store it well.
This token will go into the configuration file later and this parameter can
not be placed in the yaml.

.. _api: https://grafana.com/docs/http_api/auth/#create-api-key
.. _interface: https://grafana.com/docs/http_api/auth/#create-api-token

base_url
********

Next step is supplying the base url of the Grafana endpoint. The base url
parameter will need to specify the type of http protocol and the use of
plain text http is strongly discouraged due to the transmission of the
access token. Additionally the path to the proxy interface needs to be
supplied as well in case Grafana is placed in a sub directory of the web
server. An example would be: `https://mygrafana.org/api/datasource/proxy/`
where `/api/datasource/proxy` is the default path without any
subdirectories. Likewise, this parameter can not be placed in the yaml.
To prevent many errors from occurring and potentially filling the log files
it is advised to specify the desired datasource in the configuration as it
would prevent the datasource manager from having to iterate and try possible
datasources with the launch of each audit. To do this specify `datasources`
in the `[watcher_datasources]` group.

The current configuration that is required to be placed in the traditional
configuration file would look like the following:

.. code-block:: shell

  [grafana_client]
  token = 0JLbF0oB4R3Q2Fl337Gh4Df5VN12D3adBE3f==
  base_url = https://mygranfa.org/api/datasource/proxy

  [watcher_datasources]
  datasources = grafana

metric parameters
*****************

The last five remaining configuration parameters can all be placed both in
the traditional configuration file or in the yaml, however, it is not
advised to mix and match but in the case it does occur the yaml would
override the settings from the traditional configuration file.

All five of these parameters are dictionaries mapping specific metrics to a
configuration parameter. For instance the `project_id_map` will specify the
specific project id in Grafana to be used. The parameters are named as
follows:

* project_id_map
* database_map
* translator_map
* attribute_map
* query_map

These five parameters are named differently if configured using the yaml
configuration file. The parameters are named as follows and are in identical
order as to the list of the traditional configuration file:

* project
* db
* translator
* attribute
* query

When specified in the yaml the parameters are no longer dictionaries;
instead each parameter needs to be defined per metric as sub-parameters.
Examples of these parameters configured for both the yaml and traditional
configuration are described at the end of this document.

project_id
**********

The project id's can only be determined by someone with the admin role in
Grafana as that role is required to open the list of projects.
The list of projects can be found on `/datasources` in the web interface but
unfortunately it does not immediately display the project id. To display the
id one can hover the mouse over the projects and the url will show the
project id's, for example `/datasources/edit/7563`. Alternatively the entire
list of projects can be retrieved using the `REST api`_. To easily make
requests to the REST api a tool such as Postman can be used.

.. _REST api: https://grafana.com/docs/http_api/data_source/#get-all-datasources

database
********

The database is the parameter for the schema / database that is actually
defined in the project. For instance, if the project would be based on MySQL
this is where the name of the schema used within the MySQL server would be
specified.

For many different projects it is possible to list all the databases
currently available. Tools like Postman can be used to list all the
available databases per project. For InfluxDB based projects this would be
with the following path and query; however, be sure to construct these
requests in Postman as the header needs to contain the authorization token:

.. code-block:: shell

  https://URL.DOMAIN/api/datasources/proxy/PROJECT_ID/query?q=SHOW%20DATABASES

translator
**********

Each translator for a specific type of project will have a uniquely
identifiable name, and the baseclass allows easily supporting new types of
projects such as elasticsearch or prometheus. Currently only InfluxDB based
projects are supported; as a result the only valid value for this parameter
is `influxdb`.

attribute
*********

The attribute parameter specifies which attribute to use from Watcher's data
model in order to construct the query. The available attributes differ per
type of object in the data model but the following table shows the
attributes for ComputeNodes, Instances and IronicNodes.
+-----------------+-----------------+--------------------+ | ComputeNode | Instance | IronicNode | +=================+=================+====================+ | uuid | uuid | uuid | +-----------------+-----------------+--------------------+ | id | name | human_id | +-----------------+-----------------+--------------------+ | hostname | project_id | power_state | +-----------------+-----------------+--------------------+ | status | watcher_exclude | maintenance | +-----------------+-----------------+--------------------+ | disabled_reason | locked | maintenance_reason | +-----------------+-----------------+--------------------+ | state | metadata | extra | +-----------------+-----------------+--------------------+ | memory | state | | +-----------------+-----------------+--------------------+ | disk | memory | | +-----------------+-----------------+--------------------+ | disk_capacity | disk | | +-----------------+-----------------+--------------------+ | vcpus | disk_capacity | | +-----------------+-----------------+--------------------+ | | vcpus | | +-----------------+-----------------+--------------------+ Many if not all of these attributes map to attributes of the objects that are fetched from clients such as Nova. To see how these attributes are put into the data model the following source files can be analyzed for Nova_ and Ironic_. .. _Nova: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/collector/nova.py#L304 .. _Ironic: https://opendev.org/openstack/watcher/src/branch/master/watcher/decision_engine/model/collector/ironic.py#L85 query ***** The query is the single most important parameter it will be passed to the project and should return the desired metric for the specific host and return the value in the correct unit. The units for all available metrics are documented in the `datasource baseclass`_. This might mean the query specified in this parameter is responsible for converting the unit. 
The following query demonstrates how such a conversion could be achieved and
demonstrates the conversion from bytes to megabytes.

.. code-block:: shell

  SELECT value/1000000 FROM memory...

Queries will be formatted using the .format string method within Python.
This format will currently have five attributes exposed to it labeled `{0}`
to `{4}`. Every occurrence of these characters within the string will be
replaced with the specific attribute.

- {0} is the aggregate typically `mean`, `min`, `max` but `count` is also
  supported.
- {1} is the attribute as specified in the attribute parameter.
- {2} is the period of time to aggregate data over in seconds.
- {3} is the granularity or the interval between data points in seconds.
- {4} is translator specific and in the case of InfluxDB it will be used for
  retention_periods.

**InfluxDB**

Constructing the queries or rather anticipating how the results should look
to be correctly interpreted by Watcher can be a challenge. The following
json example demonstrates what the result should look like and the query
used to get this result.

.. code-block:: json

  {
    "results": [
      {
        "statement_id": 0,
        "series": [
          {
            "name": "vmstats",
            "tags": {
              "host": "autoserver01"
            },
            "columns": [
              "time",
              "mean"
            ],
            "values": [
              [
                1560848284284,
                7680000
              ]
            ]
          }
        ]
      }
    ]
  }

.. code-block:: shell

  SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND "type_instance" =~ /^mem$/ AND time >= now() - {2}s GROUP BY host

.. _datasource baseclass: https://opendev.org/openstack/watcher/src/branch/master/watcher/datasources/base.py

Example configuration
---------------------

The example configurations will show both how to achieve the entire
configuration in the config file or use a combination of the regular file
and yaml. Using yaml to define all the parameters for each metric is
recommended since it has better human readability and supports multi-line
option definitions.
Configuration file ****************** **It is important to note that the line breaks shown in between assignments of parameters can not be used in the actual configuration and these are simply here for readability reasons.** .. code-block:: shell [grafana_client] # Authentication token to gain access (string value) # Note: This option can be changed without restarting. token = eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk== # first part of the url (including https:// or http://) up until project id # part. Example: https://secure.org/api/datasource/proxy/ (string value) # Note: This option can be changed without restarting. base_url = https://monitoring-grafana.com/api/datasources/proxy/ # Project id as in url (integer value) # Note: This option can be changed without restarting. project_id_map = host_cpu_usage:1337,host_ram_usage:6969, instance_cpu_usage:1337,instance_ram_usage:9696 # Mapping of grafana databases to datasource metrics. (dict value) # Note: This option can be changed without restarting. 
database_map = host_cpu_usage:monit_production, host_ram_usage:monit_production,instance_cpu_usage:prod_cloud, instance_ram_usage:prod_cloud translator_map = host_cpu_usage:influxdb,host_ram_usage:influxdb, instance_cpu_usage:influxdb,instance_ram_usage:influxdb attribute_map = host_cpu_usage:hostname,host_ram_usage:hostname, instance_cpu_usage:name,instance_ram_usage:name query_map = host_cpu_usage:SELECT 100-{0}("{0}_value") FROM {4}.cpu WHERE ("host" =~ /^{1}$/ AND "type_instance" =~/^idle$/ AND time > now()-{2}s), host_ram_usage:SELECT {0}("{0}_value")/1000000 FROM {4}.memory WHERE ("host" =~ /^{1}$/) AND "type_instance" =~ /^used$/ AND time >= now()-{2}s GROUP BY "type_instance",instance_cpu_usage:SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND "type_instance" =~ /^cpu$/ AND time >= now() - {2}s GROUP BY host,instance_ram_usage:SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND "type_instance" =~ /^mem$/ AND time >= now() - {2}s GROUP BY host [grafana_translators] retention_periods = one_week:10080,one_month:302400,five_years:525600 [watcher_datasources] datasources = grafana yaml **** When using the yaml configuration file some parameters still need to be defined using the regular configuration such as the path for the yaml file these parameters are detailed below: .. code-block:: shell [grafana_client] token = eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk== base_url = https://monitoring-grafana.com/api/datasources/proxy/ [watcher_datasources] datasources = grafana [watcher_decision_engine] metric_map_path = /etc/watcher/metric_map.yaml Using the yaml allows to more effectively define the parameters per metric with greater human readability due to the availability of multi line options. These multi line options are demonstrated in the query parameters. .. 
code-block:: yaml grafana: host_cpu_usage: project: 1337 db: monit_production translator: influxdb attribute: hostname query: > SELECT 100-{0}("{0}_value") FROM {4}.cpu WHERE ("host" =~ /^{1}$/ AND "type_instance" =~/^idle$/ AND time > now()-{2}s) host_ram_usage: project: 6969 db: monit_production translator: influxdb attribute: hostname query: > SELECT {0}("{0}_value")/1000000 FROM {4}.memory WHERE ("host" =~ /^{1}$/) AND "type_instance" =~ /^used$/ AND time >= now()-{2}s GROUP BY "type_instance" instance_cpu_usage: project: 1337 db: prod_cloud translator: influxdb attribute: name query: > SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND "type_instance" =~ /^cpu$/ AND time >= now() - {2}s GROUP BY host instance_ram_usage: project: 9696 db: prod_cloud translator: influxdb attribute: name query: > SELECT {0}("{0}_value") FROM "vmstats" WHERE host =~ /^{1}$/ AND "type_instance" =~ /^mem$/ AND time >= now() - {2}s GROUP BY host External Links -------------- - `List of Grafana datasources `_ python-watcher-4.0.0/doc/source/user/0000775000175000017500000000000013656752352017552 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/user/ways-to-install.rst0000664000175000017500000001051313656752270023352 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ ======================= Ways to install Watcher ======================= This document describes some ways to install Watcher in order to use it. If you are intending to develop on or with Watcher, please read :doc:`../contributor/environment`. Prerequisites ------------- The source install instructions specifically avoid using platform specific packages, instead using the source for the code and the Python Package Index (PyPi_). .. 
_PyPi: https://pypi.org/ It's expected that your system already has python2.7_, latest version of pip_, and git_ available. .. _python2.7: https://www.python.org .. _pip: https://pip.pypa.io/en/latest/installing/ .. _git: https://git-scm.com/ Your system shall also have some additional system libraries: On Ubuntu (tested on 16.04LTS): .. code-block:: bash $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev On Fedora-based distributions e.g., Fedora/RHEL/CentOS/Scientific Linux (tested on CentOS 7.1): .. code-block:: bash $ sudo yum install gcc python-devel openssl-devel libffi-devel mysql-devel Installing from Source ---------------------- Clone the Watcher repository: .. code-block:: bash $ git clone https://opendev.org/openstack/watcher.git $ cd watcher Install the Watcher modules: .. code-block:: bash # python setup.py install The following commands should be available on the command-line path: * ``watcher-api`` the Watcher Web service used to handle RESTful requests * ``watcher-decision-engine`` the Watcher Decision Engine used to build action plans, according to optimization goals to achieve. * ``watcher-applier`` the Watcher Applier module, used to apply action plan * ``watcher-db-manage`` used to bootstrap Watcher data You will find sample configuration files in ``etc/watcher``: * ``watcher.conf.sample`` Install the Watcher modules dependencies: .. code-block:: bash # pip install -r requirements.txt From here, refer to :doc:`../configuration/configuring` to declare Watcher as a new service into Keystone and to configure its different modules. Once configured, you should be able to run the Watcher services by issuing these commands: .. code-block:: bash $ watcher-api $ watcher-decision-engine $ watcher-applier By default, this will show logging on the console from which it was started. Once started, you can use the `Watcher Client`_ to play with Watcher service. .. 
_`Watcher Client`: https://opendev.org/openstack/python-watcherclient Installing from packages: PyPI -------------------------------- Watcher package is available on PyPI repository. To install Watcher on your system: .. code-block:: bash $ sudo pip install python-watcher The Watcher services along with its dependencies should then be automatically installed on your system. Once installed, you still need to declare Watcher as a new service into Keystone and to configure its different modules, which you can find described in :doc:`../configuration/configuring`. Installing from packages: Debian (experimental) ----------------------------------------------- Experimental Debian packages are available on `Debian repositories`_. The best way to use them is to install them into a Docker_ container. Here is single Dockerfile snippet you can use to run your Docker container: .. code-block:: bash FROM debian:experimental MAINTAINER David TARDIVEL RUN apt-get update RUN apt-get dist-upgrade RUN apt-get install vim net-tools RUN apt-get install experimental watcher-api CMD ["/usr/bin/watcher-api"] Build your container from this Dockerfile: .. code-block:: bash $ docker build -t watcher/api . To run your container, execute this command: .. code-block:: bash $ docker run -d -p 9322:9322 watcher/api Check in your logs Watcher API is started .. code-block:: bash $ docker logs You can run similar container with Watcher Decision Engine (package ``watcher-decision-engine``) and with the Watcher Applier (package ``watcher-applier``). .. _Docker: https://www.docker.com/ .. _`Debian repositories`: https://packages.debian.org/experimental/allpackages python-watcher-4.0.0/doc/source/user/user-guide.rst0000664000175000017500000001353513656752270022363 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. 
You can view the license at: https://creativecommons.org/licenses/by/3.0/ ================== Watcher User Guide ================== See the `architecture page `_ for an architectural overview of the different components of Watcher and how they fit together. In this guide we're going to take you through the fundamentals of using Watcher. The following diagram shows the main interactions between the :ref:`Administrator ` and the Watcher system: .. image:: ../images/sequence_overview_watcher_usage.png :width: 100% Getting started with Watcher ---------------------------- This guide assumes you have a working installation of Watcher. If you get "*watcher: command not found*" you may have to verify your installation. Please refer to the `installation guide`_. In order to use Watcher, you have to configure your credentials suitable for watcher command-line tools. You can interact with Watcher either by using our dedicated `Watcher CLI`_ named ``watcher``, or by using the `OpenStack CLI`_ ``openstack``. If you want to deploy Watcher in Horizon, please refer to the `Watcher Horizon plugin installation guide`_. .. note:: Notice, that in this guide we'll use `OpenStack CLI`_ as major interface. Nevertheless, you can use `Watcher CLI`_ in the same way. It can be achieved by replacing .. code:: bash $ openstack optimize ... with .. code:: bash $ watcher ... .. _`installation guide`: https://docs.openstack.org/watcher/latest/install/ .. _`Watcher Horizon plugin installation guide`: https://docs.openstack.org/watcher-dashboard/latest/install/installation.html .. _`OpenStack CLI`: https://docs.openstack.org/python-openstackclient/latest/cli/man/openstack.html .. _`Watcher CLI`: https://docs.openstack.org/python-watcherclient/latest/cli/index.html Watcher CLI Command ------------------- We can see all of the commands available with Watcher CLI by running the watcher binary without options. .. 
code:: bash $ openstack help optimize Running an audit of the cluster ------------------------------- First, you need to find the :ref:`goal ` you want to achieve: .. code:: bash $ openstack optimize goal list .. note:: If you get "*You must provide a username via either --os-username or via env[OS_USERNAME]*" you may have to verify your credentials. Then, you can create an :ref:`audit template `. An :ref:`audit template ` defines an optimization :ref:`goal ` to achieve (i.e. the settings of your audit). .. code:: bash $ openstack optimize audittemplate create my_first_audit_template Although optional, you may want to actually set a specific strategy for your audit template. If so, you may can search of its UUID or name using the following command: .. code:: bash $ openstack optimize strategy list --goal You can use the following command to check strategy details including which parameters of which format it supports: .. code:: bash $ openstack optimize strategy show The command to create your audit template would then be: .. code:: bash $ openstack optimize audittemplate create my_first_audit_template \ --strategy Then, you can create an audit. An audit is a request for optimizing your cluster depending on the specified :ref:`goal `. You can launch an audit on your cluster by referencing the :ref:`audit template ` (i.e. the settings of your audit) that you want to use. - Get the :ref:`audit template ` UUID or name: .. code:: bash $ openstack optimize audittemplate list - Start an audit based on this :ref:`audit template ` settings: .. code:: bash $ openstack optimize audit create -a If your_audit_template was created by --strategy , and it defines some parameters (command `watcher strategy show` to check parameters format), your can append `-p` to input required parameters: .. 
code:: bash $ openstack optimize audit create -a \ -p =5.5 -p =hi Input parameter could cause audit creation failure, when: - no predefined strategy for audit template - no parameters spec in predefined strategy - input parameters don't comply with spec Watcher service will compute an :ref:`Action Plan ` composed of a list of potential optimization :ref:`actions ` (instance migration, disabling of a compute node, ...) according to the :ref:`goal ` to achieve. - Wait until the Watcher audit has produced a new :ref:`action plan `, and get it: .. code:: bash $ openstack optimize actionplan list --audit - Have a look on the list of optimization :ref:`actions ` contained in this new :ref:`action plan `: .. code:: bash $ openstack optimize action list --action-plan Once you have learned how to create an :ref:`Action Plan `, it's time to go further by applying it to your cluster: - Execute the :ref:`action plan `: .. code:: bash $ openstack optimize actionplan start You can follow the states of the :ref:`actions ` by periodically calling: .. code:: bash $ openstack optimize action list --action-plan You can also obtain more detailed information about a specific action: .. code:: bash $ openstack optimize action show python-watcher-4.0.0/doc/source/user/index.rst0000664000175000017500000000016513656752270021414 0ustar zuulzuul00000000000000========== User Guide ========== .. toctree:: :maxdepth: 2 ways-to-install user-guide event_type_audit python-watcher-4.0.0/doc/source/user/event_type_audit.rst0000664000175000017500000003044213656752270023656 0ustar zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ====================== Audit using Aodh alarm ====================== Audit with EVENT type can be triggered by special alarm. This guide walks you through the steps to build an event-driven optimization solution by integrating Watcher with Ceilometer/Aodh. Step 1: Create an audit with EVENT type ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The first step is to create an audit with EVENT type, you can create an audit template firstly: .. code-block:: bash $ openstack optimize audittemplate create your_template_name \ --strategy or create an audit directly with special goal and strategy: .. code-block:: bash $ openstack optimize audit create --goal \ --strategy --audit_type EVENT This is an example for creating an audit with dummy strategy: .. code-block:: bash $ openstack optimize audit create --goal dummy \ --strategy dummy --audit_type EVENT +---------------+--------------------------------------+ | Field | Value | +---------------+--------------------------------------+ | UUID | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 | | Name | dummy-2020-01-14T03:21:19.168467 | | Created At | 2020-01-14T03:21:19.200279+00:00 | | Updated At | None | | Deleted At | None | | State | PENDING | | Audit Type | EVENT | | Parameters | {u'para2': u'hello', u'para1': 3.2} | | Interval | None | | Goal | dummy | | Strategy | dummy | | Audit Scope | [] | | Auto Trigger | False | | Next Run Time | None | | Hostname | None | | Start Time | None | | End Time | None | | Force | False | +---------------+--------------------------------------+ We need to build Aodh action url using Watcher webhook API. 
For convenience we export the url into an environment variable: .. code-block:: bash $ export AUDIT_UUID=a3326a6a-c18e-4e8e-adba-d0c61ad404c5 $ export ALARM_URL="trust+http://localhost/infra-optim/v1/webhooks/$AUDIT_UUID" Step 2: Create Aodh Alarm ~~~~~~~~~~~~~~~~~~~~~~~~~ Once we have the audit created, we can continue to create Aodh alarm and set the alarm action to Watcher webhook API. The alarm type can be event( i.e. ``compute.instance.create.end``) or gnocchi_resources_threshold(i.e. ``cpu_util``), more info refer to alarm-creation_ For example: .. code-block:: bash $ openstack alarm create \ --type event --name instance_create \ --event-type "compute.instance.create.end" \ --enable True --repeat-actions False \ --alarm-action $ALARM_URL +---------------------------+------------------------------------------------------------------------------------------+ | Field | Value | +---------------------------+------------------------------------------------------------------------------------------+ | alarm_actions | [u'trust+http://localhost/infra-optim/v1/webhooks/a3326a6a-c18e-4e8e-adba-d0c61ad404c5'] | | alarm_id | b9e381fc-8e3e-4943-82ee-647e7a2ef644 | | description | Alarm when compute.instance.create.end event occurred. | | enabled | True | | event_type | compute.instance.create.end | | insufficient_data_actions | [] | | name | instance_create | | ok_actions | [] | | project_id | 728d66e18c914af1a41e2a585cf766af | | query | | | repeat_actions | False | | severity | low | | state | insufficient data | | state_reason | Not evaluated yet | | state_timestamp | 2020-01-14T03:56:26.894416 | | time_constraints | [] | | timestamp | 2020-01-14T03:56:26.894416 | | type | event | | user_id | 88c40156af7445cc80580a1e7e3ba308 | +---------------------------+------------------------------------------------------------------------------------------+ .. 
_alarm-creation: https://docs.openstack.org/aodh/latest/admin/telemetry-alarms.html#alarm-creation Step 3: Trigger the alarm ~~~~~~~~~~~~~~~~~~~~~~~~~ In this example, you can create a new instance to trigger the alarm. The alarm state will translate from ``insufficient data`` to ``alarm``. .. code-block:: bash $ openstack alarm show b9e381fc-8e3e-4943-82ee-647e7a2ef644 +---------------------------+-------------------------------------------------------------------------------------------------------------------+ | Field | Value | +---------------------------+-------------------------------------------------------------------------------------------------------------------+ | alarm_actions | [u'trust+http://localhost/infra-optim/v1/webhooks/a3326a6a-c18e-4e8e-adba-d0c61ad404c5'] | | alarm_id | b9e381fc-8e3e-4943-82ee-647e7a2ef644 | | description | Alarm when compute.instance.create.end event occurred. | | enabled | True | | event_type | compute.instance.create.end | | insufficient_data_actions | [] | | name | instance_create | | ok_actions | [] | | project_id | 728d66e18c914af1a41e2a585cf766af | | query | | | repeat_actions | False | | severity | low | | state | alarm | | state_reason | Event hits the query . | | state_timestamp | 2020-01-14T03:56:26.894416 | | time_constraints | [] | | timestamp | 2020-01-14T06:17:40.350649 | | type | event | | user_id | 88c40156af7445cc80580a1e7e3ba308 | +---------------------------+-------------------------------------------------------------------------------------------------------------------+ Step 4: Verify the audit ~~~~~~~~~~~~~~~~~~~~~~~~ This can be verified to check if the audit state was ``SUCCEEDED``: .. 
code-block:: bash $ openstack optimize audit show a3326a6a-c18e-4e8e-adba-d0c61ad404c5 +---------------+--------------------------------------+ | Field | Value | +---------------+--------------------------------------+ | UUID | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 | | Name | dummy-2020-01-14T03:21:19.168467 | | Created At | 2020-01-14T03:21:19+00:00 | | Updated At | 2020-01-14T06:26:40+00:00 | | Deleted At | None | | State | SUCCEEDED | | Audit Type | EVENT | | Parameters | {u'para2': u'hello', u'para1': 3.2} | | Interval | None | | Goal | dummy | | Strategy | dummy | | Audit Scope | [] | | Auto Trigger | False | | Next Run Time | None | | Hostname | ubuntudbs | | Start Time | None | | End Time | None | | Force | False | +---------------+--------------------------------------+ and you can use the following command to check if the action plan was created: .. code-block:: bash $ openstack optimize actionplan list --audit a3326a6a-c18e-4e8e-adba-d0c61ad404c5 +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ | UUID | Audit | State | Updated At | Global efficacy | +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ | 673b3fcb-8c16-4a41-9ee3-2956d9f6ca9e | a3326a6a-c18e-4e8e-adba-d0c61ad404c5 | RECOMMENDED | None | | +--------------------------------------+--------------------------------------+-------------+------------+-----------------+ python-watcher-4.0.0/doc/source/index.rst0000664000175000017500000000356213656752270020442 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. 
You can view the license at: https://creativecommons.org/licenses/by/3.0/ ================================ Welcome to Watcher documentation ================================ OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Watcher provides a complete optimization loop—including everything from a metrics receiver, complex event processor and profiler, optimization processor and an action plan applier. This provides a robust framework to realize a wide range of cloud optimization goals, including the reduction of data center operating costs, increased system performance via intelligent virtual machine migration, increased energy efficiency and more! Watcher project consists of several source code repositories: * `watcher`_ - is the main repository. It contains code for Watcher API server, Watcher Decision Engine and Watcher Applier. * `python-watcherclient`_ - Client library and CLI client for Watcher. * `watcher-dashboard`_ - Watcher Horizon plugin. The documentation provided here is continually kept up-to-date based on the latest code, and may not represent the state of the project at any specific prior release. .. _watcher: https://opendev.org/openstack/watcher/ .. _python-watcherclient: https://opendev.org/openstack/python-watcherclient/ .. _watcher-dashboard: https://opendev.org/openstack/watcher-dashboard/ .. toctree:: :maxdepth: 2 architecture contributor/index install/index admin/index user/index configuration/index contributor/plugin/index man/index .. toctree:: :maxdepth: 1 API Reference Watcher API Microversion History glossary python-watcher-4.0.0/doc/source/contributor/0000775000175000017500000000000013656752352021146 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/contributor/api_microversion_history.rst0000664000175000017500000000011313656752270027023 0ustar zuulzuul00000000000000.. 
include:: ../../../watcher/api/controllers/rest_api_version_history.rst python-watcher-4.0.0/doc/source/contributor/environment.rst0000664000175000017500000001733313656752270024252 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher_development_environment: ========================================= Set up a development environment manually ========================================= This document describes getting the source from watcher `Git repository`_ for development purposes. To install Watcher from packaging, refer instead to Watcher `User Documentation`_. .. _`Git Repository`: https://opendev.org/openstack/watcher .. _`User Documentation`: https://docs.openstack.org/watcher/latest/ Prerequisites ============= This document assumes you are using Ubuntu or Fedora, and that you have the following tools available on your system: - Python_ 2.7 and 3.5 - git_ - setuptools_ - pip_ - msgfmt (part of the gettext package) - virtualenv and virtualenvwrapper_ **Reminder**: If you're successfully using a different platform, or a different version of the above, please document your configuration here! .. _Python: https://www.python.org/ .. _git: https://git-scm.com/ .. _setuptools: https://pypi.org/project/setuptools .. _virtualenvwrapper: https://virtualenvwrapper.readthedocs.io/en/latest/install.html Getting the latest code ======================= Make a clone of the code from our `Git repository`: .. code-block:: bash $ git clone https://opendev.org/openstack/watcher.git When that is complete, you can: .. 
code-block:: bash $ cd watcher Installing dependencies ======================= Watcher maintains two lists of dependencies:: requirements.txt test-requirements.txt The first is the list of dependencies needed for running Watcher, the second list includes dependencies used for active development and testing of Watcher itself. These dependencies can be installed from PyPi_ using the Python tool pip_. .. _PyPi: https://pypi.org/ .. _pip: https://pypi.org/project/pip However, your system *may* need additional dependencies that `pip` (and by extension, PyPi) cannot satisfy. These dependencies should be installed prior to using `pip`, and the installation method may vary depending on your platform. * Ubuntu 16.04:: $ sudo apt-get install python-dev libssl-dev libmysqlclient-dev libffi-dev * Fedora 24+:: $ sudo dnf install redhat-rpm-config gcc python-devel libxml2-devel * CentOS 7:: $ sudo yum install gcc python-devel libxml2-devel libxslt-devel mariadb-devel PyPi Packages and VirtualEnv ---------------------------- We recommend establishing a virtualenv to run Watcher within. virtualenv limits the Python environment to just what you're installing as dependencies, useful to keep a clean environment for working on Watcher. .. code-block:: bash $ mkvirtualenv watcher $ git clone https://opendev.org/openstack/watcher.git # Use 'python setup.py' to link Watcher into Python's site-packages $ cd watcher && python setup.py install # Install the dependencies for running Watcher $ pip install -r ./requirements.txt # Install the dependencies for developing, testing, and running Watcher $ pip install -r ./test-requirements.txt This will create a local virtual environment in the directory ``$WORKON_HOME``. The virtual environment can be disabled using the command: .. code-block:: bash $ deactivate You can re-activate this virtualenv for your current shell using: .. 
code-block:: bash $ workon watcher For more information on virtual environments, see virtualenv_ and virtualenvwrapper_. .. _virtualenv: https://pypi.org/project/virtualenv/ Verifying Watcher is set up =========================== Once set up, either directly or within a virtualenv, you should be able to invoke Python and import the libraries. If you're using a virtualenv, don't forget to activate it: .. code-block:: bash $ workon watcher You should then be able to `import watcher` using Python without issue: .. code-block:: bash $ python -c "import watcher" If you can import watcher without a traceback, you should be ready to develop. Run Watcher tests ================= Watcher provides both :ref:`unit tests ` and :ref:`functional/tempest tests `. Please refer to :doc:`testing` to understand how to run them. Build the Watcher documentation =============================== You can easily build the HTML documentation from ``doc/source`` files, by using ``tox``: .. code-block:: bash $ workon watcher (watcher) $ cd watcher (watcher) $ tox -edocs The HTML files are available into ``doc/build`` directory. Configure the Watcher services ============================== Watcher services require a configuration file. Use tox to generate a sample configuration file that can be used to get started: .. code-block:: bash $ tox -e genconfig $ cp etc/watcher.conf.sample etc/watcher.conf Most of the default configuration should be enough to get you going, but you still need to configure the following sections: - The ``[database]`` section to configure the :ref:`Watcher database ` - The ``[keystone_authtoken]`` section to configure the :ref:`Identity service ` i.e. 
Keystone - The ``[watcher_messaging]`` section to configure the OpenStack AMQP-based message bus - The ``watcher_clients_auth`` section to configure Keystone client to access related OpenStack projects So if you need some more details on how to configure one or more of these sections, please do have a look at :doc:`../configuration/configuring` before continuing. Create Watcher SQL database =========================== When initially getting set up, after you've configured which databases to use, you're probably going to need to run the following to your database schema in place: .. code-block:: bash $ workon watcher (watcher) $ watcher-db-manage create_schema Running Watcher services ======================== To run the Watcher API service, use: .. code-block:: bash $ workon watcher (watcher) $ watcher-api To run the Watcher Decision Engine service, use: .. code-block:: bash $ workon watcher (watcher) $ watcher-decision-engine To run the Watcher Applier service, use: .. code-block:: bash $ workon watcher (watcher) $ watcher-applier Default configuration of these services are available into ``/etc/watcher`` directory. See :doc:`../configuration/configuring` for details on how Watcher is configured. By default, Watcher is configured with SQL backends. Interact with Watcher ===================== You can also interact with Watcher through its REST API. There is a Python Watcher client library `python-watcherclient`_ which interacts exclusively through the REST API, and which Watcher itself uses to provide its command-line interface. .. _`python-watcherclient`: https://github.com/openstack/python-watcherclient There is also an Horizon plugin for Watcher `watcher-dashboard`_ which allows to interact with Watcher through a web-based interface. .. 
_`watcher-dashboard`: https://github.com/openstack/watcher-dashboard Exercising the Watcher Services locally ======================================= If you would like to exercise the Watcher services in isolation within a local virtual environment, you can do this without starting any other OpenStack services. For example, this is useful for rapidly prototyping and debugging interactions over the RPC channel, testing database migrations, and so forth. You will find in the `watcher-tools`_ project, Ansible playbooks and Docker template files to easily play with Watcher services within a minimal OpenStack isolated environment (Identity, Message Bus, SQL database, Horizon, ...). .. _`watcher-tools`: https://github.com/b-com/watcher-tools python-watcher-4.0.0/doc/source/contributor/rally_link.rst0000664000175000017500000000005413656752270024036 0ustar zuulzuul00000000000000.. include:: ../../../rally-jobs/README.rst python-watcher-4.0.0/doc/source/contributor/notifications.rst0000664000175000017500000000052313656752270024550 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher_notifications: ======================== Notifications in Watcher ======================== .. versioned_notifications:: python-watcher-4.0.0/doc/source/contributor/plugin/0000775000175000017500000000000013656752352022444 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/contributor/plugin/strategy-plugin.rst0000664000175000017500000002715713656752270026347 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. 
_implement_strategy_plugin: ================================= Build a new optimization strategy ================================= Watcher Decision Engine has an external :ref:`strategy ` plugin interface which gives anyone the ability to integrate an external strategy in order to make use of placement algorithms. This section gives some guidelines on how to implement and integrate custom strategies with Watcher. If you wish to create a third-party package for your plugin, you can refer to our :ref:`documentation for third-party package creation `. Pre-requisites ============== Before using any strategy, you should make sure you have your Telemetry service configured so that it would provide you all the metrics you need to be able to use your strategy. Create a new strategy plugin ============================ In order to create a new strategy, you have to: - Extend the :py:class:`~.UnclassifiedStrategy` class - Implement its :py:meth:`~.BaseStrategy.get_name` class method to return the **unique** ID of the new strategy you want to create. This unique ID should be the same as the name of :ref:`the entry point we will declare later on `. - Implement its :py:meth:`~.BaseStrategy.get_display_name` class method to return the translated display name of the strategy you want to create. Note: Do not use a variable to return the translated string so it can be automatically collected by the translation tool. - Implement its :py:meth:`~.BaseStrategy.get_translatable_display_name` class method to return the translation key (actually the English display name) of your new strategy. The value return should be the same as the string translated in :py:meth:`~.BaseStrategy.get_display_name`. - Implement its :py:meth:`~.BaseStrategy.execute` method to return the solution you computed within your strategy. Here is an example showing how you can write a plugin called ``NewStrategy``: .. 
code-block:: python # filepath: thirdparty/new.py # import path: thirdparty.new import abc import six from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base class NewStrategy(base.UnclassifiedStrategy): def __init__(self, osc=None): super(NewStrategy, self).__init__(osc) def execute(self, original_model): self.solution.add_action(action_type="nop", input_parameters=parameters) # Do some more stuff here ... return self.solution @classmethod def get_name(cls): return "new_strategy" @classmethod def get_display_name(cls): return _("New strategy") @classmethod def get_translatable_display_name(cls): return "New strategy" As you can see in the above example, the :py:meth:`~.BaseStrategy.execute` method returns a :py:class:`~.BaseSolution` instance as required. This solution is what wraps the abstract set of actions the strategy recommends to you. This solution is then processed by a :ref:`planner ` to produce an action plan which contains the sequenced flow of actions to be executed by the :ref:`Watcher Applier `. This solution also contains the various :ref:`efficacy indicators ` alongside its computed :ref:`global efficacy `. Please note that your strategy class will expect to find the same constructor signature as BaseStrategy to instantiate you strategy. Therefore, you should ensure that your ``__init__`` signature is identical to the :py:class:`~.BaseStrategy` one. Strategy efficacy ================= As stated before, the ``NewStrategy`` class extends a class called :py:class:`~.UnclassifiedStrategy`. This class actually implements a set of abstract methods which are defined within the :py:class:`~.BaseStrategy` parent class. One thing this :py:class:`~.UnclassifiedStrategy` class defines is that our ``NewStrategy`` achieves the ``unclassified`` goal. This goal is a peculiar one as it does not contain any indicator nor does it calculate a global efficacy. 
This proves itself to be quite useful during the development of a new strategy for which the goal has yet to be defined or in case a :ref:`new goal ` has yet to be implemented. Define Strategy Parameters ========================== For each new added strategy, you can add parameters spec so that an operator can input strategy parameters when creating an audit to control the :py:meth:`~.BaseStrategy.execute` behavior of strategy. This is useful to define some threshold for your strategy, and tune them at runtime. To define parameters, just implements :py:meth:`~.BaseStrategy.get_schema` to return parameters spec with `jsonschema `_ format. It is strongly encouraged that provide default value for each parameter, or else reference fails if operator specify no parameters. Here is an example showing how you can define 2 parameters for ``DummyStrategy``: .. code-block:: python class DummyStrategy(base.DummyBaseStrategy): @classmethod def get_schema(cls): return { "properties": { "para1": { "description": "number parameter example", "type": "number", "default": 3.2, "minimum": 1.0, "maximum": 10.2, }, "para2": { "description": "string parameter example", "type": "string", "default": "hello", }, }, } You can reference parameters in :py:meth:`~.BaseStrategy.execute`: .. code-block:: python class DummyStrategy(base.DummyBaseStrategy): def execute(self): para1 = self.input_parameters.para1 para2 = self.input_parameters.para2 if para1 > 5: ... Operator can specify parameters with following commands: .. code:: bash $ watcher audit create -a -p para1=6.0 -p para2=hi Pls. check user-guide for details. Abstract Plugin Class ===================== Here below is the abstract :py:class:`~.BaseStrategy` class: .. autoclass:: watcher.decision_engine.strategy.strategies.base.BaseStrategy :members: :special-members: __init__ :noindex: .. 
_strategy_plugin_add_entrypoint: Add a new entry point ===================== In order for the Watcher Decision Engine to load your new strategy, the strategy must be registered as a named entry point under the ``watcher_strategies`` entry point of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. The name you give to your entry point has to be unique and should be the same as the value returned by the :py:meth:`~.BaseStrategy.get_name` class method of your strategy. Here below is how you would proceed to register ``NewStrategy`` using pbr_: .. code-block:: ini [entry_points] watcher_strategies = new_strategy = thirdparty.new:NewStrategy To get a better understanding on how to implement a more advanced strategy, have a look at the :py:class:`~.BasicConsolidation` class. .. _pbr: https://docs.openstack.org/pbr/latest Using strategy plugins ====================== The Watcher Decision Engine service will automatically discover any installed plugins when it is restarted. If a Python package containing a custom plugin is installed within the same environment as Watcher, Watcher will automatically make that plugin available for use. At this point, Watcher will scan and register inside the :ref:`Watcher Database ` all the strategies (alongside the goals they should satisfy) you implemented upon restarting the :ref:`Watcher Decision Engine `. You should take care when installing strategy plugins. By their very nature, there are no guarantees that utilizing them as is will be supported, as they may require a set of metrics which is not yet available within the Telemetry service. In such a case, please do make sure that you first check/configure the latter so your new strategy can be fully functional. Querying metrics ---------------- A large set of metrics, generated by OpenStack modules, can be used in your strategy implementation. 
To collect these metrics, Watcher provides a `DataSourceManager`_ for two data sources which are `Ceilometer`_ (with `Gnocchi`_ as API) and `Monasca`_. If you wish to query metrics from a different data source, you can implement your own and use it via DataSourceManager from within your new strategy. Indeed, strategies in Watcher have the cluster data models decoupled from the data sources which means that you may keep the former while changing the latter. The recommended way for you to support a new data source is to implement a new helper that would encapsulate within separate methods the queries you need to perform. To then use it, you would just have to add it to appropriate watcher_strategies.* section in config file. If you want to use Ceilometer but with your own metrics database backend, please refer to the `Ceilometer developer guide`_. The list of the available Ceilometer backends is located here_. The `Ceilosca`_ project is a good example of how to create your own pluggable backend. Moreover, if your strategy requires new metrics not covered by Ceilometer, you can add them through a `Ceilometer plugin`_. .. _`DataSourceManager`: https://github.com/openstack/watcher/blob/master/watcher/datasource/manager.py .. _`Ceilometer developer guide`: https://docs.openstack.org/ceilometer/latest/contributor/architecture.html#storing-accessing-the-data .. _`Ceilometer`: https://docs.openstack.org/ceilometer/latest .. _`Monasca`: https://github.com/openstack/monasca-api/blob/master/docs/monasca-api-spec.md .. _`here`: https://docs.openstack.org/ceilometer/latest/contributor/install/dbreco.html#choosing-a-database-backend .. _`Ceilometer plugin`: https://docs.openstack.org/ceilometer/latest/contributor/plugins.html .. _`Ceilosca`: https://github.com/openstack/monasca-ceilometer/blob/master/ceilosca/ceilometer/storage/impl_monasca.py .. 
_`Gnocchi`: https://gnocchi.xyz/ Read usage metrics using the Watcher Datasource Helper ------------------------------------------------------ The following code snippet shows how datasource_backend is defined: .. code-block:: py from watcher.datasource import manager as ds_manager @property def datasource_backend(self): if not self._datasource_backend: # Load the global preferred datasources order but override it # if the strategy has a specific datasources config datasources = CONF.watcher_datasources if self.config.datasources: datasources = self.config self._datasource_backend = ds_manager.DataSourceManager( config=datasources, osc=self.osc ).get_backend(self.DATASOURCE_METRICS) return self._datasource_backend Using that you can now query the values for that specific metric: .. code-block:: py avg_meter = self.datasource_backend.statistic_aggregation( instance.uuid, 'cpu_util', self.periods['instance'], self.granularity, aggregation=self.aggregation_method['instance']) python-watcher-4.0.0/doc/source/contributor/plugin/base-setup.rst0000664000175000017500000000634113656752270025251 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _plugin-base_setup: ======================================= Create a third-party plugin for Watcher ======================================= Watcher provides a plugin architecture which allows anyone to extend the existing functionalities by implementing third-party plugins. This process can be cumbersome so this documentation is there to help you get going as quickly as possible. Pre-requisites ============== We assume that you have set up a working Watcher development environment. So if this not already the case, you can check out our documentation which explains how to set up a :ref:`development environment `. .. 
_development environment: Third party project scaffolding =============================== First off, we need to create the project structure. To do so, we can use `cookiecutter`_ and the `OpenStack cookiecutter`_ project scaffolder to generate the skeleton of our project:: $ virtualenv thirdparty $ . thirdparty/bin/activate $ pip install cookiecutter $ cookiecutter https://github.com/openstack-dev/cookiecutter The last command will ask you for many information, and If you set ``module_name`` and ``repo_name`` as ``thirdparty``, you should end up with a structure that looks like this:: $ cd thirdparty $ tree . . ├── babel.cfg ├── CONTRIBUTING.rst ├── doc │   └── source │   ├── conf.py │   ├── contributing.rst │   ├── index.rst │   ├── installation.rst │   ├── readme.rst │   └── usage.rst ├── HACKING.rst ├── LICENSE ├── MANIFEST.in ├── README.rst ├── requirements.txt ├── setup.cfg ├── setup.py ├── test-requirements.txt ├── thirdparty │   ├── __init__.py │   └── tests │   ├── base.py │   ├── __init__.py │   └── test_thirdparty.py └── tox.ini **Note:** You should add `python-watcher`_ as a dependency in the requirements.txt file:: # Watcher-specific requirements python-watcher .. _cookiecutter: https://github.com/audreyr/cookiecutter .. _OpenStack cookiecutter: https://github.com/openstack-dev/cookiecutter .. _python-watcher: https://pypi.org/project/python-watcher Implementing a plugin for Watcher ================================= Now that the project skeleton has been created, you can start the implementation of your plugin. As of now, you can implement the following plugins for Watcher: - A :ref:`goal plugin ` - A :ref:`strategy plugin ` - An :ref:`action plugin ` - A :ref:`planner plugin ` - A workflow engine plugin - A :ref:`cluster data model collector plugin ` If you want to learn more on how to implement them, you can refer to their dedicated documentation. 
python-watcher-4.0.0/doc/source/contributor/plugin/planner-plugin.rst0000664000175000017500000001374113656752270026136 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _implement_planner_plugin: =================== Build a new planner =================== Watcher :ref:`Decision Engine ` has an external :ref:`planner ` plugin interface which gives anyone the ability to integrate an external :ref:`planner ` in order to extend the initial set of planners Watcher provides. This section gives some guidelines on how to implement and integrate custom planners with Watcher. .. _Decision Engine: watcher_decision_engine_definition Creating a new plugin ===================== First of all you have to extend the base :py:class:`~.BasePlanner` class which defines an abstract method that you will have to implement. The :py:meth:`~.BasePlanner.schedule` is the method being called by the Decision Engine to schedule a given solution (:py:class:`~.BaseSolution`) into an :ref:`action plan ` by ordering/sequencing an unordered set of actions contained in the proposed solution (for more details, see :ref:`definition of a solution `). Here is an example showing how you can write a planner plugin called ``DummyPlanner``: .. 
code-block:: python # Filepath = third-party/third_party/dummy.py # Import path = third_party.dummy from oslo_utils import uuidutils from watcher.decision_engine.planner import base class DummyPlanner(base.BasePlanner): def _create_action_plan(self, context, audit_id): action_plan_dict = { 'uuid': uuidutils.generate_uuid(), 'audit_id': audit_id, 'first_action_id': None, 'state': objects.action_plan.State.RECOMMENDED } new_action_plan = objects.ActionPlan(context, **action_plan_dict) new_action_plan.create(context) new_action_plan.save() return new_action_plan def schedule(self, context, audit_id, solution): # Empty action plan action_plan = self._create_action_plan(context, audit_id) # todo: You need to create the workflow of actions here # and attach it to the action plan return action_plan This implementation is the most basic one. So if you want to have more advanced examples, have a look at the implementation of planners already provided by Watcher like :py:class:`~.DefaultPlanner`. A list with all available planner plugins can be found :ref:`here `. Define configuration parameters =============================== At this point, you have a fully functional planner. However, in more complex implementation, you may want to define some configuration options so one can tune the planner to its needs. To do so, you can implement the :py:meth:`~.Loadable.get_config_opts` class method as followed: .. code-block:: python from oslo_config import cfg class DummyPlanner(base.BasePlanner): # [...] def schedule(self, context, audit_uuid, solution): assert self.config.test_opt == 0 # [...] @classmethod def get_config_opts(cls): return super( DummyPlanner, cls).get_config_opts() + [ cfg.StrOpt('test_opt', help="Demo Option.", default=0), # Some more options ... ] The configuration options defined within this class method will be included within the global ``watcher.conf`` configuration file under a section named by convention: ``{namespace}.{plugin_name}``. 
In our case, the ``watcher.conf`` configuration would have to be modified as follows: .. code-block:: ini [watcher_planners.dummy] # Option used for testing. test_opt = test_value The configuration options you define within this method will then be injected in each instantiated object via the ``config`` parameter of the :py:meth:`~.BasePlanner.__init__` method. Abstract Plugin Class ===================== Here below is the abstract ``BasePlanner`` class that every single planner should implement: .. autoclass:: watcher.decision_engine.planner.base.BasePlanner :members: :special-members: __init__ :noindex: Register a new entry point ========================== In order for the Watcher Decision Engine to load your new planner, the latter must be registered as a new entry point under the ``watcher_planners`` entry point namespace of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. The name you give to your entry point has to be unique. Here below is how you would proceed to register ``DummyPlanner`` using pbr_: .. code-block:: ini [entry_points] watcher_planners = dummy = third_party.dummy:DummyPlanner .. _pbr: https://docs.openstack.org/pbr/latest Using planner plugins ===================== The :ref:`Watcher Decision Engine ` service will automatically discover any installed plugins when it is started. This means that if Watcher is already running when you install your plugin, you will have to restart the related Watcher services. If a Python package containing a custom plugin is installed within the same environment as Watcher, Watcher will automatically make that plugin available for use. At this point, Watcher will use your new planner if you referenced it in the ``planner`` option under the ``[watcher_planner]`` section of your ``watcher.conf`` configuration file when you started it. For example, if you want to use the ``dummy`` planner you just installed, you would have to select it as follows: ..
code-block:: ini [watcher_planner] planner = dummy As you may have noticed, only a single planner implementation can be activated at a time, so make sure it is generic enough to support all your strategies and actions. python-watcher-4.0.0/doc/source/contributor/plugin/goal-plugin.rst0000664000175000017500000001726613656752270025427 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _implement_goal_plugin: ================ Build a new goal ================ Watcher Decision Engine has an external :ref:`goal ` plugin interface which gives anyone the ability to integrate an external goal which can be achieved by a :ref:`strategy `. This section gives some guidelines on how to implement and integrate custom goals with Watcher. If you wish to create a third-party package for your plugin, you can refer to our :ref:`documentation for third-party package creation `. Pre-requisites ============== Before using any goal, please make sure that none of the existing goals fit your needs. Indeed, the underlying value of defining a goal is to be able to compare the efficacy of the action plans resulting from the various strategies satisfying the same goal. By doing so, Watcher can assist the administrator in his choices. Create a new plugin =================== In order to create a new goal, you have to: - Extend the :py:class:`~.base.Goal` class. - Implement its :py:meth:`~.Goal.get_name` class method to return the **unique** ID of the new goal you want to create. This unique ID should be the same as the name of :ref:`the entry point you will declare later on `. - Implement its :py:meth:`~.Goal.get_display_name` class method to return the translated display name of the goal you want to create. Note: Do not use a variable to return the translated string so it can be automatically collected by the translation tool. 
- Implement its :py:meth:`~.Goal.get_translatable_display_name` class method to return the translation key (actually the english display name) of your new goal. The value return should be the same as the string translated in :py:meth:`~.Goal.get_display_name`. - Implement its :py:meth:`~.Goal.get_efficacy_specification` method to return the :ref:`efficacy specification ` for your goal. Here is an example showing how you can define a new ``NewGoal`` goal plugin: .. code-block:: python # filepath: thirdparty/new.py # import path: thirdparty.new from watcher._i18n import _ from watcher.decision_engine.goal import base from watcher.decision_engine.goal.efficacy import specs class NewGoal(base.Goal): @classmethod def get_name(cls): return "new_goal" # Will be the name of the entry point @classmethod def get_display_name(cls): return _("New Goal") @classmethod def get_translatable_display_name(cls): return "New Goal" @classmethod def get_efficacy_specification(cls): return specs.Unclassified() As you may have noticed, the :py:meth:`~.Goal.get_efficacy_specification` method returns an :py:meth:`~.Unclassified` instance which is provided by Watcher. This efficacy specification is useful during the development process of your goal as it corresponds to an empty specification. If you want to learn more about what efficacy specifications are used for or to define your own efficacy specification, please refer to the :ref:`related section below `. Abstract Plugin Class ===================== Here below is the abstract :py:class:`~.base.Goal` class: .. autoclass:: watcher.decision_engine.goal.base.Goal :members: :noindex: .. _goal_plugin_add_entrypoint: Add a new entry point ===================== In order for the Watcher Decision Engine to load your new goal, the goal must be registered as a named entry point under the ``watcher_goals`` entry point namespace of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. 
The name you give to your entry point has to be unique and should be the same as the value returned by the :py:meth:`~.base.Goal.get_name` class method of your goal. Here below is how you would proceed to register ``NewGoal`` using pbr_: .. code-block:: ini [entry_points] watcher_goals = new_goal = thirdparty.new:NewGoal To get a better understanding on how to implement a more advanced goal, have a look at the :py:class:`watcher.decision_engine.goal.goals.ServerConsolidation` class. .. _pbr: https://docs.openstack.org/pbr/latest .. _implement_efficacy_specification: Implement a customized efficacy specification ============================================= What is it for? --------------- Efficacy specifications define a set of specifications for a given goal. These specifications actually define a list of indicators which are to be used to compute a global efficacy that outlines how well a strategy performed when trying to achieve the goal it is associated to. The idea behind such specification is to give the administrator the possibility to run an audit using different strategies satisfying the same goal and be able to judge how they performed at a glance. Implementation -------------- In order to create a new efficacy specification, you have to: - Extend the :py:class:`~.EfficacySpecification` class. - Implement :py:meth:`~.EfficacySpecification.get_indicators_specifications` by returning a list of :py:class:`~.IndicatorSpecification` instances. * Each :py:class:`~.IndicatorSpecification` instance should actually extend the latter. * Each indicator specification should have a **unique name** which should be a valid Python variable name. * They should implement the :py:attr:`~.EfficacySpecification.schema` abstract property by returning a :py:class:`~.voluptuous.Schema` instance. 
This schema is the contract the strategy will have to comply with when setting the value associated with the indicator specification within its solution (see the :ref:`architecture of Watcher ` for more information on the audit execution workflow). - Implement the :py:meth:`~.EfficacySpecification.get_global_efficacy` method: it should compute the global efficacy for the goal it achieves based on the efficacy indicators you just defined. Here below is an example of an efficacy specification containing one indicator specification: .. code-block:: python import voluptuous from watcher._i18n import _ from watcher.decision_engine.goal.efficacy import base as efficacy_base from watcher.decision_engine.goal.efficacy import indicators from watcher.decision_engine.solution import efficacy class IndicatorExample(indicators.IndicatorSpecification): def __init__(self): super(IndicatorExample, self).__init__( name="indicator_example", description=_("Example of indicator specification."), unit=None, ) @property def schema(self): return voluptuous.Schema(voluptuous.Range(min=0), required=True) class UnclassifiedStrategySpecification(efficacy_base.EfficacySpecification): def get_indicators_specifications(self): return [IndicatorExample()] def get_global_efficacy(self, indicators_map): return efficacy.Indicator( name="global_efficacy_indicator", description="Example of global efficacy indicator", unit="%", value=indicators_map.indicator_example % 100) To get a better understanding on how to implement an efficacy specification, have a look at :py:class:`~.ServerConsolidationSpecification`. Also, if you want to see a concrete example of an indicator specification, have a look at :py:class:`~.ReleasedComputeNodesCount`. python-watcher-4.0.0/doc/source/contributor/plugin/cdmc-plugin.rst0000664000175000017500000002310313656752270025376 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License.
You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _implement_cluster_data_model_collector_plugin: ======================================== Build a new cluster data model collector ======================================== Watcher Decision Engine has an external cluster data model (CDM) plugin interface which gives anyone the ability to integrate an external cluster data model collector (CDMC) in order to extend the initial set of cluster data model collectors Watcher provides. This section gives some guidelines on how to implement and integrate custom cluster data model collectors within Watcher. Creating a new plugin ===================== In order to create a new cluster data model collector, you have to: - Extend the :py:class:`~.base.BaseClusterDataModelCollector` class. - Implement its :py:meth:`~.BaseClusterDataModelCollector.execute` abstract method to return your entire cluster data model that this method should build. - Implement its :py:meth:`~.BaseClusterDataModelCollector.audit_scope_handler` abstract property to return your audit scope handler. - Implement its :py:meth:`~.Goal.notification_endpoints` abstract property to return the list of all the :py:class:`~.base.NotificationEndpoint` instances that will be responsible for handling incoming notifications in order to incrementally update your cluster data model. First of all, you have to extend the :class:`~.BaseClusterDataModelCollector` base class which defines the :py:meth:`~.BaseClusterDataModelCollector.execute` abstract method you will have to implement. This method is responsible for building an entire cluster data model. Here is an example showing how you can write a plugin called ``DummyClusterDataModelCollector``: .. 
code-block:: python # Filepath = /thirdparty/dummy.py # Import path = thirdparty.dummy from watcher.decision_engine.model import model_root from watcher.decision_engine.model.collector import base class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = model_root.ModelRoot() # Do something here... return model @property def audit_scope_handler(self): return None @property def notification_endpoints(self): return [] This implementation is the most basic one. So in order to get a better understanding on how to implement a more advanced cluster data model collector, have a look at the :py:class:`~.NovaClusterDataModelCollector` class. Define a custom model ===================== As you may have noticed in the above example, we are reusing an existing model provided by Watcher. However, this model can be easily customized by implementing a new class that would implement the :py:class:`~.Model` abstract base class. Here below is simple example on how to proceed in implementing a custom Model: .. code-block:: python # Filepath = /thirdparty/dummy.py # Import path = thirdparty.dummy from watcher.decision_engine.model import base as modelbase from watcher.decision_engine.model.collector import base class MyModel(modelbase.Model): def to_string(self): return 'MyModel' class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = MyModel() # Do something here... return model @property def notification_endpoints(self): return [] Here below is the abstract ``Model`` class that every single cluster data model should implement: .. autoclass:: watcher.decision_engine.model.base.Model :members: :special-members: __init__ :noindex: Define configuration parameters =============================== At this point, you have a fully functional cluster data model collector. 
By default, cluster data model collectors define a ``period`` option (see :py:meth:`~.BaseClusterDataModelCollector.get_config_opts`) that corresponds to the interval of time between each synchronization of the in-memory model. However, in more complex implementation, you may want to define some configuration options so one can tune the cluster data model collector to your needs. To do so, you can implement the :py:meth:`~.Loadable.get_config_opts` class method as followed: .. code-block:: python from oslo_config import cfg from watcher.decision_engine.model import model_root from watcher.decision_engine.model.collector import base class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = model_root.ModelRoot() # Do something here... return model @property def audit_scope_handler(self): return None @property def notification_endpoints(self): return [] @classmethod def get_config_opts(cls): return super( DummyClusterDataModelCollector, cls).get_config_opts() + [ cfg.StrOpt('test_opt', help="Demo Option.", default=0), # Some more options ... ] The configuration options defined within this class method will be included within the global ``watcher.conf`` configuration file under a section named by convention: ``{namespace}.{plugin_name}`` (see section :ref:`Register a new entry point `). The namespace for CDMC plugins is ``watcher_cluster_data_model_collectors``, so in our case, the ``watcher.conf`` configuration would have to be modified as followed: .. code-block:: ini [watcher_cluster_data_model_collectors.dummy] # Option used for testing. test_opt = test_value Then, the configuration options you define within this method will then be injected in each instantiated object via the ``config`` parameter of the :py:meth:`~.BaseClusterDataModelCollector.__init__` method. 
Abstract Plugin Class ===================== Here below is the abstract ``BaseClusterDataModelCollector`` class that every single cluster data model collector should implement: .. autoclass:: watcher.decision_engine.model.collector.base.BaseClusterDataModelCollector :members: :special-members: __init__ :noindex: .. _register_new_cdmc_entrypoint: Register a new entry point ========================== In order for the Watcher Decision Engine to load your new cluster data model collector, the latter must be registered as a named entry point under the ``watcher_cluster_data_model_collectors`` entry point namespace of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. The name you give to your entry point has to be unique. Here below is how to register ``DummyClusterDataModelCollector`` using pbr_: .. code-block:: ini [entry_points] watcher_cluster_data_model_collectors = dummy = thirdparty.dummy:DummyClusterDataModelCollector .. _pbr: https://docs.openstack.org/pbr/latest/ Add new notification endpoints ============================== At this point, you have a fully functional cluster data model collector. However, this CDMC is only refreshed periodically via a background scheduler. As you may sometimes execute a strategy with a stale CDM due to a high activity on your infrastructure, you can define some notification endpoints that will be responsible for incrementally updating the CDM based on notifications emitted by other services such as Nova. To do so, you can implement and register a new ``DummyEndpoint`` notification endpoint regarding a ``dummy`` event as shown below: .. 
code-block:: python from watcher.decision_engine.model import model_root from watcher.decision_engine.model.collector import base class DummyNotification(base.NotificationEndpoint): @property def filter_rule(self): return filtering.NotificationFilter( publisher_id=r'.*', event_type=r'^dummy$', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): # Do some CDM modifications here... pass class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): def execute(self): model = model_root.ModelRoot() # Do something here... return model @property def notification_endpoints(self): return [DummyNotification(self)] Note that if the event you are trying to listen to is published by a new service, you may have to also add a new topic Watcher will have to subscribe to in the ``notification_topics`` option of the ``[watcher_decision_engine]`` section. Using cluster data model collector plugins ========================================== The Watcher Decision Engine service will automatically discover any installed plugins when it is restarted. If a Python package containing a custom plugin is installed within the same environment as Watcher, Watcher will automatically make that plugin available for use. At this point, you can use your new cluster data model plugin in your :ref:`strategy plugin ` by using the :py:attr:`~.BaseStrategy.collector_manager` property as followed: .. code-block:: python # [...] dummy_collector = self.collector_manager.get_cluster_model_collector( "dummy") # "dummy" is the name of the entry point we declared earlier dummy_model = dummy_collector.get_latest_cluster_data_model() # Do some stuff with this model python-watcher-4.0.0/doc/source/contributor/plugin/index.rst0000664000175000017500000000031413656752270024302 0ustar zuulzuul00000000000000============ Plugin Guide ============ .. 
toctree:: :maxdepth: 1 base-setup action-plugin cdmc-plugin goal-plugin planner-plugin scoring-engine-plugin strategy-plugin plugins python-watcher-4.0.0/doc/source/contributor/plugin/action-plugin.rst0000664000175000017500000001716413656752270025757 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _implement_action_plugin: ================== Build a new action ================== Watcher Applier has an external :ref:`action ` plugin interface which gives anyone the ability to integrate an external :ref:`action ` in order to extend the initial set of actions Watcher provides. This section gives some guidelines on how to implement and integrate custom actions with Watcher. Creating a new plugin ===================== First of all you have to extend the base :py:class:`BaseAction` class which defines a set of abstract methods and/or properties that you will have to implement: - The :py:attr:`~.BaseAction.schema` is an abstract property that you have to implement. This is the first function to be called by the :ref:`applier ` before any further processing and its role is to validate the input parameters that were provided to it. - The :py:meth:`~.BaseAction.pre_condition` is called before the execution of an action. This method is a hook that can be used to perform some initializations or to make some more advanced validation on its input parameters. If you wish to block the execution based on this factor, you simply have to ``raise`` an exception. - The :py:meth:`~.BaseAction.post_condition` is called after the execution of an action. As this function is called regardless of whether an action succeeded or not, this can prove itself useful to perform cleanup operations. - The :py:meth:`~.BaseAction.execute` is the main component of an action. This is where you should implement the logic of your action. 
- The :py:meth:`~.BaseAction.revert` allows you to roll back the targeted resource to its original state following a faulty execution. Indeed, this method is called by the workflow engine whenever an action raises an exception. Here is an example showing how you can write a plugin called ``DummyAction``: .. code-block:: python # Filepath = /thirdparty/dummy.py # Import path = thirdparty.dummy import voluptuous from watcher.applier.actions import base class DummyAction(base.BaseAction): @property def schema(self): return voluptuous.Schema({}) def execute(self): # Does nothing pass # Only returning False is considered as a failure def revert(self): # Does nothing pass def pre_condition(self): # No pre-checks are done here pass def post_condition(self): # Nothing done here pass This implementation is the most basic one. So in order to get a better understanding on how to implement a more advanced action, have a look at the :py:class:`~watcher.applier.actions.migration.Migrate` class. Input validation ---------------- As you can see in the previous example, we are using `Voluptuous`_ to validate the input parameters of an action. So if you want to learn more about how to work with `Voluptuous`_, you can have a look at their `documentation`_: .. _Voluptuous: https://github.com/alecthomas/voluptuous .. _documentation: https://github.com/alecthomas/voluptuous/blob/master/README.md Define configuration parameters =============================== At this point, you have a fully functional action. However, in more complex implementation, you may want to define some configuration options so one can tune the action to its needs. To do so, you can implement the :py:meth:`~.Loadable.get_config_opts` class method as followed: .. code-block:: python from oslo_config import cfg class DummyAction(base.BaseAction): # [...] 
def execute(self): assert self.config.test_opt == 0 @classmethod def get_config_opts(cls): return super( DummyAction, cls).get_config_opts() + [ cfg.StrOpt('test_opt', help="Demo Option.", default=0), # Some more options ... ] The configuration options defined within this class method will be included within the global ``watcher.conf`` configuration file under a section named by convention: ``{namespace}.{plugin_name}``. In our case, the ``watcher.conf`` configuration would have to be modified as followed: .. code-block:: ini [watcher_actions.dummy] # Option used for testing. test_opt = test_value Then, the configuration options you define within this method will then be injected in each instantiated object via the ``config`` parameter of the :py:meth:`~.BaseAction.__init__` method. Abstract Plugin Class ===================== Here below is the abstract ``BaseAction`` class that every single action should implement: .. autoclass:: watcher.applier.actions.base.BaseAction :members: :special-members: __init__ :noindex: .. py:attribute:: schema Defines a Schema that the input parameters shall comply to :returns: A schema declaring the input parameters this action should be provided along with their respective constraints (e.g. type, value range, ...) :rtype: :py:class:`voluptuous.Schema` instance Register a new entry point ========================== In order for the Watcher Applier to load your new action, the action must be registered as a named entry point under the ``watcher_actions`` entry point of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. The name you give to your entry point has to be unique. Here below is how you would proceed to register ``DummyAction`` using pbr_: .. code-block:: ini [entry_points] watcher_actions = dummy = thirdparty.dummy:DummyAction .. 
_pbr: https://docs.openstack.org/pbr/latest Using action plugins ==================== The Watcher Applier service will automatically discover any installed plugins when it is restarted. If a Python package containing a custom plugin is installed within the same environment as Watcher, Watcher will automatically make that plugin available for use. At this point, you can use your new action plugin in your :ref:`strategy plugin ` if you reference it via the use of the :py:meth:`~.Solution.add_action` method: .. code-block:: python # [...] self.solution.add_action( action_type="dummy", # Name of the entry point we registered earlier applies_to="", input_parameters={}) By doing so, your action will be saved within the Watcher Database, ready to be processed by the planner for creating an action plan which can then be executed by the Watcher Applier via its workflow engine. Lastly, remember to add the action to the weights in ``watcher.conf``, otherwise you will get an error when the action is referenced in a strategy. Scheduling of an action plugin ============================== Watcher provides a basic built-in :ref:`planner ` which is only able to process the Watcher built-in actions. Therefore, you will either have to use an existing third-party planner or :ref:`implement another planner ` that will be able to take into account your new action plugin. Test your new action ==================== In order to test your new action via a manual test or a Tempest test, you can use the :py:class:`~.Actuator` strategy and pass it one or more actions to execute. This way, you can isolate your action to see if it works as expected. python-watcher-4.0.0/doc/source/contributor/plugin/plugins.rst0000664000175000017500000000255413656752270024664 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License.
You can view the license at: https://creativecommons.org/licenses/by/3.0/ ================= Available Plugins ================= In this section we present all the plugins that are shipped along with Watcher. If you want to know which plugins your Watcher services have access to, you can use the :ref:`Guru Meditation Reports ` to display them. .. _watcher_goals: Goals ===== .. list-plugins:: watcher_goals :detailed: .. _watcher_scoring_engines: Scoring Engines =============== .. list-plugins:: watcher_scoring_engines :detailed: .. _watcher_scoring_engine_containers: Scoring Engine Containers ========================= .. list-plugins:: watcher_scoring_engine_containers :detailed: .. _watcher_strategies: Strategies ========== .. list-plugins:: watcher_strategies :detailed: .. _watcher_actions: Actions ======= .. list-plugins:: watcher_actions :detailed: .. _watcher_workflow_engines: Workflow Engines ================ .. list-plugins:: watcher_workflow_engines :detailed: .. _watcher_planners: Planners ======== .. list-plugins:: watcher_planners :detailed: Cluster Data Model Collectors ============================= .. list-plugins:: watcher_cluster_data_model_collectors :detailed: python-watcher-4.0.0/doc/source/contributor/plugin/scoring-engine-plugin.rst0000664000175000017500000001740713656752270027411 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _implement_scoring_engine_plugin: ========================== Build a new scoring engine ========================== Watcher Decision Engine has an external :ref:`scoring engine ` plugin interface which gives anyone the ability to integrate an external scoring engine in order to make use of it in a :ref:`strategy `. This section gives some guidelines on how to implement and integrate custom scoring engines with Watcher. 
If you wish to create a third-party package for your plugin, you can refer to our :ref:`documentation for third-party package creation `. Pre-requisites ============== Because scoring engines execute purely mathematical tasks, they typically do not have any additional dependencies. Additional requirements might be defined by specific scoring engine implementations. For example, some scoring engines might require preparing learning data, which has to be loaded during the scoring engine startup. Some others might require some external services to be available (e.g. if the scoring infrastructure is running in the cloud). Create a new scoring engine plugin ================================== In order to create a new scoring engine you have to: - Extend the :py:class:`watcher.decision_engine.scoring.base.ScoringEngine` class - Implement its :py:meth:`~.ScoringEngine.get_name` method to return the **unique** ID of the new scoring engine you want to create. This unique ID should be the same as the name of :ref:`the entry point we will declare later on `. - Implement its :py:meth:`~.ScoringEngine.get_description` method to return the user-friendly description of the implemented scoring engine. It might contain information about the algorithm used, learning data etc. - Implement its :py:meth:`~.ScoringEngine.get_metainfo` method to return the machine-friendly metadata about this scoring engine. For example, it could be a JSON formatted text with information about the data model used, its input and output data format, column names, etc. - Implement its :py:meth:`~.ScoringEngine.calculate_score` method to return the result calculated by this scoring engine. Here is an example showing how you can write a plugin called ``NewScorer``: ..
code-block:: python # filepath: thirdparty/new.py # import path: thirdparty.new from watcher.decision_engine.scoring import base class NewScorer(base.ScoringEngine): def get_name(self): return 'new_scorer' def get_description(self): return '' def get_metainfo(self): return """{ "feature_columns": [ "column1", "column2", "column3"], "result_columns": [ "value", "probability"] }""" def calculate_score(self, features): return '[12, 0.83]' As you can see in the above example, the :py:meth:`~.ScoringEngine.calculate_score` method returns a string. Both this class and the client (caller) should perform all the necessary serialization or deserialization. (Optional) Create a new scoring engine container plugin ======================================================= Optionally, it's possible to implement a container plugin, which can return a list of scoring engines. This list can be re-evaluated multiple times during the lifecycle of :ref:`Watcher Decision Engine ` and synchronized with :ref:`Watcher Database ` using the ``watcher-sync`` command line tool. Below is an example of a container using some scoring engine implementation that is simply made of a client responsible for communicating with a real scoring engine deployed as a web service on external servers: .. code-block:: python class NewScoringContainer(base.ScoringEngineContainer): @classmethod def get_scoring_engine_list(self): return [ RemoteScoringEngine( name='scoring_engine1', description='Some remote Scoring Engine 1', remote_url='http://engine1.example.com/score'), RemoteScoringEngine( name='scoring_engine2', description='Some remote Scoring Engine 2', remote_url='http://engine2.example.com/score'), ] Abstract Plugin Class ===================== Here below is the abstract :py:class:`watcher.decision_engine.scoring.base.ScoringEngine` class: .. 
autoclass:: watcher.decision_engine.scoring.base.ScoringEngine :members: :special-members: __init__ :noindex: Abstract Plugin Container Class =============================== Here below is the abstract :py:class:`~.ScoringContainer` class: .. autoclass:: watcher.decision_engine.scoring.base.ScoringEngineContainer :members: :special-members: __init__ :noindex: .. _scoring_engine_plugin_add_entrypoint: Add a new entry point ===================== In order for the Watcher Decision Engine to load your new scoring engine, it must be registered as a named entry point under the ``watcher_scoring_engines`` entry point of your ``setup.py`` file. If you are using pbr_, this entry point should be placed in your ``setup.cfg`` file. The name you give to your entry point has to be unique and should be the same as the value returned by the :py:meth:`~.ScoringEngine.get_name` method of your strategy. Here below is how you would proceed to register ``NewScorer`` using pbr_: .. code-block:: ini [entry_points] watcher_scoring_engines = new_scorer = thirdparty.new:NewScorer To get a better understanding on how to implement a more advanced scoring engine, have a look at the :py:class:`~.DummyScorer` class. This implementation is not really using machine learning, but other than that it contains all the pieces which the "real" implementation would have. In addition, for some use cases there is a need to register a list (possibly dynamic, depending on the implementation and configuration) of scoring engines in a single plugin, so there is no need to restart :ref:`Watcher Decision Engine ` every time such list changes. For these cases, an additional ``watcher_scoring_engine_containers`` entry point can be used. For the example how to use scoring engine containers, please have a look at the :py:class:`~.DummyScoringContainer` and the way it is configured in ``setup.cfg``. For new containers it could be done like this: .. 
code-block:: ini [entry_points] watcher_scoring_engine_containers = new_scoring_container = thirdparty.new:NewContainer .. _pbr: https://docs.openstack.org/pbr/latest/ Using scoring engine plugins ============================ The Watcher Decision Engine service will automatically discover any installed plugins when it is restarted. If a Python package containing a custom plugin is installed within the same environment as Watcher, Watcher will automatically make that plugin available for use. At this point, Watcher will scan and register inside the :ref:`Watcher Database ` all the scoring engines you implemented upon restarting the :ref:`Watcher Decision Engine `. In addition, ``watcher-sync`` tool can be used to trigger :ref:`Watcher Database ` synchronization. This might be used for "dynamic" scoring containers, which can return different scoring engines based on some external configuration (if they support that). python-watcher-4.0.0/doc/source/contributor/testing.rst0000664000175000017500000000257413656752270023364 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ ================= Developer Testing ================= .. _unit_tests: Unit tests ========== All unit tests should be run using `tox`_. Before running the unit tests, you should download the latest `watcher`_ from the github. To run the same unit tests that are executing onto `Gerrit`_ which includes ``py36``, ``py37`` and ``pep8``, you can issue the following command:: $ git clone https://opendev.org/openstack/watcher $ cd watcher $ pip install tox $ tox If you only want to run one of the aforementioned, you can then issue one of the following:: $ tox -e py36 $ tox -e py37 $ tox -e pep8 .. _tox: https://tox.readthedocs.org/ .. _watcher: https://opendev.org/openstack/watcher .. 
_Gerrit: https://review.opendev.org/ If you only want to run specific unit test code and don't like to waste time waiting for all unit tests to execute, you can add parameters ``--`` followed by a regex string:: $ tox -e py37 -- watcher.tests.api .. _tempest_tests: Tempest tests ============= Tempest tests for Watcher have been migrated to the external repo `watcher-tempest-plugin`_. .. _watcher-tempest-plugin: https://opendev.org/openstack/watcher-tempest-plugin python-watcher-4.0.0/doc/source/contributor/contributing.rst0000664000175000017500000001116013656752270024405 0ustar zuulzuul00000000000000============================ So You Want to Contribute... ============================ For general information on contributing to OpenStack, please check out the `contributor guide `_ to get started. It covers all the basics that are common to all OpenStack projects: the accounts you need, the basics of interacting with our Gerrit review system, how we communicate as a community, etc. Below we will cover the more project-specific information you need to get started with Watcher. Communication ~~~~~~~~~~~~~~ .. This would be a good place to put the channel you chat in as a project; when/ where your meeting is, the tags you prepend to your ML threads, etc. IRC Channel ``#openstack-watcher`` (changelog_) Mailing list (prefix subjects with ``[watcher]``) http://lists.openstack.org/pipermail/openstack-discuss/ Weekly Meetings Bi-weekly, on Wednesdays at 08:00 UTC on odd weeks in the ``#openstack-meeting-alt`` IRC channel (`meetings logs`_) Meeting Agenda https://wiki.openstack.org/wiki/Watcher_Meeting_Agenda .. _changelog: http://eavesdrop.openstack.org/irclogs/%23openstack-watcher/ .. _meetings logs: http://eavesdrop.openstack.org/meetings/watcher/ Contacting the Core Team ~~~~~~~~~~~~~~~~~~~~~~~~~ .. This section should list the core team, their irc nicks, emails, timezones etc. If all this info is maintained elsewhere (i.e.
a wiki), you can link to that instead of enumerating everyone here. +--------------------+---------------+------------------------------------+ | Name | IRC | Email | +====================+===============+====================================+ | `Li Canwei`_ | licanwei | li.canwei2@zte.com.cn | +--------------------+---------------+------------------------------------+ | `chen ke`_ | chenke | chen.ke14@zte.com.cn | +--------------------+---------------+------------------------------------+ | `Corne Lukken`_ | dantalion | info@dantalion.nl | +--------------------+---------------+------------------------------------+ | `su zhengwei`_ | suzhengwei | sugar-2008@163.com | +--------------------+---------------+------------------------------------+ | `Yumeng Bao`_ | Yumeng | yumeng_bao@yahoo.com | +--------------------+---------------+------------------------------------+ .. _Corne Lukken: https://launchpad.net/~dantalion .. _Li Canwei: https://launchpad.net/~li-canwei2 .. _su zhengwei: https://launchpad.net/~sue.sam .. _Yumeng Bao: https://launchpad.net/~yumeng-bao .. _chen ke: https://launchpad.net/~chenker New Feature Planning ~~~~~~~~~~~~~~~~~~~~ .. This section is for talking about the process to get a new feature in. Some projects use blueprints, some want specs, some want both! Some projects stick to a strict schedule when selecting what new features will be reviewed for a release. New feature will be discussed via IRC or ML (with [Watcher] prefix). Watcher team uses blueprints in `Launchpad`_ to manage the new features. .. _Launchpad: https://launchpad.net/watcher Task Tracking ~~~~~~~~~~~~~~ .. This section is about where you track tasks- launchpad? storyboard? is there more than one launchpad project? what's the name of the project group in storyboard? We track our tasks in Launchpad. If you're looking for some smaller, easier work item to pick up and get started on, search for the 'low-hanging-fruit' tag. .. 
NOTE: If your tag is not 'low-hanging-fruit' please change the text above. Reporting a Bug ~~~~~~~~~~~~~~~ .. Pretty self explanatory section, link directly to where people should report bugs for your project. You found an issue and want to make sure we are aware of it? You can do so `HERE`_. .. _HERE: https://bugs.launchpad.net/watcher Getting Your Patch Merged ~~~~~~~~~~~~~~~~~~~~~~~~~ .. This section should have info about what it takes to get something merged. Do you require one or two +2's before +W? Do some of your repos require unit test changes with all patches? etc. Due to the small number of core reviewers of the Watcher project, we only need one +2 before +W (merge). All patches, except for documentation or typo fixes, must have unit tests. Project Team Lead Duties ------------------------ .. this section is where you can put PTL specific duties not already listed in the common PTL guide (linked below) or if you already have them written up elsewhere, you can link to that doc here. All common PTL duties are enumerated here in the `PTL guide `_. python-watcher-4.0.0/doc/source/contributor/concurrency.rst0000664000175000017500000002632413656752270024240 0ustar zuulzuul00000000000000=========== Concurrency =========== Introduction ************ Modern processors typically contain multiple cores all capable of executing instructions in parallel. Ensuring applications can fully utilize modern underlying hardware requires developing with these concepts in mind. The OpenStack foundation maintains a number of libraries to facilitate this utilization; combined with constructs like CPython's GIL_, the proper use of these concepts becomes more straightforward compared to other programming languages. The primary libraries maintained by OpenStack to facilitate concurrency are futurist_ and taskflow_. Here futurist is a more straightforward and lightweight library while taskflow is more advanced, supporting features like rollback mechanisms.
Within Watcher both libraries are used to facilitate concurrency. .. _GIL: https://wiki.python.org/moin/GlobalInterpreterLock .. _futurist: https://docs.openstack.org/futurist/latest/ .. _taskflow: https://docs.openstack.org/taskflow/latest/ Threadpool ********** A threadpool is a collection of one or more threads typically called *workers* to which tasks can be submitted. These submitted tasks will be scheduled by a threadpool and subsequently executed. In the case of Python tasks typically are bounded or unbounded methods while other programming languages like Java require implementing an interface. The order and amount of concurrency with which these tasks are executed is up to the threadpool to decide. Some libraries like taskflow allow for either strong or loose ordering of tasks while others like futurist might only support loose ordering. Taskflow supports building tree-based hierarchies of dependent tasks for example. Upon submission of a task to a threadpool a so called future_ is returned. These objects allow to determine information about the task such as if it is currently being executed or if it has finished execution. When the task has finished execution the future can also be used to retrieve what was returned by the method. Some libraries like futurist provide synchronization primitives for collections of futures such as wait_for_any_. The following sections will cover different types of concurrency used in various services of Watcher. .. _future: https://docs.python.org/3/library/concurrent.futures.html .. _wait_for_any: https://docs.openstack.org/futurist/latest/reference/index.html#waiters Decision engine concurrency *************************** The concurrency in the decision engine is governed by two independent threadpools. Both of these threadpools are GreenThreadPoolExecutor_ from the futurist_ library. One of these is used automatically and most contributors will not interact with it while developing new features. 
The other threadpool can frequently be used while developing new features or updating existing ones. It is known as the DecisionEngineThreadpool and allows achieving performance improvements in network or I/O bound operations. .. _GreenThreadPoolExecutor: https://docs.openstack.org/futurist/latest/reference/index.html#executors AuditEndpoint ############# The first threadpool is used to allow multiple audits to be run in parallel. In practice, however, only one audit can run at a time. This is due to the data model used by audits being a singleton. To prevent audits destroying each other's data model one must wait for the other to complete before being allowed to access this data model. A performance improvement could be achieved by being more intelligent in the use, caching and construction of these data models. DecisionEngineThreadPool ######################## The second threadpool is used for generic tasks; typically networking and I/O could benefit the most from this threadpool. Upon execution of an audit this threadpool can be utilized to retrieve information from the Nova compute service for instance. This second threadpool is a singleton and is shared amongst concurrently running audits; as a result the number of workers is static and independent from the number of workers in the first threadpool. The use of the :class:`~.DecisionEngineThreadpool` while building the Nova compute data model is demonstrated to show how it can effectively be used. In the following example a reference to the :class:`~.DecisionEngineThreadpool` is stored in ``self.executor``. Here two tasks are submitted: one with the function ``self._collect_aggregates`` and the other with the function ``self._collect_zones``. With both ``self.executor.submit`` calls subsequent arguments are passed to the function. All subsequent arguments are passed to the function being submitted as a task following the common ``(fn, *args, **kwargs)`` signature.
One of the original signatures would be ``def _collect_aggregates(host_aggregates, compute_nodes)`` for example. .. code-block:: python zone_aggregate_futures = { self.executor.submit( self._collect_aggregates, host_aggregates, compute_nodes), self.executor.submit( self._collect_zones, availability_zones, compute_nodes) } waiters.wait_for_all(zone_aggregate_futures) The last statement of the example above waits on all futures to complete. Similarly, ``waiters.wait_for_any`` will wait for any future of the specified collection to complete. To simplify the usage of ``wait_for_any`` the :class:`~.DecisionEngineThreadpool` defines a ``do_while_futures`` method. This method will iterate in a do_while loop over a collection of futures until all of them have completed. The advantage of ``do_while_futures`` is that it allows immediately calling a method as soon as a future finishes. The arguments for this callback method can be supplied when calling ``do_while_futures``, however, the first argument to the callback is always the future itself! If the collection of futures can safely be modified ``do_while_futures_modify`` can be used and should have slightly better performance. The following example will show how ``do_while_futures`` is used in the decision engine. .. code-block:: python # For every compute node from compute_nodes submit a task to gather the node's information. # List comprehension is used to store all the futures of the submitted tasks in node_futures. node_futures = [self.executor.submit( self.nova_helper.get_compute_node_by_name, node, servers=True, detailed=True) for node in compute_nodes] LOG.debug("submitted {0} jobs".format(len(compute_nodes))) future_instances = [] # do_while iterates over node_futures and upon completion of a future calls # self._compute_node_future with the future and future_instances as arguments.
self.executor.do_while_futures_modify( node_futures, self._compute_node_future, future_instances) # Wait for all instance jobs to finish waiters.wait_for_all(future_instances) Finally, let's demonstrate how powerful this ``do_while_futures`` can be by showing what the ``compute_node_future`` callback does. First, it retrieves the result from the future and adds the compute node to the data model. Afterwards, it checks if the compute node has any associated instances and if so it submits an additional task to the :class:`~.DecisionEngineThreadpool`. The future is appended to the ``future_instances`` so ``waiters.wait_for_all`` can be called on this list. This is important as otherwise the building of the data model might return before all tasks for instances have finished. .. code-block:: python # Get the result from the future. node_info = future.result()[0] # Filter out baremetal nodes. if node_info.hypervisor_type == 'ironic': LOG.debug("filtering out baremetal node: %s", node_info) return # Add the compute node to the data model. self.add_compute_node(node_info) # Get the instances from the compute node. instances = getattr(node_info, "servers", None) # Do not submit job if there are no instances on compute node. if instances is None: LOG.info("No instances on compute_node: {0}".format(node_info)) return # Submit a job to retrieve detailed information about the instances. future_instances.append( self.executor.submit( self.add_instance_node, node_info, instances) ) Without ``do_while_futures`` an additional ``waiters.wait_for_all`` would be required in between the compute node tasks and the instance tasks. This would cause the progress of the decision engine to stall as less and less tasks remain active before the instance tasks could be submitted. This demonstrates how ``do_while_futures`` can be used to achieve more constant utilization of the underlying hardware. 
Applier concurrency ******************* The applier does not use the futurist_ GreenThreadPoolExecutor_ directly but instead uses taskflow_. However, taskflow still utilizes a greenthreadpool. This threadpool is initialized in the workflow engine called :class:`~.DefaultWorkFlowEngine`. Currently Watcher supports one workflow engine but the base class allows contributors to develop other workflow engines as well. In taskflow tasks are created using different types of flows such as a linear, unordered or a graph flow. The linear and graph flow allow for strong ordering between individual tasks and it is for this reason that the workflow engine utilizes a graph flow. The creation of tasks, subsequently linking them into a graph-like structure and submitting them is shown below. .. code-block:: python self.execution_rule = self.get_execution_rule(actions) flow = gf.Flow("watcher_flow") actions_uuid = {} for a in actions: task = TaskFlowActionContainer(a, self) flow.add(task) actions_uuid[a.uuid] = task for a in actions: for parent_id in a.parents: flow.link(actions_uuid[parent_id], actions_uuid[a.uuid], decider=self.decider) e = engines.load( flow, executor='greenthreaded', engine='parallel', max_workers=self.config.max_workers) e.run() return flow In the applier tasks are contained in a :class:`~.TaskFlowActionContainer` which allows them to trigger events in the workflow engine. This way the workflow engine can halt or take other actions while the action plan is being executed based on the success or failure of individual actions. However, the base workflow engine simply uses these notifications to store the result of individual actions in the database. Additionally, since taskflow uses a graph flow, if any of the tasks fails, none of this task's children will be executed, while ``do_revert`` will be triggered for all parents. .. code-block:: python class TaskFlowActionContainer(...): ... def do_execute(self, *args, **kwargs): ...
result = self.action.execute() if result is True: return self.engine.notify(self._db_action, objects.action.State.SUCCEEDED) else: self.engine.notify(self._db_action, objects.action.State.FAILED) class BaseWorkFlowEngine(...): ... def notify(self, action, state): db_action = objects.Action.get_by_uuid(self.context, action.uuid, eager=True) db_action.state = state db_action.save() return db_action python-watcher-4.0.0/doc/source/contributor/index.rst0000664000175000017500000000023413656752270023005 0ustar zuulzuul00000000000000================== Contribution Guide ================== .. toctree:: :maxdepth: 2 contributing environment devstack testing rally_link python-watcher-4.0.0/doc/source/contributor/devstack.rst0000664000175000017500000002431213656752270023505 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ ============================================= Set up a development environment via DevStack ============================================= Watcher is currently able to optimize compute resources - specifically Nova compute hosts - via operations such as live migrations. In order for you to fully be able to exercise what Watcher can do, it is necessary to have a multinode environment to use. You can set up the Watcher services quickly and easily using a Watcher DevStack plugin. See `PluginModelDocs`_ for information on DevStack's plugin model. To enable the Watcher plugin with DevStack, add the following to the `[[local|localrc]]` section of your controller's `local.conf` to enable the Watcher plugin:: enable_plugin watcher https://opendev.org/openstack/watcher For more detailed instructions, see `Detailed DevStack Instructions`_. Check out the `DevStack documentation`_ for more information regarding DevStack. .. _PluginModelDocs: https://docs.openstack.org/devstack/latest/plugins.html .. 
_DevStack documentation: https://docs.openstack.org/devstack/latest Quick Devstack Instructions with Datasources ============================================ Watcher requires a datasource to collect metrics from compute nodes and instances in order to execute most strategies. To enable this, a `[[local|localrc]]` to set up DevStack for some of the supported datasources is provided. These examples specify the minimal configuration parameters to get both Watcher and the datasource working but can be expanded as desired. Gnocchi ------- With the Gnocchi datasource most of the metrics for compute nodes and instances will work with the provided configuration but metrics that require Ironic such as `host_airflow` and `host_power` will still be unavailable as well as `instance_l3_cpu_cache`:: [[local|localrc]] enable_plugin watcher https://opendev.org/openstack/watcher enable_plugin ceilometer https://opendev.org/openstack/ceilometer.git CEILOMETER_BACKEND=gnocchi enable_plugin aodh https://opendev.org/openstack/aodh enable_plugin panko https://opendev.org/openstack/panko [[post-config|$NOVA_CONF]] [DEFAULT] compute_monitors=cpu.virt_driver Detailed DevStack Instructions ============================== #. Obtain N (where N >= 1) servers (virtual machines preferred for DevStack). One of these servers will be the controller node while the others will be compute nodes. N is preferably >= 3 so that you have at least 2 compute nodes, but in order to stand up the Watcher services only 1 server is needed (i.e., no computes are needed if you want to just experiment with the Watcher services). These servers can be VMs running on your local machine via VirtualBox if you prefer. DevStack currently recommends that you use Ubuntu 16.04 LTS. The servers should also have connections to the same network such that they are all able to communicate with one another. #.
For each server, clone the DevStack repository and create the stack user:: sudo apt-get update sudo apt-get install git git clone https://opendev.org/openstack/devstack.git sudo ./devstack/tools/create-stack-user.sh Now you have a stack user that is used to run the DevStack processes. You may want to give your stack user a password to allow SSH via a password:: sudo passwd stack #. Switch to the stack user and clone the DevStack repo again:: sudo su stack cd ~ git clone https://opendev.org/openstack/devstack.git #. For each compute node, copy the provided `local.conf.compute`_ example file to the compute node's system at ~/devstack/local.conf. Make sure the HOST_IP and SERVICE_HOST values are changed appropriately - i.e., HOST_IP is set to the IP address of the compute node and SERVICE_HOST is set to the IP address of the controller node. If you need specific metrics collected (or want to use something other than Ceilometer), be sure to configure it. For example, in the `local.conf.compute`_ example file, the appropriate ceilometer plugins and services are enabled and disabled. If you were using something other than Ceilometer, then you would likely want to configure it likewise. The example file also sets the compute monitors nova configuration option to use the CPU virt driver. If you needed other metrics, it may be necessary to configure similar configuration options for the projects providing those metrics. #. For the controller node, copy the provided `local.conf.controller`_ example file to the controller node's system at ~/devstack/local.conf. Make sure the HOST_IP value is changed appropriately - i.e., HOST_IP is set to the IP address of the controller node. 
Note: if you want to use another Watcher git repository (such as a local one), then change the enable plugin line:: enable_plugin watcher [optional_branch] If you do this, then the Watcher DevStack plugin will try to pull the python-watcherclient repo from /../, so either make sure that is also available or specify WATCHERCLIENT_REPO in the local.conf file. Note: if you want to use a specific branch, specify WATCHER_BRANCH in the local.conf file. By default it will use the master branch. Note: watcher-api will by default run under apache/httpd, set the variable WATCHER_USE_MOD_WSGI=FALSE if you do not wish to run under apache/httpd. For a development environment it is suggested to set WATCHER_USE_MOD_WSGI to FALSE. For a production environment it is suggested to keep it at the default TRUE value. #. Start stacking from the controller node:: ./devstack/stack.sh #. Start stacking on each of the compute nodes using the same command. #. Configure the environment for live migration via NFS. See the `Multi-Node DevStack Environment`_ section for more details. .. _local.conf.controller: https://github.com/openstack/watcher/tree/master/devstack/local.conf.controller .. _local.conf.compute: https://github.com/openstack/watcher/tree/master/devstack/local.conf.compute Multi-Node DevStack Environment =============================== Since deploying Watcher with only a single compute node is not very useful, a few tips are given here for enabling a multi-node environment with live migration.
Configuring NFS Server ---------------------- If you would like to use live migration for shared storage, then the controller can serve as the NFS server if needed:: sudo apt-get install nfs-kernel-server sudo mkdir -p /nfs/instances sudo chown stack:stack /nfs/instances Add an entry to `/etc/exports` with the appropriate gateway and netmask information:: /nfs/instances /(rw,fsid=0,insecure,no_subtree_check,async,no_root_squash) Export the NFS directories:: sudo exportfs -ra Make sure the NFS server is running:: sudo service nfs-kernel-server status If the server is not running, then start it:: sudo service nfs-kernel-server start Configuring NFS on Compute Node ------------------------------- Each compute node needs to use the NFS server to hold the instance data:: sudo apt-get install rpcbind nfs-common mkdir -p /opt/stack/data/instances sudo mount :/nfs/instances /opt/stack/data/instances If you would like to have the NFS directory automatically mounted on reboot, then add the following to `/etc/fstab`:: :/nfs/instances /opt/stack/data/instances nfs auto 0 0 Edit `/etc/libvirt/libvirtd.conf` to make sure the following values are set:: listen_tls = 0 listen_tcp = 1 auth_tcp = "none" Edit `/etc/default/libvirt-bin`:: libvirtd_opts="-d -l" Restart the libvirt service:: sudo service libvirt-bin restart Setting up SSH keys between compute nodes to enable live migration ------------------------------------------------------------------ In order for live migration to work, SSH keys need to be exchanged between each compute node: 1. The SOURCE root user's public RSA key (likely in /root/.ssh/id_rsa.pub) needs to be in the DESTINATION stack user's authorized_keys file (~stack/.ssh/authorized_keys). This can be accomplished by manually copying the contents from the file on the SOURCE to the DESTINATION. 
If you have a password configured for the stack user, then you can use the following command to accomplish the same thing:: ssh-copy-id -i /root/.ssh/id_rsa.pub stack@DESTINATION 2. The DESTINATION host's public ECDSA key (/etc/ssh/ssh_host_ecdsa_key.pub) needs to be in the SOURCE root user's known_hosts file (/root/.ssh/known_hosts). This can be accomplished by running the following on the SOURCE machine (hostname must be used):: ssh-keyscan -H DEST_HOSTNAME | sudo tee -a /root/.ssh/known_hosts In essence, this means that every compute node's root user's public RSA key must exist in every other compute node's stack user's authorized_keys file and every compute node's public ECDSA key needs to be in every other compute node's root user's known_hosts file. Disable serial console ---------------------- Serial console needs to be disabled for live migration to work. On both the controller and compute node, in /etc/nova/nova.conf [serial_console] enabled = False Alternatively, in devstack's local.conf: [[post-config|$NOVA_CONF]] [serial_console] #enabled=false VNC server configuration ------------------------ The VNC server listening parameter needs to be set to any address so that the server can accept connections from all of the compute nodes. On both the controller and compute node, in /etc/nova/nova.conf vncserver_listen = 0.0.0.0 Alternatively, in devstack's local.conf: VNCSERVER_LISTEN=0.0.0.0 Environment final checkup ------------------------- If you are willing to make sure everything is in order in your DevStack environment, you can run the Watcher Tempest tests which will validate its API but also that you can perform the typical Watcher workflows. To do so, have a look at the :ref:`Tempest tests ` section which will explain to you how to run them. python-watcher-4.0.0/doc/source/glossary.rst0000664000175000017500000002743013656752270021176 0ustar zuulzuul00000000000000.. 
Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ ======== Glossary ======== .. _glossary: :sorted: This page explains the different terms used in the Watcher system. They are sorted in alphabetical order. .. _action_definition: Action ====== .. watcher-term:: watcher.api.controllers.v1.action .. _action_plan_definition: Action Plan =========== .. watcher-term:: watcher.api.controllers.v1.action_plan .. _administrator_definition: Administrator ============= The :ref:`Administrator ` is any user who has admin access on the OpenStack cluster. This user is allowed to create new projects for tenants, create new users and assign roles to each user. The :ref:`Administrator ` usually has remote access to any host of the cluster in order to change the configuration and restart any OpenStack service, including Watcher. In the context of Watcher, the :ref:`Administrator ` is a role for users which allows them to run any Watcher commands, such as: - Create/Delete an :ref:`Audit Template ` - Launch an :ref:`Audit ` - Get the :ref:`Action Plan ` - Launch a recommended :ref:`Action Plan ` manually - Archive previous :ref:`Audits ` and :ref:`Action Plans ` The :ref:`Administrator ` is also allowed to modify any Watcher configuration files and to restart Watcher services. .. _audit_definition: Audit ===== .. watcher-term:: watcher.api.controllers.v1.audit .. _audit_template_definition: Audit Scope =========== An Audit Scope is a set of audited resources. Audit Scope should be defined in each Audit Template (which contains the Audit settings). .. _audit_scope_definition: Audit Template ============== .. watcher-term:: watcher.api.controllers.v1.audit_template .. _availability_zone_definition: Availability Zone ================= Please, read `the official OpenStack definition of an Availability Zone `_. .. 
_cluster_definition: Cluster ======= A :ref:`Cluster ` is a set of physical machines which provide compute, storage and networking resources and are managed by the same OpenStack Controller node. A :ref:`Cluster ` represents a set of resources that a cloud provider is able to offer to his/her :ref:`customers `. A data center may contain several clusters. The :ref:`Cluster ` may be divided in one or several :ref:`Availability Zone(s) `. .. _cluster_data_model_definition: Cluster Data Model (CDM) ======================== .. watcher-term:: watcher.decision_engine.model.collector.base .. _controller_node_definition: Controller Node =============== Please, read `the official OpenStack definition of a Controller Node `_. In many configurations, Watcher will reside on a controller node even if it can potentially be hosted on a dedicated machine. .. _compute_node_definition: Compute node ============ Please, read `the official OpenStack definition of a Compute Node `_. .. _customer_definition: Customer ======== A :ref:`Customer ` is the person or company which subscribes to the cloud provider offering. A customer may have several :ref:`Project(s) ` hosted on the same :ref:`Cluster ` or dispatched on different clusters. In the private cloud context, the :ref:`Customers ` are different groups within the same organization (different departments, project teams, branch offices and so on). Cloud infrastructure includes the ability to precisely track each customer's service usage so that it can be charged back to them, or at least reported to them. .. _goal_definition: Goal ==== .. watcher-term:: watcher.api.controllers.v1.goal .. _host_aggregates_definition: Host Aggregate ============== Please, read `the official OpenStack definition of a Host Aggregate `_. .. _instance_definition: Instance ======== A running virtual machine, or a virtual machine in a known state such as suspended, that can be used like a hardware server. .. 
_managed_resource_definition: Managed resource ================ A :ref:`Managed resource ` is one instance of :ref:`Managed resource type ` in a topology with particular properties and dependencies on other :ref:`Managed resources ` (relationships). For example, a :ref:`Managed resource ` can be one virtual machine (i.e., an :ref:`instance `) hosted on a :ref:`compute node ` and connected to another virtual machine through a network link (represented also as a :ref:`Managed resource ` in the :ref:`Cluster Data Model `). .. _managed_resource_type_definition: Managed resource type ===================== A :ref:`Managed resource type ` is a type of hardware or software element of the :ref:`Cluster ` that the Watcher system can act on. Here are some examples of :ref:`Managed resource types `: - `Nova Host Aggregates `_ - `Nova Servers `_ - `Cinder Volumes `_ - `Neutron Routers `_ - `Neutron Networks `_ - `Neutron load-balancers `_ - `Sahara Hadoop Cluster `_ - ... It can be any of `the official list of available resource types defined in OpenStack for HEAT `_. .. _efficacy_indicator_definition: Efficacy Indicator ================== .. watcher-term:: watcher.api.controllers.v1.efficacy_indicator .. _efficacy_specification_definition: Efficacy Specification ====================== .. watcher-term:: watcher.decision_engine.goal.efficacy.base .. _efficacy_definition: Optimization Efficacy ===================== The :ref:`Optimization Efficacy ` is the objective measure of how much of the :ref:`Goal ` has been achieved in respect with constraints and :ref:`SLAs ` defined by the :ref:`Customer `. The way efficacy is evaluated will depend on the :ref:`Goal ` to achieve. Of course, the efficacy will be relevant only as long as the :ref:`Action Plan ` is relevant (i.e., the current state of the :ref:`Cluster ` has not changed in a way that a new :ref:`Audit ` would need to be launched). 
For example, if the :ref:`Goal ` is to lower the energy consumption, the :ref:`Efficacy ` will be computed using several :ref:`efficacy indicators ` (KPIs): - the percentage of energy gain (which must be the highest possible) - the number of :ref:`SLA violations ` (which must be the lowest possible) - the number of virtual machine migrations (which must be the lowest possible) All those indicators are computed within a given timeframe, which is the time taken to execute the whole :ref:`Action Plan `. The efficacy also enables the :ref:`Administrator ` to objectively compare different :ref:`Strategies ` for the same goal and same workload of the :ref:`Cluster `. .. _project_definition: Project ======= :ref:`Projects ` represent the base unit of "ownership" in OpenStack, in that all :ref:`resources ` in OpenStack should be owned by a specific :ref:`project `. In OpenStack Identity, a :ref:`project ` must be owned by a specific domain. Please, read `the official OpenStack definition of a Project `_. .. _scoring_engine_definition: Scoring Engine ============== .. watcher-term:: watcher.api.controllers.v1.scoring_engine .. _sla_definition: SLA === :ref:`SLA ` means Service Level Agreement. The resources are negotiated between the :ref:`Customer ` and the Cloud Provider in a contract. Most of the time, this contract is composed of two documents: - :ref:`SLA ` : Service Level Agreement - :ref:`SLO ` : Service Level Objectives Note that the :ref:`SLA ` is more general than the :ref:`SLO ` in the sense that the former specifies what service is to be provided, how it is supported, times, locations, costs, performance, and responsibilities of the parties involved while the :ref:`SLO ` focuses on more measurable characteristics such as availability, throughput, frequency, response time or quality. You can also read `the Wikipedia page for SLA `_ which provides a good definition. .. 
_sla_violation_definition: SLA violation ============= A :ref:`SLA violation ` happens when a :ref:`SLA ` defined with a given :ref:`Customer ` could not be respected by the cloud provider within the timeframe defined by the official contract document. .. _slo_definition: SLO === A Service Level Objective (SLO) is a key element of a :ref:`SLA ` between a service provider and a :ref:`Customer `. SLOs are agreed as a means of measuring the performance of the Service Provider and are outlined as a way of avoiding disputes between the two parties based on misunderstanding. You can also read `the Wikipedia page for SLO `_ which provides a good definition. .. _solution_definition: Solution ======== .. watcher-term:: watcher.decision_engine.solution.base .. _strategy_definition: Strategy ======== .. watcher-term:: watcher.api.controllers.v1.strategy .. _watcher_applier_definition: Watcher Applier =============== .. watcher-term:: watcher.applier.base .. _watcher_database_definition: Watcher Database ================ This database stores all the Watcher domain objects which can be requested by the Watcher API or the Watcher CLI: - Audit templates - Audits - Action plans - Actions - Goals The Watcher domain being here "*optimization of some resources provided by an OpenStack system*". See :doc:`architecture` for more details on this component. .. _watcher_decision_engine_definition: Watcher Decision Engine ======================= .. watcher-term:: watcher.decision_engine.manager .. _watcher_planner_definition: Watcher Planner =============== .. watcher-term:: watcher.decision_engine.planner.base python-watcher-4.0.0/doc/source/admin/0000775000175000017500000000000013656752352017664 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/admin/gmr.rst0000664000175000017500000000324013656752270021201 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. 
You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _watcher_gmr: ======================= Guru Meditation Reports ======================= Watcher contains a mechanism whereby developers and system administrators can generate a report about the state of a running Watcher service. This report is called a *Guru Meditation Report* (*GMR* for short). Generating a GMR ================ A *GMR* can be generated by sending the *USR2* signal to any Watcher process with support (see below). The *GMR* will then be outputted as standard error for that particular process. For example, suppose that ``watcher-api`` has process id ``8675``, and was run with ``2>/var/log/watcher/watcher-api-err.log``. Then, ``kill -USR2 8675`` will trigger the Guru Meditation report to be printed to ``/var/log/watcher/watcher-api-err.log``. Structure of a GMR ================== The *GMR* is designed to be extensible; any particular service may add its own sections. However, the base *GMR* consists of several sections: Package Shows information about the package to which this process belongs, including version informations. Threads Shows stack traces and thread ids for each of the threads within this process. Green Threads Shows stack traces for each of the green threads within this process (green threads don't have thread ids). Configuration Lists all the configuration options currently accessible via the CONF object for the current process. Plugins Lists all the plugins currently accessible by the Watcher service. python-watcher-4.0.0/doc/source/admin/apache-mod-wsgi.rst0000664000175000017500000000260713656752270023367 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ Installing API behind mod_wsgi ============================== #. 
Install the Apache Service:: Fedora 21/RHEL7/CentOS7: sudo yum install httpd Fedora 22 (or higher): sudo dnf install httpd Debian/Ubuntu: apt-get install apache2 #. Copy ``etc/apache2/watcher.conf`` under the apache sites:: Fedora/RHEL7/CentOS7: sudo cp etc/apache2/watcher /etc/httpd/conf.d/watcher.conf Debian/Ubuntu: sudo cp etc/apache2/watcher /etc/apache2/sites-available/watcher.conf #. Edit ``/watcher.conf`` according to installation and environment. * Modify the ``WSGIDaemonProcess`` directive to set the ``user`` and ``group`` values to appropriate user on your server. * Modify the ``WSGIScriptAlias`` directive to point to the watcher/api/app.wsgi script. * Modify the ``Directory`` directive to set the path to the Watcher API code. * Modify the ``ErrorLog and CustomLog`` to redirect the logs to the right directory. #. Enable the apache watcher site and reload:: Fedora/RHEL7/CentOS7: sudo systemctl reload httpd Debian/Ubuntu: sudo a2ensite watcher sudo service apache2 reload python-watcher-4.0.0/doc/source/admin/policy.rst0000664000175000017500000001030613656752270021714 0ustar zuulzuul00000000000000.. Copyright 2016 OpenStack Foundation All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Policies ======== Watcher's public API calls may be restricted to certain sets of users using a policy configuration file. This document explains exactly how policies are configured and what they apply to. 
A policy is composed of a set of rules that are used in determining if a particular action may be performed by the authorized tenant. Constructing a Policy Configuration File ---------------------------------------- A policy configuration file is a simply JSON object that contain sets of rules. Each top-level key is the name of a rule. Each rule is a string that describes an action that may be performed in the Watcher API. The actions that may have a rule enforced on them are: * ``strategy:get_all``, ``strategy:detail`` - List available strategies * ``GET /v1/strategies`` * ``GET /v1/strategies/detail`` * ``strategy:get`` - Retrieve a specific strategy entity * ``GET /v1/strategies/`` * ``GET /v1/strategies/`` * ``goal:get_all``, ``goal:detail`` - List available goals * ``GET /v1/goals`` * ``GET /v1/goals/detail`` * ``goal:get`` - Retrieve a specific goal entity * ``GET /v1/goals/`` * ``GET /v1/goals/`` * ``audit_template:get_all``, ``audit_template:detail`` - List available audit_templates * ``GET /v1/audit_templates`` * ``GET /v1/audit_templates/detail`` * ``audit_template:get`` - Retrieve a specific audit template entity * ``GET /v1/audit_templates/`` * ``GET /v1/audit_templates/`` * ``audit_template:create`` - Create an audit template entity * ``POST /v1/audit_templates`` * ``audit_template:delete`` - Delete an audit template entity * ``DELETE /v1/audit_templates/`` * ``DELETE /v1/audit_templates/`` * ``audit_template:update`` - Update an audit template entity * ``PATCH /v1/audit_templates/`` * ``PATCH /v1/audit_templates/`` * ``audit:get_all``, ``audit:detail`` - List available audits * ``GET /v1/audits`` * ``GET /v1/audits/detail`` * ``audit:get`` - Retrieve a specific audit entity * ``GET /v1/audits/`` * ``audit:create`` - Create an audit entity * ``POST /v1/audits`` * ``audit:delete`` - Delete an audit entity * ``DELETE /v1/audits/`` * ``audit:update`` - Update an audit entity * ``PATCH /v1/audits/`` * ``action_plan:get_all``, ``action_plan:detail`` - List 
available action plans * ``GET /v1/action_plans`` * ``GET /v1/action_plans/detail`` * ``action_plan:get`` - Retrieve a specific action plan entity * ``GET /v1/action_plans/`` * ``action_plan:delete`` - Delete an action plan entity * ``DELETE /v1/action_plans/`` * ``action_plan:update`` - Update an action plan entity * ``PATCH /v1/audits/`` * ``action:get_all``, ``action:detail`` - List available action * ``GET /v1/actions`` * ``GET /v1/actions/detail`` * ``action:get`` - Retrieve a specific action plan entity * ``GET /v1/actions/`` * ``service:get_all``, ``service:detail`` - List available Watcher services * ``GET /v1/services`` * ``GET /v1/services/detail`` * ``service:get`` - Retrieve a specific Watcher service entity * ``GET /v1/services/`` To limit an action to a particular role or roles, you list the roles like so :: { "audit:create": ["role:admin", "role:superuser"] } The above would add a rule that only allowed users that had roles of either "admin" or "superuser" to launch an audit. python-watcher-4.0.0/doc/source/admin/index.rst0000664000175000017500000000035413656752270021526 0ustar zuulzuul00000000000000=================== Administrator Guide =================== .. toctree:: :maxdepth: 2 apache-mod-wsgi gmr policy ../strategies/index ../datasources/index ../contributor/notifications ../contributor/concurrency python-watcher-4.0.0/doc/source/configuration/0000775000175000017500000000000013656752352021443 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/configuration/watcher.rst0000664000175000017500000000037613656752270023637 0ustar zuulzuul00000000000000.. _watcher_sample_configuration_files: ------------ watcher.conf ------------ The ``watcher.conf`` file contains most of the options to configure the Watcher services. .. show-options:: :config-file: etc/watcher/oslo-config-generator/watcher.conf python-watcher-4.0.0/doc/source/configuration/configuring.rst0000664000175000017500000004064713656752270024521 0ustar zuulzuul00000000000000.. 
Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ =================== Configuring Watcher =================== This document is continually updated and reflects the latest available code of the Watcher service. Service overview ================ The Watcher system is a collection of services that provides support to optimize your IaaS platform. The Watcher service may, depending upon configuration, interact with several other OpenStack services. This includes: - the OpenStack Identity service (`keystone`_) for request authentication and to locate other OpenStack services. - the OpenStack Telemetry service (`ceilometer`_) for collecting the resources metrics. - the time series database (`gnocchi`_) for consuming the resources metrics. - the OpenStack Compute service (`nova`_) works with the Watcher service and acts as a user-facing API for instance migration. - the OpenStack Bare Metal service (`ironic`_) works with the Watcher service and allows to manage power state of nodes. - the OpenStack Block Storage service (`cinder`_) works with the Watcher service and as an API for volume node migration. The Watcher service includes the following components: - ``watcher-decision-engine``: runs audit on part of your IaaS and return an action plan in order to optimize resource placement. - ``watcher-api``: A RESTful API that processes application requests by sending them to the watcher-decision-engine over RPC. - ``watcher-applier``: applies the action plan. - `python-watcherclient`_: A command-line interface (CLI) for interacting with the Watcher service. - `watcher-dashboard`_: An Horizon plugin for interacting with the Watcher service. Additionally, the Watcher service has certain external dependencies, which are very similar to other OpenStack services: - A database to store audit and action plan information and state. 
You can set the database back-end type and location. - A queue. A central hub for passing messages, such as `RabbitMQ`_. Optionally, one may wish to utilize the following associated projects for additional functionality: - `watcher metering`_: an alternative to collect and push metrics to the Telemetry service. .. _`keystone`: https://github.com/openstack/keystone .. _`ceilometer`: https://github.com/openstack/ceilometer .. _`nova`: https://github.com/openstack/nova .. _`gnocchi`: https://github.com/gnocchixyz/gnocchi .. _`ironic`: https://github.com/openstack/ironic .. _`cinder`: https://github.com/openstack/cinder .. _`python-watcherclient`: https://github.com/openstack/python-watcherclient .. _`watcher-dashboard`: https://github.com/openstack/watcher-dashboard .. _`watcher metering`: https://github.com/b-com/watcher-metering .. _`RabbitMQ`: https://www.rabbitmq.com/ Install and configure prerequisites =================================== You can configure Watcher services to run on separate nodes or the same node. In this guide, the components run on one node, typically the Controller node. This section shows you how to install and configure the services. It assumes that the Identity, Image, Compute, and Networking services have already been set up. .. _identity-service_configuration: Configure the Identity service for the Watcher service ------------------------------------------------------ #. Create the Watcher service user (eg ``watcher``). The service uses this to authenticate with the Identity Service. Use the ``KEYSTONE_SERVICE_PROJECT_NAME`` project (named ``service`` by default in devstack) and give the user the ``admin`` role: .. code-block:: bash $ keystone user-create --name=watcher --pass=WATCHER_PASSWORD \ --email=watcher@example.com \ --tenant=KEYSTONE_SERVICE_PROJECT_NAME $ keystone user-role-add --user=watcher \ --tenant=KEYSTONE_SERVICE_PROJECT_NAME --role=admin or (by using python-openstackclient 1.8.0+) .. 
code-block:: bash $ openstack user create --password WATCHER_PASSWORD --enable \ --email watcher@example.com watcher \ --project=KEYSTONE_SERVICE_PROJECT_NAME $ openstack role add --project KEYSTONE_SERVICE_PROJECT_NAME \ --user watcher admin #. You must register the Watcher Service with the Identity Service so that other OpenStack services can locate it. To register the service: .. code-block:: bash $ keystone service-create --name=watcher --type=infra-optim \ --description="Infrastructure Optimization service" or (by using python-openstackclient 1.8.0+) .. code-block:: bash $ openstack service create --name watcher infra-optim \ --description="Infrastructure Optimization service" #. Create the endpoints by replacing YOUR_REGION and ``WATCHER_API_[PUBLIC|ADMIN|INTERNAL]_IP`` with your region and your Watcher Service's API node IP addresses (or FQDN): .. code-block:: bash $ keystone endpoint-create \ --service-id=the_service_id_above \ --publicurl=http://WATCHER_API_PUBLIC_IP:9322 \ --internalurl=http://WATCHER_API_INTERNAL_IP:9322 \ --adminurl=http://WATCHER_API_ADMIN_IP:9322 or (by using python-openstackclient 1.8.0+) .. code-block:: bash $ openstack endpoint create --region YOUR_REGION watcher public http://WATCHER_API_PUBLIC_IP:9322 $ openstack endpoint create --region YOUR_REGION watcher internal http://WATCHER_API_INTERNAL_IP:9322 $ openstack endpoint create --region YOUR_REGION watcher admin http://WATCHER_API_ADMIN_IP:9322 .. _watcher-db_configuration: Set up the database for Watcher ------------------------------- The Watcher service stores information in a database. This guide uses the MySQL database that is used by other OpenStack services. #. In MySQL, create a ``watcher`` database that is accessible by the ``watcher`` user. 
Replace WATCHER_DBPASSWORD with the actual password:: # mysql mysql> CREATE DATABASE watcher CHARACTER SET utf8; mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'localhost' \ IDENTIFIED BY 'WATCHER_DBPASSWORD'; mysql> GRANT ALL PRIVILEGES ON watcher.* TO 'watcher'@'%' \ IDENTIFIED BY 'WATCHER_DBPASSWORD'; Configure the Watcher service ============================= The Watcher service is configured via its configuration file. This file is typically located at ``/etc/watcher/watcher.conf``. You can easily generate and update a sample configuration file named :ref:`watcher.conf.sample ` by using these following commands:: $ git clone https://opendev.org/openstack/watcher.git $ cd watcher/ $ tox -e genconfig $ vi etc/watcher/watcher.conf.sample The configuration file is organized into the following sections: * ``[DEFAULT]`` - General configuration * ``[api]`` - API server configuration * ``[database]`` - SQL driver configuration * ``[keystone_authtoken]`` - Keystone Authentication plugin configuration * ``[watcher_clients_auth]`` - Keystone auth configuration for clients * ``[watcher_applier]`` - Watcher Applier module configuration * ``[watcher_decision_engine]`` - Watcher Decision Engine module configuration * ``[oslo_messaging_rabbit]`` - Oslo Messaging RabbitMQ driver configuration * ``[ceilometer_client]`` - Ceilometer client configuration * ``[cinder_client]`` - Cinder client configuration * ``[glance_client]`` - Glance client configuration * ``[nova_client]`` - Nova client configuration * ``[neutron_client]`` - Neutron client configuration The Watcher configuration file is expected to be named ``watcher.conf``. When starting Watcher, you can specify a different configuration file to use with ``--config-file``. 
If you do **not** specify a configuration file, Watcher will look in the following directories for a configuration file, in order: * ``~/.watcher/`` * ``~/`` * ``/etc/watcher/`` * ``/etc/`` Although some configuration options are mentioned here, it is recommended that you review all the :ref:`available options ` so that the watcher service is configured for your needs. #. The Watcher Service stores information in a database. This guide uses the MySQL database that is used by other OpenStack services. Configure the location of the database via the ``connection`` option. In the following, replace WATCHER_DBPASSWORD with the password of your ``watcher`` user, and replace DB_IP with the IP address where the DB server is located:: [database] ... # The SQLAlchemy connection string used to connect to the # database (string value) #connection= connection = mysql+pymysql://watcher:WATCHER_DBPASSWORD@DB_IP/watcher?charset=utf8 #. Configure the Watcher Service to use the RabbitMQ message broker by setting one or more of these options. Replace RABBIT_HOST with the IP address of the RabbitMQ server, RABBITMQ_USER and RABBITMQ_PASSWORD by the RabbitMQ server login credentials :: [DEFAULT] # The default exchange under which topics are scoped. May be # overridden by an exchange name specified in the transport_url # option. (string value) control_exchange = watcher # ... transport_url = rabbit://RABBITMQ_USER:RABBITMQ_PASSWORD@RABBIT_HOST #. Watcher API shall validate the token provided by every incoming request, via keystonemiddleware, which requires the Watcher service to be configured with the right credentials for the Identity service. In the configuration section here below: * replace IDENTITY_IP with the IP of the Identity server * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` user * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created for OpenStack services (e.g. 
``service``) :: [keystone_authtoken] # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin #auth_type = auth_type = password # Authentication URL (unknown value) #auth_url = auth_url = http://IDENTITY_IP:5000 # Username (unknown value) # Deprecated group/name - [DEFAULT]/username #username = username=watcher # User's password (unknown value) #password = password = WATCHER_PASSWORD # Domain ID containing project (unknown value) #project_domain_id = project_domain_id = default # User's domain id (unknown value) #user_domain_id = user_domain_id = default # Project name to scope to (unknown value) # Deprecated group/name - [DEFAULT]/tenant-name #project_name = project_name = KEYSTONE_SERVICE_PROJECT_NAME #. Watcher's decision engine and applier interact with other OpenStack projects through those projects' clients. In order to instantiate these clients, Watcher needs to request a new session from the Identity service using the right credentials. In the configuration section here below: * replace IDENTITY_IP with the IP of the Identity server * replace WATCHER_PASSWORD with the password you chose for the ``watcher`` user * replace KEYSTONE_SERVICE_PROJECT_NAME with the name of project created for OpenStack services (e.g. 
``service``) :: [watcher_clients_auth] # Authentication type to load (unknown value) # Deprecated group/name - [DEFAULT]/auth_plugin #auth_type = auth_type = password # Authentication URL (unknown value) #auth_url = auth_url = http://IDENTITY_IP:5000 # Username (unknown value) # Deprecated group/name - [DEFAULT]/username #username = username=watcher # User's password (unknown value) #password = password = WATCHER_PASSWORD # Domain ID containing project (unknown value) #project_domain_id = project_domain_id = default # User's domain id (unknown value) #user_domain_id = user_domain_id = default # Project name to scope to (unknown value) # Deprecated group/name - [DEFAULT]/tenant-name #project_name = project_name = KEYSTONE_SERVICE_PROJECT_NAME #. Configure the clients to use a specific version if desired. For example, to configure Watcher to use a Nova client with version 2.1, use:: [nova_client] # Version of Nova API to use in novaclient. (string value) #api_version = 2.56 api_version = 2.1 #. Create the Watcher Service database tables:: $ watcher-db-manage --config-file /etc/watcher/watcher.conf create_schema #. Start the Watcher Service:: $ watcher-api && watcher-decision-engine && watcher-applier Configure Nova compute ====================== Please check your hypervisor configuration to correctly handle `instance migration`_. .. _`instance migration`: https://docs.openstack.org/nova/latest/admin/migration.html Configure Measurements ====================== You can configure and install Ceilometer by following the documentation below : #. https://docs.openstack.org/ceilometer/latest The built-in strategy 'basic_consolidation' provided by watcher requires "**compute.node.cpu.percent**" and "**cpu_util**" measurements to be collected by Ceilometer. The measurements available depend on the hypervisors that OpenStack manages on the specific implementation. You can find the measurements available per hypervisor and OpenStack release on the OpenStack site. 
You can use 'ceilometer meter-list' to list the available meters. For more information: https://docs.openstack.org/ceilometer/latest/admin/telemetry-measurements.html Ceilometer is designed to collect measurements from OpenStack services and from other external components. If you would like to add new meters to the currently existing ones, you need to follow the documentation below: #. https://docs.openstack.org/ceilometer/latest/contributor/measurements.html#new-measurements The Ceilometer collector uses a pluggable storage system, meaning that you can pick any database system you prefer. The original implementation has been based on MongoDB but you can create your own storage driver using whatever technology you want. For more information : https://wiki.openstack.org/wiki/Gnocchi Configure Nova Notifications ============================ Watcher can consume notifications generated by the Nova services, in order to build or update, in real time, its cluster data model related to computing resources. Nova emits unversioned(legacy) and versioned notifications on different topics. Because legacy notifications will be deprecated, Watcher consumes Nova versioned notifications. * In the file ``/etc/nova/nova.conf``, the value of driver in the section ``[oslo_messaging_notifications]`` can't be noop, and the value of notification_format in the section ``[notifications]`` should be both or versioned :: [oslo_messaging_notifications] driver = messagingv2 ... [notifications] notification_format = both Configure Cinder Notifications ============================== Watcher can also consume notifications generated by the Cinder services, in order to build or update, in real time, its cluster data model related to storage resources. To do so, you have to update the Cinder configuration file on controller and volume nodes, in order to let Watcher receive Cinder notifications in a dedicated ``watcher_notifications`` channel. 
* In the file ``/etc/cinder/cinder.conf``, update the section ``[oslo_messaging_notifications]``, by redefining the list of topics into which Cinder services will publish events :: [oslo_messaging_notifications] driver = messagingv2 topics = notifications,watcher_notifications * Restart the Cinder services. Workers ======= You can define a number of workers for the Decision Engine and the Applier. If you want to create and run more audits simultaneously, you have to raise the number of workers used by the Decision Engine:: [watcher_decision_engine] ... # The maximum number of threads that can be used to execute strategies # (integer value) #max_workers = 2 If you want to execute simultaneously more recommended action plans, you have to raise the number of workers used by the Applier:: [watcher_applier] ... # Number of workers for applier, default value is 1. (integer value) # Minimum value: 1 #workers = 1 python-watcher-4.0.0/doc/source/configuration/index.rst0000664000175000017500000000016513656752270023305 0ustar zuulzuul00000000000000=================== Configuration Guide =================== .. toctree:: :maxdepth: 2 configuring watcher python-watcher-4.0.0/doc/source/architecture.rst0000664000175000017500000004531413656752270022016 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ .. _architecture: =================== System Architecture =================== This page presents the current technical Architecture of the Watcher system. .. _architecture_overview: Overview ======== Below you will find a diagram, showing the main components of Watcher: .. image:: ./images/architecture.svg :width: 110% .. _components_definition: Components ========== .. _amqp_bus_definition: AMQP Bus -------- The AMQP message bus handles internal asynchronous communications between the different Watcher components. .. 
_cluster_datasource_definition: Datasource ---------- This component stores the metrics related to the cluster. It can potentially rely on any appropriate storage system (InfluxDB, OpenTSDB, MongoDB,...) but will probably be more performant when using `Time Series Databases `_ which are optimized for handling time series data, which are arrays of numbers indexed by time (a datetime or a datetime range). .. _archi_watcher_api_definition: Watcher API ----------- This component implements the REST API provided by the Watcher system to the external world. It enables the :ref:`Administrator ` of a :ref:`Cluster ` to control and monitor the Watcher system via any interaction mechanism connected to this API: - :ref:`CLI ` - Horizon plugin - Python SDK You can also read the detailed description of `Watcher API`_. .. _archi_watcher_applier_definition: Watcher Applier --------------- This component is in charge of executing the :ref:`Action Plan ` built by the :ref:`Watcher Decision Engine `. Taskflow is the default workflow engine for Watcher. It connects to the :ref:`message bus ` and launches the :ref:`Action Plan ` whenever a triggering message is received on a dedicated AMQP queue. The triggering message contains the Action Plan UUID. It then gets the detailed information about the :ref:`Action Plan ` from the :ref:`Watcher Database ` which contains the list of :ref:`Actions ` to launch. It then loops on each :ref:`Action `, gets the associated class and calls the execute() method of this class. Most of the time, this method will first request a token to the Keystone API and if it is allowed, sends a request to the REST API of the OpenStack service which handles this kind of :ref:`atomic Action `. Note that as soon as :ref:`Watcher Applier ` starts handling a given :ref:`Action ` from the list, a notification message is sent on the :ref:`message bus ` indicating that the state of the action has changed to **ONGOING**. 
If the :ref:`Action ` is successful, the :ref:`Watcher Applier ` sends a notification message on :ref:`the bus ` informing the other components of this. If the :ref:`Action ` fails, the :ref:`Watcher Applier ` tries to rollback to the previous state of the :ref:`Managed resource ` (i.e. before the command was sent to the underlying OpenStack service). In Stein, a new config option 'action_execution_rule', which is a dict type, was added. Its key field is the strategy name and the value is 'ALWAYS' or 'ANY'. 'ALWAYS' means the callback function returns True as usual. 'ANY' means the return depends on the result of the previous action execution. The callback returns True if the previous action failed, and the engine continues to run the next action. If the previous action executed successfully, the callback returns False and the next action will be ignored. For strategies that aren't in 'action_execution_rule', the callback always returns True. Please add the following section in the watcher.conf file if your strategy needs this feature. :: [watcher_workflow_engines.taskflow] action_execution_rule = {'your strategy name': 'ANY'} .. _archi_watcher_cli_definition: Watcher CLI ----------- The watcher command-line interface (CLI) can be used to interact with the Watcher system in order to control it or to know its current status. Please, read `the detailed documentation about Watcher CLI `_. .. _archi_watcher_dashboard_definition: Watcher Dashboard ----------------- The Watcher Dashboard can be used to interact with the Watcher system through Horizon in order to control it or to know its current status. Please, read `the detailed documentation about Watcher Dashboard `_. .. 
_archi_watcher_database_definition: Watcher Database ---------------- This database stores all the Watcher domain objects which can be requested by the :ref:`Watcher API ` or the :ref:`Watcher CLI `: - :ref:`Goals ` - :ref:`Strategies ` - :ref:`Audit templates ` - :ref:`Audits ` - :ref:`Action plans ` - :ref:`Efficacy indicators ` via the Action Plan API. - :ref:`Actions ` The Watcher domain being here "*optimization of some resources provided by an OpenStack system*". .. _archi_watcher_decision_engine_definition: Watcher Decision Engine ----------------------- This component is responsible for computing a set of potential optimization :ref:`Actions ` in order to fulfill the :ref:`Goal ` of an :ref:`Audit `. It first reads the parameters of the :ref:`Audit ` to know the :ref:`Goal ` to achieve. Unless specified, it then selects the most appropriate :ref:`strategy ` from the list of available strategies achieving this goal. The :ref:`Strategy ` is then dynamically loaded (via `stevedore `_). The :ref:`Watcher Decision Engine ` executes the strategy. In order to compute the potential :ref:`Solution ` for the Audit, the :ref:`Strategy ` relies on different sets of data: - :ref:`Cluster data models ` that are periodically synchronized through pluggable cluster data model collectors. These models contain the current state of various :ref:`Managed resources ` (e.g., the data stored in the Nova database). These models gives a strategy the ability to reason on the current state of a given :ref:`cluster `. - The data stored in the :ref:`Cluster Datasource ` which provides information about the past of the :ref:`Cluster `. Here below is a sequence diagram showing how the Decision Engine builds and maintains the :ref:`cluster data models ` that are used by the strategies. .. image:: ./images/sequence_architecture_cdmc_sync.png :width: 100% The execution of a strategy then yields a solution composed of a set of :ref:`Actions ` as well as a set of :ref:`efficacy indicators `. 
These :ref:`Actions ` are scheduled in time by the :ref:`Watcher Planner ` (i.e., it generates an :ref:`Action Plan `). .. _data_model: Data model ========== The following diagram shows the data model of Watcher, especially the functional dependency of objects from the actors (Admin, Customer) point of view (Goals, Audits, Action Plans, ...): .. image:: ./images/functional_data_model.svg :width: 100% Here below is a diagram representing the main objects in Watcher from a database perspective: .. image:: ./images/watcher_db_schema_diagram.png .. _sequence_diagrams: Sequence diagrams ================= The following paragraph shows the messages exchanged between the different components of Watcher for the most often used scenarios. .. _sequence_diagrams_create_audit_template: Create a new Audit Template --------------------------- The :ref:`Administrator ` first creates an :ref:`Audit template ` providing at least the following parameters: - A name - A goal to achieve - An optional strategy .. image:: ./images/sequence_create_audit_template.png :width: 100% The `Watcher API`_ makes sure that both the specified goal (mandatory) and its associated strategy (optional) are registered inside the :ref:`Watcher Database ` before storing a new audit template in the :ref:`Watcher Database `. .. _sequence_diagrams_create_and_launch_audit: Create and launch a new Audit ----------------------------- The :ref:`Administrator ` can then launch a new :ref:`Audit ` by providing at least the unique UUID of the previously created :ref:`Audit template `: .. image:: ./images/sequence_create_and_launch_audit.png :width: 100% The :ref:`Administrator ` can also specify the type of Audit and the interval (in case of CONTINUOUS type). There are three types of Audit: ONESHOT, CONTINUOUS and EVENT. 
ONESHOT Audit is launched once and, if it succeeds, a new action plan list will be provided; CONTINUOUS Audit creates action plans with a specified interval (in seconds or cron format, a cron interval can be used like: `*/5 * * * *`), if a new action plan has been created, all previous action plans get the CANCELLED state; EVENT audit is launched upon receiving a webhooks API call. A message is sent on the :ref:`AMQP bus ` which triggers the Audit in the :ref:`Watcher Decision Engine `: .. image:: ./images/sequence_trigger_audit_in_decision_engine.png :width: 100% The :ref:`Watcher Decision Engine ` reads the Audit parameters from the :ref:`Watcher Database `. It instantiates the appropriate :ref:`strategy ` (using entry points) given both the :ref:`goal ` and the strategy associated to the parent :ref:`audit template ` of the :ref:`audit `. If no strategy is associated to the audit template, the strategy is dynamically selected by the Decision Engine. The :ref:`Watcher Decision Engine ` also builds the :ref:`Cluster Data Model `. This data model is needed by the :ref:`Strategy ` to know the current state and topology of the audited :ref:`OpenStack cluster `. The :ref:`Watcher Decision Engine ` calls the **execute()** method of the instantiated :ref:`Strategy ` and provides the data model as an input parameter. This method computes a :ref:`Solution ` to achieve the goal and returns it to the :ref:`Decision Engine `. At this point, actions are not scheduled yet. The :ref:`Watcher Decision Engine ` dynamically loads the :ref:`Watcher Planner ` implementation which is configured in Watcher (via entry points) and calls the **schedule()** method of this class with the solution as an input parameter. This method finds an appropriate scheduling of :ref:`Actions ` taking into account some scheduling rules (such as priorities between actions). It generates a new :ref:`Action Plan ` with status **RECOMMENDED** and saves it into the :ref:`Watcher Database `. 
The saved action plan is now a scheduled flow of actions to which a global efficacy is associated alongside a number of :ref:`Efficacy Indicators ` as specified by the related :ref:`goal `. If every step executed successfully, the :ref:`Watcher Decision Engine ` updates the current status of the Audit to **SUCCEEDED** in the :ref:`Watcher Database ` and sends a notification on the bus to inform other components that the :ref:`Audit ` was successful. This internal workflow the Decision Engine follows to conduct an audit can be seen in the sequence diagram here below: .. image:: ./images/sequence_from_audit_execution_to_actionplan_creation.png :width: 100% .. _sequence_diagrams_launch_action_plan: Launch Action Plan ------------------ The :ref:`Administrator ` can then launch the recommended :ref:`Action Plan `: .. image:: ./images/sequence_launch_action_plan.png :width: 100% A message is sent on the :ref:`AMQP bus ` which triggers the :ref:`Action Plan ` in the :ref:`Watcher Applier `: .. image:: ./images/sequence_launch_action_plan_in_applier.png :width: 100% The :ref:`Watcher Applier ` will get the description of the flow of :ref:`Actions ` from the :ref:`Watcher Database ` and for each :ref:`Action ` it will instantiate a corresponding :ref:`Action ` handler python class. The :ref:`Watcher Applier ` will then call the following methods of the :ref:`Action ` handler: - **validate_parameters()**: this method will make sure that all the provided input parameters are valid: - If all parameters are valid, the Watcher Applier moves on to the next step. - If it is not, an error is raised and the action is not executed. A notification is sent on the bus informing other components of the failure. - **preconditions()**: this method will make sure that all conditions are met before executing the action (for example, it makes sure that an instance still exists before trying to migrate it). 
- **execute()**: this method is what triggers real commands on other OpenStack services (such as Nova, ...) in order to change target resource state. If the action is successfully executed, a notification message is sent on the bus indicating that the new state of the action is **SUCCEEDED**. If every action of the action flow has been executed successfully, a notification is sent on the bus to indicate that the whole :ref:`Action Plan ` has **SUCCEEDED**. .. _state_machine_diagrams: State Machine diagrams ====================== .. _audit_state_machine: Audit State Machine ------------------- An :ref:`Audit ` has a life-cycle and its current state may be one of the following: - **PENDING** : a request for an :ref:`Audit ` has been submitted (either manually by the :ref:`Administrator ` or automatically via some event handling mechanism) and is in the queue for being processed by the :ref:`Watcher Decision Engine ` - **ONGOING** : the :ref:`Audit ` is currently being processed by the :ref:`Watcher Decision Engine ` - **SUCCEEDED** : the :ref:`Audit ` has been executed successfully and at least one solution was found - **FAILED** : an error occurred while executing the :ref:`Audit ` - **DELETED** : the :ref:`Audit ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. - **CANCELLED** : the :ref:`Audit ` was in **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` - **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** state and was suspended by the :ref:`Administrator ` The following diagram shows the different possible states of an :ref:`Audit ` and what event makes the state change to a new value: .. image:: ./images/audit_state_machine.png :width: 100% .. 
_action_plan_state_machine: Action Plan State Machine ------------------------- An :ref:`Action Plan ` has a life-cycle and its current state may be one of the following: - **RECOMMENDED** : the :ref:`Action Plan ` is waiting for a validation from the :ref:`Administrator ` - **PENDING** : a request for an :ref:`Action Plan ` has been submitted (due to an :ref:`Administrator ` executing an :ref:`Audit `) and is in the queue for being processed by the :ref:`Watcher Applier ` - **ONGOING** : the :ref:`Action Plan ` is currently being processed by the :ref:`Watcher Applier ` - **SUCCEEDED** : the :ref:`Action Plan ` has been executed successfully (i.e. all :ref:`Actions ` that it contains have been executed successfully) - **FAILED** : an error occurred while executing the :ref:`Action Plan ` - **DELETED** : the :ref:`Action Plan ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. - **CANCELLED** : the :ref:`Action Plan ` was in **RECOMMENDED**, **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` - **SUPERSEDED** : the :ref:`Action Plan ` was in RECOMMENDED state and was automatically superseded by Watcher, due to an expiration delay or an update of the :ref:`Cluster data model ` The following diagram shows the different possible states of an :ref:`Action Plan ` and what event makes the state change to a new value: .. image:: ./images/action_plan_state_machine.png :width: 100% .. 
_Watcher API: https://docs.openstack.org/api-ref/resource-optimization/ python-watcher-4.0.0/doc/source/image_src/0000775000175000017500000000000013656752352020525 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/image_src/dia/0000775000175000017500000000000013656752352021262 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/image_src/dia/functional_data_model.dia0000664000175000017500000000621413656752270026256 0ustar zuulzuul00000000000000‹í]ß“â6~ß¿‚b_Y%Kþ2“ÚlîrW•\®.›gJ¾56e›™ååþö“l˜clŒm1°ÛSµ© Óè³Úv«ûëVëÇŸ¾®‚Á£ˆ? ï‡Ã½h懋ûá_ŸÿþÁþôðîÇ™Ïÿ1_ ä7ÂDýv?\¦éú‡»»§§'lžF1 ü JÄÝÿxð;)t7|x7¼`ÆS®>Û}ÊÓ4ö§›T B¾÷Ã)÷¾,âhΆ¹ÔN΋‚(<òà~ø~žý ïvÃÜÆ©{Íb þ¥zhCþ¸n›¡×">vµŽ_ФÛuI¤bõßW2;©D …‹‡÷éûü’v¼ŒuìB+AÒ~XÆ‘º rEäB¤2˜ë¸†Mm¼×ÉùpÓË—…‹/ ç'“u§1÷Ó2ä4ŠÁÃ57¢=Nâñ@>buÓê0‹¹Ÿ¦Ñ‰ëŸó i2üãç·íÜ7wû³ú· Q1Ê“?K—“¯šÔ•¾Õ4ú£ŸøÓ@»z?L{~ÛÏð‡w'³Þ/_ïø4ä£U.3G0ÁÎ_&’YQ¦b¤åNìî”Öåš*&ÿè`±Î ¾ñnøŸ_–éÁîï,΀{©ÿ¸ÿí•N¢é…—î&û×ï¿ > >åF›Sùìz냨-”X¯_$ƒ4ºÞeó§ ãè©§Õ‹ÂPšeåKÊßÿe°äálç3¥QV¥4xù«ô+KO÷‘ïâÝwqåw÷uZÔ™5»ÚyÓ@6aŒLv^¡d cÓD6a‚älçY5ah rq êuVE -P´@ÑE -P´@ÑE -P´@ÑEÛ3EK;‡î¬‚\⺎Î=Q6²6bjS”…,šÅÕ´-ä5°´š«t%Ô±N ²3$­ö„Kƶ!+[÷=`l/ÏØ.Dš(qî/6¹õÌãhômSúÖì@ßP¿Öè[Öu   9#£u©k#û/1l¦@ÆÔA¦MØ#W'{ÛqR͸[FÈ­—×þñèZ k®ºèZ k®ºèZ k®ºèÚžéZ«º[Du{räí0uV[I$œ…锦=¦6bòÕgжF¾Þö¸ «EMTËÆJÕ–TMC1Œ\Cjô»$nÛÇ1ßA¥­·ôÅ£H€¨½D-»|­Ýy?Í*R½u¶ %›¢D­®ÞJÛÎójÂÖ2d‘BÃ[gn?æö¿ .¸@à .¸@à .¸@àÛ3ëôToÛzëj³ž©žÈ z¬ð,Z[7_i[ÐY¥E.©ïˆ`!̚˵o%5¶ß.Wû«•¥²¶ªZm„«ÛÙZS„GÄDLk_Š –¡ŒM;+)­]g»Îª ÛjfE¾Ý£ì+c[h¢ˆV Zh¢ˆV Zh¢ˆV Zh=Ÿhý˜$‘çg³x»“ýq1'Žz­‹Õ|µÿYq”b6ˆæÝù?Ή‰aßt\²Œž&•Ã÷u¸RôäXÌù2–ç¹Æ‘ i´…š«MúkßóÓ­F”,Œñƒ£ /2;ÜâŒ|+Þ× ÎîÂô"waz‰»0Õy¦ºîBƒúM1 ›ªžÒvGY 9ªZ_õÆ6[Dí:ŠLã¦{cW©°Z”B çÊäXJ9MÄ H }i¡VÉ»Ceº«/QrÂåÂí]®É{;àýpäWáoµç›¸[¸[àn»¥×Ý2mDU§!éô¸Z¸v¾MFº”©©êF¤ÆnØÍªP] CN­ RKS)ŠH3Тà5zapœÉ•zh^–ÝÁC£}zhž¼ÅÇNOî|ÃÈÉ‹u¶ysq-žc¦BMSB2t•†°Y­ Ë΄Æ2ô6’ÓXëcØ%’¾˜ºÆ&ZÍÄ|ÞCÖw£5í{ê >©¬Š‹ùürÒl凾J’H rÔ=¬¹új%w½d2Û\Gé!?7£Õ´hP«*C„²nP‡µžö6@<ðáA}Ø)¦e¿àížéÚ—¤§|Üù¼ªl•b®öÕÐ4±¬×µ5– &U òÃÛ *TwD#ÛpÈY‚F½×¯NýRÚ$­ã?¨º§TŸc±¦ ò›F@Ò5kORk¬æ¯7þ´‡8ȹȢ®eé´ýÒí¶Õd̲U+-W•‰X7¼ë¸êŽ˜tŠ +ŸpSÑSçØJu:%10ý`ú¥=þEÌå$ƒìèɃ“ü„–g\„Öê|xSl{k—º™)G4k¨N[ÈqrÃîÞ0­eJCn  
´€ÒºfJëÓ&I£•6ëRl–²¥ÒÅr̺™Õ¹%°Z(¶5¯ÅT(Ñ1fˆ:r ´[Ÿ®s ÁÌQ½•Åäd µÞ7³ß>:äâÈ'éoV2‰E"/Áƒ¾gÐP¬ …‹ßÅh*;·ñÅÈdŠÅv5Úk‰‘uÌQ £ªÝ©µ§DÇ95kà‹‹^¿}ã-%þsKƺJ@W è*]% «t•€®ÐUºJ@W è*]%n¬«„Ó~‹#ô‘€>°±66¾ýÆF,—†‘l­5Ž=²;Æ9³g¶E»†TOY_G+"H#™Ä-$y¾‰­ƒI󬄅­/cQŸhîÞ {ßýD÷vjDmep¦ÚÖ8¦‚´Ûž—p›©+®Fð´j ¢r>ТRÕEÿê÷hæÏ}HP·Þ)nu1÷Ö¥Ì=1úÙ/Îuµú™†jH–Q{¬@óófÙMo“(k®JÌ=µëÍÈNRh-vì¾2Æ¿ñMè-Áî_hœ­o/EUaÁM>E˜²‘jƒ¨ÓÁ—0†›ÁlÌdY}¥eh­Pê>¹&EJQLhwÎþjŠ”þT %Äb 5JP£5JP£5JP£5JP£5JP£5JP£5J=1N:wú¤2U?bÜþèêF½>DìF5ûdÔ’A¼iÜ0q{\qG™[Ãp¬ÆrjV½ bDJrßQ{ nk(˜M´msÚ–U2¯§Y[‚ßêì bÂqgP¦ eªP¦úMwæèÜêžµ¤µóÄ÷˜H«S¿(¢Œ8ßÄIgN/ǜᬱ}† ‰¬Ö*„Z‚ï³J£½|ïÒ¹{¬êõnޝF#«¦šqå@qÆêÈAÓÍÎÙÁF[à†­×ãß¼úaßßEÜŒàÉÒ^½_çùí÷÷÷÷(~Xyš¡8Z¡exñ¿ Žƒ (t1~ój4Z¯`äý®ú6Èó,š®òp”7áëñ4˜ýµÈÒU2—¥ªr³4N³Ñ]¿wU|ÆU5õ¨û6X„Ó, þª¯Ãǘ6U߆Ùvµ7·é2‚"ùÃíN‘šzìßkeªRK(”,Þ|÷–WÞRõÅS]ûn´$¿ ²E”ìâ@ÛÄeCP¤)¥ÐÂhƒWä±MN‡›  — -/oÓ,Ï‚(ß…œ¦iI‰šg«°=ÎrÄÐÅ™ÕÁŠ«(ÏÓ#÷ÄË&”_m§ŽÜEÍÜ5µÜGóüúò“£æ*kpTû]´Œ¦q¸ïî£$ï­ú‡~ªß~:Åìýô󎽡¬­–æ:¡8«h.t³Í255]WÅ.޵úv¹¦ S~µEÖD<„YUý»'šU¸šqÖZ!þ'œå•y?Çéýì:ÈòÑùè]úiü$;@tDó×ã¿ãÍöÙ¶jƒ©o§nÓϽè\#£±:;'i±múîÄQ‡2nƒd`G,â°2³ ÈLFŠvF8Òí@Ã8¼¹œ¥Y²+z6¯@*¦“]§axí#;`]‡Ñâ:¯£I€ æñ£ÚMÓlfÇLÈT6n0!’ÐNp›ÓÅQ©zF”$M ¦l.g-Ñò:½¿Ü#¹ë‰SPÊž}™óhµ<ôpZvêÛ`>ßÔ*}Õœ‡ŸòñÚ*S×Ò…bß(·%îk¤ý>¬Wi’ï²ß®‚›(~€û ’åx´ÌìÄ 3pùÓ_Âø.Ì£Y°Î/§ßÁΠßûL4Þüðn ›Ü°o¥€É9LÔHrI7¸íqYëNbÜ tø"¹ ÷?Ó0YÝì‘P‡DÔ>P§e.žhüI:`¯*ø=¼•×´?Y@ºÊ K…ÅÙ¹BÆ¡&iQô„HdˆágQ—r ³Y´"„3¼¬6Z3áP æ–ûi5ÑPQ”QÊŸ=÷c<›+í¹ÿ¹sÿÛÿøøjºZ~A  wEÀù»4ž“J\5*)WÊNÚí¹à™ vLN;3¹Ç×>4C¨KÿpàP„g¢ֿçH`ÍRzwûš:ƒ¤¡…»M7Ü!§KÄ(6ž×Oãõ««)_÷<¯?O^ÿwÏ®ÃìÕûpÙ¹pô·d%áK¥y n|Œ7œ bœ;üqÍ%8ü˜’AþÀóS΃l4ÿôå=<Ï:ó¼DºXQw»Œ0Jj 0À†O€~ —š"Êem'Æ$¹ “yaÎéFÖ‰(¥4i¹8}fv»}+û6ßN©4†‡YÌ*Û5ïí£'QX2ò2Ȳô¾¾nJ;W~‡Éâ°¶ÝAŽê—¶R/™»j£ÏU»k¡'Wí3–×ÇoŸmºž¤áNŠæ0›À¼·ÜÑÿ3º†™²Z_ÏÓb)sôô¿0:ðîöìV­ý8^¼'ÇK!3€Û¥ nÿfh‡«½]MÜ-ãнâˆs³öÑÜ»ZÞÕzÉ®ÖÛrú½½½£0ó®Ö@®G fR3)1Ü{Z쨊®ÄN‘œƒ/âtE•"Qá*& Q]x>Ý–9Ò{Ö5!xŽ´èñ(–uÁ*<á{ÂÑ„ÿñWOòÃ<‡Ù›hë•*=ËÅò²s`´B¼ˆW–ˆ:¥yž?'Ë@ÄÊiht6az¨~ÓÝvÉô q!ž‚•)ñÛ¨žê_4Õÿô›§ú¨þüy,`FåHQö\? 
׫¸^•YBˆ+Ç\¯Ë$($J®×— ¦„c®ïl`³Ðg½ÁõÆiRXúYdA Ñ߇ý’fÑÓÄGøá ¼¥ðôuê}†á.Ý™»dé/b$™Kæ’ˆ”0‚MÎ1âö‚ÎqË[kÆZÂ)MS’b™D•wI½Kú’]ÒÛxµˆï•ä•2ÄXádØ@MéÒaˆÝt%vFíÂi±‹•CfbJB'L#R 2D¦âö`ž÷H½G:0…ýžÞÞÆeeCQøæ¢•wH]òé|¤'ˆ0˜Ù%r›qÊíNiƒ•˜pƒ„„+ƒˆháÓŒºz0° uiÄé¤È8%Ê­—ª.¶N˜ Ï]ßw}²(xÿnô>ȃi° = Ãc\ FaºP­óD6LF%!}¤TŠÂAqœQ©-'Dé”Ò†‹ $uÛH™“Ò)w ¬ÍøißNr)}*_¿Ã…ö• z]ºOAVvŒ`L5­r¥¦ÂH=Pò–µãе։> Ùg!û,ä¯. Yî û=¿%ÕoùæoÍ@ ̤ó‘\†ø¹0½b—»Ë´ØçµH6Q‡M­¡Ïtyé4Ý©›…Ak¥Ö—¢ž YX³>8¼{íÚ½þ)Œâô&ÌÃlôeÓk^ÊÉÄÕ‚ŸF˜ ìÝê¡ÜêÎcPŒ¸}p1æ4Ú–hDµ,–àMO¨ ^2æL!‚œ8vêÌÜã'hÄd±@D[7ˆw±Ÿ‹Ý9õüœ”QxvK̸T'ˆr‰µ…’Úу”P‚ØÄ4epÛ}ôÆÃ§ÖÔú”uÚöTo?xpòZù]rÓwÒ§ø]b󷪿«Á(ïžz mj»>E„ºäFaåù¹˜pn9\Zh£‰ë•´:Cë‡8FÆSä77Êu£|°ÕÕƒ ¶;= a,0w¹¸€€-ЄJ¿….•›nšƒÅ•Llä'1YË2gÔi¤÷ñüãùêãùŸÿŠ×yäáâÁGòt^Cœhy‘”BùHþV¼t)ÕR×N½åvc,Q”Øa1–vsCQ̸ë¯+÷”Töm_6€R¶ÕôrþK¯x5PÉݳ`0RŠód2 I‹ô¨“™@f‘Ü‹…ÍT2¦B>ÉdíE²É^$;ÉãÀ>p¯‘‡ÑÈŒ!ªõyXLq?ÁÖ¸íû_OµÆˆH¢1ûl óHñÅ ÑÖÖÅŒ¶n/Š¿þm`Ú91AWæhc·)vª L°G(† ÁH± Ž›r<^ê¬ÜûZçî â‡Î3:“¨A¸J*Ö\¸cÀÚž‰@µã“J¢í1 EŠ5h®Ÿ¯4ëÞtM¤™Ý+Ãå›å5aŠq㥙—f^š}EÒì—h™§ÙƒgCÒ¡´9#aæÅ™gƒ‰³µX‚?£ÅŸÁb+¢à³0뜭ͫӕ}q‚qzàu¹qyN0ua&\ j$?³¹ÛmsµÓ,¿¾<KPkc}Q¸I»{x°hµª~JÙö›ÄÖPA›ÃúÐ>ʶOK V@Gé*ßø}6?Ñ!?ý1Çœ²Í$sÚ÷Q‡§(ÕCÐÅèu.hq IÅ„3¤ËèZà k}|`£ù©ÆÀ±<\î¾y9VíO²òó’ëêÙ9G•)$-q3PêNsTHHCÎ(’Z29áľY0ÇÆù±Rufî}ˇ"º8½¢ý!¤>&ò‹ÅD–ðg‘7o^ý›*Ûu¸¹python-watcher-4.0.0/doc/source/image_src/plantuml/0000775000175000017500000000000013656752352022361 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/source/image_src/plantuml/watcher_db_schema_diagram.txt0000664000175000017500000000672113656752270030235 0ustar zuulzuul00000000000000@startuml !define table(x) class x << (T,#FFAAAA) >> !define primary_key(x) x !define foreign_key(x) x hide methods hide stereotypes table(goals) { primary_key(id: Integer) uuid : String[36] name : String[63] display_name : String[63] efficacy_specification : JSONEncodedList, nullable created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(strategies) { primary_key(id: Integer) foreign_key(goal_id : Integer) uuid : String[36] name : String[63] display_name : String[63] parameters_spec : JSONEncodedDict, nullable created_at 
: DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(audit_templates) { primary_key(id: Integer) foreign_key("goal_id : Integer") foreign_key("strategy_id : Integer, nullable") uuid : String[36] name : String[63], nullable description : String[255], nullable scope : JSONEncodedList created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(audits) { primary_key(id: Integer) foreign_key("goal_id : Integer") foreign_key("strategy_id : Integer, nullable") uuid : String[36] audit_type : String[20] state : String[20], nullable interval : Integer, nullable parameters : JSONEncodedDict, nullable scope : JSONEncodedList, nullable auto_trigger: Boolean created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(action_plans) { primary_key(id: Integer) foreign_key("audit_id : Integer, nullable") foreign_key("strategy_id : Integer") uuid : String[36] state : String[20], nullable global_efficacy : JSONEncodedList, nullable created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(actions) { primary_key(id: Integer) foreign_key("action_plan_id : Integer") uuid : String[36] action_type : String[255] input_parameters : JSONEncodedDict, nullable state : String[20], nullable parents : JSONEncodedList, nullable created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(efficacy_indicators) { primary_key(id: Integer) foreign_key("action_plan_id : Integer") uuid : String[36] name : String[63] description : String[255], nullable unit : String[63], nullable value : Numeric created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(scoring_engines) { primary_key(id: Integer) uuid : String[36] name : String[63] description : String[255], nullable metainfo : Text, nullable created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } table(service) { primary_key(id: Integer) 
name: String[255] host: String[255] last_seen_up: DateTime created_at : DateTime updated_at : DateTime deleted_at : DateTime deleted : Integer } "goals" <.. "strategies" : Foreign Key "goals" <.. "audit_templates" : Foreign Key "strategies" <.. "audit_templates" : Foreign Key "goals" <.. "audits" : Foreign Key "strategies" <.. "audits" : Foreign Key "action_plans" <.. "actions" : Foreign Key "action_plans" <.. "efficacy_indicators" : Foreign Key "strategies" <.. "action_plans" : Foreign Key "audits" <.. "action_plans" : Foreign Key @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_create_audit_template.txt0000664000175000017500000000135713656752270031343 0ustar zuulzuul00000000000000@startuml actor Administrator Administrator -> "Watcher CLI" : watcher audittemplate create \ [--strategy-uuid ] "Watcher CLI" -> "Watcher API" : POST audit_template(parameters) "Watcher API" -> "Watcher Database" : Request if goal exists in database "Watcher API" <-- "Watcher Database" : OK "Watcher API" -> "Watcher Database" : Request if strategy exists in database (if provided) "Watcher API" <-- "Watcher Database" : OK "Watcher API" -> "Watcher Database" : Create new audit_template in database "Watcher API" <-- "Watcher Database" : New audit template UUID "Watcher CLI" <-- "Watcher API" : Return new audit template URL in HTTP Location Header Administrator <-- "Watcher CLI" : New audit template UUID @enduml ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txtpython-watcher-4.0.0/doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creat0000664000175000017500000000312613656752270034501 0ustar zuulzuul00000000000000@startuml skinparam maxMessageSize 200 "Decision Engine" -> "Decision Engine" : Execute audit activate "Decision Engine" "Decision Engine" -> "Decision Engine" : Set the audit state to ONGOING 
"Decision Engine" -> "Strategy selector" : Select strategy activate "Strategy selector" alt A specific strategy is provided "Strategy selector" -> "Strategy selector" : Load strategy and inject the \ cluster data model else Only a goal is specified "Strategy selector" -> "Strategy selector" : select strategy "Strategy selector" -> "Strategy selector" : Load strategy and inject the \ cluster data model end "Strategy selector" -> "Decision Engine" : Return loaded Strategy deactivate "Strategy selector" "Decision Engine" -> "Strategy" : Execute the strategy activate "Strategy" "Strategy" -> "Strategy" : **pre_execute()**Checks if the strategy \ pre-requisites are all set. "Strategy" -> "Strategy" : **do_execute()**Contains the logic of the strategy "Strategy" -> "Strategy" : **post_execute()** Set the efficacy indicators "Strategy" -> "Strategy" : Compute the global efficacy of the solution \ based on the provided efficacy indicators "Strategy" -> "Decision Engine" : Return the solution deactivate "Strategy" "Decision Engine" -> "Planner" : Plan the solution that was computed by the \ strategy activate "Planner" "Planner" -> "Planner" : Store the planned solution as an action plan with its \ related actions and efficacy indicators "Planner" --> "Decision Engine" : Done deactivate "Planner" "Decision Engine" -> "Decision Engine" : Update the audit state to SUCCEEDED deactivate "Decision Engine" @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt0000664000175000017500000000371313656752270033536 0ustar zuulzuul00000000000000@startuml skinparam maxMessageSize 100 "AMQP Bus" -> "Decision Engine" : trigger audit activate "Decision Engine" "Decision Engine" -> "Database" : update audit.state = ONGOING "AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = ONGOING "Decision Engine" -> "Database" : get audit parameters (goal, strategy, ...) "Decision Engine" <-- "Database" : audit parameters (goal, strategy, ...) 
"Decision Engine" --> "Decision Engine"\ : select appropriate optimization strategy (via the Strategy Selector) create Strategy "Decision Engine" -> "Strategy" : execute strategy activate "Strategy" "Strategy" -> "Cluster Data Model Collector" : get cluster data model "Cluster Data Model Collector" --> "Strategy"\ : copy of the in-memory cluster data model loop while enough history data for the strategy "Strategy" -> "Ceilometer API" : get necessary metrics "Strategy" <-- "Ceilometer API" : aggregated metrics end "Strategy" -> "Strategy"\ : compute/set needed actions for the solution so it achieves its goal "Strategy" -> "Strategy" : compute/set efficacy indicators for the solution "Strategy" -> "Strategy" : compute/set the solution global efficacy "Decision Engine" <-- "Strategy"\ : solution (unordered actions, efficacy indicators and global efficacy) deactivate "Strategy" create "Planner" "Decision Engine" -> "Planner" : load actions scheduler "Planner" --> "Decision Engine" : planner plugin "Decision Engine" -> "Planner" : schedule actions activate "Planner" "Planner" -> "Planner"\ : schedule actions according to scheduling rules/policies "Decision Engine" <-- "Planner" : new action plan deactivate "Planner" "Decision Engine" -> "Database" : save new action plan in database "Decision Engine" -> "Database" : update audit.state = SUCCEEDED "AMQP Bus" <[#blue]- "Decision Engine" : notify new audit state = SUCCEEDED deactivate "Decision Engine" hnote over "Decision Engine" : Idle @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt0000664000175000017500000000254013656752270031561 0ustar zuulzuul00000000000000@startuml actor Administrator == Create some Audit settings == Administrator -> Watcher : create new Audit Template (i.e. Audit settings : goal, scope, ...) 
Watcher -> Watcher : save Audit Template in database Administrator <-- Watcher : Audit Template UUID == Launch a new Audit == Administrator -> Watcher : launch new Audit of the Openstack infrastructure resources\nwith a previously created Audit Template Administrator <-- Watcher : Audit UUID Administrator -> Watcher : get the Audit state Administrator <-- Watcher : ONGOING Watcher -> Watcher : compute a solution to achieve optimization goal Administrator -> Watcher : get the Audit state Administrator <-- Watcher : SUCCEEDED == Get the result of the Audit == Administrator -> Watcher : get Action Plan Administrator <-- Watcher : recommended Action Plan and estimated efficacy Administrator -> Administrator : verify the recommended actions\nand evaluate the estimated gain vs aggressiveness of the solution == Launch the recommended Action Plan == Administrator -> Watcher : launch the Action Plan Administrator <-- Watcher : Action Plan has been launched Watcher -> Watcher : trigger Actions on Openstack services Administrator -> Watcher : get the Action Plan state Administrator <-- Watcher : ONGOING Administrator -> Watcher : get the Action Plan state Administrator <-- Watcher : SUCCEEDED @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt0000664000175000017500000000252713656752270033042 0ustar zuulzuul00000000000000@startuml "AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid) "Watcher Applier" -> "Watcher Database" : action_plan.state=ONGOING "Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = ONGOING "Watcher Applier" -> "Watcher Database" : get_action_list(action_plan.uuid) "Watcher Applier" <-- "Watcher Database" : actions loop for each action of the action flow create Action "Watcher Applier" -> Action : instantiate Action object with target resource id\n and input parameters "Watcher Applier" -> Action : validate_parameters() "Watcher Applier" <-- Action : OK "Watcher Applier" 
-[#blue]> "AMQP Bus" : notify action state = ONGOING "Watcher Applier" -> Action : preconditions() "Watcher Applier" <-- Action : OK "Watcher Applier" -> Action : execute() alt action is "migrate instance" Action -> "Nova API" : migrate(instance_id, dest_host_id) Action <-- "Nova API" : OK else action is "disable hypervisor" Action -> "Nova API" : host-update(host_id, maintenance=true) Action <-- "Nova API" : OK end "Watcher Applier" <-- Action : OK "Watcher Applier" -> "Watcher Database" : action.state=SUCCEEDED "Watcher Applier" -[#blue]> "AMQP Bus" : notify action state = SUCCEEDED end "Watcher Applier" -> "Watcher Database" : action_plan.state=SUCCEEDED "Watcher Applier" -[#blue]> "AMQP Bus" : notify action plan state = SUCCEEDED @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_launch_action_plan.txt0000664000175000017500000000105113656752270030627 0ustar zuulzuul00000000000000@startuml actor Administrator Administrator -> "Watcher CLI" : watcher actionplan start "Watcher CLI" -> "Watcher API" : PATCH action_plan(state=PENDING) "Watcher API" -> "Watcher Database" : action_plan.state=PENDING "Watcher CLI" <-- "Watcher API" : HTTP 200 Administrator <-- "Watcher CLI" : OK "Watcher API" -> "AMQP Bus" : launch_action_plan(action_plan.uuid) "AMQP Bus" -> "Watcher Applier" : launch_action_plan(action_plan.uuid) ref over "Watcher Applier" Launch Action Plan in the Watcher Applier end ref @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt0000664000175000017500000000122413656752270031615 0ustar zuulzuul00000000000000@startuml actor Administrator Administrator -> "Watcher CLI" : watcher audit create -a "Watcher CLI" -> "Watcher API" : POST audit(parameters) "Watcher API" -> "Watcher Database" : create new audit in database (status=PENDING) "Watcher API" <-- "Watcher Database" : new audit uuid "Watcher CLI" <-- "Watcher API" : return new audit URL Administrator <-- "Watcher CLI" : new audit uuid "Watcher API" 
-> "AMQP Bus" : trigger_audit(new_audit.uuid) "AMQP Bus" -> "Watcher Decision Engine" : trigger_audit(new_audit.uuid) (status=ONGOING) ref over "Watcher Decision Engine" Trigger audit in the Watcher Decision Engine end ref @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/audit_state_machine.txt0000664000175000017500000000150313656752270027112 0ustar zuulzuul00000000000000@startuml [*] --> PENDING: Audit requested by Administrator PENDING --> ONGOING: Audit request is received\nby the Watcher Decision Engine ONGOING --> FAILED: Audit fails\n(Exception occurred) ONGOING --> SUCCEEDED: The Watcher Decision Engine\ncould find at least one Solution ONGOING --> SUSPENDED: Administrator wants to\nsuspend the Audit SUSPENDED --> ONGOING: Administrator wants to\nresume the Audit FAILED --> DELETED : Administrator wants to\narchive/delete the Audit SUCCEEDED --> DELETED : Administrator wants to\narchive/delete the Audit PENDING --> CANCELLED : Administrator cancels\nthe Audit ONGOING --> CANCELLED : Administrator cancels\nthe Audit CANCELLED --> DELETED : Administrator wants to\narchive/delete the Audit SUSPENDED --> DELETED: Administrator wants to\narchive/delete the Audit DELETED --> [*] @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/README.rst0000664000175000017500000000056713656752270024057 0ustar zuulzuul00000000000000plantuml ======== To build an image from a source file, you have to upload the plantuml JAR file available on http://plantuml.com/download.html. After, just run this command to build your image: .. 
code-block:: shell $ cd doc/source/images $ java -jar /path/to/plantuml.jar doc/source/image_src/plantuml/my_image.txt $ ls doc/source/images/ my_image.png python-watcher-4.0.0/doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt0000664000175000017500000000257613656752270031527 0ustar zuulzuul00000000000000@startuml skinparam maxMessageSize 100 actor "Administrator" == Initialization == "Administrator" -> "Decision Engine" : Start all services "Decision Engine" -> "Background Task Scheduler" : Start activate "Background Task Scheduler" "Background Task Scheduler" -> "Cluster Model Collector Loader"\ : List available cluster data models "Cluster Model Collector Loader" --> "Background Task Scheduler"\ : list of BaseClusterModelCollector instances loop for every available cluster data model collector "Background Task Scheduler" -> "Background Task Scheduler"\ : add periodic synchronization job create "Jobs Pool" "Background Task Scheduler" -> "Jobs Pool" : Create sync job end deactivate "Background Task Scheduler" hnote over "Background Task Scheduler" : Idle == Job workflow == "Background Task Scheduler" -> "Jobs Pool" : Trigger synchronization job "Jobs Pool" -> "Nova Cluster Data Model Collector" : synchronize activate "Nova Cluster Data Model Collector" "Nova Cluster Data Model Collector" -> "Nova API"\ : Fetch needed data to build the cluster data model "Nova API" --> "Nova Cluster Data Model Collector" : Needed data "Nova Cluster Data Model Collector" -> "Nova Cluster Data Model Collector"\ : Build an in-memory cluster data model ]o<-- "Nova Cluster Data Model Collector" : Done deactivate "Nova Cluster Data Model Collector" @enduml python-watcher-4.0.0/doc/source/image_src/plantuml/action_plan_state_machine.txt0000664000175000017500000000201613656752270030273 0ustar zuulzuul00000000000000@startuml [*] --> RECOMMENDED: The Watcher Planner\ncreates the Action Plan RECOMMENDED --> PENDING: Adminisrator launches\nthe Action Plan PENDING --> ONGOING: 
The Watcher Applier receives the request\nto launch the Action Plan ONGOING --> FAILED: Something failed while executing\nthe Action Plan in the Watcher Applier ONGOING --> SUCCEEDED: The Watcher Applier executed\nthe Action Plan successfully FAILED --> DELETED : Administrator removes\nAction Plan SUCCEEDED --> DELETED : Administrator removes\nAction Plan ONGOING --> CANCELLING : Administrator cancels\nAction Plan CANCELLING --> CANCELLED : The Watcher Applier cancelled\nthe Action Plan successfully CANCELLING --> FAILED : Something failed while cancelling\nthe Action Plan in the Watcher Applier RECOMMENDED --> CANCELLED : Administrator cancels\nAction Plan RECOMMENDED --> SUPERSEDED : The Watcher Decision Engine supersedes\nAction Plan PENDING --> CANCELLED : Administrator cancels\nAction Plan CANCELLED --> DELETED SUPERSEDED --> DELETED DELETED --> [*] @enduml python-watcher-4.0.0/doc/ext/0000775000175000017500000000000013656752352016074 5ustar zuulzuul00000000000000python-watcher-4.0.0/doc/ext/__init__.py0000664000175000017500000000000013656752270020172 0ustar zuulzuul00000000000000python-watcher-4.0.0/doc/ext/term.py0000664000175000017500000001236513656752270017423 0ustar zuulzuul00000000000000# Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals import importlib import inspect from docutils import nodes from docutils.parsers import rst from docutils import statemachine from watcher.version import version_string class BaseWatcherDirective(rst.Directive): def __init__(self, name, arguments, options, content, lineno, content_offset, block_text, state, state_machine): super(BaseWatcherDirective, self).__init__( name, arguments, options, content, lineno, content_offset, block_text, state, state_machine) self.result = statemachine.ViewList() def run(self): raise NotImplementedError('Must override run() is subclass.') def add_line(self, line, *lineno): """Append one line of generated reST to the output.""" self.result.append(line, rst.directives.unchanged, *lineno) def add_textblock(self, textblock): for line in textblock.splitlines(): self.add_line(line) def add_object_docstring(self, obj): obj_raw_docstring = obj.__doc__ or "" # Maybe it's within the __init__ if not obj_raw_docstring and hasattr(obj, "__init__"): if obj.__init__.__doc__: obj_raw_docstring = obj.__init__.__doc__ if not obj_raw_docstring: # Raise a warning to make the tests fail wit doc8 raise self.error("No docstring available for %s!" % obj) obj_docstring = inspect.cleandoc(obj_raw_docstring) self.add_textblock(obj_docstring) class WatcherTerm(BaseWatcherDirective): """Directive to import an RST formatted docstring into the Watcher glossary **How to use it** # inside your .py file class DocumentedObject(object): '''My *.rst* docstring''' # Inside your .rst file .. watcher-term:: import.path.to.your.DocumentedObject This directive will then import the docstring and then interpret it. 
""" # You need to put an import path as an argument for this directive to work required_arguments = 1 def run(self): cls_path = self.arguments[0] try: try: cls = importlib.import_module(cls_path) except ImportError: module_name, cls_name = cls_path.rsplit('.', 1) mod = importlib.import_module(module_name) cls = getattr(mod, cls_name) except Exception as exc: raise self.error(exc) self.add_object_docstring(cls) node = nodes.paragraph() node.document = self.state.document self.state.nested_parse(self.result, 0, node) return node.children class WatcherFunc(BaseWatcherDirective): """Directive to import a value returned by a func into the Watcher doc **How to use it** # inside your .py file class Bar(object): def foo(object): return foo_string # Inside your .rst file .. watcher-func:: import.path.to.your.Bar.foo node_classname node_classname is decumented here: http://docutils.sourceforge.net/docs/ref/rst/restructuredtext.html This directive will then import the value and then interpret it. """ # You need to put an import path as an argument for this directive to work # required_arguments = 1 # optional_arguments = 1 option_spec = {'format': rst.directives.unchanged} has_content = True def run(self): if not self.content: error = self.state_machine.reporter.error( 'The "%s" directive is empty; content required.' 
% self.name, nodes.literal_block(self.block_text, self.block_text), line=self.lineno) return [error] func_path = self.content[0] try: cls_path, func_name = func_path.rsplit('.', 1) module_name, cls_name = cls_path.rsplit('.', 1) mod = importlib.import_module(module_name) cls = getattr(mod, cls_name) except Exception as exc: raise self.error(exc) cls_obj = cls() func = getattr(cls_obj, func_name) textblock = func() if not isinstance(textblock, str): textblock = str(textblock) self.add_textblock(textblock) try: node_class = getattr(nodes, self.options.get('format', 'paragraph')) except Exception as exc: raise self.error(exc) node = node_class() node.document = self.state.document self.state.nested_parse(self.result, 0, node) return [node] def setup(app): app.add_directive('watcher-term', WatcherTerm) app.add_directive('watcher-func', WatcherFunc) return {'version': version_string} python-watcher-4.0.0/doc/ext/versioned_notifications.py0000664000175000017500000001022313656752270023372 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This provides a sphinx extension able to list the implemented versioned notifications into the developer documentation. It is used via a single directive in the .rst file .. 
versioned_notifications:: """ from docutils.parsers.rst import Directive from docutils import nodes from watcher.notifications import base as notification from watcher.objects import base class VersionedNotificationDirective(Directive): SAMPLE_ROOT = 'doc/notification_samples/' TOGGLE_SCRIPT = """ """ def run(self): notifications = self._collect_notifications() return self._build_markup(notifications) def _collect_notifications(self): base.WatcherObjectRegistry.register_notification_objects() notifications = [] ovos = base.WatcherObjectRegistry.obj_classes() for name, cls in ovos.items(): cls = cls[0] if (issubclass(cls, notification.NotificationBase) and cls != notification.NotificationBase): payload_name = cls.fields['payload'].objname payload_cls = ovos[payload_name][0] for sample in cls.samples: notifications.append((cls.__name__, payload_cls.__name__, sample)) return sorted(notifications) def _build_markup(self, notifications): content = [] cols = ['Event type', 'Notification class', 'Payload class', 'Sample'] table = nodes.table() content.append(table) group = nodes.tgroup(cols=len(cols)) table.append(group) head = nodes.thead() group.append(head) for _ in cols: group.append(nodes.colspec(colwidth=1)) body = nodes.tbody() group.append(body) # fill the table header row = nodes.row() body.append(row) for col_name in cols: col = nodes.entry() row.append(col) text = nodes.strong(text=col_name) col.append(text) # fill the table content, one notification per row for name, payload, sample_file in notifications: event_type = sample_file[0: -5].replace('-', '.') row = nodes.row() body.append(row) col = nodes.entry() row.append(col) text = nodes.literal(text=event_type) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=name) col.append(text) col = nodes.entry() row.append(col) text = nodes.literal(text=payload) col.append(text) col = nodes.entry() row.append(col) with open(self.SAMPLE_ROOT + sample_file, 'r') as f: sample_content = f.read() 
event_type = sample_file[0: -5] html_str = self.TOGGLE_SCRIPT % ((event_type, ) * 3) html_str += ("" % event_type) html_str += ("
%s
" % (event_type, sample_content)) raw = nodes.raw('', html_str, format="html") col.append(raw) return content def setup(app): app.add_directive('versioned_notifications', VersionedNotificationDirective) python-watcher-4.0.0/requirements.txt0000664000175000017500000000325513656752270020017 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. apscheduler>=3.5.1 # MIT License jsonpatch>=1.21 # BSD keystoneauth1>=3.4.0 # Apache-2.0 jsonschema>=2.6.0 # MIT keystonemiddleware>=4.21.0 # Apache-2.0 lxml>=4.1.1 # BSD croniter>=0.3.20 # MIT License os-resource-classes>=0.4.0 oslo.concurrency>=3.26.0 # Apache-2.0 oslo.cache>=1.29.0 # Apache-2.0 oslo.config>=5.2.0 # Apache-2.0 oslo.context>=2.21.0 # Apache-2.0 oslo.db>=4.35.0 # Apache-2.0 oslo.i18n>=3.20.0 # Apache-2.0 oslo.log>=3.37.0 # Apache-2.0 oslo.messaging>=8.1.2 # Apache-2.0 oslo.policy>=1.34.0 # Apache-2.0 oslo.reports>=1.27.0 # Apache-2.0 oslo.serialization>=2.25.0 # Apache-2.0 oslo.service>=1.30.0 # Apache-2.0 oslo.upgradecheck>=0.1.0 # Apache-2.0 oslo.utils>=3.36.0 # Apache-2.0 oslo.versionedobjects>=1.32.0 # Apache-2.0 PasteDeploy>=1.5.2 # MIT pbr>=3.1.1 # Apache-2.0 pecan>=1.3.2 # BSD PrettyTable<0.8,>=0.7.2 # BSD gnocchiclient>=7.0.1 # Apache-2.0 python-ceilometerclient>=2.9.0 # Apache-2.0 python-cinderclient>=3.5.0 # Apache-2.0 python-glanceclient>=2.9.1 # Apache-2.0 python-keystoneclient>=3.15.0 # Apache-2.0 python-monascaclient>=1.12.0 # Apache-2.0 python-neutronclient>=6.7.0 # Apache-2.0 python-novaclient>=14.1.0 # Apache-2.0 python-openstackclient>=3.14.0 # Apache-2.0 python-ironicclient>=2.5.0 # Apache-2.0 six>=1.11.0 # MIT SQLAlchemy>=1.2.5 # MIT stevedore>=1.28.0 # Apache-2.0 taskflow>=3.7.1 # Apache-2.0 WebOb>=1.8.5 # MIT WSME>=0.9.2 # MIT networkx>=2.2;python_version>='3.4' # BSD microversion_parse>=0.2.1 # Apache-2.0 
futurist>=1.8.0 # Apache-2.0 python-watcher-4.0.0/setup.cfg0000664000175000017500000001310213656752352016345 0ustar zuulzuul00000000000000[metadata] name = python-watcher summary = OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. description-file = README.rst author = OpenStack author-email = openstack-discuss@lists.openstack.org home-page = https://docs.openstack.org/watcher/latest/ python-requires = >=3.6 classifier = Environment :: OpenStack Intended Audience :: Information Technology Intended Audience :: System Administrators License :: OSI Approved :: Apache Software License Operating System :: POSIX :: Linux Programming Language :: Python Programming Language :: Python :: Implementation :: CPython Programming Language :: Python :: 3 :: Only Programming Language :: Python :: 3 Programming Language :: Python :: 3.6 Programming Language :: Python :: 3.7 [files] packages = watcher data_files = etc/ = etc/* [entry_points] oslo.config.opts = watcher = watcher.conf.opts:list_opts oslo.policy.policies = watcher = watcher.common.policies:list_rules oslo.policy.enforcer = watcher = watcher.common.policy:get_enforcer console_scripts = watcher-api = watcher.cmd.api:main watcher-db-manage = watcher.cmd.dbmanage:main watcher-decision-engine = watcher.cmd.decisionengine:main watcher-applier = watcher.cmd.applier:main watcher-sync = watcher.cmd.sync:main watcher-status = watcher.cmd.status:main wsgi_scripts = watcher-api-wsgi = watcher.api.wsgi:initialize_wsgi_app watcher.database.migration_backend = sqlalchemy = watcher.db.sqlalchemy.migration watcher_goals = unclassified = watcher.decision_engine.goal.goals:Unclassified dummy = watcher.decision_engine.goal.goals:Dummy server_consolidation = watcher.decision_engine.goal.goals:ServerConsolidation thermal_optimization = watcher.decision_engine.goal.goals:ThermalOptimization workload_balancing = watcher.decision_engine.goal.goals:WorkloadBalancing 
airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization saving_energy = watcher.decision_engine.goal.goals:SavingEnergy hardware_maintenance = watcher.decision_engine.goal.goals:HardwareMaintenance cluster_maintaining = watcher.decision_engine.goal.goals:ClusterMaintaining watcher_scoring_engines = dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer watcher_scoring_engine_containers = dummy_scoring_container = watcher.decision_engine.scoring.dummy_scoring_container:DummyScoringContainer watcher_strategies = dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy dummy_with_scorer = watcher.decision_engine.strategy.strategies.dummy_with_scorer:DummyWithScorer dummy_with_resize = watcher.decision_engine.strategy.strategies.dummy_with_resize:DummyWithResize actuator = watcher.decision_engine.strategy.strategies.actuation:Actuator basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation outlet_temperature = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl saving_energy = watcher.decision_engine.strategy.strategies.saving_energy:SavingEnergy vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation workload_stabilization = watcher.decision_engine.strategy.strategies.workload_stabilization:WorkloadStabilization workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance zone_migration = watcher.decision_engine.strategy.strategies.zone_migration:ZoneMigration 
host_maintenance = watcher.decision_engine.strategy.strategies.host_maintenance:HostMaintenance node_resource_consolidation = watcher.decision_engine.strategy.strategies.node_resource_consolidation:NodeResourceConsolidation watcher_actions = migrate = watcher.applier.actions.migration:Migrate nop = watcher.applier.actions.nop:Nop sleep = watcher.applier.actions.sleep:Sleep change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState resize = watcher.applier.actions.resize:Resize change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState volume_migrate = watcher.applier.actions.volume_migration:VolumeMigrate watcher_workflow_engines = taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine watcher_planners = weight = watcher.decision_engine.planner.weight:WeightPlanner workload_stabilization = watcher.decision_engine.planner.workload_stabilization:WorkloadStabilizationPlanner node_resource_consolidation = watcher.decision_engine.planner.node_resource_consolidation:NodeResourceConsolidationPlanner watcher_cluster_data_model_collectors = compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector [compile_catalog] directory = watcher/locale domain = watcher [update_catalog] domain = watcher output_dir = watcher/locale input_file = watcher/locale/watcher.pot [extract_messages] keywords = _ gettext ngettext l_ lazy_gettext _LI _LW _LE _LC mapping_file = babel.cfg output_file = watcher/locale/watcher.pot [egg_info] tag_build = tag_date = 0 python-watcher-4.0.0/rally-jobs/0000775000175000017500000000000013656752352016605 5ustar zuulzuul00000000000000python-watcher-4.0.0/rally-jobs/watcher-watcher.yaml0000664000175000017500000000253213656752270022562 0ustar 
zuulzuul00000000000000--- Watcher.create_audit_and_delete: - runner: type: "constant" times: 10 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 audit_templates: audit_templates_per_admin: 5 fill_strategy: "round_robin" params: - goal: name: "dummy" strategy: name: "dummy" sla: failure_rate: max: 0 Watcher.create_audit_template_and_delete: - args: goal: name: "dummy" strategy: name: "dummy" runner: type: "constant" times: 10 concurrency: 2 sla: failure_rate: max: 0 Watcher.list_audit_templates: - runner: type: "constant" times: 10 concurrency: 2 context: users: tenants: 2 users_per_tenant: 2 audit_templates: audit_templates_per_admin: 5 fill_strategy: "random" params: - goal: name: "workload_balancing" strategy: name: "workload_stabilization" - goal: name: "dummy" strategy: name: "dummy" sla: failure_rate: max: 0 python-watcher-4.0.0/rally-jobs/README.rst0000664000175000017500000000261513656752270020277 0ustar zuulzuul00000000000000Rally job ========= We provide, with Watcher, a Rally plugin you can use to benchmark the optimization service. To launch this task with configured Rally you just need to run: :: rally task start watcher/rally-jobs/watcher-watcher.yaml Structure --------- * plugins - directory where you can add rally plugins. Almost everything in Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic cleanup resources, .... * extra - all files from this directory will be copy pasted to gates, so you are able to use absolute paths in rally tasks. 
Files will be located in ~/.rally/extra/* * watcher.yaml is a task that is run in gates against OpenStack deployed by DevStack Useful links ------------ * How to install: https://docs.openstack.org/rally/latest/install_and_upgrade/install.html * How to set Rally up and launch your first scenario: https://rally.readthedocs.io/en/latest/quick_start/tutorial/step_1_setting_up_env_and_running_benchmark_from_samples.html * More about Rally: https://docs.openstack.org/rally/latest/ * Rally project info and release notes: https://docs.openstack.org/rally/latest/project_info/index.html * How to add rally-gates: https://docs.openstack.org/rally/latest/quick_start/gates.html#gate-jobs * About plugins: https://docs.openstack.org/rally/latest/plugins/index.html * Plugin samples: https://github.com/openstack/rally/tree/master/samples/ python-watcher-4.0.0/tox.ini0000664000175000017500000000765513656752270016056 0ustar zuulzuul00000000000000[tox] minversion = 2.0 envlist = py36,py37,pep8 skipsdist = True ignore_basepython_conflict = True [testenv] basepython = python3 usedevelop = True whitelist_externals = find rm install_command = pip install {opts} {packages} setenv = VIRTUAL_ENV={envdir} deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri} -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = rm -f .testrepository/times.dbm find . 
-type f -name "*.py[c|o]" -delete stestr run {posargs} passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY [testenv:pep8] commands = doc8 doc/source/ CONTRIBUTING.rst HACKING.rst README.rst flake8 bandit -r watcher -x watcher/tests/* -n5 -ll -s B320 [testenv:venv] setenv = PYTHONHASHSEED=0 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/ussuri} -r{toxinidir}/doc/requirements.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt commands = {posargs} [testenv:cover] setenv = PYTHON=coverage run --source watcher --parallel-mode commands = stestr run {posargs} coverage combine coverage html -d cover coverage xml -o cover/coverage.xml coverage report [testenv:docs] setenv = PYTHONHASHSEED=0 deps = -r{toxinidir}/doc/requirements.txt commands = rm -fr doc/build doc/source/api/ .autogenerated sphinx-build -W --keep-going -b html doc/source doc/build/html [testenv:api-ref] deps = -r{toxinidir}/doc/requirements.txt whitelist_externals = bash commands = bash -c 'rm -rf api-ref/build' sphinx-build -W --keep-going -b html -d api-ref/build/doctrees api-ref/source api-ref/build/html [testenv:debug] commands = oslo_debug_helper -t watcher/tests {posargs} [testenv:genconfig] sitepackages = False commands = oslo-config-generator --config-file etc/watcher/oslo-config-generator/watcher.conf [testenv:genpolicy] commands = oslopolicy-sample-generator --config-file etc/watcher/oslo-policy-generator/watcher-policy-generator.conf [flake8] filename = *.py,app.wsgi show-source=True # W504 line break after binary operator ignore= H105,E123,E226,N320,H202,W504 builtins= _ enable-extensions = H106,H203,H904 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,*sqlalchemy/alembic/versions/*,demo/,releasenotes [testenv:wheel] commands = python setup.py bdist_wheel [hacking] import_exceptions = watcher._i18n [flake8:local-plugins] extension = N319 = checks:no_translate_debug_logs N321 = checks:use_jsonutils N322 = 
checks:check_assert_called_once_with N325 = checks:check_python3_xrange N326 = checks:check_no_basestring N327 = checks:check_python3_no_iteritems N328 = checks:check_asserttrue N329 = checks:check_assertfalse N330 = checks:check_assertempty N331 = checks:check_assertisinstance N332 = checks:check_assertequal_for_httpcode N333 = checks:check_log_warn_deprecated N340 = checks:check_oslo_i18n_wrapper N341 = checks:check_builtins_gettext N342 = checks:no_redundant_import_alias paths = ./watcher/hacking [doc8] extension=.rst # todo: stop ignoring doc/source/man when https://bugs.launchpad.net/doc8/+bug/1502391 is fixed ignore-path=doc/source/image_src,doc/source/man,doc/source/api [testenv:pdf-docs] envdir = {toxworkdir}/docs deps = {[testenv:docs]deps} whitelist_externals = rm make commands = rm -rf doc/build/pdf sphinx-build -W --keep-going -b latex doc/source doc/build/pdf make -C doc/build/pdf [testenv:releasenotes] deps = -r{toxinidir}/doc/requirements.txt commands = sphinx-build -a -W -E -d releasenotes/build/doctrees --keep-going -b html releasenotes/source releasenotes/build/html [testenv:bandit] deps = -r{toxinidir}/test-requirements.txt commands = bandit -r watcher -x watcher/tests/* -n5 -ll -s B320 [testenv:lower-constraints] deps = -c{toxinidir}/lower-constraints.txt -r{toxinidir}/test-requirements.txt -r{toxinidir}/requirements.txt python-watcher-4.0.0/ChangeLog0000664000175000017500000015063413656752351016311 0ustar zuulzuul00000000000000CHANGES ======= 4.0.0 ----- * Imported Translations from Zanata * Update TOX\_CONSTRAINTS\_FILE for stable/ussuri * Update .gitreview for stable/ussuri 4.0.0.0rc1 ---------- * convert EfficacyIndicator.value to float type * Remove six[6] * Remove six[5] * Remove six[4] * Remove six[3] * Remove six[2] * Remove six[1] * update description about audit argument interval * remove wsmeext.sphinxext * Cleanup py27 support * Block Sphinx 3.0.0 * Update hacking for Python3 * Removed py27 in testing doc * Add procname for uwsgi 
based service watcher-api * just set necessary config options * simplify doc directory * Add config option enable\_webhooks\_auth * api-ref: Add webhook API reference * Doc: Add EVENT audit description * Community Goal: Project PTL & Contrib Docs Update * Add api version history * releasenotes: Fix reference url * Add releasenote for event-driven-optimization-based * doc: move Concurrency doc to admin guide * doc for event type audit * Move install doc to user guide * Update user guide doc * Add webhook api * Fix duplicated words issue like "an active instance instance" * Add audit type: event * Add list datamodel microversion to api-ref * Add a new microversion for data model API * Releasenote for decision engine threadpool * Use enum class define microversions * Start README.rst with a better title * Change self.node to self.nodes in model\_root * Documentation on concurrency for contributors * replace host\_url with application\_url * Migrate grenade jobs to py3 * [ussuri][goal] Drop python 2.7 support and testing * Refactoring the codes about getting used and free resources * Use threadpool when building compute data model * General purpose threadpool for decision engine * tox: Keeping going with docs * Switch to Ussuri jobs * Don't throw exception when missing metrics * Remove print() * Update master for stable/train 3.0.0.0rc1 ---------- * Fix damodel list return None error When has a compute node * Fix misspelling * skip deleted instance when creating datamodel * Fix unit test failed * Watcher planner slector releasenote * Set strategy planner * Get planner from solution * Build pdf docs * update test about cinderclient v1 * correct watcher project for oslo\_config * Add node resource consolidation planner * Watcher Planner Selector * Add releasenote about bp show-datamodel-api * node resource consolidation * Fix misspell word * Remove redundant word 'strategy' * Add node\_resource\_consolidation doc * Add watcher-specs link to readme.rst * Add get node used 
and free resources * Implement watcher datamodel list in watcher-api * Implement watcher datamodel list in watcher-decision-engine * Add api-ref doc for data model api * add audit parameter to do\_execute * improve strategies tempest * add placement min\_microversion * set compute min\_microversion * [train][goal] Define new 'watcher-tempest-functional-ipv6-only' job * Remove unused disk\_capacity field * Don't revert Migrate action * update workload\_balance strategy * update node resource capacity for basic\_consolidation * update host\_maintenance strategy * update noisy\_neighbor strategy * update outlet\_temp\_control strategy * add releasenote for bp improve-compute-data-model * update vm\_workload\_consolidation strategy * Remove resource used fields from ComputeNode * Fix var src\_extra\_specs error * Remove stale comment in method execute() * Add resource capacity property * Getting data from placement when updating datamodel * replace disk\_capacity by disk * set disk field to disk capacity * Check resource class before using * remove id field from CDM * Update api-ref location * Improve Compute Data Model * Add call\_retry for ModelBuilder for error recovery * Remove useless gconfig process in watcher/api/scheduling.py * Optimize method list\_opts() in watcher/conf/opts.py * Fix watcher/conf/applier.py default worker value * Remove useless \_opts.py * Baseclass for ModelBuilder with audit scope * Move datasources folder into decision\_engine * Add reource\_name for save\_energy in action input parameter field * Add get\_compute\_node\_by\_uuid * Resolve aggregate error in workload\_stabilization * Remove redundant human\_id fields when creating and updating datamodel * Replace human\_id with name in grafana doc * Add marker option for get\_instance\_list() * remove baremetal nodes when building CDM * Add reource\_name for zone\_migration in action input parameter field * Grafana proxy datasource to retrieve metrics * Add reource\_name in action input 
parameter field * Add get\_node\_by\_name * Reduce the query time of the instances when call get\_instance\_list() * remove baremetal nodes from hypversior list * Remove notifier\_driver option in Watcher devstack * Improve logging in building of nova data model * Releasenote for grafana datasource * improve OptGroup consistency across configuration * Blacklist sphinx 2.1.0 (autodoc bug) * Add Python 3 Train unit tests * Fix invalid assert states * Add name field for test data * Add uWSGI support * Add name for instance in Watcher datamodel * Documentation configuring grafana datasource * Configure nova notification\_format for grenade * Fix placement\_client group help docs generation * Improve the configuration parameters for grafana * Configure nova notification format in non-grenade CI jobs * improve the process of instance\_created.end * remove tail\_log * Update strategy doc * Implement the configuration for Grafana datasource * Fix missing print format * typo ceilometer url * Replace removed exceptions and prevent regression * Define a new InstanceNotMapped exception * Move datasource query\_retry into baseclass * Fix base enable\_plugin branch for grenade run * Remove dead code * Map instance to its node * update contraints url * Backwards compatibility for node parameter * Fix property access in test\_global\_preference\* tests * Add Placement helper * Cleanup ConfFixture * Fix string formatting * check instance state for instance.update * add strategy tempest job * Remove apidoc * Optimize NovaHelper.get\_compute\_node\_by\_hostname * Optimize hypervisor API calls * Add missing ws separator between words * Group instance methods together in nova\_helper * Audit API supports new force option * Optimize NovaClusterDataModelCollector.add\_instance\_node * Fix test\_metric\_file\_override metric from backend * Add force field to Audit * Remove 2.56 version compatibility check * Require nova\_client.api\_version >= 2.56 * Improve exceptions and logging in ds 
manager * Improve DevStack documentation to support metrics * formal datasource interface implementation * Improve Gnocchi and Monasca datasource tests * Allow using file to override metric map * support-keystoneclient-option * Fix typo in ceilometer datasource * Handle no nova CDM in notification code * Remove unused utilities file * Update migration notification * Remove bandit from lower-constraints * Update Sphinx requirement * Fix Stein version in watcher-status docs * Add doc/requirements.txt to venv tox target * Remove dead code from NovaClusterDataModelCollector * Enhance the collector\_plugins option help text * Use base\_strategy's add\_action\_migrate method * Fix\_inappropriate\_name * update api version history * allow building docs without ceilometer client * pass default\_config\_dirs variable for config initialization * docs: fix link to install guide from user guide * Remove watcher.openstack.common=WARN from \_DEFAULT\_LOG\_LEVELS * Add force field to api-ref * Fix API version header * Remove unused exceptions * Fix bandit runs with 1.6.0 * Allow for global datasources preference from config * Use the common logging setup function in devstack runs * Fix reraising of exceptions * Using node replace resource\_id in method add\_action\_disable\_node() * Put the method add\_migration() in base.py * update wsme types * Add tempest voting * Resolve problems with audit scope and add tests * Replace git.openstack.org with opendev.org * Add hardware.cpu\_util in workload\_stabilization * Drop use of git.openstack.org * OpenDev Migration Patch * separate launching audit scheduler * Replace HOST\_IP to SERVICE\_HOST * remove py35 * Uncap jsonschema * Fix docs gate failed * Adapt Watcher to Python3.7 * Move eventlet monkey patch code * Fix lower-constraint deps handling * Fix openstack-tox-lower-constraint TIMED\_OUT Error * Update meeting schedule to new bi-weekly format * Make datasource methods match names of metrics * Replace openstack.org git:// URLs 
with https:// * Imported Translations from Zanata * Update master for stable/stein 2.0.0 ----- * Move client function test to watcher-tempest-plugin * Access to action's uuid by key * Migrate legacy jobs to Ubuntu Bionic * releasenote for data model scope * Fix unittest failed * Remove unused type check 'int' in audit.py * Generalize exceptions & structure of strategies * scope for datamodel * Fix inappropriate description about the audit\_state\_machine.png * improve \_collect\_aggregates * Provide two arguments to exception's message * make ceilometer client import optional * Fix uniform airflow strategy config parameter * Fix outlet\_temp\_control config parameter * remove config parameter 'datasource' * Add the define of vm\_workload\_consolidation job * change config parameter from 'datasource' to 'datasources' * Move datasources metric mappings out of base.py * function get\_sd return 0 early if len(hosts) is 0 * Update storage\_balance job * Add storage balance job * Update user guide * Fix E731 error * trivial * [Trivial fix] Do not use self in classmethod * Add grenade job * Update hacking version * Add version api ref * update api-ref for audit start/end time * Use template for lower-constraints * Remove unused modules * Add host maintenance tempest * Fix mailing list archive URL * Fix stop\_watcher function * Deprecate Ceilometer Datasource * Fix doc about nova notifications * Remove hostname-related playbooks * audit create request can't set scope * Change openstack-dev to openstack-discuss channel * Increase the unit test coverage of host\_maintenance.py * update doc for install ubuntu * Fix spelling error in the comments of file host\_maintenance.py * Add audit scoper for baremetal data model * Increase the unit test coverage of vm\_workload\_consolidation.py * Fix audit\_template\_uuid description * start tls-proxy (if enabled) before checking for api * remove older api doc * Add missing ws separator between words * Update doc for 
vm\_workload\_consolidation strategy * Increase the unit test coverage of cinder\_helper.py * Increase the unit test coverage of nova\_helper.py * Fix version header in the response * Enhance Watcher Applier Engine * Remove unsuitable brackets * Imported Translations from Zanata * To avoid ambiguity for flavor\_id * Fix accessing to optional cinder pool attributes * Add cover job * Remove redundant docstring * optimize get\_instances\_by\_node * Adjust the same format as above * Make watcherclient-tempest-functional test non-voting * Add detailed unit test documentation for watcher * Update min tox version to 2.0 * Fix parameter type for cinder pool * update datamodel by nova notifications * API Microversioning * remove set\_host\_offline * Watcher doesn't need paramiko * Don't need nova notifications * Fix oslo\_versionedobjects warnings * Add framework for watcher-status upgrade check * Update documentation regarding DataSource for strategies * Use limit -1 for nova servers list * tenant\_id should be project\_id in instance element * add start and end time for continuous audit * Remove uses of rpc\_backend (oslo\_config) * Don't quote {posargs} in tox.ini * Do not pass www\_authenticate\_uri to RequestContext * remove nova legacy notifications * Fix link to Watcher API * Fix audit creation with named goal and strategy * Provide region name while initialize clients * Add efficacy indicators for workload\_stabilization strategy * ignore .testrepository * Fix wrong audit scope * add python 3.6 unit test job * switch documentation job to new PTI * Follow the new PTI for building docs * Imported Translations from Zanata * Remove warning log in common.context * Remove hosts if can't find hosts in host agrregate * Fix goal method in policy * import zuul job settings from project-config * Remove -u root as mysql is executed with root user * Improve logs of Workload Stabilization strategy * Imported Translations from Zanata * Add hostname to API Reference * Update reno 
for stable/rocky 1.12.0 ------ * Fix TypeError in LOG.debug * fix unit test:test\_execute\_audit\_with\_interval\_no\_job * improve strategy doc * remove get\_flavor\_instance * Fix unittest MismatchError * only check decision engine service * remove extra'\_' and space * remove voluptuous * Update watcher-db-manage help doc * Fix strategies with additional time to initialize CDM * Add apscheduler\_jobs table to models * Fix AttributeError exception 1.11.0 ------ * Rescheduling continuous audits from FAILED nodes * Add HA support * Add noisy neighbor strategy doc * Fix service task interval * Add noisy neighbor description * remove LOG definitions that have not been used * trivial: fix strategy name * update Ubuntu version from 14.04 to 16.04 * Update host\_maintenance doc * Check job before removing it * update monascaclient version * Sync CDM among Decision Engines by using notification pool * Add actionplan list detail api ref * Remove help message about ZeroMQ driver * Switch to stestr * Remove non-voting jobs from gate queue * Remove undefined job * Triggers the api-ref-jobs to publish wather api reference * Fix unit test error * Use jsonschema to validate efficacy indicators * fix the rule name * Correcting url in action\_plan policy 1.10.0 ------ * fix tox python3 overrides * replace windows line endings with unix line endings * Restore requirements versions * Switch to oslo\_messaging.ConfFixture.transport\_url * Add API Reference for Watcher * Amend the spelling error of a word * add doc for host\_maintenance * Update pypi url to new url * Update storage CDM collector * Replace port 35357 with 5000 for test\_clients.py * Add Cinder Cluster Data Model Collector test case * add strategy host\_maintenance * Trivial: update url to new url * Fix to reuse RabbitMQ connection * Refactor watcher API for Action Plan Start * Update auth\_url in install docs * Updated tests on bug, when get list returns deleted items * Fix the openstack endpoint create failed * 
Update the default value for nova api\_verison * Moved do\_execute method to AuditHandler class * Fix typo in StorageCapacityBalance * Grouped \_add\_\*\_filters methods together * Replace of private \_create methods in tests * Exclude Project By Audit Scope * add strategy doc:storage capacity balance * Update requirements 1.9.0 ----- * add unittest for execute\_audit in audit/continuous.py * amend delete action policy * Replace cold migration to use Nova migration API * Add release notes link to README * Trivial fix of saving\_energy strategy doc * Update auth\_uri option to www\_authenticate\_uri * Added \_get\_model\_list base method for all get\_\*\_list methods * Trivial fix of user guide doc * zuulv3 optimization * Enable mutable config in Watcher * Several fixes of strategies docs * set one worker for watcherclient-tempest-functional job * Remove obsolete playbooks of legacy jobs * Updated from global requirements * add lower-constraints job * Replaced deprecated oslo\_messaging\_rabbit section * ZuulV3 jobs * Delete the unnecessary '-' * Fix sort of \*list command output * Remove version/date from CLI documentation * Adding driver to mysql connection URL * Updated from global requirements * ignore useless WARNING log message * Updated from global requirements * Add the missing markups for the hyperlink titles * Change the outdated links to the latest links in README * basic\_cons fix * Revert "Update OpenStack Installation Tutorial to Rocky" * Add parameter aggregation\_method for basic\_consolidation * Imported Translations from Zanata * Delete the unnecessary '-' * Update OpenStack Installation Tutorial to Rocky * Add parameter aggregation\_method for work\_stab * basic\_consolidation trivial fix * Fix Uuid and virtual\_free elements load error * Fix exception string format * Imported Translations from Zanata * Add the missing title of Configuration Guide * Fix change\_nova\_service\_state action * Updated Hacking doc * [Trivialfix]Modify a grammatical 
error * Fix old url links in doc * Add a hacking rule for string interpolation at logging * Complete schema of workload\_stabilization strategy * filter exclude instances during migration * Fix grammar errors * workload\_stabilization trivial fix * Updated from global requirements * Imported Translations from Zanata * Add support for networkx v2.0 * Updated from global requirements * Fix some dead link in docs * Update meeting time on odd weeks * fix misspelling of 'return' * Add missing release notes * Imported Translations from Zanata * Update reno for stable/queens 1.8.0 ----- * Zuul: Remove project name * Fix issues with aggregate and granularity attributes * Repalce Chinese double quotes to English double quotes * Fix get\_compute\_node\_by\_hostname in nova\_helper * Add zone migration strategy document * Updated from global requirements * Fixed AttributeError in storage\_model * Update zone\_migration comment * Zuul: Remove project name * Updated from global requirements * [Doc] Add actuator strategy doc * Imported Translations from Zanata * Remove redundant import alias * Fix strategy state * Add datasources to strategies 1.7.0 ----- * Add baremetal strategy validation * Strategy requirements * Add zone migration strategy * Fix workload\_stabilization unavailable nodes and instances * Update unreachable link * Updated from global requirements * Fix compute api ref link * Adapt workload\_balance strategy to multiple datasource backend * Adapt noisy\_neighbor strategy to multiple datasource backend * Adapt basic\_consolidation strategy to multiple datasource backend * check audit name length * Audit scoper for storage CDM * Imported Translations from Zanata * Update link address * Fix tempest devstack error * Add storage capacity balance Strategy * Updated from global requirements * Adapt workload\_stabilization strategy to new datasource backend * Updated from global requirements * Update pike install supermark to queens * Add the title of API Guide * Fix 
compute scope test bug * Add baremetal data model * Set apscheduler logs to WARN level * Fix configuration doc link * update audit API description * update action API description * use current weighted sd as min\_sd when starting to simulate migrations * correct audit parameter typo * Updated from global requirements * Fix watcher audit list command * check actionplan state when deleting actionplan * TrivialFix: remove redundant import alias * check audit state when deleting audit * reset job interval when audit was updated * Updated from global requirements * Fix releasenotes build * Update getting scoped storage CDM * Updated from global requirements * Fix 'unable to exclude instance' * Register default policies in code * listen to 'compute.instance.rebuild.end' event 1.6.0 ----- * Updated from global requirements * bug fix remove volume migration type 'cold' * Add and identify excluded instances in compute CDM * Server with PAUSE status can also live-migrate * Fix migrate action with migration\_type 'cold' * Updated from global requirements * Add Datasource Abstraction * Make gnocchi as default datasource * Updated from global requirements * Fix Bug Unable to get scoped data model * listen to 'compute.instance.resize.confirm.end' event * Notifications Changes Multiple Global Efficacy * 'get\_volume\_type\_by\_backendname' returns a list * Add app.wsgi to target of pep8 * [Doc] Fix ubuntu version in devstack installation * Remove setting of version/release from releasenotes * Updated from global requirements * Updated from global requirements * Migrate to Zuul v3 * Fix test runner config issues with os-testr 1.0.0 * Multiple global efficacy * Do not use “-y†for package install * check task\_state in the live\_migrate\_instance * Change HTTP to HTTPS * Updated from global requirements * Can't cancell CONTINUOUS audit * add name for audit, update audit notifications * Update doc and add release note about cdm-scoping * Update the useful links for Rally job * update 
API ref doc for ScoringEngine * Fix the strategy path of outlet\_temp\_control.py * Optimize the link address * Imported Translations from Zanata * Fix a typo * Unify the oslo\_log import usage * Optimise indentation for db client * Correct the schema format * add name for audit, changes for watcher api/db 1.5.0 ----- * Update the nova api\_version default value to 2.53 * Correct the instance migration link * Optimize check\_migrated in cinder\_helper.py * Optimize live\_migrate\_instance * Updated from global requirements * Add saving energy strategy description * Add documentation about saving energy strategy * Invoke version\_string in watcher/version.py directly * Fix \_build\_instance\_node for building Compute CDM * writing convention: do not use “-y†for package install * Update OpenStack Installation Tutorial to pike * Remove explicitly enable neutron * Fix the telemetry-measurements hyperlink for strategies * optimize update\_audit\_state * Optimize the import format by pep8 * Remove the unnecessary word * Fix TypeError in function chunkify * Fix action plan state change when action failed * Remove installation guide for openSUSE and SLES * Notification Cancel Action Plan * Fix migrate action failure * Add exception log when migrate action failed * Add cdm-scoping * [Doc] Fix host option * Use Property setters * Update the description for controller node * Updated from global requirements * cleanup test-requirements * Update the "IAAS" to "IaaS" * Correct the link for watcher cli * Update the documentation link for doc migration * extend-node-status * Updated from global requirements * Fix Watcher DB schema creation * Fix Action 'change\_node\_power\_state' FAILED * Updated from global requirements * Fix incorrect config section name of configure doc * Fix Gate Failure * Remove redundant right parenthesis * Utils: fix usage of strtime * Update the documentation link for doc migration * iso8601.is8601.Utc No Longer Exists * Remove the unused rootwrap config 
* Remove unused efficacy indicators * Replace DbMigrationError with DBMigrationError * Replace default gnocchi endpoint type * Fix gnocchiclient creation * Fix DEFAULT\_SCHEMA to validate host\_aggreates * Updated from global requirements * Modify display\_name in strategy documentation * [Trivialfix]Fix typos in watcher * Restrict existing strategies to their default scope * Update default Nova API version to 2.53(Pike) * Fix to use . to source script files * Fix to use "." to source script files * Update the documentation link for doc migration * Updated from global requirements * Updated from global requirements * Remove unnecessary dict.keys() method calls (api) * Update the documention for doc migration * Remove watcher\_tempest\_plugin * Updated from global requirements * Fix KeyError exception * Remove pbr warnerrors * Adjust the action state judgment logic * Update reno for stable/pike 1.4.0 ----- * workload balance base on cpu or ram util * [Doc] Fix db creation * get\_config\_opts method was overwritten * Replace map/filter lambda with comprehensions * change ram util metric * Fix failure to load storage plugin * Fix exception.ComputeNodeNotFound * Updated from global requirements * Change exception class from monascaclient * Fix gnocchi repository URL in local.conf.controller * Fix ironic client input parameter * Fix show db version in README * Removed unnecessary setUp calls in tests * Fix compute CDM to include disabled compute node * Update State diagram of Action Plan * Modification of statistic\_aggregation method * Fix incorrect action status in notifications * Added Actuator Strategy * [Doc] Update software version * Fix continuous audit fails once it fails * Updated from global requirements * Fix Hardcoded availability zone in nova-helper * Saving Energy Strategy 1.3.0 ----- * Fix gate-watcher-python27-ubuntu-xenial FAILURE * dynamic action description * [Doc] Add cinder to architecture diagram * Add release notes for Pike * [Doc] Add Configure 
Cinder Notifications * Update the documention for doc migration * Remove all sphinx warnings * Update the documention for doc migration * Replace voluptuous with JSONSchema in BaseAction * Update URLs in documents according to document migration * Updated from global requirements * Remove testenv for install-guide * Add volume migrate action * Fix devstack plugin * Enable migration to rely on nova-scheduler * Update default ironic endpoint type * Updated from global requirements * remove useless logging * New cron type for audit interval * Fix dbmanage upgrade and downgrade * Update weekly meetings time in docs * Add title to administrator guide * Abort operation for live migration * [Doc] Add gnocchi to system architecture diagram * Ignore autogenerated sample config file * bug fix: Can't get sample through CeilometerHelper * Replace voluptuous with JSONSchema to validate change\_node\_power\_state * move doc/source/webapi content to doc/source/api * Cinder model integration * Update Documentation link in README * Adapt watcher documentation for new standards * Replace default neutron endpoint type * switch to openstackdocs theme * Replace default glance endpoint type * Fix test\_list\_with\_limit failed * Replace the usage of 'manager' with 'os\_primary' * Updated from global requirements * avoid repeated actions in the solution * Update .gitignore * Pass environment variables of proxy to tox * Enable some off-by-default checks * Updated from global requirements * Fix get\_action\_plan\_list filter error * node.status for vm\_workload\_consolidation * Noisy Neighbor Strategy * Updated from global requirements * fix Keyerror in test\_nova\_cdmc * Add action for compute node power on/off * Replace voluptuous with JSONSchema to validate migration action * Updated from global requirements * Replace voluptuous with JSONSchema to validate change\_nova\_service\_state * Replace voluptuous with JSONSchema to validate resize action * Replace voluptuous with JSONSchema to 
validate sleep action * Replace voluptuous with JSONSchema to validate nop action * Remove log translations and hacking * Remove deprecated oslo\_messaging.get\_transport 1.2.0 ----- * Cancel Action Plan * fix multinode tempest test failure * Updated from global requirements * Add rm to whitelist\_externals in tox.ini * Remove usage of parameter enforce\_type * Replace default cinder endpoint type * Add action description * Watcher official install-guide * Trivial fix typos * Replace oslo\_utils.timeutils.isotime * Updated from global requirements * Deleted audit record still get by 'audit list'cmd * Versioned Notifications for service object * fix clod\_migrate problem * Change cinder api\_version to '3' in default * Updated from global requirements * doc error for WeightPlanner * Remove the deprecated tempest.test.attr * Replace assertRaisesRegexp with assertRaisesRegex * Updated from global requirements * [bugfix]for division use accurate division * Fix a typo * Updated from global requirements * Add Watcher JobStore for background jobs * Updated from global requirements * Add host\_aggregates in exclude rule of audit scope * replace nova endpoint * Add 'rm -f .testrepository/times.dbm' command in testenv * [Doc] fix local.conf.compute * [bugfix]retry is reached but action still success * use instance data replace exception.NoDataFound * Set access\_policy for messaging's dispatcher * Fix devstack plugin * [Doc] messaging -> messagingv2 * Add ironicclient 1.1.0 ----- * Updated from global requirements * Added suspended audit state * Add gnocchi support in uniform\_airflow strategy * Add Apache License Content in index.rst * Optimize the link address * correct syntax error * Updated from global requirements * exception when running 'watcher actionplan start XXX' * Optimize the link address * Add gnocchi support in outlet\_temp\_control strategy * fixed syntax error in json * Replace py34 with py35 * Add gnocchi support in workload\_balance strategy * Add gnocchi 
plugin support for devstack * Updated from global requirements * Run Watcher-API behind mod-wsgi * oslo messaging notifications driver update * Use tox to generate a sample configuration file * Added tempest test for workload\_stabilization * Add gnocchi support in VM-Workload-Consolidation strategy * Updated from global requirements * Fix for remove verbose option * Use HostAddressOpt for opts that accept IP and hostnames * Add gnocchi support in workload\_stabilization strategy * Prevent the migration of VM with 'optimize' False in VM metadata * Add period input parameter to vm workload consolidation and outlet temp control strategy * Add endpoint\_type option for openstack clients * Updated from global requirements * Add gnocchi support in basic\_consolidation strategy * Imported Translations from Zanata * Remove log translations * Add Gnocchi datasource * exception when running 'watcher service list' * Remove old oslo.messaging transport aliases * stale the action plan * Local copy of scenario test base class * set eager=True for actionplan.list * Use https instead of http * Updated from global requirements * Reduced the code complexity * Updated from global requirements * Updated from global requirements * Updated from global requirements * Adding instance metadata into cluster data model * Add Apache License content in conf.py file * [Fix gate]Update test requirement * Remove unused PNG files in image\_src directory * Updated from global requirements * Fix no endpoints of ceilometer in devstack environment setup * Fix some typos in vm\_workload\_consolidation.py * Optimize audit process * Reactivate watcher dashboard plugin in devstack/local.conf.controller * Add SUPERSEDED description * Add Action Notification * Switch to use test\_utils.call\_until\_true * Adding additional details to notification logs * Add checking audit state * Fix the mapping between the instance and the node * Remove support for py34 * Fix that remove 'strategy' attribute does not work 
* Fix spelling error in NotificationEndpoint classes * Fix log level error to warning * Fix incorrect auto trigger flag * Using items() instead of six.iteritems() * Update reno for stable/ocata 1.0.0 ----- * Added action\_plan.execution.\* actions * Added action\_plan.create|update|delete notifs * Add release note for action plan notifications * Add first alembic version for db migration * Use RPC cast() to be asynchronous * Updated graph model to use attr\_dict * Fix context error for user * Idiomatic loop for calculate\_num\_migrations * Fix multinode tempest test failure 0.34.0 ------ * add Ocata release notes * Add period input parameter to basic strategy * Fix invalid mock on ceilometerclient * Documentation update * Updated from global requirements * New Applier Workflow Engine * Remove obsolete Resource element * Graph cluster model instead of mapping one * Fix building of model with a scoped exclusion rule * Fix broken gates because of wrong pip command * Fix test\_clients\_monasca failure * Updated from global requirements * New default planner * Modify the field in tox.ini * Add action plan SUPERSEDED state * Fix dummy strategy to use input parameters * Updated from global requirements * Update Server Consolidation global efficacy * Fix a typo in watcher/objects/base.py * resolve KeyError exception * Enable notification for vm task state update * Should use glanceclient to get images * Multi datasource support for Basic Consolidation * Added Monasca Helper * Removed unnecessary utf-8 encoding * Updated from global requirements * Enable coverage report in console output * Fix TypeError if no input\_parameters added * Update configuration document * Add additional depencencies of CentOS 7 * Fix reference http * remove incorrect inline comment * Add auto\_trigger support to watcher * Fix variable name error * Updated from global requirements * Updated from global requirements * Fix bad CDMC update on reception of service.update 0.33.0 ------ * Implemented 
clients and auth config module * Implemented wacther decision engine config module * Documentation for Uniform Airflow Migration Strategy Fixed issues * Implemented applier config module * Implemented planner config module * Implemented db config module * Implemented exception config module * Implemented paths config module * remove unused log * Repair log parameter error * multinode devstack update for live-migration * Function call pass parameter error * Documentation for Workload Balance Migration Strategy Fixed comments and added the doc primitive call * Specific exception for stale cluster state was added * Implemented utils config module * Fix CI failures * improve statistic\_aggregation * Unnecessary exception * update strategy table when parameters\_spec changes * Implemented api config module * Updated from global requirements * Updated from global requirements * Improve the instruction of vm\_workload\_consolidation * Fix method name in doc/source/dev/plugin/action-plugin.rst * Repairing unit test failures * Fix some incorrect description in doc * [Doc] Fix example code of goal plugin * Use uuidutils instead of uuid.uuid4() * Modify the variable assignment errors * Show team and repo badges on README * Fix 'ImportError' when docbuild * Fix one ref that does not work * Updated from global requirements * Add periods input parameter * Solve some spelling mistakes * Remove redundan lines * Documentation for Outlet Temperature Based Strategy Fixed outstanding comments * Change hardware.cpu\_util in workload\_stabilization * Fix inconsistent descriptions in docstring in action\_plan.py * Removed nullable flag from audit\_id in ActionPlan * Replaces uuid.uuid4 with uuidutils.generate\_uuid() * Fixed update of WatcherObject fields on update * Fix some typos in action.py & action\_plan.py & audit.py * [Doc] Fix default value in workload\_stabilization * Fix the wrong ref for 'Compute node' 0.32.0 ------ * Implemented base + moved plugins & service conf * Add 
audit.planner events * Add audit.strategy events * Implemented audit.delete notification * Implemented audit.create notification * Implemented audit.update notification * Update devstack plugin to add notification param * Added notification\_level config option * Removed status\_topic config parameter * Remove stale notification code * Added notifications documentation page * Added support for versioned notifications * Add doc for vm\_workload\_consolidation strategy * Fix rally gate test * Updated from global requirements * Add doc for workload-stabilization spec * Fix the typo in efficacy\_indicator module * Fix NoMetricValuesForInstance error * Use oslo\_log instead of logging * optimized 'find\_instance()' * Fix workload stabilization strategy to ignore disabled hosts * Added Tempest API tests for /scoring\_engines * Remove unused SUBMITTED audit state * Added action\_plan ObjectField for Action * Added audit & strategy ObjectField for ActionPlan * Added goal & strategy ObjectField for Audit * Added goal+strategy ObjectField for AuditTemplate * Added 'goal' ObjectField for Strategy object * Refactored Watcher objects to use OVO * Removed deadline, version, extra & host\_aggregate * Eager loading on One-to-X foreign keys * [Doc] Fix strategy list optional argument * Use Enum value instead of String Value Fixing Gating Issue * Change "Openstack" to "OpenStack" * Avoid use xx=[] for parameter to initialize it's value * Transform KB into MB and normalize CPU * Updated from global requirements * Delete python bytecode file * Add strategy template doc * Drop MANIFEST.in - it's not needed by pbr * Add Audit Scope Handler * Add service object to the watcher\_db\_schema * Add service supervisor * Updated from global requirements * Added Model base class + related doc * Add RECOMMENDED state * Enable release notes translation * Added composite unique name constraints * Added missing test on GMR plugin * Moved Watcher doc plugin outside main package * Stop adding 
ServiceAvailable group option * HasLength() rewritten to assertEqual() * Updated from global requirements * Fix typo in docstring * Remove duplicate unittest * Fix typo in hooks.py * Docstrings should not start with a space * Fix capital letter in doc * Doc updates * Watcher utils cleanup * Fixed GMR configuration issue * Add constraint target to tox.ini and remove 1 dep * Updated from global requirements * 'tox -e py27' failed * Fix typo in docstring from "interprete" to "interpret" * Fix a typo in watcher.po * Deactivate dashboard plugin until fixed * remove redundant word * Fix a typo in basic\_consolidation.py * Update Watcher description * Test code tidy up * Update reno for stable/newton * Fixed issue on compute nodes iteration * Refactored Tests to load scenarios from file * Updated from global requirements * Remove group\_by statement in metric queries 0.30.0 ------ * Add rally-jobs folder to get rally support * Log CDM structure before+after executing strategy * Fixed Tempest test due to notification issues * Use memory mode for sqlite in db test * Added tests on API hooks and related context * When action plan is empty, its state is incorrect * Use parameters instead of config for workload stabilization * Add documentation for Scoring Module * Implemented GMR plugin to show CDM structures * Fix incorrect strings and formatting * Modify use of assertTrue(A in B) * Fixed indentation * The default value of 'is\_admin\_project' 0.29.0 ------ * Add release notes for Newton blueprints * TrivialFix: Remove cfg import unused * TrivialFix: Remove logging import unused * Remove unused LOG * Update configuration section for notifications * Doc on how to add notification endpoints * Notification and CDM partial update * Remove unreachable line * Added start/end date params on ceilometer queries * Correct watcher reraising of exception * Check unspecified parameters create audit * Fix loading of plugin configuration parameters * Add Scoring Module implementation * Add 
unit tests for continuous.py * Updated from global requirements * Fixed flaky tempest test * Remove pot files * Updated from global requirements * Added strategy ID + Action Plan syncing * Fixes to get cluster data model * Updated from global requirements * Fix double self.\_goal definition * Scheduler of decision\_engine fix * Updated from global requirements * Clean imports in code * Modify libvirt\_opts="-d -l" to libvirtd\_opts="-d -l" * Rename (pre/post)condition to (pre/post)\_condition * Add unit tests for nova\_helper.py * Updated from global requirements * Removed unused function in uniform airflow * Update the home-page info with the developer documentation * Updated from global requirements * Refactored the compute model and its elements * Use more specific asserts * Merged metrics\_engine package into decision\_engine * Updated DE architecture doc + 'period' param * Added DE Background Scheduler w/ model sync jobs * Cluster data model collector plugin documentation * Loadable Cluster Data Model Collectors * Updated from global requirements * Add scoring engines to database and API layers * Implement goal\_id, strategy\_id and host\_aggregate into Audit api * use parameters to set the threshold * Updated from global requirements * Fixed Basic optim tempest test * Fix 2 occurrences of typo: "occured" --> "occurred" * Add hacking checks to watcher * Update docs links to docs.openstack.org * Remove discover from test-requirements * Updated from global requirements * Fix typos and messages in strategies * Remove unused columns parameters in watcher/db/api * test\_context\_hook\_before\_method failed * Fix dict.keys() PY3 compatible 0.28.0 ------ * There are some spelling errors in the code * Add Python 3.5 classifier and venv * Update unitaty tests to verify strategy and goal name * Bad goal and strategy name for Airflow Optimization * Fix unittest in test\_api.py * Optimize local.conf.controller file to enable watche-dashboard * Add continuously 
optimization * Add Desktop Service Store to .gitignore file * Documentation for strategy parameters * Updated from global requirements * Add installation from Debian packages section * Add new documentation section for Watcher policies rules * Update executor to eventlet * Add policies for API access control to watcher project * Fix watcher doc build error * Fix field type to audit\_type * Remove duplicate unittest * Fix link error in base-setup.rst * Fix failing Tempest tests * Enable strategy parameters * Update Docs links to docs.openstack.org * add dependency for 3rd-party plugins * Make default Planner generic to handle new action * Modify IRC weekly meeting time * Uniform Airflow migration strategy implementation * Updated from global requirements * Centralize plugin loaders in watcher/applier * Add importing modules instead of classes * Centralize plugin loaders in decision engine * Add goal\_name field in strategy * Updated from global requirements * Use disabled/enabled to change service state * Check if nova-service is already disabled * Add bandit in tox -e pep8 * Added filter operators * Fix StrategyContext to use the strategy\_id in the Audit Template * Use proper theme for release notes * Fix releasenotes generation * Documentation on goal and efficacy * Added efficacy indicators to /action\_plans * Added pre/post execution methods to strategies * Added EfficacyIndicator object * Added efficacy specification to /goals * Add reno for release notes management * Added EfficacyIndicator model in DB * Decoupled Goal from Strategy * Fix broken link in doc * Added missing config section for autogeneration * Updated from global requirements * Added audit\_template filter to /audits/detail * Add fix for hardware.cpu.util meter in sd-strategy * Add fix for \_\_init\_\_() error * Updated tempest test creds retrieval mechanism 0.27.0 ------ * Documentation for plugins-parameters * Workload balance migration strategy implementation * Watcher plugins table in Guru 
meditation reports * Enabled config parameters to plugins * Add Overload standard deviation strategy * Add goal name as filter for strategy list cmd * Update Watcher documentation * Updated from global requirements * Added cold VM migration support * Add goal\_name & strategy\_name in /audit\_templates * Replace assertEqual(None, \*) with assertIsNone in tests * Fix lazy translation issue with watcher-db-manage * Fixed flaky tempest test * Removed telemetry tag from tempest tests * Updated from global requirements * Fix for statistic\_aggregation * Remove direct access to dbapi * Updated from global requirements * Fix documentation watcher sql database * Watcher DB class diagram * Added .pot file * Remove [watcher\_goals] config section * Remove watcher\_goals section from devstack plugin * Documentation update for get-goal-from-strategy * Updated purge to now include goals and strategies * Syncer now syncs stale audit templates * Add strategy\_id & goal\_id fields in audit template * Refactored Strategy selector to select from DB * Added /strategies endpoint in Watcher API * Add Goal in BaseStrategy + Goal API reads from DB * DB sync for Strategies * Added Strategy model * Added Goal object + goal syncing * Added Goal model into Watcher DB * Log "https" if using SSL * [nova\_helper] get keypair name by every admin users * Remove using of UUID field in POST methods of Watcher API * Refactored DE and Applier to use oslo.service * Refactored Watcher API service * Updated from global requirements * Removed unused 'alarm' field * Add parameters verification when Audit is being created * correct the available disk, memory calculating Source data are misused in outlet temperature strategy. 
This patch fixes it * Upgrade Watcher Tempest tests for multinode * Update .coveragerc to ignore abstract methods * Updated from global requirements * Fix for deleting audit template * Remove unused logging import and LOG global var * Updated from global requirements 0.26.0 ------ * Added missing support for resource states in unicode format in VM workload consolidation strategy * Disabled PATCH, POST and DELETE for /actions * Added information on plugin mechanism to glossary * Invalid states for Action Plan in the glossary * Integrated consolidation strategy with watcher * Added oslo.context to requirements.txt 0.25.0 ------ * Remove the watcher sample configuration file * Updated action-plugin doc to refer to Voluptuous * Rename variable vm\_avg\_cpu\_util * renamed "efficiency" with "efficacy" Closes-Bug:#1558468 * Remove true/false return from action.execute() * Updated from global requirements * Documentation on purge command * Added purge script for soft deleted objects * Added Mixin-related filters on DB queries * Updated from global requirements * Refactored check for invalid goal * Renamed api.py to base.py in metrics engine * Re-generated the watcher.pot * Added Disk Capacity in cluster-data-model * Removing unicode from README.rst 0.24.0 ------ * Doc on how to set up a thirdparty project * Remove tests omission from coverage target in tox.ini * add Goal into RESTful Web API (v1) documentation * Updated Strategy plugin doc * Doc on how to implement a custom Watcher planner * Add Watcher dashboard to the list of projects * Doc on how to implement a custom Watcher action * Fixed wrongly used assertEqual method * Improve DevStack documentation for beginners * Added support for live migration on non-shared storage * Updated Watcher doc to mention Tempest tests * RST directive to discover and generate drivers doc * Rename 'TRIGGERED' state as 'PENDING' * Fixed type in get\_audit\_template\_by\_name method * Updated from global requirements * Cleanup in 
tests/\_\_init\_\_.py * Update nova service state * Replace "Triggered" state by "Pending" state * Add start directory for oslo\_debug\_helper * Add missing requirements * Updated from global requirements * Re-enable related Tempest test * Useless return statement in validate\_sort\_dir * Pass parameter to the query in get\_last\_sample\_values * Remove unused function and argument * Added goal filter in Watcher API * Improve variable names in strategy implementations * Added unit tests on actions * Clean imports in code * Add Voluptuous to validate the action parameters * Remove KEYSTONE\_CATALOG\_BACKEND from DevStack plugin * Cleanup in test\_objects.py * Better cleanup for Tempest tests * Ceilometer client instantiation fixup * Update the default version of Neutron API * Sync with openstack/requirements master branch * Delete linked actions when deleting an action plan 0.23.2 ------ * Add IRC information into contributing page * Update docs for password auth configuration options * Remove references to SERVERS\_CONSOLIDATION * Create OpenStackClients convenience class * Added Tempest scenario for BASIC\_CONSOLIDATION * Use install instead of mkdir for DevStack dirs * Removed unused parameter in dt\_deserializer() * Remove unused parameter in Actions API controller * Define self.client in MessagingCore * Remove InvalidParameterValue exception * Tempest API tests on /actions * GET on an action\_plan provides first\_action\_uuid * Fixed ActionPlanNotFound typo in msg\_fmt 0.23.1 ------ * Fixed tempest test bug 0.23.0 ------ * Action plan state transition - payload validation * Add 'workers' section into configuration doc * API Tempest tests on goals * Fix HTML warnings on HTML doc * Action Plan state - Changed STARTING to TRIGGERED * Tempest scenario - execute a dummy strategy * Added doc8 * Add reference to Ceilometer developer guide * API Tempest tests on Action plans * Re-organize the Wacher documentation Home Page * Fix 'Module index' broken HTTP link * API 
Tempest tests on Audits * Refactored existing tempest API tests * Renamed Status to State * Update the user-guide to explain the main steps * Refactor Commands section * Use taskflow library for building and executing action plans * Removed unused parameters from api controllers * Validate audit template UUID on audit create * Add diagrams to the architecture doc page * Fix Warnings generated while building of HTML docu * Reduced the complexity of the execute() method * Missing super() in API collection controllers * Remove shadow BaseException class * Replace message with msg\_fmt for custom exceptions * Removed use of deprecated LOG.warn method * Add a dynamic loading of Actions handlers in the Watcher Applier * Update API documentation for action plan * Renamed diskInfo.py * Fix extraction of \_LI \_LW \_LE \_LC for translation * Clean up flake8 ignore list * Move terminology definition to class related * Keep py3.X compatibility for urllib * Use dict.items() dirrectly instead of six.iteritems * Test: make enforce\_type=True in CONF.set\_override and fix error * Remove incorrect spaces for libvirt\_opts value * Add a generic and extensible way to describe the flow of actions * Add a dynamic loading of the Watcher Planner implementation * Add a common generic dynamic loader for watcher * Add the possibility to store several parameters for an Action * Changed testr to os-testr * Strategy goals should be required in conf * Use assertTrue/False instead of assertEqual(T/F) * Implement DevStack plugin * Remove useless Meta-Action 0.22.0 ------ * outlet Temperature based migration strategy * Move Audit-template management in DefaultStrategyContext * Remove duplicated nova wrapper * Move glossary.rst to root folder of doc * Remove string concatenation in favor of string formatting * Remove useless event factory * Rename NovaWrapper to NovaClient * i18n - Make string translatable * Change default strategy to DummyStrategy * Add Creative Commons Attribution header to 
documentation * Code refactoring - StrategyContext and Auditendpoint * Remove \*.pyc files before running tox tests * Add missing parameter in prepare\_service for api * Fix generation of watcher config file * Rename command to audit * 'admin\_user' opt (and others) imported twice * Removed duplicated function prepare\_service() * Internationalization (i18n) - Enable French locale * Include terminology definition from docstring * Remove pragma no cover from code * Tidy up - Watcher Decision Engine package * Typo in ClusteStateNotDefined * Some tests are ignored * Tidy up - Rename Base * Refactored Watcher codebase to add py34 support * Added unit tests on nova wrapper * Removed H404, H405, H305 ignore in pep8 * Removed unnecessary code from basic\_consolidation * Remove unreachable code in basic\_consolidation.py * Rename Mapper to Mapping * Tidy up - Primitive * Remove references to removed watcher/openstack directory * Removed py33, pypy support * Remove alembic revision of watcher db * Add Apache license header to all rst documentation * Rename Command to Action * Update the glossary to lay down Watcher terminology * Rename command to action\_plan * Removed unused enum * Rename Meta-Action to Action * Add a checker for the documentation * Rename efficiency to efficacy * Fix Watcher Applier variables in CamelCase * Remove duplicate setup in Watcher API main() * Cleanup deprecated documentation * Provide detailed information on architecture 0.21.0 ------ * Update configuration section for rabbitmq * Created a glossary to lay down Watcher terminology * Update documentation regarding Ceilometer API V2 * Fixed missing attribute in the data model * Removed py26 support * Code refactoring - Watcher Applier package * Removed old (and unused) openstack/oslo libs * Fixed doc generation warning * add missing keystoneclient dependency * Added priority level to Nop action * Removed 'watcher\_messaging' to use oslo.messaging * Improve OpenStack clients API * Added 'dummy' 
entrypoint within watcher\_strategies * Fixed tense consistency (used past) in statuses * Added LaunchActionPlanCommand's init super call * Explained KEYSTONE\_SERVICE\_PROJECT\_NAME variable * Added missing super call in DefaultApplier's init * AMQP Channel have to be set espacially to 'watcher' * Updated the config sample file generation command * Code refactoring - Watcher Decision Engine package * Strategy plugins documentation * Update requirements from OS Global Requirements * Made Decision Engine extensible via stevedore * Integration of Ceilometer in Watcher * Update configuration file sample * Fix config loading when running the watcher-api * Fix tiny typo on configuration doc * Avoid dividing zero * Should use watcher/common/context.py for consistency * Update policy.py * Watcher notification listener is associated with an executor which integrates the listener with a specific I/O handling framework. blocking executor should be the default one * Update Rabbit MQ server configuration extract * Use a single command to create the 3 watcher endpoints * Use i18n directly * Use olso.log directly * refactoring documentation * Change stackforge to openstack, corrected some heading underlines * use https instead of http for doc link * update Watcher mission * consolidation of watcher * update config file sample * update documentation * fix dependencies version * Documention fixes * initial version * Added .gitreview python-watcher-4.0.0/.mailmap0000664000175000017500000000013113656752270016142 0ustar zuulzuul00000000000000# Format is: # # python-watcher-4.0.0/python_watcher.egg-info/0000775000175000017500000000000013656752352021257 5ustar zuulzuul00000000000000python-watcher-4.0.0/python_watcher.egg-info/entry_points.txt0000664000175000017500000001045513656752351024561 0ustar zuulzuul00000000000000[console_scripts] watcher-api = watcher.cmd.api:main watcher-applier = watcher.cmd.applier:main watcher-db-manage = watcher.cmd.dbmanage:main watcher-decision-engine = 
watcher.cmd.decisionengine:main watcher-status = watcher.cmd.status:main watcher-sync = watcher.cmd.sync:main [oslo.config.opts] watcher = watcher.conf.opts:list_opts [oslo.policy.enforcer] watcher = watcher.common.policy:get_enforcer [oslo.policy.policies] watcher = watcher.common.policies:list_rules [watcher.database.migration_backend] sqlalchemy = watcher.db.sqlalchemy.migration [watcher_actions] change_node_power_state = watcher.applier.actions.change_node_power_state:ChangeNodePowerState change_nova_service_state = watcher.applier.actions.change_nova_service_state:ChangeNovaServiceState migrate = watcher.applier.actions.migration:Migrate nop = watcher.applier.actions.nop:Nop resize = watcher.applier.actions.resize:Resize sleep = watcher.applier.actions.sleep:Sleep volume_migrate = watcher.applier.actions.volume_migration:VolumeMigrate [watcher_cluster_data_model_collectors] baremetal = watcher.decision_engine.model.collector.ironic:BaremetalClusterDataModelCollector compute = watcher.decision_engine.model.collector.nova:NovaClusterDataModelCollector storage = watcher.decision_engine.model.collector.cinder:CinderClusterDataModelCollector [watcher_goals] airflow_optimization = watcher.decision_engine.goal.goals:AirflowOptimization cluster_maintaining = watcher.decision_engine.goal.goals:ClusterMaintaining dummy = watcher.decision_engine.goal.goals:Dummy hardware_maintenance = watcher.decision_engine.goal.goals:HardwareMaintenance noisy_neighbor = watcher.decision_engine.goal.goals:NoisyNeighborOptimization saving_energy = watcher.decision_engine.goal.goals:SavingEnergy server_consolidation = watcher.decision_engine.goal.goals:ServerConsolidation thermal_optimization = watcher.decision_engine.goal.goals:ThermalOptimization unclassified = watcher.decision_engine.goal.goals:Unclassified workload_balancing = watcher.decision_engine.goal.goals:WorkloadBalancing [watcher_planners] node_resource_consolidation = 
watcher.decision_engine.planner.node_resource_consolidation:NodeResourceConsolidationPlanner weight = watcher.decision_engine.planner.weight:WeightPlanner workload_stabilization = watcher.decision_engine.planner.workload_stabilization:WorkloadStabilizationPlanner [watcher_scoring_engine_containers] dummy_scoring_container = watcher.decision_engine.scoring.dummy_scoring_container:DummyScoringContainer [watcher_scoring_engines] dummy_scorer = watcher.decision_engine.scoring.dummy_scorer:DummyScorer [watcher_strategies] actuator = watcher.decision_engine.strategy.strategies.actuation:Actuator basic = watcher.decision_engine.strategy.strategies.basic_consolidation:BasicConsolidation dummy = watcher.decision_engine.strategy.strategies.dummy_strategy:DummyStrategy dummy_with_resize = watcher.decision_engine.strategy.strategies.dummy_with_resize:DummyWithResize dummy_with_scorer = watcher.decision_engine.strategy.strategies.dummy_with_scorer:DummyWithScorer host_maintenance = watcher.decision_engine.strategy.strategies.host_maintenance:HostMaintenance node_resource_consolidation = watcher.decision_engine.strategy.strategies.node_resource_consolidation:NodeResourceConsolidation noisy_neighbor = watcher.decision_engine.strategy.strategies.noisy_neighbor:NoisyNeighbor outlet_temperature = watcher.decision_engine.strategy.strategies.outlet_temp_control:OutletTempControl saving_energy = watcher.decision_engine.strategy.strategies.saving_energy:SavingEnergy storage_capacity_balance = watcher.decision_engine.strategy.strategies.storage_capacity_balance:StorageCapacityBalance uniform_airflow = watcher.decision_engine.strategy.strategies.uniform_airflow:UniformAirflow vm_workload_consolidation = watcher.decision_engine.strategy.strategies.vm_workload_consolidation:VMWorkloadConsolidation workload_balance = watcher.decision_engine.strategy.strategies.workload_balance:WorkloadBalance workload_stabilization = 
watcher.decision_engine.strategy.strategies.workload_stabilization:WorkloadStabilization zone_migration = watcher.decision_engine.strategy.strategies.zone_migration:ZoneMigration [watcher_workflow_engines] taskflow = watcher.applier.workflow_engine.default:DefaultWorkFlowEngine [wsgi_scripts] watcher-api-wsgi = watcher.api.wsgi:initialize_wsgi_app python-watcher-4.0.0/python_watcher.egg-info/dependency_links.txt0000664000175000017500000000000113656752351025324 0ustar zuulzuul00000000000000 python-watcher-4.0.0/python_watcher.egg-info/requires.txt0000664000175000017500000000174213656752351023662 0ustar zuulzuul00000000000000apscheduler>=3.5.1 jsonpatch>=1.21 keystoneauth1>=3.4.0 jsonschema>=2.6.0 keystonemiddleware>=4.21.0 lxml>=4.1.1 croniter>=0.3.20 os-resource-classes>=0.4.0 oslo.concurrency>=3.26.0 oslo.cache>=1.29.0 oslo.config>=5.2.0 oslo.context>=2.21.0 oslo.db>=4.35.0 oslo.i18n>=3.20.0 oslo.log>=3.37.0 oslo.messaging>=8.1.2 oslo.policy>=1.34.0 oslo.reports>=1.27.0 oslo.serialization>=2.25.0 oslo.service>=1.30.0 oslo.upgradecheck>=0.1.0 oslo.utils>=3.36.0 oslo.versionedobjects>=1.32.0 PasteDeploy>=1.5.2 pbr>=3.1.1 pecan>=1.3.2 PrettyTable<0.8,>=0.7.2 gnocchiclient>=7.0.1 python-ceilometerclient>=2.9.0 python-cinderclient>=3.5.0 python-glanceclient>=2.9.1 python-keystoneclient>=3.15.0 python-monascaclient>=1.12.0 python-neutronclient>=6.7.0 python-novaclient>=14.1.0 python-openstackclient>=3.14.0 python-ironicclient>=2.5.0 six>=1.11.0 SQLAlchemy>=1.2.5 stevedore>=1.28.0 taskflow>=3.7.1 WebOb>=1.8.5 WSME>=0.9.2 microversion_parse>=0.2.1 futurist>=1.8.0 [:(python_version>='3.4')] networkx>=2.2 python-watcher-4.0.0/python_watcher.egg-info/pbr.json0000664000175000017500000000005713656752351022736 0ustar zuulzuul00000000000000{"git_version": "870e6d75", "is_release": true}python-watcher-4.0.0/python_watcher.egg-info/PKG-INFO0000664000175000017500000000447613656752351022366 0ustar zuulzuul00000000000000Metadata-Version: 1.2 Name: python-watcher Version: 4.0.0 
Summary: OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Home-page: https://docs.openstack.org/watcher/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======= Watcher ======= .. image:: https://governance.openstack.org/tc/badges/watcher.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on .. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Watcher provides a robust framework to realize a wide range of cloud optimization goals, including the reduction of data center operating costs, increased system performance via intelligent virtual machine migration, increased energy efficiency and more! 
* Free software: Apache license * Wiki: https://wiki.openstack.org/wiki/Watcher * Source: https://opendev.org/openstack/watcher * Bugs: https://bugs.launchpad.net/watcher * Documentation: https://docs.openstack.org/watcher/latest/ * Release notes: https://docs.openstack.org/releasenotes/watcher/ * Design specifications: https://specs.openstack.org/openstack/watcher-specs/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 python-watcher-4.0.0/python_watcher.egg-info/top_level.txt0000664000175000017500000000001013656752351023777 0ustar zuulzuul00000000000000watcher python-watcher-4.0.0/python_watcher.egg-info/not-zip-safe0000664000175000017500000000000113656752351023504 0ustar zuulzuul00000000000000 python-watcher-4.0.0/python_watcher.egg-info/SOURCES.txt0000664000175000017500000012012013656752352023137 0ustar zuulzuul00000000000000.coveragerc .mailmap .stestr.conf .zuul.yaml AUTHORS CONTRIBUTING.rst ChangeLog HACKING.rst LICENSE README.rst babel.cfg lower-constraints.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/parameters.yaml api-ref/source/watcher-api-v1-actionplans.inc api-ref/source/watcher-api-v1-actions.inc api-ref/source/watcher-api-v1-audits.inc api-ref/source/watcher-api-v1-audittemplates.inc api-ref/source/watcher-api-v1-datamodel.inc api-ref/source/watcher-api-v1-goals.inc 
api-ref/source/watcher-api-v1-scoring_engines.inc api-ref/source/watcher-api-v1-services.inc api-ref/source/watcher-api-v1-strategies.inc api-ref/source/watcher-api-v1-webhooks.inc api-ref/source/watcher-api-versions.inc api-ref/source/samples/actionplan-cancel-request-cancelling.json api-ref/source/samples/actionplan-cancel-request-pending.json api-ref/source/samples/actionplan-list-detailed-response.json api-ref/source/samples/actionplan-list-response.json api-ref/source/samples/actionplan-show-response.json api-ref/source/samples/actionplan-start-response.json api-ref/source/samples/actions-list-detailed-response.json api-ref/source/samples/actions-list-response.json api-ref/source/samples/actions-show-response.json api-ref/source/samples/api-root-response.json api-ref/source/samples/api-v1-root-response.json api-ref/source/samples/audit-cancel-request.json api-ref/source/samples/audit-cancel-response.json api-ref/source/samples/audit-create-request-continuous.json api-ref/source/samples/audit-create-request-oneshot.json api-ref/source/samples/audit-create-response.json api-ref/source/samples/audit-list-detailed-response.json api-ref/source/samples/audit-list-response.json api-ref/source/samples/audit-show-response.json api-ref/source/samples/audit-update-request.json api-ref/source/samples/audit-update-response.json api-ref/source/samples/audittemplate-create-request-full.json api-ref/source/samples/audittemplate-create-request-minimal.json api-ref/source/samples/audittemplate-create-response.json api-ref/source/samples/audittemplate-list-detailed-response.json api-ref/source/samples/audittemplate-list-response.json api-ref/source/samples/audittemplate-show-response.json api-ref/source/samples/audittemplate-update-request.json api-ref/source/samples/audittemplate-update-response.json api-ref/source/samples/datamodel-list-response.json api-ref/source/samples/goal-list-response.json api-ref/source/samples/goal-show-response.json 
api-ref/source/samples/scoring_engine-list-detailed-response.json api-ref/source/samples/scoring_engine-list-response.json api-ref/source/samples/scoring_engine-show-response.json api-ref/source/samples/service-list-detailed-response.json api-ref/source/samples/service-list-response.json api-ref/source/samples/service-show-response.json api-ref/source/samples/strategy-list-detailed-response.json api-ref/source/samples/strategy-list-response.json api-ref/source/samples/strategy-show-response.json api-ref/source/samples/strategy-state-response.json devstack/local.conf.compute devstack/local.conf.controller devstack/override-defaults devstack/plugin.sh devstack/settings devstack/files/apache-watcher-api.template devstack/lib/watcher devstack/upgrade/resources.sh devstack/upgrade/settings devstack/upgrade/shutdown.sh devstack/upgrade/upgrade.sh devstack/upgrade/from_rocky/upgrade-watcher doc/requirements.txt doc/ext/__init__.py doc/ext/term.py doc/ext/versioned_notifications.py doc/notification_samples/action-cancel-end.json doc/notification_samples/action-cancel-error.json doc/notification_samples/action-cancel-start.json doc/notification_samples/action-create.json doc/notification_samples/action-delete.json doc/notification_samples/action-execution-end.json doc/notification_samples/action-execution-error.json doc/notification_samples/action-execution-start.json doc/notification_samples/action-update.json doc/notification_samples/action_plan-cancel-end.json doc/notification_samples/action_plan-cancel-error.json doc/notification_samples/action_plan-cancel-start.json doc/notification_samples/action_plan-create.json doc/notification_samples/action_plan-delete.json doc/notification_samples/action_plan-execution-end.json doc/notification_samples/action_plan-execution-error.json doc/notification_samples/action_plan-execution-start.json doc/notification_samples/action_plan-update.json doc/notification_samples/audit-create.json doc/notification_samples/audit-delete.json 
doc/notification_samples/audit-planner-end.json doc/notification_samples/audit-planner-error.json doc/notification_samples/audit-planner-start.json doc/notification_samples/audit-strategy-end.json doc/notification_samples/audit-strategy-error.json doc/notification_samples/audit-strategy-start.json doc/notification_samples/audit-update.json doc/notification_samples/infra-optim-exception.json doc/notification_samples/service-update.json doc/source/architecture.rst doc/source/conf.py doc/source/glossary.rst doc/source/index.rst doc/source/_static/.placeholder doc/source/admin/apache-mod-wsgi.rst doc/source/admin/gmr.rst doc/source/admin/index.rst doc/source/admin/policy.rst doc/source/configuration/configuring.rst doc/source/configuration/index.rst doc/source/configuration/watcher.rst doc/source/contributor/api_microversion_history.rst doc/source/contributor/concurrency.rst doc/source/contributor/contributing.rst doc/source/contributor/devstack.rst doc/source/contributor/environment.rst doc/source/contributor/index.rst doc/source/contributor/notifications.rst doc/source/contributor/rally_link.rst doc/source/contributor/testing.rst doc/source/contributor/plugin/action-plugin.rst doc/source/contributor/plugin/base-setup.rst doc/source/contributor/plugin/cdmc-plugin.rst doc/source/contributor/plugin/goal-plugin.rst doc/source/contributor/plugin/index.rst doc/source/contributor/plugin/planner-plugin.rst doc/source/contributor/plugin/plugins.rst doc/source/contributor/plugin/scoring-engine-plugin.rst doc/source/contributor/plugin/strategy-plugin.rst doc/source/datasources/grafana.rst doc/source/datasources/index.rst doc/source/image_src/dia/architecture.dia doc/source/image_src/dia/functional_data_model.dia doc/source/image_src/plantuml/README.rst doc/source/image_src/plantuml/action_plan_state_machine.txt doc/source/image_src/plantuml/audit_state_machine.txt doc/source/image_src/plantuml/sequence_architecture_cdmc_sync.txt 
doc/source/image_src/plantuml/sequence_create_and_launch_audit.txt doc/source/image_src/plantuml/sequence_create_audit_template.txt doc/source/image_src/plantuml/sequence_from_audit_execution_to_actionplan_creation.txt doc/source/image_src/plantuml/sequence_launch_action_plan.txt doc/source/image_src/plantuml/sequence_launch_action_plan_in_applier.txt doc/source/image_src/plantuml/sequence_overview_watcher_usage.txt doc/source/image_src/plantuml/sequence_trigger_audit_in_decision_engine.txt doc/source/image_src/plantuml/watcher_db_schema_diagram.txt doc/source/images/action_plan_state_machine.png doc/source/images/architecture.svg doc/source/images/audit_state_machine.png doc/source/images/functional_data_model.svg doc/source/images/sequence_architecture_cdmc_sync.png doc/source/images/sequence_create_and_launch_audit.png doc/source/images/sequence_create_audit_template.png doc/source/images/sequence_from_audit_execution_to_actionplan_creation.png doc/source/images/sequence_launch_action_plan.png doc/source/images/sequence_launch_action_plan_in_applier.png doc/source/images/sequence_overview_watcher_usage.png doc/source/images/sequence_trigger_audit_in_decision_engine.png doc/source/images/watcher_db_schema_diagram.png doc/source/install/common_configure.rst doc/source/install/common_prerequisites.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/install-rdo.rst doc/source/install/install-ubuntu.rst doc/source/install/install.rst doc/source/install/next-steps.rst doc/source/install/verify.rst doc/source/man/footer.rst doc/source/man/general-options.rst doc/source/man/index.rst doc/source/man/watcher-api.rst doc/source/man/watcher-applier.rst doc/source/man/watcher-db-manage.rst doc/source/man/watcher-decision-engine.rst doc/source/man/watcher-status.rst doc/source/strategies/actuation.rst doc/source/strategies/basic-server-consolidation.rst doc/source/strategies/host_maintenance.rst doc/source/strategies/index.rst 
doc/source/strategies/node_resource_consolidation.rst doc/source/strategies/noisy_neighbor.rst doc/source/strategies/outlet_temp_control.rst doc/source/strategies/saving_energy.rst doc/source/strategies/storage_capacity_balance.rst doc/source/strategies/strategy-template.rst doc/source/strategies/uniform_airflow.rst doc/source/strategies/vm_workload_consolidation.rst doc/source/strategies/workload-stabilization.rst doc/source/strategies/workload_balance.rst doc/source/strategies/zone_migration.rst doc/source/user/event_type_audit.rst doc/source/user/index.rst doc/source/user/user-guide.rst doc/source/user/ways-to-install.rst etc/apache2/watcher etc/watcher/README-watcher.conf.txt etc/watcher/oslo-config-generator/watcher.conf etc/watcher/oslo-policy-generator/watcher-policy-generator.conf playbooks/legacy/grenade-devstack-watcher/post.yaml playbooks/legacy/grenade-devstack-watcher/run.yaml python_watcher.egg-info/PKG-INFO python_watcher.egg-info/SOURCES.txt python_watcher.egg-info/dependency_links.txt python_watcher.egg-info/entry_points.txt python_watcher.egg-info/not-zip-safe python_watcher.egg-info/pbr.json python_watcher.egg-info/requires.txt python_watcher.egg-info/top_level.txt rally-jobs/README.rst rally-jobs/watcher-watcher.yaml releasenotes/notes/.placeholder releasenotes/notes/action-plan-cancel-c54726378019e096.yaml releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yaml releasenotes/notes/action-versioned-notifications-api-ff94fc0f401292d0.yaml releasenotes/notes/add-baremetal-scoper-9ef23f5fb8f0be6a.yaml releasenotes/notes/add-force-field-to-audit-4bcaeedfe27233ad.yaml releasenotes/notes/add-ha-support-b9042255e5b76e42.yaml releasenotes/notes/add-name-for-audit-0df1f39f00736f06.yaml releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml 
releasenotes/notes/add-start-end-time-for-continuous-audit-52c45052cb06d153.yaml releasenotes/notes/add-upgrade-check-framework-5bb9693c8a78931c.yaml releasenotes/notes/api-call-retry-fef741ac684c58dd.yaml releasenotes/notes/api-microversioning-7999a3ee8073bf32.yaml releasenotes/notes/audit-scoper-for-storage-data-model-cdccc803542d22db.yaml releasenotes/notes/audit-tag-vm-metadata-47a3e4468748853c.yaml releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml releasenotes/notes/background-jobs-ha-9d3cf3fe356f4705.yaml releasenotes/notes/bp-audit-scope-exclude-project-511a7720aac00dff.yaml releasenotes/notes/build-baremetal-data-model-in-watcher-3023453a47b61dab.yaml releasenotes/notes/cdm-scoping-8d9c307bad46bfa1.yaml releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml releasenotes/notes/change-ram-util-metric-4a3e6984b9dd968d.yaml releasenotes/notes/check-strategy-requirements-66f9e9262412f8ec.yaml releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml releasenotes/notes/compute-cdm-include-all-instances-f7506ded2d57732f.yaml releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml releasenotes/notes/consume-nova-versioned-notifications-f98361b37e546b4d.yaml releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml releasenotes/notes/cron-based-continuous-audits-c3eedf28d9752b37.yaml releasenotes/notes/datasource-query-retry-00cba5f7e68aec39.yaml releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml releasenotes/notes/deprecate-ceilometer-datasource-446b0be70fbce28b.yaml releasenotes/notes/drop-py-2-7-54f8e806d71f19a7.yaml releasenotes/notes/dynamic-action-description-0e947b9e7ef2a134.yaml releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml 
releasenotes/notes/enhance-watcher-applier-engine-86c676ce8f179e68.yaml releasenotes/notes/event-driven-optimization-based-4870f112bef8a560.yaml releasenotes/notes/file-based-metric-map-c2af62b5067895df.yaml releasenotes/notes/formal-datasource-interface-implementation-222769d55a127d33.yaml releasenotes/notes/general-purpose-decision-engine-threadpool-0711b23abfc9d409.yaml releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml releasenotes/notes/global-datasource-preference-3ab47b4be09ff3a5.yaml releasenotes/notes/gnocchi-watcher-43c25d391fbd3e9c.yaml releasenotes/notes/grafana-datasource-b672367c23ffa0c6.yaml releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml releasenotes/notes/host-maintenance-strategy-41f640927948fb56.yaml releasenotes/notes/improve-compute-data-model-b427c85e4ed2b6fb.yaml releasenotes/notes/jsonschema-validation-79cab05d5295da00.yaml releasenotes/notes/min-required-nova-train-71f124192d88ae52.yaml releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml releasenotes/notes/multiple-global-efficacy-indicator-fc11c4844a12a7d5.yaml releasenotes/notes/node-resource-consolidation-73bc0c0abfeb0b03.yaml releasenotes/notes/noisy-neighbor-strategy-a71342740b59dddc.yaml releasenotes/notes/notifications-actionplan-cancel-edb2a4a12543e2d0.yaml releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml releasenotes/notes/remove-nova-legacy-notifications-e1b6d10eff58f30a.yaml releasenotes/notes/replace-cold-migrate-to-use-nova-migration-api-cecd9a39ddd3bc58.yaml releasenotes/notes/scope-for-data-model-ea9792f90db14343.yaml releasenotes/notes/service-versioned-notifications-api-70367b79a565d900.yaml releasenotes/notes/show-datamodel-api-6945b744fd5d25d5.yaml releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml 
releasenotes/notes/storage-workload-balance-0ecabbc1791e6894.yaml releasenotes/notes/support-keystoneclient-option-b30d1ff45f86a2e7.yaml releasenotes/notes/support-placement-api-58ce6bef1bbbe98a.yaml releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml releasenotes/notes/uwsgi-support-8dcea6961e56dad0.yaml releasenotes/notes/volume-migrate-action-fc57b0ce0e4c39ae.yaml releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml releasenotes/notes/watcher-planner-selector-84d77549d46f362a.yaml releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml releasenotes/notes/workload-balance-base-on-cpu-or-ram-util-3ff4ee968c32b2ed.yaml releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml releasenotes/notes/zone-migration-strategy-10f7656a2a01e607.yaml releasenotes/source/conf.py releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/queens.rst releasenotes/source/rocky.rst releasenotes/source/stein.rst releasenotes/source/train.rst releasenotes/source/unreleased.rst releasenotes/source/_static/.placeholder watcher/__init__.py watcher/_i18n.py watcher/version.py watcher/api/__init__.py watcher/api/acl.py watcher/api/app.py watcher/api/app.wsgi watcher/api/config.py watcher/api/hooks.py watcher/api/scheduling.py watcher/api/wsgi.py watcher/api/controllers/__init__.py watcher/api/controllers/base.py watcher/api/controllers/link.py watcher/api/controllers/rest_api_version_history.rst watcher/api/controllers/root.py watcher/api/controllers/v1/__init__.py watcher/api/controllers/v1/action.py watcher/api/controllers/v1/action_plan.py watcher/api/controllers/v1/audit.py watcher/api/controllers/v1/audit_template.py watcher/api/controllers/v1/collection.py 
watcher/api/controllers/v1/data_model.py watcher/api/controllers/v1/efficacy_indicator.py watcher/api/controllers/v1/goal.py watcher/api/controllers/v1/scoring_engine.py watcher/api/controllers/v1/service.py watcher/api/controllers/v1/strategy.py watcher/api/controllers/v1/types.py watcher/api/controllers/v1/utils.py watcher/api/controllers/v1/versions.py watcher/api/controllers/v1/webhooks.py watcher/api/middleware/__init__.py watcher/api/middleware/auth_token.py watcher/api/middleware/parsable_error.py watcher/applier/__init__.py watcher/applier/base.py watcher/applier/default.py watcher/applier/manager.py watcher/applier/rpcapi.py watcher/applier/sync.py watcher/applier/action_plan/__init__.py watcher/applier/action_plan/base.py watcher/applier/action_plan/default.py watcher/applier/actions/__init__.py watcher/applier/actions/base.py watcher/applier/actions/change_node_power_state.py watcher/applier/actions/change_nova_service_state.py watcher/applier/actions/factory.py watcher/applier/actions/migration.py watcher/applier/actions/nop.py watcher/applier/actions/resize.py watcher/applier/actions/sleep.py watcher/applier/actions/volume_migration.py watcher/applier/loading/__init__.py watcher/applier/loading/default.py watcher/applier/messaging/__init__.py watcher/applier/messaging/trigger.py watcher/applier/workflow_engine/__init__.py watcher/applier/workflow_engine/base.py watcher/applier/workflow_engine/default.py watcher/cmd/__init__.py watcher/cmd/api.py watcher/cmd/applier.py watcher/cmd/dbmanage.py watcher/cmd/decisionengine.py watcher/cmd/status.py watcher/cmd/sync.py watcher/common/__init__.py watcher/common/cinder_helper.py watcher/common/clients.py watcher/common/config.py watcher/common/context.py watcher/common/exception.py watcher/common/ironic_helper.py watcher/common/keystone_helper.py watcher/common/nova_helper.py watcher/common/paths.py watcher/common/placement_helper.py watcher/common/policy.py watcher/common/rpc.py watcher/common/scheduling.py 
watcher/common/service.py watcher/common/service_manager.py watcher/common/utils.py watcher/common/loader/__init__.py watcher/common/loader/base.py watcher/common/loader/default.py watcher/common/loader/loadable.py watcher/common/policies/__init__.py watcher/common/policies/action.py watcher/common/policies/action_plan.py watcher/common/policies/audit.py watcher/common/policies/audit_template.py watcher/common/policies/base.py watcher/common/policies/data_model.py watcher/common/policies/goal.py watcher/common/policies/scoring_engine.py watcher/common/policies/service.py watcher/common/policies/strategy.py watcher/conf/__init__.py watcher/conf/api.py watcher/conf/applier.py watcher/conf/ceilometer_client.py watcher/conf/cinder_client.py watcher/conf/clients_auth.py watcher/conf/collector.py watcher/conf/datasources.py watcher/conf/db.py watcher/conf/decision_engine.py watcher/conf/exception.py watcher/conf/glance_client.py watcher/conf/gnocchi_client.py watcher/conf/grafana_client.py watcher/conf/grafana_translators.py watcher/conf/ironic_client.py watcher/conf/keystone_client.py watcher/conf/monasca_client.py watcher/conf/neutron_client.py watcher/conf/nova_client.py watcher/conf/opts.py watcher/conf/paths.py watcher/conf/placement_client.py watcher/conf/planner.py watcher/conf/plugins.py watcher/conf/service.py watcher/db/__init__.py watcher/db/api.py watcher/db/migration.py watcher/db/purge.py watcher/db/sqlalchemy/__init__.py watcher/db/sqlalchemy/alembic.ini watcher/db/sqlalchemy/api.py watcher/db/sqlalchemy/job_store.py watcher/db/sqlalchemy/migration.py watcher/db/sqlalchemy/models.py watcher/db/sqlalchemy/alembic/README.rst watcher/db/sqlalchemy/alembic/env.py watcher/db/sqlalchemy/alembic/script.py.mako watcher/db/sqlalchemy/alembic/versions/001_ocata.py watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py watcher/db/sqlalchemy/alembic/versions/3cfc94cecf4e_add_name_for_audit.py 
watcher/db/sqlalchemy/alembic/versions/4b16194c56bc_add_start_end_time.py watcher/db/sqlalchemy/alembic/versions/52804f2498c4_add_hostname.py watcher/db/sqlalchemy/alembic/versions/609bec748f2a_add_force_field.py watcher/db/sqlalchemy/alembic/versions/a86240e89a29_.py watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py watcher/db/sqlalchemy/alembic/versions/d09a5945e4a0_add_action_description_table.py watcher/decision_engine/__init__.py watcher/decision_engine/gmr.py watcher/decision_engine/manager.py watcher/decision_engine/rpcapi.py watcher/decision_engine/scheduling.py watcher/decision_engine/sync.py watcher/decision_engine/threading.py watcher/decision_engine/audit/__init__.py watcher/decision_engine/audit/base.py watcher/decision_engine/audit/continuous.py watcher/decision_engine/audit/event.py watcher/decision_engine/audit/oneshot.py watcher/decision_engine/datasources/__init__.py watcher/decision_engine/datasources/base.py watcher/decision_engine/datasources/ceilometer.py watcher/decision_engine/datasources/gnocchi.py watcher/decision_engine/datasources/grafana.py watcher/decision_engine/datasources/manager.py watcher/decision_engine/datasources/monasca.py watcher/decision_engine/datasources/grafana_translator/__init__.py watcher/decision_engine/datasources/grafana_translator/base.py watcher/decision_engine/datasources/grafana_translator/influxdb.py watcher/decision_engine/goal/__init__.py watcher/decision_engine/goal/base.py watcher/decision_engine/goal/goals.py watcher/decision_engine/goal/efficacy/__init__.py watcher/decision_engine/goal/efficacy/base.py watcher/decision_engine/goal/efficacy/indicators.py watcher/decision_engine/goal/efficacy/specs.py watcher/decision_engine/loading/__init__.py watcher/decision_engine/loading/default.py watcher/decision_engine/messaging/__init__.py watcher/decision_engine/messaging/audit_endpoint.py watcher/decision_engine/messaging/data_model_endpoint.py watcher/decision_engine/model/__init__.py 
watcher/decision_engine/model/base.py watcher/decision_engine/model/model_root.py watcher/decision_engine/model/collector/__init__.py watcher/decision_engine/model/collector/base.py watcher/decision_engine/model/collector/cinder.py watcher/decision_engine/model/collector/ironic.py watcher/decision_engine/model/collector/manager.py watcher/decision_engine/model/collector/nova.py watcher/decision_engine/model/element/__init__.py watcher/decision_engine/model/element/baremetal_resource.py watcher/decision_engine/model/element/base.py watcher/decision_engine/model/element/compute_resource.py watcher/decision_engine/model/element/instance.py watcher/decision_engine/model/element/node.py watcher/decision_engine/model/element/storage_resource.py watcher/decision_engine/model/element/volume.py watcher/decision_engine/model/notification/__init__.py watcher/decision_engine/model/notification/base.py watcher/decision_engine/model/notification/cinder.py watcher/decision_engine/model/notification/filtering.py watcher/decision_engine/model/notification/nova.py watcher/decision_engine/planner/__init__.py watcher/decision_engine/planner/base.py watcher/decision_engine/planner/manager.py watcher/decision_engine/planner/node_resource_consolidation.py watcher/decision_engine/planner/weight.py watcher/decision_engine/planner/workload_stabilization.py watcher/decision_engine/scope/__init__.py watcher/decision_engine/scope/baremetal.py watcher/decision_engine/scope/base.py watcher/decision_engine/scope/compute.py watcher/decision_engine/scope/storage.py watcher/decision_engine/scoring/__init__.py watcher/decision_engine/scoring/base.py watcher/decision_engine/scoring/dummy_scorer.py watcher/decision_engine/scoring/dummy_scoring_container.py watcher/decision_engine/scoring/scoring_factory.py watcher/decision_engine/solution/__init__.py watcher/decision_engine/solution/base.py watcher/decision_engine/solution/default.py watcher/decision_engine/solution/efficacy.py 
watcher/decision_engine/solution/solution_comparator.py watcher/decision_engine/solution/solution_evaluator.py watcher/decision_engine/strategy/__init__.py watcher/decision_engine/strategy/common/__init__.py watcher/decision_engine/strategy/common/level.py watcher/decision_engine/strategy/context/__init__.py watcher/decision_engine/strategy/context/base.py watcher/decision_engine/strategy/context/default.py watcher/decision_engine/strategy/selection/__init__.py watcher/decision_engine/strategy/selection/base.py watcher/decision_engine/strategy/selection/default.py watcher/decision_engine/strategy/strategies/__init__.py watcher/decision_engine/strategy/strategies/actuation.py watcher/decision_engine/strategy/strategies/base.py watcher/decision_engine/strategy/strategies/basic_consolidation.py watcher/decision_engine/strategy/strategies/dummy_strategy.py watcher/decision_engine/strategy/strategies/dummy_with_resize.py watcher/decision_engine/strategy/strategies/dummy_with_scorer.py watcher/decision_engine/strategy/strategies/host_maintenance.py watcher/decision_engine/strategy/strategies/node_resource_consolidation.py watcher/decision_engine/strategy/strategies/noisy_neighbor.py watcher/decision_engine/strategy/strategies/outlet_temp_control.py watcher/decision_engine/strategy/strategies/saving_energy.py watcher/decision_engine/strategy/strategies/storage_capacity_balance.py watcher/decision_engine/strategy/strategies/uniform_airflow.py watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py watcher/decision_engine/strategy/strategies/workload_balance.py watcher/decision_engine/strategy/strategies/workload_stabilization.py watcher/decision_engine/strategy/strategies/zone_migration.py watcher/hacking/__init__.py watcher/hacking/checks.py watcher/locale/de/LC_MESSAGES/watcher.po watcher/locale/en_GB/LC_MESSAGES/watcher.po watcher/notifications/__init__.py watcher/notifications/action.py watcher/notifications/action_plan.py 
watcher/notifications/audit.py watcher/notifications/base.py watcher/notifications/exception.py watcher/notifications/goal.py watcher/notifications/service.py watcher/notifications/strategy.py watcher/objects/__init__.py watcher/objects/action.py watcher/objects/action_description.py watcher/objects/action_plan.py watcher/objects/audit.py watcher/objects/audit_template.py watcher/objects/base.py watcher/objects/efficacy_indicator.py watcher/objects/fields.py watcher/objects/goal.py watcher/objects/scoring_engine.py watcher/objects/service.py watcher/objects/strategy.py watcher/tests/__init__.py watcher/tests/base.py watcher/tests/conf_fixture.py watcher/tests/config.py watcher/tests/fake_policy.py watcher/tests/fakes.py watcher/tests/policy_fixture.py watcher/tests/test_threading.py watcher/tests/api/__init__.py watcher/tests/api/base.py watcher/tests/api/test_base.py watcher/tests/api/test_config.py watcher/tests/api/test_hooks.py watcher/tests/api/test_root.py watcher/tests/api/test_scheduling.py watcher/tests/api/test_utils.py watcher/tests/api/utils.py watcher/tests/api/v1/__init__.py watcher/tests/api/v1/test_actions.py watcher/tests/api/v1/test_actions_plans.py watcher/tests/api/v1/test_audit_templates.py watcher/tests/api/v1/test_audits.py watcher/tests/api/v1/test_data_model.py watcher/tests/api/v1/test_goals.py watcher/tests/api/v1/test_microversions.py watcher/tests/api/v1/test_root.py watcher/tests/api/v1/test_scoring_engines.py watcher/tests/api/v1/test_services.py watcher/tests/api/v1/test_strategies.py watcher/tests/api/v1/test_types.py watcher/tests/api/v1/test_utils.py watcher/tests/api/v1/test_webhooks.py watcher/tests/applier/__init__.py watcher/tests/applier/test_applier_manager.py watcher/tests/applier/test_rpcapi.py watcher/tests/applier/test_sync.py watcher/tests/applier/action_plan/__init__.py watcher/tests/applier/action_plan/test_default_action_handler.py watcher/tests/applier/actions/__init__.py 
watcher/tests/applier/actions/test_change_node_power_state.py watcher/tests/applier/actions/test_change_nova_service_state.py watcher/tests/applier/actions/test_migration.py watcher/tests/applier/actions/test_resize.py watcher/tests/applier/actions/test_sleep.py watcher/tests/applier/actions/test_volume_migration.py watcher/tests/applier/actions/loading/__init__.py watcher/tests/applier/actions/loading/test_default_actions_loader.py watcher/tests/applier/messaging/__init__.py watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py watcher/tests/applier/workflow_engine/__init__.py watcher/tests/applier/workflow_engine/test_default_workflow_engine.py watcher/tests/applier/workflow_engine/test_taskflow_action_container.py watcher/tests/applier/workflow_engine/loading/__init__.py watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py watcher/tests/cmd/__init__.py watcher/tests/cmd/test_api.py watcher/tests/cmd/test_applier.py watcher/tests/cmd/test_db_manage.py watcher/tests/cmd/test_decision_engine.py watcher/tests/cmd/test_status.py watcher/tests/common/__init__.py watcher/tests/common/test_cinder_helper.py watcher/tests/common/test_clients.py watcher/tests/common/test_ironic_helper.py watcher/tests/common/test_nova_helper.py watcher/tests/common/test_placement_helper.py watcher/tests/common/test_service.py watcher/tests/common/loader/__init__.py watcher/tests/common/loader/test_loader.py watcher/tests/conf/__init__.py watcher/tests/conf/test_list_opts.py watcher/tests/db/__init__.py watcher/tests/db/base.py watcher/tests/db/test_action.py watcher/tests/db/test_action_description.py watcher/tests/db/test_action_plan.py watcher/tests/db/test_audit.py watcher/tests/db/test_audit_template.py watcher/tests/db/test_efficacy_indicator.py watcher/tests/db/test_goal.py watcher/tests/db/test_purge.py watcher/tests/db/test_scoring_engine.py watcher/tests/db/test_service.py watcher/tests/db/test_strategy.py watcher/tests/db/utils.py 
watcher/tests/decision_engine/__init__.py watcher/tests/decision_engine/fake_goals.py watcher/tests/decision_engine/fake_strategies.py watcher/tests/decision_engine/test_gmr.py watcher/tests/decision_engine/test_rpcapi.py watcher/tests/decision_engine/test_scheduling.py watcher/tests/decision_engine/test_sync.py watcher/tests/decision_engine/audit/__init__.py watcher/tests/decision_engine/audit/test_audit_handlers.py watcher/tests/decision_engine/cluster/__init__.py watcher/tests/decision_engine/cluster/test_cinder_cdmc.py watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py watcher/tests/decision_engine/cluster/test_nova_cdmc.py watcher/tests/decision_engine/datasources/__init__.py watcher/tests/decision_engine/datasources/test_base.py watcher/tests/decision_engine/datasources/test_ceilometer_helper.py watcher/tests/decision_engine/datasources/test_gnocchi_helper.py watcher/tests/decision_engine/datasources/test_grafana_helper.py watcher/tests/decision_engine/datasources/test_manager.py watcher/tests/decision_engine/datasources/test_monasca_helper.py watcher/tests/decision_engine/datasources/grafana_translators/__init__.py watcher/tests/decision_engine/datasources/grafana_translators/test_base.py watcher/tests/decision_engine/datasources/grafana_translators/test_influxdb.py watcher/tests/decision_engine/event_consumer/__init__.py watcher/tests/decision_engine/loading/__init__.py watcher/tests/decision_engine/loading/test_collector_loader.py watcher/tests/decision_engine/loading/test_default_planner_loader.py watcher/tests/decision_engine/loading/test_default_strategy_loader.py watcher/tests/decision_engine/loading/test_goal_loader.py watcher/tests/decision_engine/messaging/__init__.py watcher/tests/decision_engine/messaging/test_audit_endpoint.py watcher/tests/decision_engine/messaging/test_data_model_endpoint.py watcher/tests/decision_engine/model/__init__.py watcher/tests/decision_engine/model/ceilometer_metrics.py 
watcher/tests/decision_engine/model/faker_cluster_and_metrics.py watcher/tests/decision_engine/model/faker_cluster_state.py watcher/tests/decision_engine/model/gnocchi_metrics.py watcher/tests/decision_engine/model/monasca_metrics.py watcher/tests/decision_engine/model/test_element.py watcher/tests/decision_engine/model/test_model.py watcher/tests/decision_engine/model/data/ironic_scenario_1.xml watcher/tests/decision_engine/model/data/scenario_1.xml watcher/tests/decision_engine/model/data/scenario_10.xml watcher/tests/decision_engine/model/data/scenario_1_with_1_node_unavailable.xml watcher/tests/decision_engine/model/data/scenario_1_with_all_instances_exclude.xml watcher/tests/decision_engine/model/data/scenario_1_with_all_nodes_disable.xml watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xml watcher/tests/decision_engine/model/data/storage_scenario_1.xml watcher/tests/decision_engine/model/notification/__init__.py watcher/tests/decision_engine/model/notification/fake_managers.py watcher/tests/decision_engine/model/notification/test_cinder_notifications.py watcher/tests/decision_engine/model/notification/test_notifications.py watcher/tests/decision_engine/model/notification/test_nova_notifications.py watcher/tests/decision_engine/model/notification/data/capacity.json 
watcher/tests/decision_engine/model/notification/data/instance-create-end.json watcher/tests/decision_engine/model/notification/data/instance-delete-end.json watcher/tests/decision_engine/model/notification/data/instance-live_migration_force_complete-end.json watcher/tests/decision_engine/model/notification/data/instance-live_migration_post-end.json watcher/tests/decision_engine/model/notification/data/instance-lock.json watcher/tests/decision_engine/model/notification/data/instance-pause-end.json watcher/tests/decision_engine/model/notification/data/instance-power_off-end.json watcher/tests/decision_engine/model/notification/data/instance-power_on-end.json watcher/tests/decision_engine/model/notification/data/instance-rebuild-end.json watcher/tests/decision_engine/model/notification/data/instance-rescue-end.json watcher/tests/decision_engine/model/notification/data/instance-resize_confirm-end.json watcher/tests/decision_engine/model/notification/data/instance-restore-end.json watcher/tests/decision_engine/model/notification/data/instance-resume-end.json watcher/tests/decision_engine/model/notification/data/instance-shelve-end.json watcher/tests/decision_engine/model/notification/data/instance-shutdown-end.json watcher/tests/decision_engine/model/notification/data/instance-soft_delete-end.json watcher/tests/decision_engine/model/notification/data/instance-suspend-end.json watcher/tests/decision_engine/model/notification/data/instance-unlock.json watcher/tests/decision_engine/model/notification/data/instance-unpause-end.json watcher/tests/decision_engine/model/notification/data/instance-unrescue-end.json watcher/tests/decision_engine/model/notification/data/instance-unshelve-end.json watcher/tests/decision_engine/model/notification/data/instance-update.json watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.json watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.json 
watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.json watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.json watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.json watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.json watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.json watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.json watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.json watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.json watcher/tests/decision_engine/model/notification/data/service-create.json watcher/tests/decision_engine/model/notification/data/service-delete.json watcher/tests/decision_engine/model/notification/data/service-update.json watcher/tests/decision_engine/planner/__init__.py watcher/tests/decision_engine/planner/test_node_resource_consolidation.py watcher/tests/decision_engine/planner/test_planner_manager.py watcher/tests/decision_engine/planner/test_weight_planner.py watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py watcher/tests/decision_engine/scope/__init__.py watcher/tests/decision_engine/scope/fake_scopes.py 
watcher/tests/decision_engine/scope/test_baremetal.py watcher/tests/decision_engine/scope/test_compute.py watcher/tests/decision_engine/scope/test_storage.py watcher/tests/decision_engine/scoring/__init__.py watcher/tests/decision_engine/scoring/test_dummy_scorer.py watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py watcher/tests/decision_engine/scoring/test_scoring_factory.py watcher/tests/decision_engine/solution/__init__.py watcher/tests/decision_engine/solution/test_default_solution.py watcher/tests/decision_engine/strategy/__init__.py watcher/tests/decision_engine/strategy/context/__init__.py watcher/tests/decision_engine/strategy/context/test_strategy_context.py watcher/tests/decision_engine/strategy/selector/__init__.py watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py watcher/tests/decision_engine/strategy/strategies/__init__.py watcher/tests/decision_engine/strategy/strategies/test_actuator.py watcher/tests/decision_engine/strategy/strategies/test_base.py watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py watcher/tests/decision_engine/strategy/strategies/test_host_maintenance.py watcher/tests/decision_engine/strategy/strategies/test_node_resource_consolidation.py watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py watcher/tests/decision_engine/strategy/strategies/test_saving_energy.py watcher/tests/decision_engine/strategy/strategies/test_storage_capacity_balance.py watcher/tests/decision_engine/strategy/strategies/test_strategy_endpoint.py watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.py 
watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.py watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py watcher/tests/notifications/__init__.py watcher/tests/notifications/test_action_notification.py watcher/tests/notifications/test_action_plan_notification.py watcher/tests/notifications/test_audit_notification.py watcher/tests/notifications/test_notification.py watcher/tests/notifications/test_service_notifications.py watcher/tests/objects/__init__.py watcher/tests/objects/test_action.py watcher/tests/objects/test_action_description.py watcher/tests/objects/test_action_plan.py watcher/tests/objects/test_audit.py watcher/tests/objects/test_audit_template.py watcher/tests/objects/test_efficacy_indicator.py watcher/tests/objects/test_goal.py watcher/tests/objects/test_objects.py watcher/tests/objects/test_scoring_engine.py watcher/tests/objects/test_service.py watcher/tests/objects/test_strategy.py watcher/tests/objects/utils.pypython-watcher-4.0.0/etc/0000775000175000017500000000000013656752352015302 5ustar zuulzuul00000000000000python-watcher-4.0.0/etc/apache2/0000775000175000017500000000000013656752352016605 5ustar zuulzuul00000000000000python-watcher-4.0.0/etc/apache2/watcher0000664000175000017500000000224613656752270020170 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# This is an example Apache2 configuration file for using # Watcher API through mod_wsgi Listen 9322 WSGIDaemonProcess watcher-api user=stack group=stack processes=2 threads=2 display-name=%{GROUP} WSGIScriptAlias / /usr/local/bin/watcher-api-wsgi WSGIProcessGroup watcher-api ErrorLog /var/log/httpd/watcher_error.log LogLevel info CustomLog /var/log/httpd/watcher_access.log combined WSGIProcessGroup watcher-api WSGIApplicationGroup %{GLOBAL} AllowOverride All Require all granted python-watcher-4.0.0/etc/watcher/0000775000175000017500000000000013656752352016737 5ustar zuulzuul00000000000000python-watcher-4.0.0/etc/watcher/README-watcher.conf.txt0000664000175000017500000000020313656752270023006 0ustar zuulzuul00000000000000To generate the sample watcher.conf file, run the following command from the top level of the watcher directory: tox -e genconfig python-watcher-4.0.0/etc/watcher/oslo-config-generator/0000775000175000017500000000000013656752352023142 5ustar zuulzuul00000000000000python-watcher-4.0.0/etc/watcher/oslo-config-generator/watcher.conf0000664000175000017500000000062613656752270025451 0ustar zuulzuul00000000000000[DEFAULT] output_file = etc/watcher/watcher.conf.sample wrap_width = 79 namespace = watcher namespace = keystonemiddleware.auth_token namespace = oslo.cache namespace = oslo.concurrency namespace = oslo.db namespace = oslo.log namespace = oslo.messaging namespace = oslo.policy namespace = oslo.reports namespace = oslo.service.periodic_task namespace = oslo.service.service namespace = oslo.service.wsgi python-watcher-4.0.0/etc/watcher/oslo-policy-generator/0000775000175000017500000000000013656752352023174 5ustar zuulzuul00000000000000python-watcher-4.0.0/etc/watcher/oslo-policy-generator/watcher-policy-generator.conf0000664000175000017500000000011413656752270030754 0ustar zuulzuul00000000000000[DEFAULT] output_file = /etc/watcher/policy.yaml.sample namespace = watcher 
python-watcher-4.0.0/HACKING.rst0000664000175000017500000000056113656752270016326 0ustar zuulzuul00000000000000.. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ ========================== watcher Style Commandments ========================== Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/ python-watcher-4.0.0/releasenotes/0000775000175000017500000000000013656752352017220 5ustar zuulzuul00000000000000python-watcher-4.0.0/releasenotes/notes/0000775000175000017500000000000013656752352020350 5ustar zuulzuul00000000000000python-watcher-4.0.0/releasenotes/notes/db-migration-e1a705a8b54ccdd2.yaml0000664000175000017500000000011213656752270026145 0ustar zuulzuul00000000000000--- features: - Watcher database can now be upgraded thanks to Alembic. python-watcher-4.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000013656752270022620 0ustar zuulzuul00000000000000python-watcher-4.0.0/releasenotes/notes/add-upgrade-check-framework-5bb9693c8a78931c.yaml0000664000175000017500000000073013656752270030632 0ustar zuulzuul00000000000000--- prelude: > Added new tool ``watcher-status upgrade check``. features: - | New framework for ``watcher-status upgrade check`` command is added. This framework allows adding various checks which can be run before a Watcher upgrade to ensure if the upgrade can be performed safely. upgrade: - | Operator can now use new CLI tool ``watcher-status upgrade check`` to check if Watcher deployment can be safely upgraded from N-1 to N release. python-watcher-4.0.0/releasenotes/notes/watcher-notifications-ovo-7b44d52ef6400dd0.yaml0000664000175000017500000000073113656752270030547 0ustar zuulzuul00000000000000--- features: - Provide a notification mechanism into Watcher that supports versioning. 
Whenever a Watcher object is created, updated or deleted, a versioned notification will, if it's relevant, be automatically sent to notify in order to allow an event-driven style of architecture within Watcher. Moreover, it will also give other services and/or 3rd party softwares (e.g. monitoring solutions or rules engines) the ability to react to such events. python-watcher-4.0.0/releasenotes/notes/configurable-weights-default-planner-3746b33160bc7347.yaml0000664000175000017500000000016613656752270032417 0ustar zuulzuul00000000000000--- features: - Added a way to add a new action without having to amend the source code of the default planner. python-watcher-4.0.0/releasenotes/notes/improve-compute-data-model-b427c85e4ed2b6fb.yaml0000664000175000017500000000167513656752270030764 0ustar zuulzuul00000000000000--- features: - | Watcher can get resource information such as total, allocation ratio and reserved information from Placement API. Now we add some new fields to the Watcher Data Model: * vcpu_reserved: The amount of cpu a node has reserved for its own use. * vcpu_ratio: CPU allocation ratio. * memory_mb_reserved: The amount of memory a node has reserved for its own use. * memory_ratio: Memory allocation ratio. * disk_gb_reserved: The amount of disk a node has reserved for its own use. * disk_ratio: Disk allocation ratio. We also add some new propeties: * vcpu_capacity: The amount of vcpu, take allocation ratio into account, but do not include reserved. * memory_mb_capacity: The amount of memory, take allocation ratio into account, but do not include reserved. * disk_gb_capacity: The amount of disk, take allocation ratio into account, but do not include reserved. python-watcher-4.0.0/releasenotes/notes/watcher-service-list-7b2f4b64f71e9b89.yaml0000664000175000017500000000010513656752270027530 0ustar zuulzuul00000000000000--- features: - Add a service supervisor to watch Watcher deamons. 
././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000python-watcher-4.0.0/releasenotes/notes/add-start-end-time-for-continuous-audit-52c45052cb06d153.yamlpython-watcher-4.0.0/releasenotes/notes/add-start-end-time-for-continuous-audit-52c45052cb06d153.yam0000664000175000017500000000023413656752270032574 0ustar zuulzuul00000000000000--- features: - | Add start_time and end_time fields in audits table. User can set the start time and/or end time when creating CONTINUOUS audit. python-watcher-4.0.0/releasenotes/notes/dynamic-action-description-0e947b9e7ef2a134.yaml0000664000175000017500000000017313656752270030677 0ustar zuulzuul00000000000000--- features: - Add description property for dynamic action. Admin can see detail information of any specify action. python-watcher-4.0.0/releasenotes/notes/cdm-scoping-8d9c307bad46bfa1.yaml0000664000175000017500000000034713656752270026014 0ustar zuulzuul00000000000000--- features: - | Each CDM collector can have its own CDM scoper now. This changed Scope JSON schema definition for the audit template POST data. Please see audit template create help message in python-watcherclient. python-watcher-4.0.0/releasenotes/notes/remove-nova-legacy-notifications-e1b6d10eff58f30a.yaml0000664000175000017500000000017613656752270032156 0ustar zuulzuul00000000000000--- deprecations: - | Watcher removes the support to Nova legacy notifications because of Nova will deprecate them. python-watcher-4.0.0/releasenotes/notes/multiple-global-efficacy-indicator-fc11c4844a12a7d5.yaml0000664000175000017500000000041313656752270032247 0ustar zuulzuul00000000000000--- features: - Watcher got an ability to calculate multiple global efficacy indicators during audit's execution. Now global efficacy can be calculated for many resource types (like volumes, instances, network) if strategy supports efficacy indicators. 
python-watcher-4.0.0/releasenotes/notes/add-scoring-module-fa00d013ed2d614e.yaml0000664000175000017500000000041213656752270027160 0ustar zuulzuul00000000000000--- features: - Added a generic scoring engine module, which will standardize interactions with scoring engines through the common API. It is possible to use the scoring engine by different Strategies, which improve the code and data model re-use. python-watcher-4.0.0/releasenotes/notes/cluster-model-objects-wrapper-9c799ea262c56a5b.yaml0000664000175000017500000000026713656752270031354 0ustar zuulzuul00000000000000--- features: - Added an in-memory cache of the cluster model built up and kept fresh via notifications from services of interest in addition to periodic syncing logic. python-watcher-4.0.0/releasenotes/notes/standard-deviation-strategy-cd1d0c443fdfde9c.yaml0000664000175000017500000000036613656752270031374 0ustar zuulzuul00000000000000--- features: - Added a strategy that monitors if there is a higher load on some hosts compared to other hosts in the cluster and re-balances the work across hosts to minimize the standard deviation of the loads in the cluster. python-watcher-4.0.0/releasenotes/notes/watcher-policies-1e86a30f0f11c6fa.yaml0000664000175000017500000000012213656752270026753 0ustar zuulzuul00000000000000--- features: - Added policies to handle user rights to access Watcher API. python-watcher-4.0.0/releasenotes/notes/monasca-support-0b0486b8572ac38b.yaml0000664000175000017500000000014613656752270026521 0ustar zuulzuul00000000000000--- features: - Watcher supports multiple metrics backend and relies on Ceilometer and Monasca. python-watcher-4.0.0/releasenotes/notes/optimization-threshold-21ad38f0470d0e1a.yaml0000664000175000017500000000026613656752270030153 0ustar zuulzuul00000000000000--- features: - Allow decision engine to pass strategy parameters, like optimization threshold, to selected strategy, also strategy to provide parameters info to end user. 
python-watcher-4.0.0/releasenotes/notes/get-goal-from-strategy-396c9b13a38bb650.yaml0000664000175000017500000000025213656752270027666 0ustar zuulzuul00000000000000--- features: - Added a way to return the of available goals depending on which strategies have been deployed on the node where the decision engine is running. python-watcher-4.0.0/releasenotes/notes/stale-action-plan-b6a6b08df873c128.yaml0000664000175000017500000000016613656752270026771 0ustar zuulzuul00000000000000--- features: - Check the creation time of the action plan, and set its state to SUPERSEDED if it has expired. python-watcher-4.0.0/releasenotes/notes/zone-migration-strategy-10f7656a2a01e607.yaml0000664000175000017500000000031013656752270030072 0ustar zuulzuul00000000000000--- features: - | Added strategy "Zone migration" and it's goal "Hardware maintenance". The strategy migrates many instances and volumes efficiently with minimum downtime automatically. ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/releasenotes/notes/formal-datasource-interface-implementation-222769d55a127d33.yamlpython-watcher-4.0.0/releasenotes/notes/formal-datasource-interface-implementation-222769d55a127d33.0000664000175000017500000000141613656752270032737 0ustar zuulzuul00000000000000--- features: - | Improved interface for datasource baseclass that better defines expected values and types for parameters and return types of all abstract methods. This allows all strategies to work with every datasource provided the metrics are configured for that given datasource. deprecations: - | The new strategy baseclass has significant changes in method parameters and any out-of-tree strategies will have to be adopted. - | Several strategies have changed the `node` parameter to `compute_node` to be better aligned with terminology. These strategies include `basic_consolidation` and `workload_stabilzation`. 
The `node` parameter will remain supported during Train release and will be removed in the subsequent release. python-watcher-4.0.0/releasenotes/notes/service-versioned-notifications-api-70367b79a565d900.yaml0000664000175000017500000000007713656752270032315 0ustar zuulzuul00000000000000--- features: - Add notifications related to Service object. python-watcher-4.0.0/releasenotes/notes/centralise-config-opts-95670987dfbdb0e7.yaml0000664000175000017500000000010413656752270030043 0ustar zuulzuul00000000000000--- features: - Centralize all configuration options for Watcher. ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000python-watcher-4.0.0/releasenotes/notes/workload-balance-base-on-cpu-or-ram-util-3ff4ee968c32b2ed.yamlpython-watcher-4.0.0/releasenotes/notes/workload-balance-base-on-cpu-or-ram-util-3ff4ee968c32b2ed.ya0000664000175000017500000000036113656752270032752 0ustar zuulzuul00000000000000--- features: - Existing workload_balance strategy based on the VM workloads of CPU. This feature improves the strategy. By the input parameter "metrics", it makes decision to migrate a VM base on CPU or memory utilization.python-watcher-4.0.0/releasenotes/notes/consume-nova-versioned-notifications-f98361b37e546b4d.yaml0000664000175000017500000000160213656752270032657 0ustar zuulzuul00000000000000--- features: - | Watcher consumes Nova notifications to update its internal Compute CDM(Cluster Data Model). 
All the notifications as below pre-existing: * service.update * instance.update * instance.delete.end new: * instance.lock * instance.unlock * instance.pause.end * instance.power_off.end * instance.power_on.end * instance.resize_confirm.end * instance.restore.end * instance.resume.end * instance.shelve.end * instance.shutdown.end * instance.suspend.end * instance.unpause.end * instance.unrescue.end * instance.unshelve.end * instance.rebuild.end * instance.rescue.end * instance.create.end * instance.live_migration_force_complete.end * instance.live_migration_post_dest.end * instance.soft_delete.end * service.create * service.delete python-watcher-4.0.0/releasenotes/notes/show-datamodel-api-6945b744fd5d25d5.yaml0000664000175000017500000000110213656752270027055 0ustar zuulzuul00000000000000--- features: - | Add show data model api for Watcher. New in version 1.3. User can use 'openstack optimize datamodel list' command to view the current data model information in memory. User can also add '--audit ' to view specific data model in memory filted by the scope in audit. User can also add '--detail' to view detailed information about current data model. User can also add '--type ' to specify the type of data model. Default type is 'compute'. In the future, type 'storage' and 'baremetal' will be supported. python-watcher-4.0.0/releasenotes/notes/suspended-audit-state-07f998c94e9d9a47.yaml0000664000175000017500000000006413656752270027642 0ustar zuulzuul00000000000000--- features: - | Added SUSPENDED audit state python-watcher-4.0.0/releasenotes/notes/storage-workload-balance-0ecabbc1791e6894.yaml0000664000175000017500000000010113656752270030371 0ustar zuulzuul00000000000000--- features: - | Added storage capacity balance strategy. python-watcher-4.0.0/releasenotes/notes/add-ha-support-b9042255e5b76e42.yaml0000664000175000017500000000050213656752270026136 0ustar zuulzuul00000000000000--- features: - Watcher services can be launched in HA mode. 
From now on Watcher Decision Engine and Watcher Applier services may be deployed on different nodes to run in active-active or active-passive mode. Any ONGOING Audits or Action Plans will be CANCELLED if service they are executed on is restarted. python-watcher-4.0.0/releasenotes/notes/watcher-planner-selector-84d77549d46f362a.yaml0000664000175000017500000000023113656752270030234 0ustar zuulzuul00000000000000--- features: - Now Watcher strategy can select specific planner beyond default. Strategy can set planner property to specify its own planner. python-watcher-4.0.0/releasenotes/notes/api-call-retry-fef741ac684c58dd.yaml0000664000175000017500000000064013656752270026450 0ustar zuulzuul00000000000000--- features: - | API calls while building the Compute data model will be retried upon failure. The amount of failures allowed before giving up and the time before reattempting are configurable. The `api_call_retries` and `api_query_timeout` parameters in the `[collector]` group can be used to adjust these paremeters. 10 retries with a 1 second time in between reattempts is the default. python-watcher-4.0.0/releasenotes/notes/bp-audit-scope-exclude-project-511a7720aac00dff.yaml0000664000175000017500000000033513656752270031415 0ustar zuulzuul00000000000000--- features: - | Feature to exclude instances from audit scope based on project_id is added. Now instances from particular project in OpenStack can be excluded from audit defining scope in audit templates. python-watcher-4.0.0/releasenotes/notes/file-based-metric-map-c2af62b5067895df.yaml0000664000175000017500000000110013656752270027477 0ustar zuulzuul00000000000000--- features: - | Allow using file to override metric map. Override the metric map of each datasource as soon as it is created by the manager. This override comes from a file whose path is provided by a setting in config file. The setting is `watcher_decision_engine/metric_map_path`. 
The file contains a map per datasource whose keys are the metric names as recognized by watcher and the value is the real name of the metric in the datasource. This setting defaults to `/etc/watcher/metric_map.yaml`, and presence of this file is optional. python-watcher-4.0.0/releasenotes/notes/efficacy-indicator-95380ad7b84e3be2.yaml0000664000175000017500000000016013656752270027174 0ustar zuulzuul00000000000000--- features: - Added a way to compare the efficacy of different strategies for a give optimization goal. python-watcher-4.0.0/releasenotes/notes/audit-scoper-for-storage-data-model-cdccc803542d22db.yaml0000664000175000017500000000024513656752270032427 0ustar zuulzuul00000000000000--- features: - | Adds audit scoper for storage data model, now watcher users can specify audit scope for storage CDM in the same manner as compute scope. python-watcher-4.0.0/releasenotes/notes/continuously-optimization-35364f4d2c0b81fc.yaml0000664000175000017500000000017113656752270030742 0ustar zuulzuul00000000000000--- features: - Added a way to create periodic audit to be able to optimize continuously the cloud infrastructure. python-watcher-4.0.0/releasenotes/notes/add-name-for-audit-0df1f39f00736f06.yaml0000664000175000017500000000020113656752270026716 0ustar zuulzuul00000000000000--- features: - Audits have 'name' field now, that is more friendly to end users. Audit's name can't exceed 63 characters. python-watcher-4.0.0/releasenotes/notes/support-placement-api-58ce6bef1bbbe98a.yaml0000664000175000017500000000057413656752270030215 0ustar zuulzuul00000000000000--- features: - | Added Placement API helper to Watcher. Now Watcher can get information about resource providers, it can be used for the data model and strategies. Config group placement_client with options 'api_version', 'interface' and 'region_name' is also added. The default values for 'api_version' and 'interface' are 1.29 and 'public', respectively. 
python-watcher-4.0.0/releasenotes/notes/audit-versioned-notifications-api-bca7738e16954bad.yaml0000664000175000017500000000007513656752270032256 0ustar zuulzuul00000000000000--- features: - Add notifications related to Audit object. python-watcher-4.0.0/releasenotes/notes/planner-storage-action-plan-26ef37893c5e8648.yaml0000664000175000017500000000021613656752270030643 0ustar zuulzuul00000000000000--- features: - Watcher can now run specific actions in parallel improving the performances dramatically when executing an action plan. python-watcher-4.0.0/releasenotes/notes/action-versioned-notifications-api-ff94fc0f401292d0.yaml0000664000175000017500000000007613656752270032340 0ustar zuulzuul00000000000000--- features: - Add notifications related to Action object. python-watcher-4.0.0/releasenotes/notes/host-maintenance-strategy-41f640927948fb56.yaml0000664000175000017500000000055513656752270030351 0ustar zuulzuul00000000000000--- features: - | Added a strategy for one compute node maintenance, without having the user's application been interrupted. If given one backup node, the strategy will firstly migrate all instances from the maintenance node to the backup node. If the backup node is not provided, it will migrate all instances, relying on nova-scheduler. ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000python-watcher-4.0.0/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yamlpython-watcher-4.0.0/releasenotes/notes/action-plan-versioned-notifications-api-e8ca4f5d37aa5b4b.yam0000664000175000017500000000010313656752270033322 0ustar zuulzuul00000000000000--- features: - Add notifications related to Action plan object. 
python-watcher-4.0.0/releasenotes/notes/add-power-on-off-a77673d482568a8b.yaml0000664000175000017500000000007313656752270026373 0ustar zuulzuul00000000000000--- features: - Add action for compute node power on/off python-watcher-4.0.0/releasenotes/notes/volume-migrate-action-fc57b0ce0e4c39ae.yaml0000664000175000017500000000006413656752270030075 0ustar zuulzuul00000000000000--- features: - | Added volume migrate action python-watcher-4.0.0/releasenotes/notes/cron-based-continuous-audits-c3eedf28d9752b37.yaml0000664000175000017500000000055313656752270031256 0ustar zuulzuul00000000000000--- features: - There is new ability to create Watcher continuous audits with cron interval. It means you may use, for example, optional argument '--interval "\*/5 \* \* \* \*"' to launch audit every 5 minutes. These jobs are executed on a best effort basis and therefore, we recommend you to use a minimal cron interval of at least one minute. python-watcher-4.0.0/releasenotes/notes/change-ram-util-metric-4a3e6984b9dd968d.yaml0000664000175000017500000000043413656752270027735 0ustar zuulzuul00000000000000--- features: - Enhancement of vm_workload_consolidation strategy by using 'memory.resident' metric in place of 'memory.usage', as memory.usage shows the memory usage inside guest-os and memory.resident represents volume of RAM used by instance on host machine. python-watcher-4.0.0/releasenotes/notes/support-keystoneclient-option-b30d1ff45f86a2e7.yaml0000664000175000017500000000025013656752270031607 0ustar zuulzuul00000000000000--- features: - | Add keystone_client Group for user to configure 'interface' and 'region_name' by watcher.conf. The default value of 'interface' is 'admin'. python-watcher-4.0.0/releasenotes/notes/gnocchi-watcher-43c25d391fbd3e9c.yaml0000664000175000017500000000022213656752270026573 0ustar zuulzuul00000000000000--- features: - Added gnocchi support as data source for metrics. Administrator can change data source for each strategy using config file. 
python-watcher-4.0.0/releasenotes/notes/action-plan-cancel-c54726378019e096.yaml0000664000175000017500000000007713656752270026617 0ustar zuulzuul00000000000000--- features: - | Adds feature to cancel an action-plan. python-watcher-4.0.0/releasenotes/notes/min-required-nova-train-71f124192d88ae52.yaml0000664000175000017500000000042013656752270027763 0ustar zuulzuul00000000000000--- upgrade: - | The minimum required version of the ``[nova_client]/api_version`` value is now enforced to be ``2.56`` which is available since the Queens version of the nova compute service. A ``watcher-status upgrade check`` has been added for this. python-watcher-4.0.0/releasenotes/notes/workload-balance-migration-strategy-a0b05148a57815c0.yaml0000664000175000017500000000041113656752270032323 0ustar zuulzuul00000000000000--- features: - Added a strategy based on the VM workloads of hypervisors. This strategy makes decisions to migrate workloads to make the total VM workloads of each hypervisor balanced, when the total VM workloads of hypervisor reaches threshold. python-watcher-4.0.0/releasenotes/notes/global-datasource-preference-3ab47b4be09ff3a5.yaml0000664000175000017500000000060713656752270031313 0ustar zuulzuul00000000000000--- features: - | Watcher now supports configuring which datasource to use and in which order. This configuration is done by specifying datasources in the watcher_datasources section: - ``[watcher_datasources] datasources = gnocchi,monasca,ceilometer`` Specific strategies can override this order and use datasources which are not listed in the global preference.python-watcher-4.0.0/releasenotes/notes/deprecate-ceilometer-datasource-446b0be70fbce28b.yaml0000664000175000017500000000046113656752270032015 0ustar zuulzuul00000000000000--- deprecations: - | Ceilometer Datasource has been deprecated since its API has been deprecated in Ocata cycle. Watcher has supported Ceilometer for some releases after Ocata to let users migrate to Gnocchi/Monasca datasources. 
Since Train release, Ceilometer support will be removed. python-watcher-4.0.0/releasenotes/notes/automatic-triggering-audit-8a9b0540d547db60.yaml0000664000175000017500000000031513656752270030615 0ustar zuulzuul00000000000000--- features: - Watcher can continuously optimize the OpenStack cloud for a specific strategy or goal by triggering an audit periodically which generates an action plan and run it automatically. python-watcher-4.0.0/releasenotes/notes/compute-cdm-include-all-instances-f7506ded2d57732f.yaml0000664000175000017500000000027613656752270032057 0ustar zuulzuul00000000000000--- features: - Watcher has a whole scope of the cluster, when building compute CDM which includes all instances. It filters excluded instances when migration during the audit.python-watcher-4.0.0/releasenotes/notes/uniform-airflow-strategy-68cdba1419c3f770.yaml0000664000175000017500000000023213656752270030426 0ustar zuulzuul00000000000000--- features: - Added a new strategy based on the airflow of servers. This strategy makes decisions to migrate VMs to make the airflow uniform. python-watcher-4.0.0/releasenotes/notes/jsonschema-validation-79cab05d5295da00.yaml0000664000175000017500000000012713656752270027723 0ustar zuulzuul00000000000000--- features: - Added using of JSONSchema instead of voluptuous to validate Actions. ././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000python-watcher-4.0.0/releasenotes/notes/replace-cold-migrate-to-use-nova-migration-api-cecd9a39ddd3bc58.yamlpython-watcher-4.0.0/releasenotes/notes/replace-cold-migrate-to-use-nova-migration-api-cecd9a39ddd3b0000664000175000017500000000100713656752270033364 0ustar zuulzuul00000000000000--- features: - | Instance cold migration logic is now replaced with using Nova migrate Server(migrate Action) API which has host option since v2.56. upgrade: - | Nova API version is now set to 2.56 by default. 
This needs the migrate action of migration type cold with destination_node parameter to work. fixes: - | The migrate action of migration type cold with destination_node parameter was fixed. Before fixing, it booted an instance in the service project as a migrated instance. python-watcher-4.0.0/releasenotes/notes/persistent-audit-parameters-ae41dd7252ba9672.yaml0000664000175000017500000000017713656752270031120 0ustar zuulzuul00000000000000--- features: - Copy all audit templates parameters into audit instead of having a reference to the audit template. python-watcher-4.0.0/releasenotes/notes/noisy-neighbor-strategy-a71342740b59dddc.yaml0000664000175000017500000000030413656752270030235 0ustar zuulzuul00000000000000--- features: - Added strategy to identify and migrate a Noisy Neighbor - a low priority VM that negatively affects performance of a high priority VM by over utilizing Last Level Cache. python-watcher-4.0.0/releasenotes/notes/background-jobs-ha-9d3cf3fe356f4705.yaml0000664000175000017500000000022413656752270027121 0ustar zuulzuul00000000000000--- features: - Added binding between apscheduler job and Watcher decision engine service. It will allow to provide HA support in the future. python-watcher-4.0.0/releasenotes/notes/build-baremetal-data-model-in-watcher-3023453a47b61dab.yaml0000664000175000017500000000007513656752270032447 0ustar zuulzuul00000000000000--- features: - | Adds baremetal data model in Watcher python-watcher-4.0.0/releasenotes/notes/datasource-query-retry-00cba5f7e68aec39.yaml0000664000175000017500000000137413656752270030255 0ustar zuulzuul00000000000000--- features: - | All datasources can now be configured to retry retrieving a metric upon encountering an error. Between each attempt will be a set amount of time which can be adjusted from the configuration. These configuration options can be found in the `[watcher_datasources]` group and are named `query_max_retries` and `query_timeout`. 
upgrade: - | If Gnocchi was configured to have a custom amount of retries and or a custom timeout then the configuration needs to moved into the `[watcher_datasources]` group instead of the `[gnocchi_client]` group. deprecations: - | The configuration options for query retries in `[gnocchi_client]` are deprecated and the option in `[watcher_datasources]` should now be used.python-watcher-4.0.0/releasenotes/notes/audit-tag-vm-metadata-47a3e4468748853c.yaml0000664000175000017500000000040613656752270027325 0ustar zuulzuul00000000000000--- features: - Added the functionality to filter out instances which have metadata field 'optimize' set to False. For now, this is only available for the basic_consolidation strategy (if "check_optimize_metadata" configuration option is enabled). python-watcher-4.0.0/releasenotes/notes/graph-based-cluster-model-523937a6f5e66537.yaml0000664000175000017500000000041213656752270030176 0ustar zuulzuul00000000000000--- features: - The graph model describes how VMs are associated to compute hosts. This allows for seeing relationships upfront between the entities and hence can be used to identify hot/cold spots in the data center and influence a strategy decision. python-watcher-4.0.0/releasenotes/notes/grafana-datasource-b672367c23ffa0c6.yaml0000664000175000017500000000104313656752270027174 0ustar zuulzuul00000000000000--- features: - | Grafana has been added as datasource that can be used for collecting metrics. The configuration options allow to specify what metrics and how they are stored in grafana so that no matter how Grafana is configured it can still be used. The configuration can be done via the typical configuration file but it is recommended to configure most options in the yaml file for metrics. 
For a complete walkthrough on configuring Grafana see: https://docs.openstack.org/watcher/latest/datasources/grafana.htmlpython-watcher-4.0.0/releasenotes/notes/scope-for-data-model-ea9792f90db14343.yaml0000664000175000017500000000033513656752270027300 0ustar zuulzuul00000000000000--- features: - | For a large cloud infrastructure, retrieving data from Nova may take a long time. To avoid getting too much data from Nova, building the compute data model according to the scope of audit. python-watcher-4.0.0/releasenotes/notes/node-resource-consolidation-73bc0c0abfeb0b03.yaml0000664000175000017500000000036513656752270031260 0ustar zuulzuul00000000000000--- features: - | Added strategy "node resource consolidation". This strategy is used to centralize VMs to as few nodes as possible by VM migration. User can set an input parameter to decide how to select the destination node. python-watcher-4.0.0/releasenotes/notes/drop-py-2-7-54f8e806d71f19a7.yaml0000664000175000017500000000030713656752270025303 0ustar zuulzuul00000000000000--- upgrade: - | Python 2.7 support has been dropped. Last release of Watcher to support py2.7 is OpenStack Train. The minimum version of Python now supported by Watcher is Python 3.6. python-watcher-4.0.0/releasenotes/notes/add-plugins-parameters-376eb6b0b8978b44.yaml0000664000175000017500000000042213656752270027751 0ustar zuulzuul00000000000000--- features: - Added a standard way to both declare and fetch configuration options so that whenever the administrator generates the Watcher configuration sample file, it contains the configuration options of the plugins that are currently available. python-watcher-4.0.0/releasenotes/notes/define-the-audit-scope-e89edc5051dcf3f2.yaml0000664000175000017500000000033413656752270030040 0ustar zuulzuul00000000000000--- features: - Provides a generic way to define the scope of an audit. 
The set of audited resources will be called "Audit scope" and will be defined in each audit template (which contains the audit settings). python-watcher-4.0.0/releasenotes/notes/uwsgi-support-8dcea6961e56dad0.yaml0000664000175000017500000000065313656752270026470 0ustar zuulzuul00000000000000--- upgrade: - | An Watcher API WSGI application script ``watcher-api-wsgi`` is now available. It is auto-generated by ``pbr`` and allows to run the API service using WSGI server (for example Nginx and uWSGI). deprecations: - | Using ``watcher/api/app.wsgi`` script is deprecated and it will be removed in U release. Please switch to automatically generated ``watcher-api-wsgi`` script instead. python-watcher-4.0.0/releasenotes/notes/api-microversioning-7999a3ee8073bf32.yaml0000664000175000017500000000066013656752270027375 0ustar zuulzuul00000000000000--- features: Watcher starts to support API microversions since Stein cycle. From now onwards all API changes should be made with saving backward compatibility. To specify API version operator should use OpenStack-API-Version HTTP header. If operator wants to know the mininum and maximum supported versions by API, he/she can access /v1 resource and Watcher API will return appropriate headers in response. python-watcher-4.0.0/releasenotes/notes/check-strategy-requirements-66f9e9262412f8ec.yaml0000664000175000017500000000041013656752270031041 0ustar zuulzuul00000000000000--- features: - Added a way to check state of strategy before audit's execution. Administrator can use "watcher strategy state " command to get information about metrics' availability, datasource's availability and CDM's availability. 
python-watcher-4.0.0/releasenotes/notes/cinder-model-integration-baa394a72a0a33bf.yaml0000664000175000017500000000007013656752270030446 0ustar zuulzuul00000000000000--- features: - | Added cinder cluster data model ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/releasenotes/notes/general-purpose-decision-engine-threadpool-0711b23abfc9d409.yamlpython-watcher-4.0.0/releasenotes/notes/general-purpose-decision-engine-threadpool-0711b23abfc9d409.0000664000175000017500000000200013656752270033050 0ustar zuulzuul00000000000000--- prelude: > Many operations in the decision engine will block on I/O. Such I/O operations can stall the execution of a sequential application significantly. To reduce the potential bottleneck of many operations the general purpose decision engine threadpool is introduced. features: - | A new threadpool for the decision engine that contributors can use to improve the performance of many operations, primarily I/O bound onces. The amount of workers used by the decision engine threadpool can be configured to scale according to the available infrastructure using the `watcher_decision_engine.max_general_workers` config option. Documentation for contributors to effectively use this threadpool is available online: https://docs.openstack.org/watcher/latest/contributor/concurrency.html - | The building of the compute (Nova) data model will be done using the decision engine threadpool, thereby, significantly reducing the total time required to build it. python-watcher-4.0.0/releasenotes/notes/watcher-versioned-objects-fc5abf5c81c4590c.yaml0000664000175000017500000000026713656752270030675 0ustar zuulzuul00000000000000--- features: - all Watcher objects have been refactored to support OVO (oslo.versionedobjects) which was a prerequisite step in order to implement versioned notifications. 
python-watcher-4.0.0/releasenotes/notes/notifications-actionplan-cancel-edb2a4a12543e2d0.yaml0000664000175000017500000000023313656752270031725 0ustar zuulzuul00000000000000--- features: - Added notifications about cancelling of action plan. Now event based plugins know when action plan cancel started and completed. python-watcher-4.0.0/releasenotes/notes/enhance-watcher-applier-engine-86c676ce8f179e68.yaml0000664000175000017500000000147013656752270031361 0ustar zuulzuul00000000000000--- features: - | Added a new config option 'action_execution_rule' which is a dict type. Its key field is strategy name and the value is 'ALWAYS' or 'ANY'. 'ALWAYS' means the callback function returns True as usual. 'ANY' means the return depends on the result of previous action execution. The callback returns True if previous action gets failed, and the engine continues to run the next action. If previous action executes success, the callback returns False then the next action will be ignored. For strategies that aren't in 'action_execution_rule', the callback always returns True. Please add the next section in the watcher.conf file if your strategy needs this feature. [watcher_workflow_engines.taskflow] action_execution_rule = {'your strategy name': 'ANY'} python-watcher-4.0.0/releasenotes/notes/event-driven-optimization-based-4870f112bef8a560.yaml0000664000175000017500000000047613656752270031603 0ustar zuulzuul00000000000000--- features: - | Add a new webhook API and a new audit type EVENT, the microversion is 1.4. Now Watcher user can create audit with EVENT type and the audit will be triggered by webhook API. The user guide is available online: https://docs.openstack.org/watcher/latest/user/event_type_audit.html python-watcher-4.0.0/releasenotes/notes/add-force-field-to-audit-4bcaeedfe27233ad.yaml0000664000175000017500000000042713656752270030375 0ustar zuulzuul00000000000000--- features: - | Add force field to Audit. 
User can set --force to enable the new option when launching audit. If force is True, audit will be executed despite of ongoing actionplan. The new audit may create a wrong actionplan if they use the same data model. python-watcher-4.0.0/releasenotes/notes/add-baremetal-scoper-9ef23f5fb8f0be6a.yaml0000664000175000017500000000013513656752270027652 0ustar zuulzuul00000000000000--- features: - Baremetal Model gets Audit scoper with an ability to exclude Ironic nodes. python-watcher-4.0.0/releasenotes/source/0000775000175000017500000000000013656752352020520 5ustar zuulzuul00000000000000python-watcher-4.0.0/releasenotes/source/train.rst0000664000175000017500000000017613656752270022372 0ustar zuulzuul00000000000000========================== Train Series Release Notes ========================== .. release-notes:: :branch: stable/train python-watcher-4.0.0/releasenotes/source/_static/0000775000175000017500000000000013656752352022146 5ustar zuulzuul00000000000000python-watcher-4.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000013656752270024416 0ustar zuulzuul00000000000000python-watcher-4.0.0/releasenotes/source/conf.py0000664000175000017500000002013313656752270022015 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # watcher documentation build configuration file, created by # sphinx-quickstart on Fri Jun 3 11:37:52 2016. 
# # This file is execfile()d with the current directory set to its containing dir # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. #sys.path.insert(0, os.path.abspath('.')) # -- General configuration ---------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['reno.sphinxext', 'openstackdocstheme'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'watcher' copyright = u'2016, Watcher developers' # Release notes are version independent # The short X.Y version. version = '' # The full version, including alpha/beta/rc tags. release = '' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. 
exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. #html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". 
html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'watcherdoc' # -- Options for LaTeX output ------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). #'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). #'pointsize': '10pt', # Additional stuff for the LaTeX preamble. #'preamble': '', } # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]) latex_documents = [ ('index', 'watcher.tex', u'Watcher Documentation', u'Watcher developers', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output ------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'watcher', u'Watcher Documentation', [u'Watcher developers'], 1) ] # If true, show URL addresses after external links. #man_show_urls = False # -- Options for Texinfo output ----------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'watcher', u'Watcher Documentation', u'Watcher developers', 'watcher', 'One line description of project.', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. #texinfo_appendices = [] # If false, no module index is generated. #texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
#texinfo_show_urls = 'footnote' # -- Options for Internationalization output ------------------------------ locale_dirs = ['locale/'] python-watcher-4.0.0/releasenotes/source/stein.rst0000664000175000017500000000022113656752270022366 0ustar zuulzuul00000000000000=================================== Stein Series Release Notes =================================== .. release-notes:: :branch: stable/stein python-watcher-4.0.0/releasenotes/source/queens.rst0000664000175000017500000000022313656752270022546 0ustar zuulzuul00000000000000=================================== Queens Series Release Notes =================================== .. release-notes:: :branch: stable/queens python-watcher-4.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000016013656752270023375 0ustar zuulzuul00000000000000============================== Current Series Release Notes ============================== .. release-notes:: python-watcher-4.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022113656752270022373 0ustar zuulzuul00000000000000=================================== Rocky Series Release Notes =================================== .. release-notes:: :branch: stable/rocky python-watcher-4.0.0/releasenotes/source/index.rst0000664000175000017500000000152513656752270022363 0ustar zuulzuul00000000000000.. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ================================================= Welcome to watcher's Release Notes documentation! 
================================================= Contents: .. toctree:: :maxdepth: 1 unreleased train stein rocky queens pike ocata newton python-watcher-4.0.0/releasenotes/source/ocata.rst0000664000175000017500000000023013656752270022333 0ustar zuulzuul00000000000000=================================== Ocata Series Release Notes =================================== .. release-notes:: :branch: origin/stable/ocata python-watcher-4.0.0/releasenotes/source/newton.rst0000664000175000017500000000023213656752270022560 0ustar zuulzuul00000000000000=================================== Newton Series Release Notes =================================== .. release-notes:: :branch: origin/stable/newton python-watcher-4.0.0/releasenotes/source/pike.rst0000664000175000017500000000021713656752270022201 0ustar zuulzuul00000000000000=================================== Pike Series Release Notes =================================== .. release-notes:: :branch: stable/pike python-watcher-4.0.0/api-ref/0000775000175000017500000000000013656752352016052 5ustar zuulzuul00000000000000python-watcher-4.0.0/api-ref/source/0000775000175000017500000000000013656752352017352 5ustar zuulzuul00000000000000python-watcher-4.0.0/api-ref/source/watcher-api-v1-actionplans.inc0000664000175000017500000001337613656752270025117 0ustar zuulzuul00000000000000.. -*- rst -*- ============ Action Plans ============ An ``Action Plan`` specifies a flow of ``Actions`` that should be executed in order to satisfy a given ``Goal``. It also contains an estimated ``global efficacy`` alongside a set of ``efficacy indicators``. An ``Action Plan`` is generated by Watcher when an ``Audit`` is successful which implies that the ``Strategy`` which was used has found a ``Solution`` to achieve the ``Goal`` of this ``Audit``. In the default implementation of Watcher, an action plan is composed of a graph of linked ``Actions``. Each action may have parent actions, which should be executed prior to child action. 
Start Action Plan ================= .. rest_method:: POST /v1/action_plans/{actionplan_ident}/start Starts a created Action Plan resource. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-start-response.json :language: javascript List Action Plan ================ .. rest_method:: GET /v1/action_plans Returns a list of Action Plan resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - audit_uuid: r_audit - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-list-response.json :language: javascript List Action Plan detailed ========================= .. rest_method:: GET /v1/action_plans/detail Returns a list of Action Plan resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. 
rest_parameters:: parameters.yaml - audit_uuid: r_audit - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - deleted_at: deleted_at - updated_at: updated_at - created_at: created_at - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-list-detailed-response.json :language: javascript Show Action Plan ================ .. rest_method:: GET /v1/action_plans/{actionplan_ident} Shows details for an Action Plan. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Audit:** .. literalinclude:: samples/actionplan-show-response.json :language: javascript Cancel Action Plan ================== .. rest_method:: PATCH /v1/action_plans/{actionplan_ident} Cancels a created Action Plan resource. .. note: If Action Plan is in ONGOING state, then ``state`` attribute should be replaced with ``CANCELLING`` value. Otherwise, ``CANCELLED`` is to be used. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident **Example Action Plan ONGOING cancelling request:** .. 
literalinclude:: samples/actionplan-cancel-request-cancelling.json :language: javascript **Example Action Plan PENDING cancelling request:** .. literalinclude:: samples/actionplan-cancel-request-pending.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - state: actionplan_state - audit_uuid: actionplan_audit_uuid - strategy_uuid: strategy_uuid - strategy_name: strategy_name - efficacy_indicators: actionplan_efficacy_indicators - global_efficacy: actionplan_global_efficacy - links: links - hostname: actionplan_hostname **Example JSON representation of an Action Plan:** .. literalinclude:: samples/actionplan-start-response.json :language: javascript Delete Action Plan ================== .. rest_method:: DELETE /v1/action_plans/{actionplan_ident} Deletes an Action Plan. Action Plan can be deleted only from SUCCEEDED, RECOMMENDED, FAILED, SUPERSEDED, CANCELLED states. Normal response codes: 204 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - actionplan_ident: actionplan_ident python-watcher-4.0.0/api-ref/source/watcher-api-v1-goals.inc0000664000175000017500000000466313656752270023710 0ustar zuulzuul00000000000000.. -*- rst -*- ===== Goals ===== A ``Goal`` is a human readable, observable and measurable end result having one objective to be achieved. Here are some examples of ``Goals``: - minimize the energy consumption - minimize the number of compute nodes (consolidation) - balance the workload among compute nodes - minimize the license cost (some softwares have a licensing model which is based on the number of sockets or cores where the software is deployed) - find the most appropriate moment for a planned maintenance on a given group of host (which may be an entire availability zone): power supply replacement, cooling system replacement, hardware modification, ... List Goal ========= .. 
rest_method:: GET /v1/goals Returns a list of Goal resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - efficacy_specification: goal_efficacy_specification - name: goal_name - display_name: goal_display_name - links: links **Example JSON representation of a Goal:** .. literalinclude:: samples/goal-list-response.json :language: javascript List Goal Detailed ================== .. rest_method:: GET /v1/goals/detail Returns a list of Goal resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - efficacy_specification: goal_efficacy_specification - name: goal_name - display_name: goal_display_name - links: links **Example JSON representation of a Goal:** .. literalinclude:: samples/goal-list-response.json :language: javascript Show Goal ========= .. rest_method:: GET /v1/goals/{goal_ident} Shows details for an Goal. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - goal_ident: goal_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - efficacy_specification: goal_efficacy_specification - name: goal_name - display_name: goal_display_name - links: links **Example JSON representation of a Goal:** .. literalinclude:: samples/goal-show-response.json :language: javascriptpython-watcher-4.0.0/api-ref/source/watcher-api-v1-webhooks.inc0000664000175000017500000000050713656752270024415 0ustar zuulzuul00000000000000.. -*- rst -*- ======== Webhooks ======== .. versionadded:: 1.4 Triggers an event based Audit. Trigger EVENT Audit =================== .. 
rest_method:: POST /v1/webhooks/{audit_ident} Normal response codes: 202 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident python-watcher-4.0.0/api-ref/source/watcher-api-v1-audittemplates.inc0000664000175000017500000001264313656752270025625 0ustar zuulzuul00000000000000.. -*- rst -*- =============== Audit Templates =============== There are creating, listing, updating and deleting methods of Watcher Audit Template resources which are implemented via the ``/v1/audit_templates`` resource. An Audit may be launched several times with the same settings (Goal, thresholds, ...). Therefore it makes sense to save those settings in some sort of Audit preset object, which is known as an Audit Template. An Audit Template contains at least the Goal of the Audit. Create Audit Template ===================== .. rest_method:: POST /v1/audit_templates Creates a new Audit Template resource. It requires ``name`` and ``goal`` attributes to be supplied in the request body. Normal response codes: 201 Error codes: 400,404,409 Request ------- .. rest_parameters:: parameters.yaml - name: audittemplate_name - goal: audittemplate_goal - strategy: audittemplate_strategy - description: audittemplate_description - scope: audittemplate_scope **Example Audit Template creation request without a specified strategy:** .. literalinclude:: samples/audittemplate-create-request-minimal.json :language: javascript **Example Audit Template creation request with a specified strategy:** .. literalinclude:: samples/audittemplate-create-request-full.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - description: audittemplate_description - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-create-response.json :language: javascript List Audit Template =================== .. rest_method:: GET /v1/audit_templates Returns a list of Audit Template resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-list-response.json :language: javascript List Audit Template Detailed ============================ .. rest_method:: GET /v1/audit_templates/detail Returns a list of Audit Template resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links - description: audittemplate_description **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-list-detailed-response.json :language: javascript Show Audit Template =================== .. 
rest_method:: GET /v1/audit_templates/{audittemplate_ident} Shows details for an Audit Template. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - audittemplate_ident: audittemplate_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links - description: audittemplate_description **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-show-response.json :language: javascript Update Audit Template ===================== .. rest_method:: PATCH /v1/audit_templates/{audittemplate_ident} Updates an Audit Template with the given information. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audittemplate_ident: audittemplate_ident **Example PATCH document updating Audit Template:** .. literalinclude:: samples/audittemplate-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audittemplate_name - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - scope: audittemplate_scope - links: links - description: audittemplate_description **Example JSON representation of an Audit Template:** .. literalinclude:: samples/audittemplate-update-response.json :language: javascript Delete Audit Template ===================== .. rest_method:: DELETE /v1/audit_templates/{audittemplate_ident} Deletes an Audit Template. Normal response codes: 204 Error codes: 404 Request ------- .. 
rest_parameters:: parameters.yaml - audittemplate_ident: audittemplate_identpython-watcher-4.0.0/api-ref/source/parameters.yaml0000664000175000017500000003450213656752270022404 0ustar zuulzuul00000000000000# variables in header header_version: description: | Specific API microversion used to generate this response. in: header required: true type: string openstack-api-max-version: description: | Maximum API microversion supported by this endpoint, eg. "1.1" in: header required: true type: string openstack-api-min-version: description: | Minimum API microversion supported by this endpoint, eg. "1.0" in: header required: true type: string openstack-api-version: description: > A request SHOULD include this header to indicate to the Watcher API service what version the client supports. The server will transform the response object into compliance with the requested version, if it is supported, or return a 406 Not Acceptable error. If this header is not supplied, the server will response with server minimum supported version. in: header required: true type: string openstack-request-id: description: > An unique ID for tracking the request. The request ID associated with the request appears in the log lines for that request. By default, the middleware configuration ensures that the request ID appears in the log files. in: header required: false type: string # Path action_ident: description: | The UUID of the Action. in: path required: true type: string actionplan_ident: description: | The UUID of the Action Plan. in: path required: true type: string audit_ident: description: | The UUID or name of the Audit. in: path required: true type: string audittemplate_ident: description: | The UUID or name of the Audit Template. in: path required: true type: string goal_ident: description: | The UUID or name of the Goal. in: path required: true type: string scoring_engine_ident: description: | The UUID or name of the Scoring Engine. 
in: path required: true type: string service_ident: description: | The ID or name of the Service. in: path required: true type: string strategy_ident: description: | The UUID or name of the Strategy. in: path required: true type: string # Query body limit: description: | Requests a page size of items. Returns a number of items up to a ``limit`` value. Use the limit parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: integer marker: description: | The ID of the last-seen item. Use the ``limit`` parameter to make an initial limited request and use the ID of the last-seen item from the response as the ``marker`` parameter value in a subsequent limited request. in: query required: false type: string r_action_plan: description: | UUID of the action plan used for filtering. in: query required: false type: string r_audit: description: | Optional UUID of an audit, to get only actions for that audit. in: query required: false type: string r_goal: description: | The UUID or name of the Goal. in: query required: false type: string r_strategy: description: | The UUID or name of the Strategy. in: query required: false type: string r_type: description: | Type of data model user want to list. Default type is compute. Supported values: compute. Future support values: storage, baremetal. in: query required: false type: string sort_dir: description: | Sorts the response by the requested sort direction. A valid value is ``asc`` (ascending) or ``desc`` (descending). Default is ``asc``. in: query required: false type: string sort_key: description: | Sorts the response by the this attribute value. Default is ``id``. in: query required: false type: string # variables in the API response body # Action action_action_plan_uuid: description: | The action plan this action belongs to. 
in: body required: true type: string action_description: description: | Action description. in: body required: true type: string action_input_parameters: description: | Input parameters which are used by appropriate action type. For example, ``migration`` action takes into account such parameters as ``migration_type``, ``destination_node``, ``resource_id`` and ``source_node``. To see a list of supported action types and their input parameters visit `Action plugins page `_. in: body required: true type: JSON action_parents: description: | UUIDs of parent actions. in: body required: true type: array action_state: description: | State of Action. in: body required: true type: string action_type: description: | Action type based on specific API action. Actions in Watcher are pluggable, to see a list of supported action types visit `Action plugins page `_. in: body required: true type: string # Action Plan actionplan_audit_uuid: description: | The UUID of the audit this acton plan belongs to. in: body required: false type: string actionplan_efficacy_indicators: description: | The list of efficacy indicators associated to this action plan. in: body required: false type: array actionplan_global_efficacy: description: | The global efficacy of this action plan. in: body required: false type: array actionplan_hostname: description: | Hostname the actionplan is running on in: body required: false type: string actionplan_state: description: | State of this action plan. To get more information about states and action plan's lifecycle, visit `Action Plan State Machine page `_. in: body required: false type: string # Audit audit_autotrigger: description: | Auto execute action plan once audit is succeeded. in: body required: false type: boolean audit_endtime_req: description: | The local time after which audit can't be executed. It will be converted to UTC time by Watcher. 
in: body required: false type: string min_version: 1.1 audit_endtime_resp: description: | The UTC time after which audit can't be executed. in: body required: false type: string min_version: 1.1 audit_force: description: | Launch audit even if action plan is ongoing. in: body required: false type: boolean min_version: 1.2 audit_goal: description: | The UUID or name of the Goal. in: body required: false type: string audit_hostname: description: | Hostname the audit is running on in: body required: false type: string audit_interval: description: | Time interval between audit's execution. Can be set either in seconds or cron syntax. Should be defined only for CONTINUOUS audits. in: body required: false type: string audit_name: description: | Name of this audit. in: body required: false type: string audit_next_run_time: description: | The next time audit launch. Defined only for CONTINUOUS audits. in: body required: false type: string audit_parameters: description: | The strategy parameters for this audit. in: body required: false type: JSON audit_starttime_req: description: | The local time after which audit can be executed in accordance with interval. It will be converted to UTC time by Watcher. in: body required: false type: string min_version: 1.1 audit_starttime_resp: description: | The UTC time after which audit can be executed in accordance with interval. in: body required: false type: string min_version: 1.1 audit_state: description: | State of this audit. To get more information about states and audit's lifecycle, visit `Audit State Machine page `_. in: body required: true type: string audit_strategy: description: | The UUID or name of the Strategy. in: body required: false type: string audit_type: description: | Type of this audit. Can only be either ONESHOT or CONTINUOUS. in: body required: true type: string # Audit Template audittemplate_description: description: | Short description of the Audit Template. 
in: body required: false type: string audittemplate_goal: description: | The UUID or name of the Goal. in: body required: true type: string audittemplate_name: description: | The name of the Audit template. in: body required: true type: string audittemplate_scope: description: | Audit Scope. in: body required: false type: JSON audittemplate_strategy: description: | The UUID or name of the Strategy. in: body required: false type: string audittemplate_uuid: description: | The UUID of the Audit template. in: body required: true type: string created_at: description: | The date and time when the resource was created. The date and time stamp format is `ISO 8601 `_ in: body required: true type: string deleted_at: description: | The date and time when the resource was deleted. The date and time stamp format is `ISO 8601 `_ in: body required: true type: string # Goal goal_display_name: description: | Localized name of the goal. in: body required: true type: string goal_efficacy_specification: description: | Efficacy specifications as result of stategy's execution. in: body required: true type: array goal_name: description: | Name of the goal. in: body required: true type: string goal_uuid: description: | Unique UUID for this goal. in: body required: true type: string links: description: | A list of relative links. Includes the self and bookmark links. in: body required: true type: array # Data Model Node node_disk: description: | The Disk of the node(in GiB). in: body required: true type: integer node_disk_ratio: description: | The Disk Ratio of the node. in: body required: true type: float node_hostname: description: | The Host Name of the node. in: body required: true type: string node_memory: description: | The Memory of the node(in MiB). in: body required: true type: integer node_memory_ratio: description: | The Memory Ratio of the node. in: body required: true type: float node_state: description: | The State of the node. The value is up or down. 
in: body required: true type: string node_uuid: description: | The Unique UUID of the node. in: body required: true type: string node_vcpu_ratio: description: | The Vcpu ratio of the node. in: body required: true type: float node_vcpus: description: | The Vcpu of the node. in: body required: true type: integer # Scoring Engine scoring_engine_description: description: | A human readable description of the Scoring Engine. in: body required: true type: string scoring_engine_metainfo: description: | A metadata associated with the scoring engine in: body required: true type: string scoring_engine_name: description: | The name of the scoring engine. in: body required: true type: string # Data Model Server server_disk: description: | The Disk of the server. in: body required: true type: integer server_memory: description: | The Memory of server. in: body required: true type: integer server_name: description: | The Name of the server. in: body required: true type: string server_state: description: | The State of the server. in: body required: true type: string server_uuid: description: | The Unique UUID of the server. in: body required: true type: string server_vcpus: description: | The Vcpu of the server. in: body required: true type: integer # Service service_host: description: | The Name of host where service is placed on. in: body required: true type: string service_id: description: | The ID of service. in: body required: true type: integer service_last_seen_up: description: | The Time when Watcher service sent latest heartbeat. in: body required: true type: string service_name: description: | The Name of service like ``watcher-applier``. in: body required: true type: string service_status: description: | The State of service. It can be either in ACTIVE or FAILED state. in: body required: true type: string # Strategy strategy_check_comment: description: | Requirement comment. 
in: body required: true type: string strategy_check_mandatory: description: | Whether this requirement mandatory or not. in: body required: true type: boolean strategy_check_state: description: | State of requirement for Strategy. in: body required: true type: string or JSON strategy_check_type: description: | Type of requirement for Strategy. in: body required: true type: string strategy_display_name: description: | Localized name of the strategy. in: body required: true type: string strategy_name: description: | Name of the strategy. in: body required: true type: string strategy_parameters_spec: description: | Parameters specifications for this strategy. in: body required: true type: JSON strategy_uuid: description: | Unique UUID for this strategy. in: body required: true type: string updated_at: description: | The date and time when the resource was updated. The date and time stamp format is `ISO 8601 `_ in: body required: true type: string uuid: description: | The UUID for the resource. in: body required: true type: string # Version version: description: | Versioning of this API response, eg. "1.1". in: body required: true type: string version_description: description: | Descriptive text about the Watcher service. in: body required: true type: string version_id: description: | Major API version, eg, "v1" in: body required: true type: string versions: description: | Array of information about currently supported versions. in: body required: true type: array python-watcher-4.0.0/api-ref/source/conf.py0000664000175000017500000000575313656752270020662 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # nova documentation build configuration file, created by # sphinx-quickstart on Sat May 1 15:17:47 2010. # # This file is execfile()d with the current directory set to # its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from watcher import version as watcher_version extensions = [ 'openstackdocstheme', 'os_api_ref', ] # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. # The suffix of source filenames. source_suffix = '.rst' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Infrastructure Optimization API Reference' copyright = u'2010-present, OpenStack Foundation' # openstackdocstheme options repository_name = 'openstack/watcher' bug_project = 'watcher' bug_tag = '' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The full version, including alpha/beta/rc tags. release = watcher_version.version_info.release_string() # The short X.Y version. version = watcher_version.version_string # The name of the Pygments (syntax highlighting) style to use. 
pygments_style = 'sphinx' # -- Options for HTML output -------------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { "sidebar_mode": "toc", } # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. html_last_updated_fmt = '%Y-%m-%d %H:%M' # -- Options for LaTeX output ------------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Watcher.tex', u'Infrastructure Optimization API Reference', u'OpenStack Foundation', 'manual'), ] python-watcher-4.0.0/api-ref/source/watcher-api-v1-actions.inc0000664000175000017500000000714213656752270024236 0ustar zuulzuul00000000000000.. -*- rst -*- ======= Actions ======= An ``Action`` is what enables Watcher to transform the current state of a ``Cluster`` after an ``Audit``. An ``Action`` is an atomic task which changes the current state of a target Managed resource of the OpenStack ``Cluster`` such as: - Live migration of an instance from one compute node to another compute node with Nova - Changing the power level of a compute node (ACPI level, ...) - Changing the current state of a compute node (enable or disable) with Nova In most cases, an ``Action`` triggers some concrete commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.). An ``Action`` has a life-cycle and its current state may be one of the following: - **PENDING** : the ``Action`` has not been executed yet by the ``Watcher Applier``. 
- **ONGOING** : the ``Action`` is currently being processed by the ``Watcher Applier``. - **SUCCEEDED** : the ``Action`` has been executed successfully - **FAILED** : an error occurred while trying to execute the ``Action``. - **DELETED** : the ``Action`` is still stored in the ``Watcher database`` but is not returned any more through the Watcher APIs. - **CANCELLED** : the ``Action`` was in **PENDING** or **ONGOING** state and was cancelled by the ``Administrator`` ``Actions`` are created by ``Watcher Planner`` as result of Audit's execution. ``Action`` can't be created, modified or deleted by user. List Action =========== .. rest_method:: GET /v1/actions Returns a list of Action resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - action_plan_uuid: r_action_plan - audit_uuid: r_audit - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - action_type: action_type - state: action_state - action_plan_uuid: action_action_plan_uuid - parents: action_parents - links: links **Example JSON representation of an Action:** .. literalinclude:: samples/actions-list-response.json :language: javascript List Action Detailed ==================== .. rest_method:: GET /v1/actions/detail Returns a list of Action resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - action_plan_uuid: r_action_plan - audit_uuid: r_audit - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - action_type: action_type - state: action_state - action_plan_uuid: action_action_plan_uuid - parents: action_parents - description: action_description - input_parameters: action_input_parameters - links: links **Example JSON representation of an Action:** .. 
literalinclude:: samples/actions-list-detailed-response.json :language: javascript Show Action =========== .. rest_method:: GET /v1/actions/{action_ident} Shows details for an Action. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - action_ident: action_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - action_type: action_type - state: action_state - action_plan_uuid: action_action_plan_uuid - parents: action_parents - description: action_description - input_parameters: action_input_parameters - links: links **Example JSON representation of an Action:** .. literalinclude:: samples/actions-show-response.json :language: javascriptpython-watcher-4.0.0/api-ref/source/watcher-api-v1-datamodel.inc0000664000175000017500000000220713656752270024525 0ustar zuulzuul00000000000000.. -*- rst -*- ========== Data Model ========== .. versionadded:: 1.3 ``Data Model`` is very important for Watcher to generate resource optimization solutions. Users can easily view the data model by the API. List Data Model =============== .. rest_method:: GET /v1/data_model Returns the information about Data Model. Normal response codes: 200 Error codes: 400,401,406 Request ------- .. rest_parameters:: parameters.yaml - audit: r_audit - type: r_type Response -------- .. rest_parameters:: parameters.yaml - server_uuid: server_uuid - server_name: server_name - server_vcpus: server_vcpus - server_memory: server_memory - server_disk: server_disk - server_state: server_state - node_uuid: node_uuid - node_hostname: node_hostname - node_vcpus: node_vcpus - node_vcpu_ratio: node_vcpu_ratio - node_memory: node_memory - node_memory_ratio: node_memory_ratio - node_disk: node_disk - node_disk_ratio: node_disk_ratio - node_state: node_state **Example JSON representation of a Data Model:** .. 
literalinclude:: samples/datamodel-list-response.json :language: javascript python-watcher-4.0.0/api-ref/source/watcher-api-v1-scoring_engines.inc0000664000175000017500000000502513656752270025750 0ustar zuulzuul00000000000000.. -*- rst -*- =============== Scoring Engines =============== A ``Scoring Engine`` is an executable that has a well-defined input, a well-defined output, and performs a purely mathematical task. That is, the calculation does not depend on the environment in which it is running - it would produce the same result anywhere. Because there might be multiple algorithms used to build a particular data model (and therefore a scoring engine), the usage of scoring engine might vary. A metainfo field is supposed to contain any information which might be needed by the user of a given scoring engine. List Scoring Engine =================== .. rest_method:: GET /v1/scoring_engines Returns a list of Scoring Engine resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: scoring_engine_name - description: scoring_engine_description - links: links **Example JSON representation of a Scoring Engine:** .. literalinclude:: samples/scoring_engine-list-response.json :language: javascript List Scoring Engine Detailed ============================ .. rest_method:: GET /v1/scoring_engines/detail Returns a list of Scoring Engine resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: scoring_engine_name - description: scoring_engine_description - metainfo: scoring_engine_metainfo - links: links **Example JSON representation of a Scoring Engine:** .. 
literalinclude:: samples/scoring_engine-list-detailed-response.json :language: javascript Show Scoring Engine =================== .. rest_method:: GET /v1/scoring_engines/{scoring_engine_ident} Shows details for a Scoring Engine resource. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - scoring_engine_ident: scoring_engine_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: scoring_engine_name - description: scoring_engine_description - metainfo: scoring_engine_metainfo - links: links **Example JSON representation of a Scoring Engine:** .. literalinclude:: samples/scoring_engine-show-response.json :language: javascriptpython-watcher-4.0.0/api-ref/source/watcher-api-v1-strategies.inc0000664000175000017500000000636613656752270024757 0ustar zuulzuul00000000000000.. -*- rst -*- ========== Strategies ========== A ``Strategy`` is an algorithm implementation which is able to find a ``Solution`` for a given ``Goal``. To get more information about strategies that are shipped along with Watcher, visit `strategies page`_. There may be several potential strategies which are able to achieve the same ``Goal``. This is why it is possible to configure which specific ``Strategy`` should be used for each goal. Some strategies may provide better optimization results but may take more time to find an optimal ``Solution``. .. _`strategies page`: https://docs.openstack.org/watcher/latest/strategies/index.html List Strategy ============= .. rest_method:: GET /v1/strategies Returns a list of Strategy resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: strategy_name - display_name: strategy_display_name - goal_name: goal_name - goal_uuid: goal_uuid - links: links **Example JSON representation of a Strategy:** .. literalinclude:: samples/strategy-list-response.json :language: javascript List Strategy Detailed ====================== .. rest_method:: GET /v1/strategies/detail Returns a list of Strategy resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: strategy_name - display_name: strategy_display_name - parameters_spec: strategy_parameters_spec - goal_name: goal_name - goal_uuid: goal_uuid - links: links **Example JSON representation of a Strategy:** .. literalinclude:: samples/strategy-list-detailed-response.json :language: javascript Show Strategy ============= .. rest_method:: GET /v1/strategies/{strategy_ident} Shows details for a Strategy resource. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - strategy_ident: strategy_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: strategy_name - display_name: strategy_display_name - parameters_spec: strategy_parameters_spec - goal_name: goal_name - goal_uuid: goal_uuid - links: links **Example JSON representation of a Strategy:** .. literalinclude:: samples/strategy-show-response.json :language: javascript Show Strategy State =================== .. rest_method:: GET /v1/strategies/{strategy_ident}/state Retrieve an information about strategy requirements. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - strategy_ident: strategy_ident Response -------- .. 
rest_parameters:: parameters.yaml - state: strategy_check_state - comment: strategy_check_comment - mandatory: strategy_check_mandatory - type: strategy_check_type **Example JSON representation of a Strategy:** .. literalinclude:: samples/strategy-state-response.json :language: javascriptpython-watcher-4.0.0/api-ref/source/watcher-api-v1-audits.inc0000664000175000017500000001746713656752270024102 0ustar zuulzuul00000000000000.. -*- rst -*- ====== Audits ====== There are creating, listing, updating and deleting methods of Watcher Audit resources which are implemented via the ``/v1/audits`` resource. In the Watcher system, an ``Audit`` is a request for optimizing a ``Cluster``. The optimization is done in order to satisfy one ``Goal`` on a given ``Cluster``. For each ``Audit``, the Watcher system generates an ``Action Plan``. Create Audit ============ .. rest_method:: POST /v1/audits Creates a new Audit resource. Mandatory attribute to be supplied: ``audit_type``. ``Audit`` can be created either based on existed ``Audit Template`` or by itself. In the first case, there also should be supplied ``audit_template_uuid``. If ``Audit`` is created without ``Audit Template``, ``goal`` should be provided. Normal response codes: 201 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_template_uuid: audittemplate_uuid - audit_type: audit_type - name: audit_name - goal: audit_goal - strategy: audit_strategy - parameters: audit_parameters - interval: audit_interval - auto_trigger: audit_autotrigger - start_time: audit_starttime_req - end_time: audit_endtime_req - force: audit_force **Example ONESHOT Audit creation request:** .. literalinclude:: samples/audit-create-request-oneshot.json :language: javascript **Example CONTINUOUS Audit creation request with a specified strategy:** .. 
literalinclude:: samples/audit-create-request-continuous.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-create-response.json :language: javascript List Audit ========== .. rest_method:: GET /v1/audits Returns a list of Audit resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-list-response.json :language: javascript List Audit Detailed =================== .. rest_method:: GET /v1/audits/detail Returns a list of Audit resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - goal: r_goal - strategy: r_strategy - limit: limit - marker: marker - sort_dir: sort_dir - sort_key: sort_key Response -------- .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-list-detailed-response.json :language: javascript Show Audit ========== .. rest_method:: GET /v1/audits/{audit_ident} Shows details for an Audit. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-show-response.json :language: javascript Cancel Audit ============ .. rest_method:: PATCH /v1/audits/{audit_ident} Cancels an ONGOING Audit resource. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident **Example Audit cancelling request:** .. literalinclude:: samples/audit-cancel-request.json :language: javascript Response -------- The list and example below are representative of the response as of API version 1: .. 
rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-cancel-response.json :language: javascript Update Audit ============ .. rest_method:: PATCH /v1/audits/{audit_ident} Updates an Audit with the given information. .. note: ``audit_type`` shouldn't be changed by PATCH method. Normal response codes: 200 Error codes: 400,404 Request ------- .. rest_parameters:: parameters.yaml - audit_ident: audit_ident **Example PATCH document updating Audit:** .. literalinclude:: samples/audit-update-request.json :language: javascript Response -------- .. rest_parameters:: parameters.yaml - uuid: uuid - name: audit_name - audit_type: audit_type - strategy_uuid: strategy_uuid - strategy_name: strategy_name - goal_uuid: goal_uuid - goal_name: goal_name - interval: audit_interval - next_run_time: audit_next_run_time - parameters: audit_parameters - auto_trigger: audit_autotrigger - state: audit_state - scope: audittemplate_scope - links: links - hostname: audit_hostname - start_time: audit_starttime_resp - end_time: audit_endtime_resp - force: audit_force **Example JSON representation of an Audit:** .. literalinclude:: samples/audit-update-response.json :language: javascript Delete Audit ============ .. rest_method:: DELETE /v1/audits/{audit_ident} Deletes an Audit. Audit can be deleted only from FAILED, SUCCEEDED, CANCELLED, SUSPENDED states. Normal response codes: 204 Error codes: 404 Request ------- .. 
rest_parameters:: parameters.yaml - audit_ident: audit_ident python-watcher-4.0.0/api-ref/source/watcher-api-v1-services.inc0000664000175000017500000000367613656752270024431 0ustar zuulzuul00000000000000.. -*- rst -*- ======== Services ======== This resource represents Watcher services, their states and hosts they are placed on. List Service ============ .. rest_method:: GET /v1/services Returns a list of Service resources. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - id: service_id - name: service_name - host: service_host - status: service_status - links: links **Example JSON representation of a Service:** .. literalinclude:: samples/service-list-response.json :language: javascript List Service Detailed ===================== .. rest_method:: GET /v1/services/detail Returns a list of Service resources with complete details. Normal response codes: 200 Error codes: 400,401 Request ------- .. rest_parameters:: parameters.yaml - limit: limit - sort_dir: sort_dir - sort_key: sort_key Response -------- .. rest_parameters:: parameters.yaml - id: service_id - name: service_name - host: service_host - status: service_status - last_seen_up: service_last_seen_up - links: links **Example JSON representation of a Service:** .. literalinclude:: samples/service-list-detailed-response.json :language: javascript Show Service ============ .. rest_method:: GET /v1/services/{service_ident} Shows details for a Service resource. Normal response codes: 200 Error codes: 404 Request ------- .. rest_parameters:: parameters.yaml - service_ident: service_ident Response -------- .. rest_parameters:: parameters.yaml - id: service_id - name: service_name - host: service_host - status: service_status - last_seen_up: service_last_seen_up - links: links **Example JSON representation of a Service:** .. 
literalinclude:: samples/service-show-response.json :language: javascriptpython-watcher-4.0.0/api-ref/source/samples/0000775000175000017500000000000013656752352021016 5ustar zuulzuul00000000000000python-watcher-4.0.0/api-ref/source/samples/goal-list-response.json0000664000175000017500000000416313656752270025443 0ustar zuulzuul00000000000000{ "goals": [ { "efficacy_specification": [], "uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "links": [ { "rel": "self", "href": "http://controller:9322/v1/goals/e1a5a45b-f251-47cf-9c5f-fa1e66e1286a" }, { "rel": "bookmark", "href": "http://controller:9322/goals/e1a5a45b-f251-47cf-9c5f-fa1e66e1286a" } ], "name": "workload_balancing", "display_name": "Workload Balancing" }, { "efficacy_specification": [ { "description": "The total number of enabled compute nodes.", "schema": "Range(min=0, max=None, min_included=True, max_included=True, msg=None)", "name": "compute_nodes_count", "unit": null }, { "description": "The number of compute nodes to be released.", "schema": "Range(min=0, max=None, min_included=True, max_included=True, msg=None)", "name": "released_compute_nodes_count", "unit": null }, { "description": "The number of VM migrations to be performed.", "schema": "Range(min=0, max=None, min_included=True, max_included=True, msg=None)", "name": "instance_migrations_count", "unit": null } ], "uuid": "cb9afa5e-aec7-4a8c-9261-c15c33f2262b", "links": [ { "rel": "self", "href": "http://controller:9322/v1/goals/cb9afa5e-aec7-4a8c-9261-c15c33f2262b" }, { "rel": "bookmark", "href": "http://controller:9322/goals/cb9afa5e-aec7-4a8c-9261-c15c33f2262b" } ], "name": "server_consolidation", "display_name": "Server Consolidation" } ] }python-watcher-4.0.0/api-ref/source/samples/actionplan-list-response.json0000664000175000017500000000153113656752270026645 0ustar zuulzuul00000000000000{ "action_plans": [ { "state": "ONGOING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { 
"rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:52.640067+00:00", "strategy_name": "dummy_with_resize", "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a" } ] }python-watcher-4.0.0/api-ref/source/samples/audittemplate-update-response.json0000664000175000017500000000130513656752270027665 0ustar zuulzuul00000000000000{ "description": "test 1", "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at11", "uuid": "0d100c27-14af-4962-86fb-f6079287c9c6", "goal_name": "dummy", "scope": [], "created_at": "2018-04-04T07:48:36.175472+00:00", "deleted_at": null, "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" } ], "strategy_name": null, "updated_at": "2018-04-05T07:57:42.139127+00:00" }python-watcher-4.0.0/api-ref/source/samples/audittemplate-create-request-full.json0000664000175000017500000000020413656752270030435 0ustar zuulzuul00000000000000{ "name": "at2", "goal": "dummy", "strategy": "dummy", "description": "the second audit template", "scope": [] }python-watcher-4.0.0/api-ref/source/samples/service-show-response.json0000664000175000017500000000100213656752270026153 0ustar zuulzuul00000000000000{ "status": "ACTIVE", "name": "watcher-applier", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/1" }, { "rel": "bookmark", "href": "http://controller:9322/services/1" } ], "id": 1, "deleted_at": null, "updated_at": "2018-04-26T09:45:37.653061+00:00", "last_seen_up": "2018-04-26T09:45:37.649314", "created_at": "2018-03-26T11:55:24.075093+00:00" 
}python-watcher-4.0.0/api-ref/source/samples/audit-create-request-oneshot.json0000664000175000017500000000022113656752270027415 0ustar zuulzuul00000000000000{ "audit_type": "ONESHOT", "auto_trigger": false, "force": true, "audit_template_uuid": "5e70a156-ced7-4012-b1c6-88fcb02ee0c1" } python-watcher-4.0.0/api-ref/source/samples/api-root-response.json0000664000175000017500000000120313656752270025272 0ustar zuulzuul00000000000000{ "default_version": { "id": "v1", "links": [ { "href": "http://controller:9322/v1/", "rel": "self" } ], "min_version": "1.0", "status": "CURRENT", "max_version": "1.1" }, "description": "Watcher is an OpenStack project which aims to improve physical resources usage through better VM placement.", "name": "OpenStack Watcher API", "versions": [ { "id": "v1", "links": [ { "href": "http://controller:9322/v1/", "rel": "self" } ], "min_version": "1.0", "status": "CURRENT", "max_version": "1.1" } ] } python-watcher-4.0.0/api-ref/source/samples/audit-cancel-request.json0000664000175000017500000000013713656752270025730 0ustar zuulzuul00000000000000[ { "op": "replace", "value": "CANCELLED", "path": "/state" } ]python-watcher-4.0.0/api-ref/source/samples/audit-update-request.json0000664000175000017500000000027013656752270025763 0ustar zuulzuul00000000000000[ { "value": "CANCELLED", "path": "/state", "op": "replace" }, { "value": "audit1", "path": "/name", "op": "replace" } ]python-watcher-4.0.0/api-ref/source/samples/scoring_engine-show-response.json0000664000175000017500000000075313656752270027520 0ustar zuulzuul00000000000000{ "description": "Dummy Scorer calculating the maximum value", "uuid": "1ac42282-4e77-473e-898b-62ea007f1deb", "links": [ { "rel": "self", "href": "http://controller:9322/v1/scoring_engines/1ac42282-4e77-473e-898b-62ea007f1deb" }, { "rel": "bookmark", "href": "http://controller:9322/scoring_engines/1ac42282-4e77-473e-898b-62ea007f1deb" } ], "name": "dummy_max_scorer", "metainfo": "" 
}python-watcher-4.0.0/api-ref/source/samples/audit-create-request-continuous.json0000664000175000017500000000055613656752270030157 0ustar zuulzuul00000000000000{ "auto_trigger": false, "force": false, "audit_template_uuid": "76fddfee-a9c4-40b0-8da0-c19ad6904f09", "name": "test_audit", "parameters": { "metrics": [ "cpu_util" ] }, "audit_type": "CONTINUOUS", "interval": "*/2 * * * *", "start_time":"2018-04-02 20:30:00", "end_time": "2018-04-04 20:30:00" } python-watcher-4.0.0/api-ref/source/samples/audittemplate-show-response.json0000664000175000017500000000130413656752270027362 0ustar zuulzuul00000000000000{ "description": "test 1", "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at1", "uuid": "0d100c27-14af-4962-86fb-f6079287c9c6", "goal_name": "dummy", "scope": [], "created_at": "2018-04-04T07:48:36.175472+00:00", "deleted_at": null, "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/0d100c27-14af-4962-86fb-f6079287c9c6" } ], "strategy_name": null, "updated_at": "2018-04-05T07:57:55.803650+00:00" }python-watcher-4.0.0/api-ref/source/samples/audit-list-detailed-response.json0000664000175000017500000000401113656752270027370 0ustar zuulzuul00000000000000{ "audits": [ { "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "test_audit", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", 
"goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "ONGOING", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T09:46:00", "updated_at": "2018-04-06T09:44:01.604146+00:00", "hostname": "controller", "start_time": null, "end_time": null } ] } python-watcher-4.0.0/api-ref/source/samples/actions-list-response.json0000664000175000017500000000132413656752270026155 0ustar zuulzuul00000000000000{ "actions": [ { "state": "PENDING", "parents": [ "8119d16e-b419-4729-b015-fc04c4e45783" ], "links": [ { "rel": "self", "href": "http://controller:9322/v1/actions/7182a988-e6c4-4152-a0d6-067119475c83" }, { "rel": "bookmark", "href": "http://controller:9322/actions/7182a988-e6c4-4152-a0d6-067119475c83" } ], "action_plan_uuid": "c6bba9ed-a7eb-4370-9993-d873e5e22cba", "uuid": "7182a988-e6c4-4152-a0d6-067119475c83", "action_type": "sleep" } ] }python-watcher-4.0.0/api-ref/source/samples/api-v1-root-response.json0000664000175000017500000000306713656752270025630 0ustar zuulzuul00000000000000{ "scoring_engines": [ { "href": "http://controller:9322/v1/scoring_engines/", "rel": "self" }, { "href": "http://controller:9322/scoring_engines/", "rel": "bookmark" } ], "media_types": [ { "base": "application/json", "type": "application/vnd.openstack.watcher.v1+json" } ], "links": [ { "href": "http://controller:9322/v1/", "rel": "self" }, { "href": "http://docs.openstack.org/developer/watcher/dev/api-spec-v1.html", "type": "text/html", "rel": "describedby" } ], "actions": [ { "href": "http://controller:9322/v1/actions/", "rel": "self" }, { "href": "http://controller:9322/actions/", "rel": "bookmark" } ], "audit_templates": [ { "href": 
"http://controller:9322/v1/audit_templates/", "rel": "self" }, { "href": "http://controller:9322/audit_templates/", "rel": "bookmark" } ], "action_plans": [ { "href": "http://controller:9322/v1/action_plans/", "rel": "self" }, { "href": "http://controller:9322/action_plans/", "rel": "bookmark" } ], "services": [ { "href": "http://controller:9322/v1/services/", "rel": "self" }, { "href": "http://controller:9322/services/", "rel": "bookmark" } ], "audits": [ { "href": "http://controller:9322/v1/audits/", "rel": "self" }, { "href": "http://controller:9322/audits/", "rel": "bookmark" } ], "id": "v1" } python-watcher-4.0.0/api-ref/source/samples/actions-show-response.json0000664000175000017500000000141713656752270026165 0ustar zuulzuul00000000000000{ "state": "SUCCEEDED", "description": "Logging a NOP message", "parents": [ "b4529294-1de6-4302-b57a-9b5d5dc363c6" ], "links": [ { "rel": "self", "href": "http://controller:9322/v1/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a" }, { "rel": "bookmark", "href": "http://controller:9322/actions/54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a" } ], "action_plan_uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "uuid": "54acc7a0-91b0-46ea-a5f7-4ae2b9df0b0a", "deleted_at": null, "updated_at": "2018-04-10T11:59:44.026973+00:00", "input_parameters": { "message": "Welcome" }, "action_type": "nop", "created_at": "2018-04-10T11:59:12.725147+00:00" }python-watcher-4.0.0/api-ref/source/samples/strategy-state-response.json0000664000175000017500000000201413656752270026521 0ustar zuulzuul00000000000000[ { "state": "gnocchi: available", "comment": "", "mandatory": true, "type": "Datasource" }, { "state": [ { "compute.node.cpu.percent": "available" }, { "cpu_util": "available" }, { "memory.resident": "available" }, { "hardware.memory.used": "available" } ], "comment": "", "mandatory": false, "type": "Metrics" }, { "state": [ { "compute_model": "available" }, { "storage_model": "not available" }, { "baremetal_model": "not available" } ], "comment": "", 
"mandatory": true, "type": "CDM" }, { "state": "workload_stabilization", "mandatory": "", "comment": "", "type": "Name" } ]python-watcher-4.0.0/api-ref/source/samples/audittemplate-update-request.json0000664000175000017500000000013513656752270027517 0ustar zuulzuul00000000000000[ { "op": "replace", "value": "PENDING", "path": "/state" } ]python-watcher-4.0.0/api-ref/source/samples/audittemplate-create-request-minimal.json0000664000175000017500000000005213656752270031122 0ustar zuulzuul00000000000000{ "name": "at2", "goal": "dummy" }python-watcher-4.0.0/api-ref/source/samples/audit-create-response.json0000664000175000017500000000300713656752270026113 0ustar zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "test_audit", "parameters": { "host_choice": "retry", "granularity": 300, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 }, "periods": { "node": 600, "instance": 720 }, "retry_count": 1, "metrics": [ "cpu_util" ], "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "PENDING", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": null, "updated_at": null, "hostname": null, "start_time": null, "end_time": null } python-watcher-4.0.0/api-ref/source/samples/audittemplate-list-response.json0000664000175000017500000000132113656752270027354 0ustar 
zuulzuul00000000000000{ "audit_templates":[ { "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at3", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" } ], "strategy_name": null, "uuid": "b4041d8c-85d7-4224-851d-649fe48b7196", "goal_name": "dummy", "scope": [] } ] }python-watcher-4.0.0/api-ref/source/samples/actionplan-start-response.json0000664000175000017500000000137513656752270027035 0ustar zuulzuul00000000000000{ "state": "PENDING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { "rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:41.602430+00:00", "strategy_name": "dummy_with_resize", "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a", "created_at": "2018-04-10T11:59:12.592729+00:00", "deleted_at": null, "hostname": null }python-watcher-4.0.0/api-ref/source/samples/strategy-list-response.json0000664000175000017500000000124013656752270026354 0ustar zuulzuul00000000000000{ "strategies": [ { "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "dummy", "links": [ { "rel": "self", "href": "http://controller:9322/v1/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" }, { "rel": "bookmark", "href": "http://controller:9322/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" } ], "uuid": "e311727b-b9b3-43ef-a5f7-8bd7ea80df25", "goal_name": "dummy", "display_name": "Dummy strategy" } ] }python-watcher-4.0.0/api-ref/source/samples/service-list-response.json0000664000175000017500000000167613656752270026167 0ustar zuulzuul00000000000000{ "services": [ 
{ "id": 1, "status": "ACTIVE", "name": "watcher-applier", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/1" }, { "rel": "bookmark", "href": "http://controller:9322/services/1" } ] }, { "id": 2, "status": "ACTIVE", "name": "watcher-decision-engine", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/2" }, { "rel": "bookmark", "href": "http://controller:9322/services/2" } ] } ] }python-watcher-4.0.0/api-ref/source/samples/strategy-show-response.json0000664000175000017500000000170413656752270026366 0ustar zuulzuul00000000000000{ "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "dummy", "links": [ { "rel": "self", "href": "http://controller:9322/v1/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" }, { "rel": "bookmark", "href": "http://controller:9322/strategies/e311727b-b9b3-43ef-a5f7-8bd7ea80df25" } ], "parameters_spec": { "properties": { "para2": { "default": "hello", "type": "string", "description": "string parameter example" }, "para1": { "maximum": 10.2, "type": "number", "minimum": 1.0, "description": "number parameter example", "default": 3.2 } } }, "uuid": "e311727b-b9b3-43ef-a5f7-8bd7ea80df25", "goal_name": "dummy", "display_name": "Dummy strategy" }python-watcher-4.0.0/api-ref/source/samples/audit-show-response.json0000664000175000017500000000307613656752270025636 0ustar zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "test_audit", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, 
"force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "ONGOING", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T11:56:00", "updated_at": "2018-04-06T11:54:01.266447+00:00", "hostname": "controller", "start_time": null, "end_time": null } python-watcher-4.0.0/api-ref/source/samples/actionplan-show-response.json0000664000175000017500000000126713656752270026660 0ustar zuulzuul00000000000000{ "state": "ONGOING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { "rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:52.640067+00:00", "strategy_name": "dummy_with_resize", "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a", "hostname": "controller" }python-watcher-4.0.0/api-ref/source/samples/strategy-list-detailed-response.json0000664000175000017500000000245013656752270030131 0ustar zuulzuul00000000000000{ "strategies": [ { "goal_uuid": "cb9afa5e-aec7-4a8c-9261-c15c33f2262b", "name": "vm_workload_consolidation", "links": [ { "rel": "self", "href": "http://controller:9322/v1/strategies/6382b2d7-259e-487d-88db-78c852ffea54" }, { "rel": "bookmark", "href": "http://controller:9322/strategies/6382b2d7-259e-487d-88db-78c852ffea54" } ], "parameters_spec": { "properties": { "granularity": { "default": 300, "type": "number", "description": "The time between two measures in an 
aggregated timeseries of a metric." }, "period": { "default": 3600, "type": "number", "description": "The time interval in seconds for getting statistic aggregation" } } }, "uuid": "6382b2d7-259e-487d-88db-78c852ffea54", "goal_name": "server_consolidation", "display_name": "VM Workload Consolidation Strategy" } ] }python-watcher-4.0.0/api-ref/source/samples/actionplan-list-detailed-response.json0000664000175000017500000000173613656752270030425 0ustar zuulzuul00000000000000{ "action_plans": [ { "state": "ONGOING", "efficacy_indicators": [], "strategy_uuid": "7dae0eea-9df7-42b8-bb3e-313958ff2242", "global_efficacy": [], "links": [ { "rel": "self", "href": "http://controller:9322/v1/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" }, { "rel": "bookmark", "href": "http://controller:9322/action_plans/4cbc4ede-0d25-481b-b86e-998dbbd4f8bf" } ], "updated_at": "2018-04-10T11:59:52.640067+00:00", "strategy_name": "dummy_with_resize", "deleted_at": null, "uuid": "4cbc4ede-0d25-481b-b86e-998dbbd4f8bf", "audit_uuid": "7d100b05-0a86-491f-98a7-f93da19b272a", "created_at": "2018-04-10T11:59:52.640067+00:00", "hostname": "controller" } ] } python-watcher-4.0.0/api-ref/source/samples/actions-list-detailed-response.json0000664000175000017500000000174713656752270027737 0ustar zuulzuul00000000000000{ "actions": [ { "state": "PENDING", "description": "Wait for a given interval in seconds.", "parents": [ "8119d16e-b419-4729-b015-fc04c4e45783" ], "links": [ { "rel": "self", "href": "http://controller:9322/v1/actions/7182a988-e6c4-4152-a0d6-067119475c83" }, { "rel": "bookmark", "href": "http://controller:9322/actions/7182a988-e6c4-4152-a0d6-067119475c83" } ], "action_plan_uuid": "c6bba9ed-a7eb-4370-9993-d873e5e22cba", "uuid": "7182a988-e6c4-4152-a0d6-067119475c83", "deleted_at": null, "updated_at": null, "input_parameters": { "duration": 3.2 }, "action_type": "sleep", "created_at": "2018-03-26T11:56:08.235226+00:00" } ] 
}python-watcher-4.0.0/api-ref/source/samples/actionplan-cancel-request-cancelling.json0000664000175000017500000000014013656752270031041 0ustar zuulzuul00000000000000[ { "op": "replace", "value": "CANCELLING", "path": "/state" } ]python-watcher-4.0.0/api-ref/source/samples/audit-cancel-response.json0000664000175000017500000000307413656752270026101 0ustar zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "audit1", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "CANCELLED", "audit_type": "CONTINUOUS", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T11:56:00", "updated_at": "2018-04-06T11:54:01.266447+00:00", "hostname": "controller", "start_time": null, "end_time": null } python-watcher-4.0.0/api-ref/source/samples/service-list-detailed-response.json0000664000175000017500000000127613656752270027734 0ustar zuulzuul00000000000000{ "services": [ { "status": "ACTIVE", "name": "watcher-applier", "host": "controller", "links": [ { "rel": "self", "href": "http://controller:9322/v1/services/1" }, { "rel": "bookmark", "href": "http://controller:9322/services/1" } ], "id": 1, 
"deleted_at": null, "updated_at": "2018-04-26T08:52:37.652895+00:00", "last_seen_up": "2018-04-26T08:52:37.648572", "created_at": "2018-03-26T11:55:24.075093+00:00" } ] }python-watcher-4.0.0/api-ref/source/samples/audit-list-response.json0000664000175000017500000000166013656752270025626 0ustar zuulzuul00000000000000{ "audits": [ { "interval": null, "strategy_uuid": "e311727b-b9b3-43ef-a5f7-8bd7ea80df25", "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "dummy-2018-03-26T11:56:07.950400", "auto_trigger": false, "uuid": "ccc69a5f-114e-46f4-b15e-a77eaa337b01", "goal_name": "dummy", "scope": [], "state": "SUCCEEDED", "audit_type": "ONESHOT", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/ccc69a5f-114e-46f4-b15e-a77eaa337b01" }, { "rel": "bookmark", "href": "http://controller:9322/audits/ccc69a5f-114e-46f4-b15e-a77eaa337b01" } ], "strategy_name": "dummy", "next_run_time": null } ] }python-watcher-4.0.0/api-ref/source/samples/actionplan-cancel-request-pending.json0000664000175000017500000000013713656752270030374 0ustar zuulzuul00000000000000[ { "op": "replace", "value": "CANCELLED", "path": "/state" } ]python-watcher-4.0.0/api-ref/source/samples/scoring_engine-list-detailed-response.json0000664000175000017500000000121613656752270031257 0ustar zuulzuul00000000000000{ "scoring_engines": [ { "description": "Dummy Scorer calculating the average value", "uuid": "5a44f007-55b1-423c-809f-6a274a9bd93b", "links": [ { "rel": "self", "href": "http://controller:9322/v1/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" }, { "rel": "bookmark", "href": "http://controller:9322/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" } ], "name": "dummy_avg_scorer", "metainfo": "" } ] }python-watcher-4.0.0/api-ref/source/samples/goal-show-response.json0000664000175000017500000000105413656752270025444 0ustar zuulzuul00000000000000{ "efficacy_specification": [], "name": "saving_energy", "links": [ { "rel": "self", "href": 
"http://controller:9322/v1/goals/6f52889a-9dd4-4dbb-8e70-39b56c4836cc" }, { "rel": "bookmark", "href": "http://controller:9322/goals/6f52889a-9dd4-4dbb-8e70-39b56c4836cc" } ], "uuid": "6f52889a-9dd4-4dbb-8e70-39b56c4836cc", "updated_at": null, "display_name": "Saving Energy", "created_at": "2018-03-26T11:55:24.365584+00:00", "deleted_at": null }python-watcher-4.0.0/api-ref/source/samples/audittemplate-create-response.json0000664000175000017500000000124213656752270027646 0ustar zuulzuul00000000000000{ "description": null, "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at3", "uuid": "b4041d8c-85d7-4224-851d-649fe48b7196", "goal_name": "dummy", "scope": [], "created_at": "2018-04-04T08:38:33.110432+00:00", "deleted_at": null, "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" } ], "strategy_name": null, "updated_at": null }python-watcher-4.0.0/api-ref/source/samples/audit-update-response.json0000664000175000017500000000307413656752270026136 0ustar zuulzuul00000000000000{ "interval": "*/2 * * * *", "strategy_uuid": "6b3b3902-8508-4cb0-bb85-67f32866b086", "goal_uuid": "e1a5a45b-f251-47cf-9c5f-fa1e66e1286a", "name": "audit1", "parameters": { "host_choice": "retry", "instance_metrics": { "cpu_util": "compute.node.cpu.percent", "memory.resident": "hardware.memory.used" }, "granularity": 300, "weights": { "cpu_util_weight": 1.0, "memory.resident_weight": 1.0 }, "retry_count": 1, "metrics": [ "cpu_util" ], "periods": { "instance": 720, "node": 600 }, "thresholds": { "cpu_util": 0.2, "memory.resident": 0.2 } }, "auto_trigger": false, "force": false, "uuid": "65a5da84-5819-4aea-8278-a28d2b489028", "goal_name": "workload_balancing", "scope": [], "created_at": "2018-04-06T07:27:27.820460+00:00", "deleted_at": null, "state": "CANCELLED", "audit_type": "CONTINUOUS", 
"links": [ { "rel": "self", "href": "http://controller:9322/v1/audits/65a5da84-5819-4aea-8278-a28d2b489028" }, { "rel": "bookmark", "href": "http://controller:9322/audits/65a5da84-5819-4aea-8278-a28d2b489028" } ], "strategy_name": "workload_stabilization", "next_run_time": "2018-04-06T11:56:00", "updated_at": "2018-04-06T11:54:01.266447+00:00", "hostname": "controller", "start_time": null, "end_time": null } python-watcher-4.0.0/api-ref/source/samples/datamodel-list-response.json0000664000175000017500000000242013656752270026445 0ustar zuulzuul00000000000000{ "context": [ { "server_uuid": "1bf91464-9b41-428d-a11e-af691e5563bb", "server_name": "chenke-test1", "server_vcpus": "1", "server_memory": "512", "server_disk": "1", "server_state": "active", "node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112", "node_hostname": "localhost.localdomain", "node_vcpus": "4", "node_vcpu_ratio": "16.0", "node_memory": "16383", "node_memory_ratio": "1.5", "node_disk": "37" "node_disk_ratio": "1.0", "node_state": "up", }, { "server_uuid": "e2cb5f6f-fa1d-4ba2-be1e-0bf02fa86ba4", "server_name": "chenke-test2", "server_vcpus": "1", "server_memory": "512", "server_disk": "1", "server_state": "active", "node_uuid": "253e5dd0-9384-41ab-af13-4f2c2ce26112", "node_hostname": "localhost.localdomain", "node_vcpus": "4", "node_vcpu_ratio": "16.0", "node_memory": "16383", "node_memory_ratio": "1.5", "node_disk": "37" "node_disk_ratio": "1.0", "node_state": "up", } ] } python-watcher-4.0.0/api-ref/source/samples/audittemplate-list-detailed-response.json0000664000175000017500000000136213656752270031132 0ustar zuulzuul00000000000000{ "audit_templates":[ { "strategy_uuid": null, "goal_uuid": "4690f8ba-18ff-45c1-99e9-159556d23810", "name": "at3", "links": [ { "rel": "self", "href": "http://controller:9322/v1/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" }, { "rel": "bookmark", "href": "http://controller:9322/audit_templates/b4041d8c-85d7-4224-851d-649fe48b7196" } ], "strategy_name": null, 
"uuid": "b4041d8c-85d7-4224-851d-649fe48b7196", "goal_name": "dummy", "scope": [], "description": null } ] }python-watcher-4.0.0/api-ref/source/samples/scoring_engine-list-response.json0000664000175000017500000000116213656752270027506 0ustar zuulzuul00000000000000{ "scoring_engines": [ { "description": "Dummy Scorer calculating the average value", "uuid": "5a44f007-55b1-423c-809f-6a274a9bd93b", "links": [ { "rel": "self", "href": "http://controller:9322/v1/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" }, { "rel": "bookmark", "href": "http://controller:9322/scoring_engines/5a44f007-55b1-423c-809f-6a274a9bd93b" } ], "name": "dummy_avg_scorer" } ] }python-watcher-4.0.0/api-ref/source/index.rst0000664000175000017500000000102613656752270021211 0ustar zuulzuul00000000000000:tocdepth: 2 =========== Watcher API =========== .. rest_expand_all:: .. include:: watcher-api-versions.inc .. include:: watcher-api-v1-audittemplates.inc .. include:: watcher-api-v1-audits.inc .. include:: watcher-api-v1-actionplans.inc .. include:: watcher-api-v1-actions.inc .. include:: watcher-api-v1-goals.inc .. include:: watcher-api-v1-strategies.inc .. include:: watcher-api-v1-services.inc .. include:: watcher-api-v1-scoring_engines.inc .. include:: watcher-api-v1-datamodel.inc .. include:: watcher-api-v1-webhooks.inc python-watcher-4.0.0/api-ref/source/watcher-api-versions.inc0000664000175000017500000000400613656752270024116 0ustar zuulzuul00000000000000.. -*- rst -*- ============ API versions ============ In order to bring new features to users over time, the Watcher API supports versioning. There are two kinds of versions in Watcher. - ''major versions'', which have dedicated URLs. - ''microversions'', which can be requested using the ``OpenStack-API-Version`` header. .. note:: The maximum microversion depends on release. Please reference: `API Microversion History `__ for API microversion history details. 
The Version API resource works differently from other API resources as they *do not* require authentication. If Watcher receives a request with unsupported version, it responds with a 406 Not Acceptable, along with the -Min- and -Max- headers that it can support. List API versions ================= .. rest_method:: GET / This fetches all the information about all known major API versions in the deployment. Links to more specific information will be provided for each major API version, as well as information about supported min and max microversions. Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - description: version_description - versions: versions - version: version - id: version_id - links: links - min_version: openstack-api-min-version - max_version: openstack-api-max-version .. literalinclude:: samples/api-root-response.json :language: javascript Show v1 API =========== .. rest_method:: GET /v1/ Show all the resources within the Watcher v1 API. Normal response codes: 200 Request ------- Response Example ---------------- .. rest_parameters:: parameters.yaml - id: version_id - links: links - OpenStack-API-Version: header_version - OpenStack-API-Minimum-Version: openstack-api-min-version - OpenStack-API-Maximum-Version: openstack-api-max-version .. literalinclude:: samples/api-v1-root-response.json :language: javascript python-watcher-4.0.0/PKG-INFO0000664000175000017500000000447613656752352015637 0ustar zuulzuul00000000000000Metadata-Version: 1.2 Name: python-watcher Version: 4.0.0 Summary: OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Home-page: https://docs.openstack.org/watcher/latest/ Author: OpenStack Author-email: openstack-discuss@lists.openstack.org License: UNKNOWN Description: ======= Watcher ======= .. 
image:: https://governance.openstack.org/tc/badges/watcher.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on .. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Watcher provides a robust framework to realize a wide range of cloud optimization goals, including the reduction of data center operating costs, increased system performance via intelligent virtual machine migration, increased energy efficiency and more! * Free software: Apache license * Wiki: https://wiki.openstack.org/wiki/Watcher * Source: https://opendev.org/openstack/watcher * Bugs: https://bugs.launchpad.net/watcher * Documentation: https://docs.openstack.org/watcher/latest/ * Release notes: https://docs.openstack.org/releasenotes/watcher/ * Design specifications: https://specs.openstack.org/openstack/watcher-specs/ Platform: UNKNOWN Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: Implementation :: CPython Classifier: Programming Language :: Python :: 3 :: Only Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.6 Classifier: Programming Language :: Python :: 3.7 Requires-Python: >=3.6 python-watcher-4.0.0/test-requirements.txt0000664000175000017500000000104713656752270020771 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. 
Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. coverage>=4.5.1 # Apache-2.0 doc8>=0.8.0 # Apache-2.0 freezegun>=0.3.10 # Apache-2.0 hacking>=3.0,<3.1.0 # Apache-2.0 mock>=2.0.0 # BSD oslotest>=3.3.0 # Apache-2.0 os-testr>=1.0.0 # Apache-2.0 testscenarios>=0.5.0 # Apache-2.0/BSD testtools>=2.3.0 # MIT stestr>=2.0.0 # Apache-2.0 os-api-ref>=1.4.0 # Apache-2.0 bandit>=1.6.0 # Apache-2.0 python-watcher-4.0.0/lower-constraints.txt0000664000175000017500000000557013656752270020773 0ustar zuulzuul00000000000000alabaster==0.7.10 alembic==0.9.8 amqp==2.2.2 appdirs==1.4.3 APScheduler==3.5.1 asn1crypto==0.24.0 automaton==1.14.0 Babel==2.5.3 beautifulsoup4==4.6.0 cachetools==2.0.1 certifi==2018.1.18 cffi==1.11.5 chardet==3.0.4 cliff==2.11.0 cmd2==0.8.1 contextlib2==0.5.5 coverage==4.5.1 croniter==0.3.20 cryptography==2.1.4 debtcollector==1.19.0 decorator==4.2.1 deprecation==2.0 doc8==0.8.0 docutils==0.14 dogpile.cache==0.6.5 dulwich==0.19.0 enum34==1.1.6 enum-compat==0.0.2 eventlet==0.20.0 extras==1.0.0 fasteners==0.14.1 fixtures==3.0.0 freezegun==0.3.10 future==0.16.0 futurist==1.8.0 gitdb2==2.0.3 GitPython==2.1.8 gnocchiclient==7.0.1 greenlet==0.4.13 idna==2.6 imagesize==1.0.0 iso8601==0.1.12 Jinja2==2.10 jmespath==0.9.3 jsonpatch==1.21 jsonpointer==2.0 jsonschema==2.6.0 keystoneauth1==3.4.0 keystonemiddleware==4.21.0 kombu==4.1.0 linecache2==1.0.0 logutils==0.3.5 lxml==4.1.1 Mako==1.0.7 MarkupSafe==1.0 mccabe==0.2.1 microversion_parse==0.2.1 mock==2.0.0 monotonic==1.4 mox3==0.25.0 msgpack==0.5.6 munch==2.2.0 netaddr==0.7.19 netifaces==0.10.6 networkx==2.2 openstackdocstheme==1.20.0 openstacksdk==0.12.0 os-api-ref===1.4.0 os-client-config==1.29.0 os-service-types==1.2.0 os-testr==1.0.0 osc-lib==1.10.0 os-resource-classes==0.4.0 oslo.cache==1.29.0 oslo.concurrency==3.26.0 oslo.config==5.2.0 oslo.context==2.21.0 oslo.db==4.35.0 oslo.i18n==3.20.0 oslo.log==3.37.0 oslo.messaging==8.1.2 oslo.middleware==3.35.0 
oslo.policy==1.34.0 oslo.reports==1.27.0 oslo.serialization==2.25.0 oslo.service==1.30.0 oslo.upgradecheck==0.1.0 oslo.utils==3.36.0 oslo.versionedobjects==1.32.0 oslotest==3.3.0 packaging==17.1 Paste==2.0.3 PasteDeploy==1.5.2 pbr==3.1.1 pecan==1.3.2 pika==0.10.0 pika-pool==0.1.3 prettytable==0.7.2 psutil==5.4.3 pycadf==2.7.0 pycparser==2.18 Pygments==2.2.0 pyinotify==0.9.6 pyOpenSSL==17.5.0 pyparsing==2.2.0 pyperclip==1.6.0 python-ceilometerclient==2.9.0 python-cinderclient==3.5.0 python-dateutil==2.7.0 python-editor==1.0.3 python-glanceclient==2.9.1 python-ironicclient==2.5.0 python-keystoneclient==3.15.0 python-mimeparse==1.6.0 python-monascaclient==1.12.0 python-neutronclient==6.7.0 python-novaclient==14.1.0 python-openstackclient==3.14.0 python-subunit==1.2.0 pytz==2018.3 PyYAML==3.12 reno==2.7.0 repoze.lru==0.7 requests==2.18.4 requestsexceptions==1.4.0 restructuredtext-lint==1.1.3 rfc3986==1.1.0 Routes==2.4.1 simplegeneric==0.8.1 simplejson==3.13.2 six==1.11.0 smmap2==2.0.3 snowballstemmer==1.2.1 Sphinx==1.6.5 sphinxcontrib-httpdomain==1.6.1 sphinxcontrib-pecanwsme==0.8.0 sphinxcontrib-websupport==1.0.1 SQLAlchemy==1.2.5 sqlalchemy-migrate==0.11.0 sqlparse==0.2.4 statsd==3.2.2 stestr==2.0.0 stevedore==1.28.0 taskflow==3.7.1 Tempita==0.5.2 tenacity==4.9.0 testresources==2.0.1 testscenarios==0.5.0 testtools==2.3.0 traceback2==1.4.0 tzlocal==1.5.1 ujson==1.35 unittest2==1.1.0 urllib3==1.22 vine==1.1.4 waitress==1.1.0 warlock==1.3.0 WebOb==1.8.5 WebTest==2.0.29 wrapt==1.10.11 WSME==0.9.2 python-watcher-4.0.0/AUTHORS0000664000175000017500000001505113656752351015600 0ustar zuulzuul0000000000000098k <18552437190@163.com> Akihito INOH Alexander Chadin Alexander Chadin Alexandr Stavitskiy Amy Fong Andrea Frittoli Andreas Jaeger Andreas Jaeger Antoine Cabot Anton Khaldin Atul Pandey Bin Zhou Biswajeeban Mishra Bruno Grazioli Béla Vancsics Cao Xuan Hoang ChangBo Guo(gcb) Chaozhe.Chen Chris Spencer Clark Boylan Daniel Pawlik Dantali0n Dao Cong Tien Darren Shaw David 
TARDIVEL Doug Hellmann Drew Thorstensen Edwin Zhai Egor Panfilov Fanis Kalimullin Feng Shengqin Flavio Percoco ForestLee Ghanshyam Mann Guang Yee Gábor Antal Ha Van Tu Hidekazu Nakamura Hoang Trung Hieu Ian Wienand Iswarya_Vakati Jaewoo Park James E. Blair Jean-Emile DARTOIS Jeremy Liu Joe Cropper Ken'ichi Ohmichi Kevin_Zheng Kien Nguyen Lance Bragstad Larry Rensing LiXiangyu Lin Yang Luong Anh Tuan M V P Nitesh Margarita Shakhova Matt Riedemann Michael Gugino Michelle Mandel Muzammil Mueen Ngo Quoc Cuong Nguyen Hai Nguyen Hai Truong Nguyen Hung Phuong Nishant Kumar OpenStack Release Bot Palimariu Marius Pradeep Kumar Singh Prashanth Hari Prudhvi Rao Shedimbi Q.hongtao Ralf Rantzau Sampath Priyankara Santhosh Fernandes Sean McGinnis ShangXiao Steve Kowalik Steve Wilkerson Sumit Jamgade Susanne Balle Swapnil Kulkarni (coolsvap) Tatiana Kholkina Taylor Peoples Thierry Carrez Tin Lam Tomasz Kaczynski Tomasz TrÄ™bski Viacheslav Samarin Viktor Varga Vincent Françoise Vladimir Ostroverkhov Vu Cong Tuan XiaojueGuan XieYingYun Yaguo Zhou Yatin Kumbhare Yosef Hoffman Yumeng Bao YumengBao Yumeng_Bao Zhenyu Zheng Zhenzan Zhou aditi aditi akhiljain23 akihito-inoh avnish baiwenteng caoyuan chao liu chenaidong1 chengebj5238 chenghuiyu chenke chenming chenxing cima deepak_mourya digambar ericxiett gaofei gaozx gecong1973 gengchc2 ghanshyam haris tanvir howardlee inspurericzhang iswarya_vakati jacky06 jaugustine jeremy.zhang jinquanni junjie huang licanwei lingyongxu liushuobj liyanhang lvxianguo melissaml mergalievibragim pangliye pengyuesheng qinchunhua rajat29 ricolin sai shangxiaobj sharat.sharma shubhendu sunjia suzhengwei the.bling ting.wang unknown vmahe wangqi wangxiyuan watanabe isao weiweigu wu.chunyang xiaoxue yanxubin yuhui_inspur zhang.lei zhangbailin zhangdebo zhangguoqing zhangjianfeng zhangyanxian zhangyanxian zhengwei6082 zhufl zhulingjie zhurong zhuzeyu zte-hanrong 鲿˜±è’™00205026 <00205026@zte.intra> 
python-watcher-4.0.0/babel.cfg0000664000175000017500000000002113656752270016245 0ustar zuulzuul00000000000000[python: **.py] python-watcher-4.0.0/CONTRIBUTING.rst0000664000175000017500000000103413656752270017165 0ustar zuulzuul00000000000000If you would like to contribute to the development of OpenStack, you must follow the steps in this page: https://docs.openstack.org/infra/manual/developers.html Once those steps have been completed, changes to OpenStack should be submitted for review via the Gerrit tool, following the workflow documented at: https://docs.openstack.org/infra/manual/developers.html#development-workflow Pull requests submitted through GitHub will be ignored. Bugs should be filed on Launchpad, not GitHub: https://bugs.launchpad.net/watcher python-watcher-4.0.0/README.rst0000664000175000017500000000221313656752270016213 0ustar zuulzuul00000000000000======= Watcher ======= .. image:: https://governance.openstack.org/tc/badges/watcher.svg :target: https://governance.openstack.org/tc/reference/tags/index.html .. Change things from this point on .. Except where otherwise noted, this document is licensed under Creative Commons Attribution 3.0 License. You can view the license at: https://creativecommons.org/licenses/by/3.0/ OpenStack Watcher provides a flexible and scalable resource optimization service for multi-tenant OpenStack-based clouds. Watcher provides a robust framework to realize a wide range of cloud optimization goals, including the reduction of data center operating costs, increased system performance via intelligent virtual machine migration, increased energy efficiency and more! 
* Free software: Apache license * Wiki: https://wiki.openstack.org/wiki/Watcher * Source: https://opendev.org/openstack/watcher * Bugs: https://bugs.launchpad.net/watcher * Documentation: https://docs.openstack.org/watcher/latest/ * Release notes: https://docs.openstack.org/releasenotes/watcher/ * Design specifications: https://specs.openstack.org/openstack/watcher-specs/ python-watcher-4.0.0/.zuul.yaml0000664000175000017500000001555513656752270016502 0ustar zuulzuul00000000000000- project: templates: - check-requirements - openstack-cover-jobs - openstack-lower-constraints-jobs - openstack-python3-ussuri-jobs - publish-openstack-docs-pti - release-notes-jobs-python3 check: jobs: - watcher-tempest-functional - watcher-grenade - watcher-tempest-strategies - watcher-tempest-actuator - watcherclient-tempest-functional - watcher-tls-test - watcher-tempest-functional-ipv6-only gate: queue: watcher jobs: - watcher-tempest-functional - watcher-tempest-functional-ipv6-only - job: name: watcher-tempest-dummy_optim parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_dummy_optim - job: name: watcher-tempest-actuator parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_actuator - job: name: watcher-tempest-basic_optim parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_basic_optim - job: name: watcher-tempest-vm_workload_consolidation parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_vm_workload_consolidation devstack_local_conf: test-config: $WATCHER_CONFIG: watcher_strategies.vm_workload_consolidation: datasource: ceilometer - job: name: watcher-tempest-workload_balancing parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_workload_balancing - job: name: watcher-tempest-zone_migration 
parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_zone_migration - job: name: watcher-tempest-host_maintenance parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_host_maintenance - job: name: watcher-tempest-storage_balance parent: watcher-tempest-multinode vars: tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_storage_balance devstack_local_conf: test-config: $TEMPEST_CONFIG: volume: backend_names: ['BACKEND_1', 'BACKEND_2'] volume-feature-enabled: multi_backend: true - job: name: watcher-tempest-strategies parent: watcher-tempest-multinode vars: tempest_concurrency: 1 tempest_test_regex: watcher_tempest_plugin.tests.scenario.test_execute_strategies - job: name: watcher-tls-test parent: watcher-tempest-multinode group-vars: subnode: devstack_services: tls-proxy: true vars: devstack_services: tls-proxy: true - job: name: watcher-tempest-multinode parent: watcher-tempest-functional nodeset: openstack-two-node-bionic roles: - zuul: openstack/tempest group-vars: subnode: devstack_local_conf: post-config: $WATCHER_CONF: watcher_cluster_data_model_collectors.compute: period: 120 watcher_cluster_data_model_collectors.baremetal: period: 120 watcher_cluster_data_model_collectors.storage: period: 120 devstack_services: watcher-api: false watcher-decision-engine: true watcher-applier: false # We need to add TLS support for watcher plugin tls-proxy: false ceilometer: false ceilometer-acompute: false ceilometer-acentral: false ceilometer-anotification: false watcher: false gnocchi-api: false gnocchi-metricd: false rabbit: false mysql: false vars: devstack_local_conf: post-config: $WATCHER_CONF: watcher_cluster_data_model_collectors.compute: period: 120 watcher_cluster_data_model_collectors.baremetal: period: 120 watcher_cluster_data_model_collectors.storage: period: 120 test-config: $TEMPEST_CONFIG: compute: min_compute_nodes: 2 
min_microversion: 2.56 compute-feature-enabled: live_migration: true block_migration_for_live_migration: true placement: min_microversion: 1.29 devstack_plugins: ceilometer: https://opendev.org/openstack/ceilometer - job: name: watcher-tempest-functional parent: devstack-tempest timeout: 7200 required-projects: &base_required_projects - openstack/ceilometer - openstack/devstack-gate - openstack/python-openstackclient - openstack/python-watcherclient - openstack/watcher - openstack/watcher-tempest-plugin - openstack/tempest vars: &base_vars devstack_plugins: watcher: https://opendev.org/openstack/watcher devstack_services: tls-proxy: false watcher-api: true watcher-decision-engine: true watcher-applier: true tempest: true s-account: false s-container: false s-object: false s-proxy: false devstack_localrc: TEMPEST_PLUGINS: /opt/stack/watcher-tempest-plugin USE_PYTHON3: true tempest_test_regex: watcher_tempest_plugin.tests.api tox_envlist: all tox_environment: # Do we really need to set this? 
It's cargo culted PYTHONUNBUFFERED: 'true' zuul_copy_output: /etc/hosts: logs - job: name: watcher-tempest-functional-ipv6-only parent: devstack-tempest-ipv6 description: | Watcher devstack tempest tests job for IPv6-only deployment required-projects: *base_required_projects vars: *base_vars - job: name: watcher-grenade parent: legacy-dsvm-base timeout: 10800 run: playbooks/legacy/grenade-devstack-watcher/run.yaml post-run: playbooks/legacy/grenade-devstack-watcher/post.yaml irrelevant-files: - ^(test-|)requirements.txt$ - ^.*\.rst$ - ^api-ref/.*$ - ^doc/.*$ - ^watcher/hacking/.*$ - ^watcher/tests/.*$ - ^releasenotes/.*$ - ^setup.cfg$ - ^tools/.*$ - ^tox.ini$ required-projects: - openstack/grenade - openstack/devstack-gate - openstack/watcher - openstack/python-watcherclient - openstack/watcher-tempest-plugin - job: # This job is used in python-watcherclient repo name: watcherclient-tempest-functional parent: watcher-tempest-functional timeout: 4200 vars: tempest_concurrency: 1 tempest_test_regex: watcher_tempest_plugin.tests.client_functional python-watcher-4.0.0/setup.py0000664000175000017500000000127113656752270016241 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import setuptools setuptools.setup( setup_requires=['pbr>=2.0.0'], pbr=True) python-watcher-4.0.0/watcher/0000775000175000017500000000000013656752352016164 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/__init__.py0000664000175000017500000000120013656752270020265 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pbr.version __version__ = pbr.version.VersionInfo('python-watcher').version_string() python-watcher-4.0.0/watcher/version.py0000664000175000017500000000137013656752270020223 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import pbr.version version_info = pbr.version.VersionInfo('python-watcher') version_string = version_info.version_string() python-watcher-4.0.0/watcher/common/0000775000175000017500000000000013656752352017454 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/common/utils.py0000664000175000017500000001127313656752270021171 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Utilities and helper functions.""" import datetime import random import re import string from croniter import croniter from jsonschema import validators from oslo_config import cfg from oslo_log import log from oslo_utils import strutils from oslo_utils import uuidutils import six from watcher.common import exception CONF = cfg.CONF LOG = log.getLogger(__name__) class Struct(dict): """Specialized dict where you access an item like an attribute >>> struct = Struct() >>> struct['a'] = 1 >>> struct.b = 2 >>> assert struct.a == 1 >>> assert struct['b'] == 2 """ def __getattr__(self, name): try: return self[name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): try: self[name] = value except KeyError: raise AttributeError(name) generate_uuid = uuidutils.generate_uuid is_uuid_like = uuidutils.is_uuid_like is_int_like = strutils.is_int_like def is_cron_like(value): """Return True is submitted value is like cron syntax""" try: croniter(value, datetime.datetime.now()) except Exception as e: raise 
exception.CronFormatIsInvalid(message=str(e)) return True def safe_rstrip(value, chars=None): """Removes trailing characters from a string if that does not make it empty :param value: A string value that will be stripped. :param chars: Characters to remove. :return: Stripped value. """ if not isinstance(value, six.string_types): LOG.warning( "Failed to remove trailing character. Returning original object." "Supplied object is not a string: %s,", value) return value return value.rstrip(chars) or value def is_hostname_safe(hostname): """Determine if the supplied hostname is RFC compliant. Check that the supplied hostname conforms to: * http://en.wikipedia.org/wiki/Hostname * http://tools.ietf.org/html/rfc952 * http://tools.ietf.org/html/rfc1123 :param hostname: The hostname to be validated. :returns: True if valid. False if not. """ m = r'^[a-z0-9]([a-z0-9\-]{0,61}[a-z0-9])?$' return (isinstance(hostname, six.string_types) and (re.match(m, hostname) is not None)) def get_cls_import_path(cls): """Return the import path of a given class""" module = cls.__module__ if module is None or module == str.__module__: return cls.__name__ return module + '.' 
+ cls.__name__ # Default value feedback extension as jsonschema doesn't support it def extend_with_default(validator_class): validate_properties = validator_class.VALIDATORS["properties"] def set_defaults(validator, properties, instance, schema): for prop, subschema in properties.items(): if "default" in subschema and instance is not None: instance.setdefault(prop, subschema["default"]) for error in validate_properties( validator, properties, instance, schema ): yield error return validators.extend(validator_class, {"properties": set_defaults}) # Parameter strict check extension as jsonschema doesn't support it def extend_with_strict_schema(validator_class): validate_properties = validator_class.VALIDATORS["properties"] def strict_schema(validator, properties, instance, schema): if instance is None: return for para in instance.keys(): if para not in properties.keys(): raise exception.AuditParameterNotAllowed(parameter=para) for error in validate_properties( validator, properties, instance, schema ): yield error return validators.extend(validator_class, {"properties": strict_schema}) StrictDefaultValidatingDraft4Validator = extend_with_default( extend_with_strict_schema(validators.Draft4Validator)) Draft4Validator = validators.Draft4Validator def random_string(n): return ''.join([random.choice( string.ascii_letters + string.digits) for i in range(n)]) python-watcher-4.0.0/watcher/common/__init__.py0000664000175000017500000000000013656752270021552 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/common/keystone_helper.py0000664000175000017500000001060713656752270023231 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from keystoneauth1.exceptions import http as ks_exceptions from keystoneauth1 import loading from keystoneauth1 import session from watcher._i18n import _ from watcher.common import clients from watcher.common import exception from watcher import conf CONF = conf.CONF LOG = log.getLogger(__name__) class KeystoneHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.keystone = self.osc.keystone() def get_role(self, name_or_id): try: role = self.keystone.roles.get(name_or_id) return role except ks_exceptions.NotFound: roles = self.keystone.roles.list(name=name_or_id) if len(roles) == 0: raise exception.Invalid( message=(_("Role not Found: %s") % name_or_id)) if len(roles) > 1: raise exception.Invalid( message=(_("Role name seems ambiguous: %s") % name_or_id)) return roles[0] def get_user(self, name_or_id): try: user = self.keystone.users.get(name_or_id) return user except ks_exceptions.NotFound: users = self.keystone.users.list(name=name_or_id) if len(users) == 0: raise exception.Invalid( message=(_("User not Found: %s") % name_or_id)) if len(users) > 1: raise exception.Invalid( message=(_("User name seems ambiguous: %s") % name_or_id)) return users[0] def get_project(self, name_or_id): try: project = self.keystone.projects.get(name_or_id) return project except ks_exceptions.NotFound: projects = self.keystone.projects.list(name=name_or_id) if len(projects) == 0: raise exception.Invalid( message=(_("Project not Found: %s") % name_or_id)) if 
len(projects) > 1: raise exception.Invalid( messsage=(_("Project name seems ambiguous: %s") % name_or_id)) return projects[0] def get_domain(self, name_or_id): try: domain = self.keystone.domains.get(name_or_id) return domain except ks_exceptions.NotFound: domains = self.keystone.domains.list(name=name_or_id) if len(domains) == 0: raise exception.Invalid( message=(_("Domain not Found: %s") % name_or_id)) if len(domains) > 1: raise exception.Invalid( message=(_("Domain name seems ambiguous: %s") % name_or_id)) return domains[0] def create_session(self, user_id, password): user = self.get_user(user_id) loader = loading.get_plugin_loader('password') auth = loader.load_from_options( auth_url=CONF.watcher_clients_auth.auth_url, password=password, user_id=user_id, project_id=user.default_project_id) return session.Session(auth=auth) def create_user(self, user): project = self.get_project(user['project']) domain = self.get_domain(user['domain']) _user = self.keystone.users.create( user['name'], password=user['password'], domain=domain, project=project, ) for role in user['roles']: role = self.get_role(role) self.keystone.roles.grant( role.id, user=_user.id, project=project.id) return _user def delete_user(self, user): try: user = self.get_user(user) self.keystone.users.delete(user) except exception.Invalid: pass python-watcher-4.0.0/watcher/common/nova_helper.py0000664000175000017500000007215713656752270022343 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # import time from novaclient import api_versions from oslo_log import log import glanceclient.exc as glexceptions import novaclient.exceptions as nvexceptions from watcher.common import clients from watcher.common import exception from watcher import conf LOG = log.getLogger(__name__) CONF = conf.CONF class NovaHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.neutron = self.osc.neutron() self.cinder = self.osc.cinder() self.nova = self.osc.nova() self.glance = self.osc.glance() def get_compute_node_list(self): hypervisors = self.nova.hypervisors.list() # filter out baremetal nodes from hypervisors compute_nodes = [node for node in hypervisors if node.hypervisor_type != 'ironic'] return compute_nodes def get_compute_node_by_name(self, node_name, servers=False, detailed=False): """Search for a hypervisor (compute node) by hypervisor_hostname :param node_name: The hypervisor_hostname to search :param servers: If true, include information about servers per hypervisor :param detailed: If true, include information about the compute service per hypervisor (requires microversion 2.53) """ return self.nova.hypervisors.search(node_name, servers=servers, detailed=detailed) def get_compute_node_by_hostname(self, node_hostname): """Get compute node by hostname :param node_hostname: Compute service hostname :returns: novaclient.v2.hypervisors.Hypervisor object if found :raises: ComputeNodeNotFound if no hypervisor is found for the compute service hostname or there was an error communicating with nova """ try: # This is a fuzzy match on hypervisor_hostname so we could get back # more than one compute node. If so, match on the compute service # hostname. 
compute_nodes = self.get_compute_node_by_name( node_hostname, detailed=True) for cn in compute_nodes: if cn.service['host'] == node_hostname: return cn raise exception.ComputeNodeNotFound(name=node_hostname) except Exception as exc: LOG.exception(exc) raise exception.ComputeNodeNotFound(name=node_hostname) def get_compute_node_by_uuid(self, node_uuid): """Get compute node by uuid :param node_uuid: hypervisor id as uuid after microversion 2.53 :returns: novaclient.v2.hypervisors.Hypervisor object if found """ return self.nova.hypervisors.get(node_uuid) def get_instance_list(self, filters=None, marker=None, limit=-1): """List servers for all tenants with details. This always gets servers with the all_tenants=True filter. :param filters: Dict of additional filters (optional). :param marker: Get servers that appear later in the server list than that represented by this server id (optional). :param limit: Maximum number of servers to return (optional). If limit == -1, all servers will be returned, note that limit == -1 will have a performance penalty. 
For details, please see: https://bugs.launchpad.net/watcher/+bug/1834679 :returns: list of novaclient Server objects """ search_opts = {'all_tenants': True} if filters: search_opts.update(filters) return self.nova.servers.list(search_opts=search_opts, marker=marker, limit=limit) def get_instance_by_uuid(self, instance_uuid): return [instance for instance in self.nova.servers.list(search_opts={"all_tenants": True, "uuid": instance_uuid})] def get_instance_by_name(self, instance_name): return [instance for instance in self.nova.servers.list(search_opts={"all_tenants": True, "name": instance_name})] def get_instances_by_node(self, host): return [instance for instance in self.nova.servers.list(search_opts={"all_tenants": True, "host": host}, limit=-1)] def get_flavor_list(self): return self.nova.flavors.list(**{'is_public': None}) def get_service(self, service_id): return self.nova.services.find(id=service_id) def get_aggregate_list(self): return self.nova.aggregates.list() def get_aggregate_detail(self, aggregate_id): return self.nova.aggregates.get(aggregate_id) def get_availability_zone_list(self): return self.nova.availability_zones.list(detailed=True) def get_service_list(self): return self.nova.services.list(binary='nova-compute') def find_instance(self, instance_id): return self.nova.servers.get(instance_id) def confirm_resize(self, instance, previous_status, retry=60): instance.confirm_resize() instance = self.nova.servers.get(instance.id) while instance.status != previous_status and retry: instance = self.nova.servers.get(instance.id) retry -= 1 time.sleep(1) if instance.status == previous_status: return True else: LOG.debug("confirm resize failed for the " "instance %s", instance.id) return False def wait_for_volume_status(self, volume, status, timeout=60, poll_interval=1): """Wait until volume reaches given status. 
:param volume: volume resource :param status: expected status of volume :param timeout: timeout in seconds :param poll_interval: poll interval in seconds """ start_time = time.time() while time.time() - start_time < timeout: volume = self.cinder.volumes.get(volume.id) if volume.status == status: break time.sleep(poll_interval) else: raise Exception("Volume %s did not reach status %s after %d s" % (volume.id, status, timeout)) return volume.status == status def watcher_non_live_migrate_instance(self, instance_id, dest_hostname, retry=120): """This method migrates a given instance This method uses the Nova built-in migrate() action to do a migration of a given instance. For migrating a given dest_hostname, Nova API version must be 2.56 or higher. It returns True if the migration was successful, False otherwise. :param instance_id: the unique id of the instance to migrate. :param dest_hostname: the name of the destination compute node, if destination_node is None, nova scheduler choose the destination host """ LOG.debug( "Trying a cold migrate of instance '%s' ", instance_id) # Looking for the instance to migrate instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance %s not found !", instance_id) return False else: host_name = getattr(instance, "OS-EXT-SRV-ATTR:host") LOG.debug( "Instance %(instance)s found on host '%(host)s'.", {'instance': instance_id, 'host': host_name}) previous_status = getattr(instance, 'status') instance.migrate(host=dest_hostname) instance = self.nova.servers.get(instance_id) while (getattr(instance, 'status') not in ["VERIFY_RESIZE", "ERROR"] and retry): instance = self.nova.servers.get(instance.id) time.sleep(2) retry -= 1 new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host') if (host_name != new_hostname and instance.status == 'VERIFY_RESIZE'): if not self.confirm_resize(instance, previous_status): return False LOG.debug( "cold migration succeeded : " "instance %(instance)s is now on host '%(host)s'.", 
{'instance': instance_id, 'host': new_hostname}) return True else: LOG.debug( "cold migration for instance %s failed", instance_id) return False def resize_instance(self, instance_id, flavor, retry=120): """This method resizes given instance with specified flavor. This method uses the Nova built-in resize() action to do a resize of a given instance. It returns True if the resize was successful, False otherwise. :param instance_id: the unique id of the instance to resize. :param flavor: the name or ID of the flavor to resize to. """ LOG.debug( "Trying a resize of instance %(instance)s to " "flavor '%(flavor)s'", {'instance': instance_id, 'flavor': flavor}) # Looking for the instance to resize instance = self.find_instance(instance_id) flavor_id = None try: flavor_id = self.nova.flavors.get(flavor).id except nvexceptions.NotFound: flavor_id = [f.id for f in self.nova.flavors.list() if f.name == flavor][0] except nvexceptions.ClientException as e: LOG.debug("Nova client exception occurred while resizing " "instance %s. 
Exception: %s", instance_id, e) if not flavor_id: LOG.debug("Flavor not found: %s", flavor) return False if not instance: LOG.debug("Instance not found: %s", instance_id) return False instance_status = getattr(instance, 'OS-EXT-STS:vm_state') LOG.debug( "Instance %(id)s is in '%(status)s' status.", {'id': instance_id, 'status': instance_status}) instance.resize(flavor=flavor_id) while getattr(instance, 'OS-EXT-STS:vm_state') != 'resized' \ and retry: instance = self.nova.servers.get(instance.id) LOG.debug( 'Waiting the resize of {0} to {1}'.format( instance, flavor_id)) time.sleep(1) retry -= 1 instance_status = getattr(instance, 'status') if instance_status != 'VERIFY_RESIZE': return False instance.confirm_resize() LOG.debug("Resizing succeeded : instance %s is now on flavor " "'%s'.", instance_id, flavor_id) return True def live_migrate_instance(self, instance_id, dest_hostname, retry=120): """This method does a live migration of a given instance This method uses the Nova built-in live_migrate() action to do a live migration of a given instance. It returns True if the migration was successful, False otherwise. :param instance_id: the unique id of the instance to migrate. :param dest_hostname: the name of the destination compute node, if destination_node is None, nova scheduler choose the destination host """ LOG.debug( "Trying a live migrate instance %(instance)s ", {'instance': instance_id}) # Looking for the instance to migrate instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance not found: %s", instance_id) return False else: host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') LOG.debug( "Instance %(instance)s found on host '%(host)s'.", {'instance': instance_id, 'host': host_name}) # From nova api version 2.25(Mitaka release), the default value of # block_migration is None which is mapped to 'auto'. 
instance.live_migrate(host=dest_hostname) instance = self.nova.servers.get(instance_id) # NOTE: If destination host is not specified for live migration # let nova scheduler choose the destination host. if dest_hostname is None: while (instance.status not in ['ACTIVE', 'ERROR'] and retry): instance = self.nova.servers.get(instance.id) LOG.debug( 'Waiting the migration of {0}'.format(instance.id)) time.sleep(1) retry -= 1 new_hostname = getattr(instance, 'OS-EXT-SRV-ATTR:host') if host_name != new_hostname and instance.status == 'ACTIVE': LOG.debug( "Live migration succeeded : " "instance %(instance)s is now on host '%(host)s'.", {'instance': instance_id, 'host': new_hostname}) return True else: return False while getattr(instance, 'OS-EXT-SRV-ATTR:host') != dest_hostname \ and retry: instance = self.nova.servers.get(instance.id) if not getattr(instance, 'OS-EXT-STS:task_state'): LOG.debug("Instance task state: %s is null", instance_id) break LOG.debug( 'Waiting the migration of {0} to {1}'.format( instance, getattr(instance, 'OS-EXT-SRV-ATTR:host'))) time.sleep(1) retry -= 1 host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') if host_name != dest_hostname: return False LOG.debug( "Live migration succeeded : " "instance %(instance)s is now on host '%(host)s'.", {'instance': instance_id, 'host': host_name}) return True def abort_live_migrate(self, instance_id, source, destination, retry=240): LOG.debug("Aborting live migration of instance %s", instance_id) migration = self.get_running_migration(instance_id) if migration: migration_id = getattr(migration[0], "id") try: self.nova.server_migrations.live_migration_abort( server=instance_id, migration=migration_id) except exception as e: # Note: Does not return from here, as abort request can't be # accepted but migration still going on. 
LOG.exception(e) else: LOG.debug( "No running migrations found for instance %s", instance_id) while retry: instance = self.nova.servers.get(instance_id) if (getattr(instance, 'OS-EXT-STS:task_state') is None and getattr(instance, 'status') in ['ACTIVE', 'ERROR']): break time.sleep(2) retry -= 1 instance_host = getattr(instance, 'OS-EXT-SRV-ATTR:host') instance_status = getattr(instance, 'status') # Abort live migration successful, action is cancelled if instance_host == source and instance_status == 'ACTIVE': return True # Nova Unable to abort live migration, action is succeeded elif instance_host == destination and instance_status == 'ACTIVE': return False else: raise Exception("Live migration execution and abort both failed " "for the instance %s" % instance_id) def enable_service_nova_compute(self, hostname): if float(CONF.nova_client.api_version) < 2.53: status = self.nova.services.enable( host=hostname, binary='nova-compute').status == 'enabled' else: service_uuid = self.nova.services.list(host=hostname, binary='nova-compute')[0].id status = self.nova.services.enable( service_uuid=service_uuid).status == 'enabled' return status def disable_service_nova_compute(self, hostname, reason=None): if float(CONF.nova_client.api_version) < 2.53: status = self.nova.services.disable_log_reason( host=hostname, binary='nova-compute', reason=reason).status == 'disabled' else: service_uuid = self.nova.services.list(host=hostname, binary='nova-compute')[0].id status = self.nova.services.disable_log_reason( service_uuid=service_uuid, reason=reason).status == 'disabled' return status def create_image_from_instance(self, instance_id, image_name, metadata={"reason": "instance_migrate"}): """This method creates a new image from a given instance. It waits for this image to be in 'active' state before returning. It returns the unique UUID of the created image if successful, None otherwise. :param instance_id: the uniqueid of the instance to backup as an image. 
:param image_name: the name of the image to create. :param metadata: a dictionary containing the list of key-value pairs to associate to the image as metadata. """ LOG.debug( "Trying to create an image from instance %s ...", instance_id) # Looking for the instance instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance not found: %s", instance_id) return None else: host_name = getattr(instance, 'OS-EXT-SRV-ATTR:host') LOG.debug( "Instance %(instance)s found on host '%(host)s'.", {'instance': instance_id, 'host': host_name}) # We need to wait for an appropriate status # of the instance before we can build an image from it if self.wait_for_instance_status(instance, ('ACTIVE', 'SHUTOFF'), 5, 10): image_uuid = self.nova.servers.create_image(instance_id, image_name, metadata) image = self.glance.images.get(image_uuid) if not image: return None # Waiting for the new image to be officially in ACTIVE state # in order to make sure it can be used status = image.status retry = 10 while status != 'active' and status != 'error' and retry: time.sleep(5) retry -= 1 # Retrieve the instance again so the status field updates image = self.glance.images.get(image_uuid) if not image: break status = image.status LOG.debug("Current image status: %s", status) if not image: LOG.debug("Image not found: %s", image_uuid) else: LOG.debug( "Image %(image)s successfully created for " "instance %(instance)s", {'image': image_uuid, 'instance': instance_id}) return image_uuid return None def delete_instance(self, instance_id): """This method deletes a given instance. :param instance_id: the unique id of the instance to delete. 
""" LOG.debug("Trying to remove instance %s ...", instance_id) instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance not found: %s", instance_id) return False else: self.nova.servers.delete(instance_id) LOG.debug("Instance %s removed.", instance_id) return True def stop_instance(self, instance_id): """This method stops a given instance. :param instance_id: the unique id of the instance to stop. """ LOG.debug("Trying to stop instance %s ...", instance_id) instance = self.find_instance(instance_id) if not instance: LOG.debug("Instance not found: %s", instance_id) return False elif getattr(instance, 'OS-EXT-STS:vm_state') == "stopped": LOG.debug("Instance has been stopped: %s", instance_id) return True else: self.nova.servers.stop(instance_id) if self.wait_for_instance_state(instance, "stopped", 8, 10): LOG.debug("Instance %s stopped.", instance_id) return True else: return False def wait_for_instance_state(self, server, state, retry, sleep): """Waits for server to be in a specific state The state can be one of the following : active, stopped :param server: server object. :param state: for which state we are waiting for :param retry: how many times to retry :param sleep: seconds to sleep between the retries """ if not server: return False while getattr(server, 'OS-EXT-STS:vm_state') != state and retry: time.sleep(sleep) server = self.nova.servers.get(server) retry -= 1 return getattr(server, 'OS-EXT-STS:vm_state') == state def wait_for_instance_status(self, instance, status_list, retry, sleep): """Waits for instance to be in a specific status The status can be one of the following : BUILD, ACTIVE, ERROR, VERIFY_RESIZE, SHUTOFF :param instance: instance object. 
:param status_list: tuple containing the list of status we are waiting for :param retry: how many times to retry :param sleep: seconds to sleep between the retries """ if not instance: return False while instance.status not in status_list and retry: LOG.debug("Current instance status: %s", instance.status) time.sleep(sleep) instance = self.nova.servers.get(instance.id) retry -= 1 LOG.debug("Current instance status: %s", instance.status) return instance.status in status_list def create_instance(self, node_id, inst_name="test", image_id=None, flavor_name="m1.tiny", sec_group_list=["default"], network_names_list=["demo-net"], keypair_name="mykeys", create_new_floating_ip=True, block_device_mapping_v2=None): """This method creates a new instance It also creates, if requested, a new floating IP and associates it with the new instance It returns the unique id of the created instance. """ LOG.debug( "Trying to create new instance '%(inst)s' " "from image '%(image)s' with flavor '%(flavor)s' ...", {'inst': inst_name, 'image': image_id, 'flavor': flavor_name}) try: self.nova.keypairs.findall(name=keypair_name) except nvexceptions.NotFound: LOG.debug("Key pair '%s' not found ", keypair_name) return try: image = self.glance.images.get(image_id) except glexceptions.NotFound: LOG.debug("Image '%s' not found ", image_id) return try: flavor = self.nova.flavors.find(name=flavor_name) except nvexceptions.NotFound: LOG.debug("Flavor '%s' not found ", flavor_name) return # Make sure all security groups exist for sec_group_name in sec_group_list: group_id = self.get_security_group_id_from_name(sec_group_name) if not group_id: LOG.debug("Security group '%s' not found ", sec_group_name) return net_list = list() for network_name in network_names_list: nic_id = self.get_network_id_from_name(network_name) if not nic_id: LOG.debug("Network '%s' not found ", network_name) return net_obj = {"net-id": nic_id} net_list.append(net_obj) # get availability zone of destination host azone = 
self.nova.services.list(host=node_id, binary='nova-compute')[0].zone instance = self.nova.servers.create( inst_name, image, flavor=flavor, key_name=keypair_name, security_groups=sec_group_list, nics=net_list, block_device_mapping_v2=block_device_mapping_v2, availability_zone="%s:%s" % (azone, node_id)) # Poll at 5 second intervals, until the status is no longer 'BUILD' if instance: if self.wait_for_instance_status(instance, ('ACTIVE', 'ERROR'), 5, 10): instance = self.nova.servers.get(instance.id) if create_new_floating_ip and instance.status == 'ACTIVE': LOG.debug( "Creating a new floating IP" " for instance '%s'", instance.id) # Creating floating IP for the new instance floating_ip = self.nova.floating_ips.create() instance.add_floating_ip(floating_ip) LOG.debug( "Instance %(instance)s associated to " "Floating IP '%(ip)s'", {'instance': instance.id, 'ip': floating_ip.ip}) return instance def get_security_group_id_from_name(self, group_name="default"): """This method returns the security group of the provided group name""" security_groups = self.neutron.list_security_groups(name=group_name) security_group_id = security_groups['security_groups'][0]['id'] return security_group_id def get_network_id_from_name(self, net_name="private"): """This method returns the unique id of the provided network name""" networks = self.neutron.list_networks(name=net_name) # LOG.debug(networks) network_id = networks['networks'][0]['id'] return network_id def get_hostname(self, instance): return str(getattr(instance, 'OS-EXT-SRV-ATTR:host')) def get_running_migration(self, instance_id): return self.nova.server_migrations.list(server=instance_id) def swap_volume(self, old_volume, new_volume, retry=120, retry_interval=10): """Swap old_volume for new_volume""" attachments = old_volume.attachments instance_id = attachments[0]['server_id'] # do volume update self.nova.volumes.update_server_volume( instance_id, old_volume.id, new_volume.id) while getattr(new_volume, 'status') != 'in-use' 
and retry: new_volume = self.cinder.volumes.get(new_volume.id) LOG.debug('Waiting volume update to {0}'.format(new_volume)) time.sleep(retry_interval) retry -= 1 LOG.debug("retry count: %s", retry) if getattr(new_volume, 'status') != "in-use": LOG.error("Volume update retry timeout or error") return False host_name = getattr(new_volume, "os-vol-host-attr:host") LOG.debug( "Volume update succeeded : " "Volume %s is now on host '%s'.", (new_volume.id, host_name)) return True def _check_nova_api_version(self, client, version): api_version = api_versions.APIVersion(version_str=version) try: api_versions.discover_version(client, api_version) return True except nvexceptions.UnsupportedVersion as e: LOG.exception(e) return False python-watcher-4.0.0/watcher/common/clients.py0000775000175000017500000002740013656752270021474 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from cinderclient import client as ciclient from glanceclient import client as glclient from gnocchiclient import client as gnclient from ironicclient import client as irclient from keystoneauth1 import adapter as ka_adapter from keystoneauth1 import loading as ka_loading from keystoneclient import client as keyclient from monascaclient import client as monclient from neutronclient.neutron import client as netclient from novaclient import api_versions as nova_api_versions from novaclient import client as nvclient from watcher.common import exception try: from ceilometerclient import client as ceclient HAS_CEILCLIENT = True except ImportError: HAS_CEILCLIENT = False CONF = cfg.CONF _CLIENTS_AUTH_GROUP = 'watcher_clients_auth' # NOTE(mriedem): This is the minimum required version of the nova API for # watcher features to work. If new features are added which require new # versions, they should perform version discovery and be backward compatible # for at least one release before raising the minimum required version. MIN_NOVA_API_VERSION = '2.56' def check_min_nova_api_version(config_version): """Validates the minimum required nova API version. :param config_version: The configured [nova_client]/api_version value :raises: ValueError if the configured version is less than the required minimum """ min_required = nova_api_versions.APIVersion(MIN_NOVA_API_VERSION) if nova_api_versions.APIVersion(config_version) < min_required: raise ValueError('Invalid nova_client.api_version %s. %s or ' 'greater is required.' 
% (config_version, MIN_NOVA_API_VERSION)) class OpenStackClients(object): """Convenience class to create and cache client instances.""" def __init__(self): self.reset_clients() def reset_clients(self): self._session = None self._keystone = None self._nova = None self._glance = None self._gnocchi = None self._cinder = None self._ceilometer = None self._monasca = None self._neutron = None self._ironic = None self._placement = None def _get_keystone_session(self): auth = ka_loading.load_auth_from_conf_options(CONF, _CLIENTS_AUTH_GROUP) sess = ka_loading.load_session_from_conf_options(CONF, _CLIENTS_AUTH_GROUP, auth=auth) return sess @property def auth_url(self): return self.keystone().auth_url @property def session(self): if not self._session: self._session = self._get_keystone_session() return self._session def _get_client_option(self, client, option): return getattr(getattr(CONF, '%s_client' % client), option) @exception.wrap_keystone_exception def keystone(self): if self._keystone: return self._keystone keystone_interface = self._get_client_option('keystone', 'interface') keystone_region_name = self._get_client_option('keystone', 'region_name') self._keystone = keyclient.Client( interface=keystone_interface, region_name=keystone_region_name, session=self.session) return self._keystone @exception.wrap_keystone_exception def nova(self): if self._nova: return self._nova novaclient_version = self._get_client_option('nova', 'api_version') check_min_nova_api_version(novaclient_version) nova_endpoint_type = self._get_client_option('nova', 'endpoint_type') nova_region_name = self._get_client_option('nova', 'region_name') self._nova = nvclient.Client(novaclient_version, endpoint_type=nova_endpoint_type, region_name=nova_region_name, session=self.session) return self._nova @exception.wrap_keystone_exception def glance(self): if self._glance: return self._glance glanceclient_version = self._get_client_option('glance', 'api_version') glance_endpoint_type = 
self._get_client_option('glance', 'endpoint_type') glance_region_name = self._get_client_option('glance', 'region_name') self._glance = glclient.Client(glanceclient_version, interface=glance_endpoint_type, region_name=glance_region_name, session=self.session) return self._glance @exception.wrap_keystone_exception def gnocchi(self): if self._gnocchi: return self._gnocchi gnocchiclient_version = self._get_client_option('gnocchi', 'api_version') gnocchiclient_interface = self._get_client_option('gnocchi', 'endpoint_type') gnocchiclient_region_name = self._get_client_option('gnocchi', 'region_name') adapter_options = { "interface": gnocchiclient_interface, "region_name": gnocchiclient_region_name } self._gnocchi = gnclient.Client(gnocchiclient_version, adapter_options=adapter_options, session=self.session) return self._gnocchi @exception.wrap_keystone_exception def cinder(self): if self._cinder: return self._cinder cinderclient_version = self._get_client_option('cinder', 'api_version') cinder_endpoint_type = self._get_client_option('cinder', 'endpoint_type') cinder_region_name = self._get_client_option('cinder', 'region_name') self._cinder = ciclient.Client(cinderclient_version, endpoint_type=cinder_endpoint_type, region_name=cinder_region_name, session=self.session) return self._cinder @exception.wrap_keystone_exception def ceilometer(self): if self._ceilometer: return self._ceilometer ceilometerclient_version = self._get_client_option('ceilometer', 'api_version') ceilometer_endpoint_type = self._get_client_option('ceilometer', 'endpoint_type') ceilometer_region_name = self._get_client_option('ceilometer', 'region_name') self._ceilometer = ceclient.get_client( ceilometerclient_version, endpoint_type=ceilometer_endpoint_type, region_name=ceilometer_region_name, session=self.session) return self._ceilometer @exception.wrap_keystone_exception def monasca(self): if self._monasca: return self._monasca monascaclient_version = self._get_client_option( 'monasca', 
'api_version') monascaclient_interface = self._get_client_option( 'monasca', 'interface') monascaclient_region = self._get_client_option( 'monasca', 'region_name') token = self.session.get_token() watcher_clients_auth_config = CONF.get(_CLIENTS_AUTH_GROUP) service_type = 'monitoring' monasca_kwargs = { 'auth_url': watcher_clients_auth_config.auth_url, 'cert_file': watcher_clients_auth_config.certfile, 'insecure': watcher_clients_auth_config.insecure, 'key_file': watcher_clients_auth_config.keyfile, 'keystone_timeout': watcher_clients_auth_config.timeout, 'os_cacert': watcher_clients_auth_config.cafile, 'service_type': service_type, 'token': token, 'username': watcher_clients_auth_config.username, 'password': watcher_clients_auth_config.password, } endpoint = self.session.get_endpoint(service_type=service_type, interface=monascaclient_interface, region_name=monascaclient_region) self._monasca = monclient.Client( monascaclient_version, endpoint, **monasca_kwargs) return self._monasca @exception.wrap_keystone_exception def neutron(self): if self._neutron: return self._neutron neutronclient_version = self._get_client_option('neutron', 'api_version') neutron_endpoint_type = self._get_client_option('neutron', 'endpoint_type') neutron_region_name = self._get_client_option('neutron', 'region_name') self._neutron = netclient.Client(neutronclient_version, endpoint_type=neutron_endpoint_type, region_name=neutron_region_name, session=self.session) self._neutron.format = 'json' return self._neutron @exception.wrap_keystone_exception def ironic(self): if self._ironic: return self._ironic ironicclient_version = self._get_client_option('ironic', 'api_version') endpoint_type = self._get_client_option('ironic', 'endpoint_type') ironic_region_name = self._get_client_option('ironic', 'region_name') self._ironic = irclient.get_client(ironicclient_version, interface=endpoint_type, region_name=ironic_region_name, session=self.session) return self._ironic 
@exception.wrap_keystone_exception def placement(self): if self._placement: return self._placement placement_version = self._get_client_option('placement', 'api_version') placement_interface = self._get_client_option('placement', 'interface') placement_region_name = self._get_client_option('placement', 'region_name') # Set accept header on every request to ensure we notify placement # service of our response body media type preferences. headers = {'accept': 'application/json'} self._placement = ka_adapter.Adapter( session=self.session, service_type='placement', default_microversion=placement_version, interface=placement_interface, region_name=placement_region_name, additional_headers=headers) return self._placement python-watcher-4.0.0/watcher/common/policies/0000775000175000017500000000000013656752352021263 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/common/policies/__init__.py0000664000175000017500000000255013656752270023375 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import itertools from watcher.common.policies import action from watcher.common.policies import action_plan from watcher.common.policies import audit from watcher.common.policies import audit_template from watcher.common.policies import base from watcher.common.policies import data_model from watcher.common.policies import goal from watcher.common.policies import scoring_engine from watcher.common.policies import service from watcher.common.policies import strategy def list_rules(): return itertools.chain( base.list_rules(), action.list_rules(), action_plan.list_rules(), audit.list_rules(), audit_template.list_rules(), data_model.list_rules(), goal.list_rules(), scoring_engine.list_rules(), service.list_rules(), strategy.list_rules(), ) python-watcher-4.0.0/watcher/common/policies/action_plan.py0000664000175000017500000000504513656752270024127 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base ACTION_PLAN = 'action_plan:%s' rules = [ policy.DocumentedRuleDefault( name=ACTION_PLAN % 'delete', check_str=base.RULE_ADMIN_API, description='Delete an action plan.', operations=[ { 'path': '/v1/action_plans/{action_plan_uuid}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=ACTION_PLAN % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve a list of action plans with detail.', operations=[ { 'path': '/v1/action_plans/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION_PLAN % 'get', check_str=base.RULE_ADMIN_API, description='Get an action plan.', operations=[ { 'path': '/v1/action_plans/{action_plan_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION_PLAN % 'get_all', check_str=base.RULE_ADMIN_API, description='Get all action plans.', operations=[ { 'path': '/v1/action_plans', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION_PLAN % 'update', check_str=base.RULE_ADMIN_API, description='Update an action plans.', operations=[ { 'path': '/v1/action_plans/{action_plan_uuid}', 'method': 'PATCH' } ] ), policy.DocumentedRuleDefault( name=ACTION_PLAN % 'start', check_str=base.RULE_ADMIN_API, description='Start an action plans.', operations=[ { 'path': '/v1/action_plans/{action_plan_uuid}/start', 'method': 'POST' } ] ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/scoring_engine.py0000664000175000017500000000413013656752270024623 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from watcher.common.policies import base SCORING_ENGINE = 'scoring_engine:%s' rules = [ # FIXME(lbragstad): Find someone from watcher to double check this # information. This API isn't listed in watcher's API reference # documentation. policy.DocumentedRuleDefault( name=SCORING_ENGINE % 'detail', check_str=base.RULE_ADMIN_API, description='List scoring engines with details.', operations=[ { 'path': '/v1/scoring_engines/detail', 'method': 'GET' } ] ), # FIXME(lbragstad): Find someone from watcher to double check this # information. This API isn't listed in watcher's API reference # documentation. policy.DocumentedRuleDefault( name=SCORING_ENGINE % 'get', check_str=base.RULE_ADMIN_API, description='Get a scoring engine.', operations=[ { 'path': '/v1/scoring_engines/{scoring_engine_id}', 'method': 'GET' } ] ), # FIXME(lbragstad): Find someone from watcher to double check this # information. This API isn't listed in watcher's API reference # documentation. policy.DocumentedRuleDefault( name=SCORING_ENGINE % 'get_all', check_str=base.RULE_ADMIN_API, description='Get all scoring engines.', operations=[ { 'path': '/v1/scoring_engines', 'method': 'GET' } ] ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/data_model.py0000664000175000017500000000203413656752270023724 0ustar zuulzuul00000000000000# Copyright 2019 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy from watcher.common.policies import base DATA_MODEL = 'data_model:%s' rules = [ policy.DocumentedRuleDefault( name=DATA_MODEL % 'get_all', check_str=base.RULE_ADMIN_API, description='List data model.', operations=[ { 'path': '/v1/data_model', 'method': 'GET' } ] ), ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/action.py0000664000175000017500000000312113656752270023106 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base ACTION = 'action:%s' rules = [ policy.DocumentedRuleDefault( name=ACTION % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve a list of actions with detail.', operations=[ { 'path': '/v1/actions/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION % 'get', check_str=base.RULE_ADMIN_API, description='Retrieve information about a given action.', operations=[ { 'path': '/v1/actions/{action_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=ACTION % 'get_all', check_str=base.RULE_ADMIN_API, description='Retrieve a list of all actions.', operations=[ { 'path': '/v1/actions', 'method': 'GET' } ] ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/strategy.py0000664000175000017500000000356013656752270023502 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base STRATEGY = 'strategy:%s' rules = [ policy.DocumentedRuleDefault( name=STRATEGY % 'detail', check_str=base.RULE_ADMIN_API, description='List strategies with detail.', operations=[ { 'path': '/v1/strategies/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=STRATEGY % 'get', check_str=base.RULE_ADMIN_API, description='Get a strategy.', operations=[ { 'path': '/v1/strategies/{strategy_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=STRATEGY % 'get_all', check_str=base.RULE_ADMIN_API, description='List all strategies.', operations=[ { 'path': '/v1/strategies', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=STRATEGY % 'state', check_str=base.RULE_ADMIN_API, description='Get state of strategy.', operations=[ { 'path': '/v1/strategies{strategy_uuid}/state', 'method': 'GET' } ] ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/goal.py0000664000175000017500000000301713656752270022557 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base GOAL = 'goal:%s' rules = [ policy.DocumentedRuleDefault( name=GOAL % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve a list of goals with detail.', operations=[ { 'path': '/v1/goals/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=GOAL % 'get', check_str=base.RULE_ADMIN_API, description='Get a goal.', operations=[ { 'path': '/v1/goals/{goal_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=GOAL % 'get_all', check_str=base.RULE_ADMIN_API, description='Get all goals.', operations=[ { 'path': '/v1/goals', 'method': 'GET' } ] ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/service.py0000664000175000017500000000305113656752270023273 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base SERVICE = 'service:%s' rules = [ policy.DocumentedRuleDefault( name=SERVICE % 'detail', check_str=base.RULE_ADMIN_API, description='List services with detail.', operations=[ { 'path': '/v1/services/', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=SERVICE % 'get', check_str=base.RULE_ADMIN_API, description='Get a specific service.', operations=[ { 'path': '/v1/services/{service_id}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=SERVICE % 'get_all', check_str=base.RULE_ADMIN_API, description='List all services.', operations=[ { 'path': '/v1/services/', 'method': 'GET' } ] ), ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/audit.py0000664000175000017500000000460213656752270022744 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base AUDIT = 'audit:%s' rules = [ policy.DocumentedRuleDefault( name=AUDIT % 'create', check_str=base.RULE_ADMIN_API, description='Create a new audit.', operations=[ { 'path': '/v1/audits', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'delete', check_str=base.RULE_ADMIN_API, description='Delete an audit.', operations=[ { 'path': '/v1/audits/{audit_uuid}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve audit list with details.', operations=[ { 'path': '/v1/audits/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'get', check_str=base.RULE_ADMIN_API, description='Get an audit.', operations=[ { 'path': '/v1/audits/{audit_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'get_all', check_str=base.RULE_ADMIN_API, description='Get all audits.', operations=[ { 'path': '/v1/audits', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT % 'update', check_str=base.RULE_ADMIN_API, description='Update an audit.', operations=[ { 'path': '/v1/audits/{audit_uuid}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/audit_template.py0000664000175000017500000000513613656752270024642 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_policy import policy from watcher.common.policies import base AUDIT_TEMPLATE = 'audit_template:%s' rules = [ policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'create', check_str=base.RULE_ADMIN_API, description='Create an audit template.', operations=[ { 'path': '/v1/audit_templates', 'method': 'POST' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'delete', check_str=base.RULE_ADMIN_API, description='Delete an audit template.', operations=[ { 'path': '/v1/audit_templates/{audit_template_uuid}', 'method': 'DELETE' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'detail', check_str=base.RULE_ADMIN_API, description='Retrieve a list of audit templates with details.', operations=[ { 'path': '/v1/audit_templates/detail', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'get', check_str=base.RULE_ADMIN_API, description='Get an audit template.', operations=[ { 'path': '/v1/audit_templates/{audit_template_uuid}', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'get_all', check_str=base.RULE_ADMIN_API, description='Get a list of all audit templates.', operations=[ { 'path': '/v1/audit_templates', 'method': 'GET' } ] ), policy.DocumentedRuleDefault( name=AUDIT_TEMPLATE % 'update', check_str=base.RULE_ADMIN_API, description='Update an audit template.', operations=[ { 'path': '/v1/audit_templates/{audit_template_uuid}', 'method': 'PATCH' } ] ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/policies/base.py0000664000175000017500000000165213656752270022552 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_policy import policy RULE_ADMIN_API = 'rule:admin_api' ROLE_ADMIN_OR_ADMINISTRATOR = 'role:admin or role:administrator' ALWAYS_DENY = '!' rules = [ policy.RuleDefault( name='admin_api', check_str=ROLE_ADMIN_OR_ADMINISTRATOR ), policy.RuleDefault( name='show_password', check_str=ALWAYS_DENY ) ] def list_rules(): return rules python-watcher-4.0.0/watcher/common/loader/0000775000175000017500000000000013656752352020722 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/common/loader/__init__.py0000664000175000017500000000000013656752270023020 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/common/loader/loadable.py0000664000175000017500000000431613656752270023042 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from watcher.common import service @six.add_metaclass(abc.ABCMeta) class Loadable(object): """Generic interface for dynamically loading a driver/entry point. 
This defines the contract in order to let the loader manager inject the configuration parameters during the loading. """ def __init__(self, config): super(Loadable, self).__init__() self.config = config @classmethod @abc.abstractmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ raise NotImplementedError LoadableSingletonMeta = type( "LoadableSingletonMeta", (abc.ABCMeta, service.Singleton), {}) @six.add_metaclass(LoadableSingletonMeta) class LoadableSingleton(object): """Generic interface for dynamically loading a driver as a singleton. This defines the contract in order to let the loader manager inject the configuration parameters during the loading. Classes inheriting from this class will be singletons. """ def __init__(self, config): super(LoadableSingleton, self).__init__() self.config = config @classmethod @abc.abstractmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ raise NotImplementedError python-watcher-4.0.0/watcher/common/loader/default.py0000664000175000017500000000605513656752270022725 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from __future__ import unicode_literals from oslo_config import cfg from oslo_log import log from stevedore import driver as drivermanager from stevedore import extension as extensionmanager from watcher.common import exception from watcher.common.loader import base from watcher.common import utils LOG = log.getLogger(__name__) class DefaultLoader(base.BaseLoader): def __init__(self, namespace, conf=cfg.CONF): """Entry point loader for Watcher using Stevedore :param namespace: namespace of the entry point(s) to load or list :type namespace: str :param conf: ConfigOpts instance, defaults to cfg.CONF """ super(DefaultLoader, self).__init__() self.namespace = namespace self.conf = conf def load(self, name, **kwargs): try: LOG.debug("Loading in namespace %s => %s ", self.namespace, name) driver_manager = drivermanager.DriverManager( namespace=self.namespace, name=name, invoke_on_load=False, ) driver_cls = driver_manager.driver config = self._load_plugin_config(name, driver_cls) driver = driver_cls(config, **kwargs) except Exception as exc: LOG.exception(exc) raise exception.LoadingError(name=name) return driver def _reload_config(self): self.conf(default_config_files=self.conf.default_config_files) def get_entry_name(self, name): return ".".join([self.namespace, name]) def _load_plugin_config(self, name, driver_cls): """Load the config of the plugin""" config = utils.Struct() config_opts = driver_cls.get_config_opts() if not config_opts: return config group_name = self.get_entry_name(name) self.conf.register_opts(config_opts, group=group_name) # Finalise the opt import by re-checking the configuration # against the provided config files self._reload_config() config_group = self.conf.get(group_name) if not config_group: raise exception.LoadingError(name=name) config.update({ name: value for name, value in config_group.items() }) return config def list_available(self): 
extension_manager = extensionmanager.ExtensionManager( namespace=self.namespace) return {ext.name: ext.plugin for ext in extension_manager.extensions} python-watcher-4.0.0/watcher/common/loader/base.py0000664000175000017500000000160713656752270022211 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import unicode_literals import abc import six @six.add_metaclass(abc.ABCMeta) class BaseLoader(object): @abc.abstractmethod def list_available(self): raise NotImplementedError() @abc.abstractmethod def load(self, name): raise NotImplementedError() python-watcher-4.0.0/watcher/common/config.py0000664000175000017500000000267113656752270021300 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from watcher.common import rpc from watcher import version def parse_args(argv, default_config_files=None, default_config_dirs=None): default_config_files = (default_config_files or cfg.find_config_files(project='watcher')) default_config_dirs = (default_config_dirs or cfg.find_config_dirs(project='watcher')) rpc.set_defaults(control_exchange='watcher') cfg.CONF(argv[1:], project='watcher', version=version.version_info.release_string(), default_config_dirs=default_config_dirs, default_config_files=default_config_files) rpc.init(cfg.CONF) python-watcher-4.0.0/watcher/common/rpc.py0000664000175000017500000001012513656752270020610 0ustar zuulzuul00000000000000# Copyright 2014 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log import oslo_messaging as messaging from oslo_messaging.rpc import dispatcher from watcher.common import context as watcher_context from watcher.common import exception __all__ = [ 'init', 'cleanup', 'set_defaults', 'add_extra_exmods', 'clear_extra_exmods', 'get_allowed_exmods', 'RequestContextSerializer', 'get_client', 'get_server', 'get_notifier', ] CONF = cfg.CONF LOG = log.getLogger(__name__) TRANSPORT = None NOTIFICATION_TRANSPORT = None NOTIFIER = None ALLOWED_EXMODS = [ exception.__name__, ] EXTRA_EXMODS = [] JsonPayloadSerializer = messaging.JsonPayloadSerializer def init(conf): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER exmods = get_allowed_exmods() TRANSPORT = messaging.get_rpc_transport( conf, allowed_remote_exmods=exmods) NOTIFICATION_TRANSPORT = messaging.get_notification_transport( conf, allowed_remote_exmods=exmods) serializer = RequestContextSerializer(JsonPayloadSerializer()) if not conf.notification_level: NOTIFIER = messaging.Notifier( NOTIFICATION_TRANSPORT, serializer=serializer, driver='noop') else: NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT, serializer=serializer) def initialized(): return None not in [TRANSPORT, NOTIFIER] def cleanup(): global TRANSPORT, NOTIFICATION_TRANSPORT, NOTIFIER if NOTIFIER is None: LOG.exception("RPC cleanup: NOTIFIER is None") TRANSPORT.cleanup() NOTIFICATION_TRANSPORT.cleanup() TRANSPORT = NOTIFICATION_TRANSPORT = NOTIFIER = None def set_defaults(control_exchange): messaging.set_transport_defaults(control_exchange) def add_extra_exmods(*args): EXTRA_EXMODS.extend(args) def clear_extra_exmods(): del EXTRA_EXMODS[:] def get_allowed_exmods(): return ALLOWED_EXMODS + EXTRA_EXMODS class RequestContextSerializer(messaging.Serializer): def __init__(self, base): self._base = base def serialize_entity(self, context, entity): if not self._base: return entity return self._base.serialize_entity(context, entity) def deserialize_entity(self, context, entity): 
if not self._base: return entity return self._base.deserialize_entity(context, entity) def serialize_context(self, context): return context.to_dict() def deserialize_context(self, context): return watcher_context.RequestContext.from_dict(context) def get_client(target, version_cap=None, serializer=None): assert TRANSPORT is not None serializer = RequestContextSerializer(serializer) return messaging.RPCClient(TRANSPORT, target, version_cap=version_cap, serializer=serializer) def get_server(target, endpoints, serializer=None): assert TRANSPORT is not None access_policy = dispatcher.DefaultRPCAccessPolicy serializer = RequestContextSerializer(serializer) return messaging.get_rpc_server(TRANSPORT, target, endpoints, executor='eventlet', serializer=serializer, access_policy=access_policy) def get_notifier(publisher_id): assert NOTIFIER is not None return NOTIFIER.prepare(publisher_id=publisher_id) python-watcher-4.0.0/watcher/common/exception.py0000664000175000017500000003351413656752270022031 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Watcher base exception handling. Includes decorator for re-raising Watcher-type exceptions. SHOULD include dedicated exception logging. 
""" import functools import sys from keystoneclient import exceptions as keystone_exceptions from oslo_config import cfg from oslo_log import log from watcher._i18n import _ LOG = log.getLogger(__name__) CONF = cfg.CONF def wrap_keystone_exception(func): """Wrap keystone exceptions and throw Watcher specific exceptions.""" @functools.wraps(func) def wrapped(*args, **kw): try: return func(*args, **kw) except keystone_exceptions.AuthorizationFailure: raise AuthorizationFailure( client=func.__name__, reason=sys.exc_info()[1]) except keystone_exceptions.ClientException: raise AuthorizationFailure( client=func.__name__, reason=(_('Unexpected keystone client error occurred: %s') % sys.exc_info()[1])) return wrapped class WatcherException(Exception): """Base Watcher Exception To correctly use this class, inherit from it and define a 'msg_fmt' property. That msg_fmt will get printf'd with the keyword arguments provided to the constructor. """ msg_fmt = _("An unknown exception occurred") code = 500 headers = {} safe = False def __init__(self, message=None, **kwargs): self.kwargs = kwargs if 'code' not in self.kwargs: try: self.kwargs['code'] = self.code except AttributeError: pass if not message: try: message = self.msg_fmt % kwargs except Exception: # kwargs doesn't match a variable in msg_fmt # log the issue and the kwargs LOG.exception('Exception in string format operation') for name, value in kwargs.items(): LOG.error("%(name)s: %(value)s", {'name': name, 'value': value}) if CONF.fatal_exception_format_errors: raise else: # at least get the core msg_fmt out if something happened message = self.msg_fmt super(WatcherException, self).__init__(message) def __str__(self): """Encode to utf-8 then wsme api can consume it as well""" return self.args[0] def __unicode__(self): return str(self.args[0]) def format_message(self): if self.__class__.__name__.endswith('_Remote'): return self.args[0] else: return str(self) class UnsupportedError(WatcherException): msg_fmt = _("Not 
supported") class NotAuthorized(WatcherException): msg_fmt = _("Not authorized") code = 403 class NotAcceptable(WatcherException): msg_fmt = _("Request not acceptable.") code = 406 class PolicyNotAuthorized(NotAuthorized): msg_fmt = _("Policy doesn't allow %(action)s to be performed.") class OperationNotPermitted(NotAuthorized): msg_fmt = _("Operation not permitted") class Invalid(WatcherException, ValueError): msg_fmt = _("Unacceptable parameters") code = 400 class ObjectNotFound(WatcherException): msg_fmt = _("The %(name)s %(id)s could not be found") class Conflict(WatcherException): msg_fmt = _('Conflict') code = 409 class ResourceNotFound(ObjectNotFound): msg_fmt = _("The %(name)s resource %(id)s could not be found") code = 404 class InvalidParameter(Invalid): msg_fmt = _("%(parameter)s has to be of type %(parameter_type)s") class InvalidIdentity(Invalid): msg_fmt = _("Expected a uuid or int but received %(identity)s") class InvalidOperator(Invalid): msg_fmt = _("Filter operator is not valid: %(operator)s not " "in %(valid_operators)s") class InvalidGoal(Invalid): msg_fmt = _("Goal %(goal)s is invalid") class InvalidStrategy(Invalid): msg_fmt = _("Strategy %(strategy)s is invalid") class InvalidAudit(Invalid): msg_fmt = _("Audit %(audit)s is invalid") class EagerlyLoadedAuditRequired(InvalidAudit): msg_fmt = _("Audit %(audit)s was not eagerly loaded") class InvalidActionPlan(Invalid): msg_fmt = _("Action plan %(action_plan)s is invalid") class EagerlyLoadedActionPlanRequired(InvalidActionPlan): msg_fmt = _("Action plan %(action_plan)s was not eagerly loaded") class EagerlyLoadedActionRequired(InvalidActionPlan): msg_fmt = _("Action %(action)s was not eagerly loaded") class InvalidUUID(Invalid): msg_fmt = _("Expected a uuid but received %(uuid)s") class InvalidName(Invalid): msg_fmt = _("Expected a logical name but received %(name)s") class InvalidUuidOrName(Invalid): msg_fmt = _("Expected a logical name or uuid but received %(name)s") class 
InvalidIntervalOrCron(Invalid): msg_fmt = _("Expected an interval or cron syntax but received %(name)s") class DataModelTypeNotFound(ResourceNotFound): msg_fmt = _("The %(data_model_type)s data model could not be found") class GoalNotFound(ResourceNotFound): msg_fmt = _("Goal %(goal)s could not be found") class GoalAlreadyExists(Conflict): msg_fmt = _("A goal with UUID %(uuid)s already exists") class StrategyNotFound(ResourceNotFound): msg_fmt = _("Strategy %(strategy)s could not be found") class StrategyAlreadyExists(Conflict): msg_fmt = _("A strategy with UUID %(uuid)s already exists") class AuditTemplateNotFound(ResourceNotFound): msg_fmt = _("AuditTemplate %(audit_template)s could not be found") class AuditTemplateAlreadyExists(Conflict): msg_fmt = _("An audit_template with UUID or name %(audit_template)s " "already exists") class AuditTypeNotFound(Invalid): msg_fmt = _("Audit type %(audit_type)s could not be found") class AuditTypeNotAllowed(Invalid): msg_fmt = _("Audit type %(audit_type)s is disallowed.") class AuditStateNotAllowed(Invalid): msg_fmt = _("Audit state %(state)s is disallowed.") class AuditParameterNotAllowed(Invalid): msg_fmt = _("Audit parameter %(parameter)s are not allowed") class AuditNotFound(ResourceNotFound): msg_fmt = _("Audit %(audit)s could not be found") class AuditAlreadyExists(Conflict): msg_fmt = _("An audit with UUID or name %(audit)s already exists") class AuditIntervalNotSpecified(Invalid): msg_fmt = _("Interval of audit must be specified for %(audit_type)s.") class AuditIntervalNotAllowed(Invalid): msg_fmt = _("Interval of audit must not be set for %(audit_type)s.") class AuditStartEndTimeNotAllowed(Invalid): msg_fmt = _("Start or End time of audit must not be set for " "%(audit_type)s.") class AuditReferenced(Invalid): msg_fmt = _("Audit %(audit)s is referenced by one or multiple action " "plans") class ActionPlanNotFound(ResourceNotFound): msg_fmt = _("ActionPlan %(action_plan)s could not be found") class 
ActionPlanAlreadyExists(Conflict): msg_fmt = _("An action plan with UUID %(uuid)s already exists") class ActionPlanReferenced(Invalid): msg_fmt = _("Action Plan %(action_plan)s is referenced by one or " "multiple actions") class ActionPlanCancelled(WatcherException): msg_fmt = _("Action Plan with UUID %(uuid)s is cancelled by user") class ActionPlanIsOngoing(Conflict): msg_fmt = _("Action Plan %(action_plan)s is currently running.") class ActionNotFound(ResourceNotFound): msg_fmt = _("Action %(action)s could not be found") class ActionAlreadyExists(Conflict): msg_fmt = _("An action with UUID %(uuid)s already exists") class ActionReferenced(Invalid): msg_fmt = _("Action plan %(action_plan)s is referenced by one or " "multiple goals") class ActionFilterCombinationProhibited(Invalid): msg_fmt = _("Filtering actions on both audit and action-plan is " "prohibited") class UnsupportedActionType(UnsupportedError): msg_fmt = _("Provided %(action_type)s is not supported yet") class EfficacyIndicatorNotFound(ResourceNotFound): msg_fmt = _("Efficacy indicator %(efficacy_indicator)s could not be found") class EfficacyIndicatorAlreadyExists(Conflict): msg_fmt = _("An action with UUID %(uuid)s already exists") class ScoringEngineAlreadyExists(Conflict): msg_fmt = _("A scoring engine with UUID %(uuid)s already exists") class ScoringEngineNotFound(ResourceNotFound): msg_fmt = _("ScoringEngine %(scoring_engine)s could not be found") class HTTPNotFound(ResourceNotFound): pass class PatchError(Invalid): msg_fmt = _("Couldn't apply patch '%(patch)s'. 
Reason: %(reason)s") class DeleteError(Invalid): msg_fmt = _("Couldn't delete when state is '%(state)s'.") class StartError(Invalid): msg_fmt = _("Couldn't start when state is '%(state)s'.") # decision engine class WorkflowExecutionException(WatcherException): msg_fmt = _('Workflow execution error: %(error)s') class IllegalArgumentException(WatcherException): msg_fmt = _('Illegal argument') class AuthorizationFailure(WatcherException): msg_fmt = _('%(client)s connection failed. Reason: %(reason)s') class ClusterStateStale(WatcherException): msg_fmt = _("The cluster state is stale") class ClusterDataModelCollectionError(WatcherException): msg_fmt = _("The cluster data model '%(cdm)s' could not be built") class ClusterStateNotDefined(WatcherException): msg_fmt = _("The cluster state is not defined") class NoAvailableStrategyForGoal(WatcherException): msg_fmt = _("No strategy could be found to achieve the '%(goal)s' goal.") class InvalidIndicatorValue(WatcherException): msg_fmt = _("The indicator '%(name)s' with value '%(value)s' " "and spec type '%(spec_type)s' is invalid.") class GlobalEfficacyComputationError(WatcherException): msg_fmt = _("Could not compute the global efficacy for the '%(goal)s' " "goal using the '%(strategy)s' strategy.") class UnsupportedDataSource(UnsupportedError): msg_fmt = _("Datasource %(datasource)s is not supported " "by strategy %(strategy)s") class DataSourceNotAvailable(WatcherException): msg_fmt = _("Datasource %(datasource)s is not available.") class MetricNotAvailable(WatcherException): """Indicate that a metric is not configured or does not exists""" msg_fmt = _('Metric: %(metric)s not available') class NoDatasourceAvailable(WatcherException): """No datasources have been configured""" msg_fmt = _('No datasources available') class NoSuchMetricForHost(WatcherException): msg_fmt = _("No %(metric)s metric for %(host)s found.") class ServiceAlreadyExists(Conflict): msg_fmt = _("A service with name %(name)s is already working on 
%(host)s.") class ServiceNotFound(ResourceNotFound): msg_fmt = _("The service %(service)s cannot be found.") class WildcardCharacterIsUsed(WatcherException): msg_fmt = _("You shouldn't use any other IDs of %(resource)s if you use " "wildcard character.") class CronFormatIsInvalid(WatcherException): msg_fmt = _("Provided cron is invalid: %(message)s") class ActionDescriptionAlreadyExists(Conflict): msg_fmt = _("An action description with type %(action_type)s is " "already exist.") class ActionDescriptionNotFound(ResourceNotFound): msg_fmt = _("The action description %(action_id)s cannot be found.") class ActionExecutionFailure(WatcherException): msg_fmt = _("The action %(action_id)s execution failed.") # Model class ComputeResourceNotFound(WatcherException): msg_fmt = _("The compute resource '%(name)s' could not be found") class InstanceNotFound(ComputeResourceNotFound): msg_fmt = _("The instance '%(name)s' could not be found") class InstanceNotMapped(ComputeResourceNotFound): msg_fmt = _("The mapped compute node for instance '%(uuid)s' " "could not be found.") class ComputeNodeNotFound(ComputeResourceNotFound): msg_fmt = _("The compute node %(name)s could not be found") class StorageResourceNotFound(WatcherException): msg_fmt = _("The storage resource '%(name)s' could not be found") class StorageNodeNotFound(StorageResourceNotFound): msg_fmt = _("The storage node %(name)s could not be found") class PoolNotFound(StorageResourceNotFound): msg_fmt = _("The pool %(name)s could not be found") class VolumeNotFound(StorageResourceNotFound): msg_fmt = _("The volume '%(name)s' could not be found") class BaremetalResourceNotFound(WatcherException): msg_fmt = _("The baremetal resource '%(name)s' could not be found") class IronicNodeNotFound(BaremetalResourceNotFound): msg_fmt = _("The ironic node %(uuid)s could not be found") class LoadingError(WatcherException): msg_fmt = _("Error loading plugin '%(name)s'") class ReservedWord(WatcherException): msg_fmt = _("The identifier 
'%(name)s' is a reserved word") class NotSoftDeletedStateError(WatcherException): msg_fmt = _("The %(name)s resource %(id)s is not soft deleted") class NegativeLimitError(WatcherException): msg_fmt = _("Limit should be positive") class NotificationPayloadError(WatcherException): msg_fmt = _("Payload not populated when trying to send notification " "\"%(class_name)s\"") class InvalidPoolAttributeValue(Invalid): msg_fmt = _("The %(name)s pool %(attribute)s is not integer") python-watcher-4.0.0/watcher/common/placement_helper.py0000664000175000017500000001464413656752270023345 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_log import log as logging from watcher.common import clients CONF = cfg.CONF LOG = logging.getLogger(__name__) class PlacementHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self._placement = self.osc.placement() def get(self, url): return self._placement.get(url, raise_exc=False) @staticmethod def get_error_msg(resp): json_resp = resp.json() # https://docs.openstack.org/api-ref/placement/#errors if 'errors' in json_resp: error_msg = json_resp['errors'][0].get('detail') else: error_msg = resp.text return error_msg def get_resource_providers(self, rp_name=None): """Calls the placement API for a resource provider record. 
:param rp_name: Name of the resource provider, if None, list all resource providers. :return: A list of resource providers information or None if the resource provider doesn't exist. """ url = '/resource_providers' if rp_name: url += '?name=%s' % rp_name resp = self.get(url) if resp.status_code == 200: json_resp = resp.json() return json_resp['resource_providers'] if rp_name: msg = "Failed to get resource provider %(name)s. " else: msg = "Failed to get all resource providers. " msg += "Got %(status_code)d: %(err_text)s." args = { 'name': rp_name, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } LOG.error(msg, args) def get_inventories(self, rp_uuid): """Calls the placement API to get resource inventory information. :param rp_uuid: UUID of the resource provider to get. :return: A dictionary of inventories keyed by resource classes. """ url = '/resource_providers/%s/inventories' % rp_uuid resp = self.get(url) if resp.status_code == 200: json = resp.json() return json['inventories'] msg = ("Failed to get resource provider %(rp_uuid)s inventories. " "Got %(status_code)d: %(err_text)s.") args = { 'rp_uuid': rp_uuid, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } LOG.error(msg, args) def get_provider_traits(self, rp_uuid): """Queries the placement API for a resource provider's traits. :param rp_uuid: UUID of the resource provider to grab traits for. :return: A list of traits. """ resp = self.get("/resource_providers/%s/traits" % rp_uuid) if resp.status_code == 200: json = resp.json() return json['traits'] msg = ("Failed to get resource provider %(rp_uuid)s traits. " "Got %(status_code)d: %(err_text)s.") args = { 'rp_uuid': rp_uuid, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } LOG.error(msg, args) def get_allocations_for_consumer(self, consumer_uuid): """Retrieves the allocations for a specific consumer. :param consumer_uuid: the UUID of the consumer resource. 
:return: A dictionary of allocation records keyed by resource provider uuid. """ url = '/allocations/%s' % consumer_uuid resp = self.get(url) if resp.status_code == 200: json = resp.json() return json['allocations'] msg = ("Failed to get allocations for consumer %(c_uuid). " "Got %(status_code)d: %(err_text)s.") args = { 'c_uuid': consumer_uuid, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } LOG.error(msg, args) def get_usages_for_resource_provider(self, rp_uuid): """Retrieves the usages for a specific provider. :param rp_uuid: The UUID of the provider. :return: A dictionary that describes how much each class of resource is being consumed on this resource provider. """ url = '/resource_providers/%s/usages' % rp_uuid resp = self.get(url) if resp.status_code == 200: json = resp.json() return json['usages'] msg = ("Failed to get resource provider %(rp_uuid)s usages. " "Got %(status_code)d: %(err_text)s.") args = { 'rp_uuid': rp_uuid, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } LOG.error(msg, args) def get_candidate_providers(self, resources): """Returns a dictionary of resource provider summaries. :param resources: A comma-separated list of strings indicating an amount of resource of a specified class that providers in each allocation request must collectively have the capacity and availability to serve: resources=VCPU:4,DISK_GB:64,MEMORY_MB:2048 :returns: A dict, keyed by resource provider UUID, which can provide the required resources. 
""" url = "/allocation_candidates?%s" % resources resp = self.get(url) if resp.status_code == 200: data = resp.json() return data['provider_summaries'] args = { 'resource_request': resources, 'status_code': resp.status_code, 'err_text': self.get_error_msg(resp), } msg = ("Failed to get allocation candidates from placement " "API for resources: %(resource_request)s\n" "Got %(status_code)d: %(err_text)s.") LOG.error(msg, args) python-watcher-4.0.0/watcher/common/paths.py0000664000175000017500000000223213656752270021143 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # Copyright 2012 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os from watcher import conf CONF = conf.CONF def basedir_rel(*args): """Return a path relative to $pybasedir.""" return os.path.join(CONF.pybasedir, *args) def bindir_rel(*args): """Return a path relative to $bindir.""" return os.path.join(CONF.bindir, *args) def state_path_rel(*args): """Return a path relative to $state_path.""" return os.path.join(CONF.state_path, *args) python-watcher-4.0.0/watcher/common/scheduling.py0000664000175000017500000000414113656752270022152 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from apscheduler import events from apscheduler.executors.pool import BasePoolExecutor from apscheduler.schedulers import background import futurist from oslo_service import service job_events = events class GreenThreadPoolExecutor(BasePoolExecutor): """Green thread pool An executor that runs jobs in a green thread pool. Plugin alias: ``threadpool`` :param max_workers: the maximum number of spawned threads. 
""" def __init__(self, max_workers=10): pool = futurist.GreenThreadPoolExecutor(int(max_workers)) super(GreenThreadPoolExecutor, self).__init__(pool) executors = { 'default': GreenThreadPoolExecutor(), } class BackgroundSchedulerService(service.ServiceBase, background.BackgroundScheduler): def __init__(self, gconfig={}, **options): if options is None: options = {'executors': executors} else: if 'executors' not in options.keys(): options['executors'] = executors super(BackgroundSchedulerService, self).__init__( gconfig, **options) def start(self): """Start service.""" background.BackgroundScheduler.start(self) def stop(self): """Stop service.""" self.shutdown() def wait(self): """Wait for service to complete.""" def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. """ python-watcher-4.0.0/watcher/common/cinder_helper.py0000664000175000017500000002257713656752270022645 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import time from oslo_log import log from cinderclient import exceptions as cinder_exception from cinderclient.v2.volumes import Volume from watcher._i18n import _ from watcher.common import clients from watcher.common import exception from watcher import conf CONF = conf.CONF LOG = log.getLogger(__name__) class CinderHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.cinder = self.osc.cinder() def get_storage_node_list(self): return list(self.cinder.services.list(binary='cinder-volume')) def get_storage_node_by_name(self, name): """Get storage node by name(host@backendname)""" try: storages = [storage for storage in self.get_storage_node_list() if storage.host == name] if len(storages) != 1: raise exception.StorageNodeNotFound(name=name) return storages[0] except Exception as exc: LOG.exception(exc) raise exception.StorageNodeNotFound(name=name) def get_storage_pool_list(self): return self.cinder.pools.list(detailed=True) def get_storage_pool_by_name(self, name): """Get pool by name(host@backend#poolname)""" try: pools = [pool for pool in self.get_storage_pool_list() if pool.name == name] if len(pools) != 1: raise exception.PoolNotFound(name=name) return pools[0] except Exception as exc: LOG.exception(exc) raise exception.PoolNotFound(name=name) def get_volume_list(self): return self.cinder.volumes.list(search_opts={'all_tenants': True}) def get_volume_type_list(self): return self.cinder.volume_types.list() def get_volume_snapshots_list(self): return self.cinder.volume_snapshots.list( search_opts={'all_tenants': True}) def get_volume_type_by_backendname(self, backendname): """Return a list of volume type""" volume_type_list = self.get_volume_type_list() volume_type = [volume_type.name for volume_type in volume_type_list if volume_type.extra_specs.get( 'volume_backend_name') == backendname] return volume_type def get_volume(self, volume): if isinstance(volume, 
Volume): volume = volume.id try: volume = self.cinder.volumes.get(volume) return volume except cinder_exception.NotFound: return self.cinder.volumes.find(name=volume) def backendname_from_poolname(self, poolname): """Get backendname from poolname""" # pooolname formatted as host@backend#pool since ocata # as of ocata, may as only host backend = poolname.split('#')[0] backendname = "" try: backendname = backend.split('@')[1] except IndexError: pass return backendname def _has_snapshot(self, volume): """Judge volume has a snapshot""" volume = self.get_volume(volume) if volume.snapshot_id: return True return False def get_deleting_volume(self, volume): volume = self.get_volume(volume) all_volume = self.get_volume_list() for _volume in all_volume: if getattr(_volume, 'os-vol-mig-status-attr:name_id') == volume.id: return _volume return False def _can_get_volume(self, volume_id): """Check to get volume with volume_id""" try: volume = self.get_volume(volume_id) if not volume: raise Exception except cinder_exception.NotFound: return False else: return True def check_volume_deleted(self, volume, retry=120, retry_interval=10): """Check volume has been deleted""" volume = self.get_volume(volume) while self._can_get_volume(volume.id) and retry: volume = self.get_volume(volume.id) time.sleep(retry_interval) retry -= 1 LOG.debug("retry count: %s", retry) LOG.debug("Waiting to complete deletion of volume %s", volume.id) if self._can_get_volume(volume.id): LOG.error("Volume deletion error: %s", volume.id) return False LOG.debug("Volume %s was deleted successfully.", volume.id) return True def check_migrated(self, volume, retry_interval=10): volume = self.get_volume(volume) final_status = ('success', 'error') while getattr(volume, 'migration_status') not in final_status: volume = self.get_volume(volume.id) LOG.debug('Waiting the migration of {0}'.format(volume)) time.sleep(retry_interval) if getattr(volume, 'migration_status') == 'error': host_name = getattr(volume, 
'os-vol-host-attr:host') error_msg = (("Volume migration error : " "volume %(volume)s is now on host '%(host)s'.") % {'volume': volume.id, 'host': host_name}) LOG.error(error_msg) return False host_name = getattr(volume, 'os-vol-host-attr:host') if getattr(volume, 'migration_status') == 'success': # check original volume deleted deleting_volume = self.get_deleting_volume(volume) if deleting_volume: delete_id = getattr(deleting_volume, 'id') if not self.check_volume_deleted(delete_id): return False else: host_name = getattr(volume, 'os-vol-host-attr:host') error_msg = (("Volume migration error : " "volume %(volume)s is now on host '%(host)s'.") % {'volume': volume.id, 'host': host_name}) LOG.error(error_msg) return False LOG.debug( "Volume migration succeeded : volume %s is now on host '%s'.", ( volume.id, host_name)) return True def migrate(self, volume, dest_node): """Migrate volume to dest_node""" volume = self.get_volume(volume) dest_backend = self.backendname_from_poolname(dest_node) dest_type = self.get_volume_type_by_backendname(dest_backend) if volume.volume_type not in dest_type: raise exception.Invalid( message=(_("Volume type must be same for migrating"))) source_node = getattr(volume, 'os-vol-host-attr:host') LOG.debug("Volume %s found on host '%s'.", (volume.id, source_node)) self.cinder.volumes.migrate_volume( volume, dest_node, False, True) return self.check_migrated(volume) def retype(self, volume, dest_type): """Retype volume to dest_type with on-demand option""" volume = self.get_volume(volume) if volume.volume_type == dest_type: raise exception.Invalid( message=(_("Volume type must be different for retyping"))) source_node = getattr(volume, 'os-vol-host-attr:host') LOG.debug( "Volume %s found on host '%s'.", (volume.id, source_node)) self.cinder.volumes.retype( volume, dest_type, "on-demand") return self.check_migrated(volume) def create_volume(self, cinder, volume, dest_type, retry=120, retry_interval=10): """Create volume of volume with 
dest_type using cinder""" volume = self.get_volume(volume) LOG.debug("start creating new volume") new_volume = cinder.volumes.create( getattr(volume, 'size'), name=getattr(volume, 'name'), volume_type=dest_type, availability_zone=getattr(volume, 'availability_zone')) while getattr(new_volume, 'status') != 'available' and retry: new_volume = cinder.volumes.get(new_volume.id) LOG.debug('Waiting volume creation of {0}'.format(new_volume)) time.sleep(retry_interval) retry -= 1 LOG.debug("retry count: %s", retry) if getattr(new_volume, 'status') != 'available': error_msg = (_("Failed to create volume '%(volume)s. ") % {'volume': new_volume.id}) raise Exception(error_msg) LOG.debug("Volume %s was created successfully.", new_volume) return new_volume def delete_volume(self, volume): """Delete volume""" volume = self.get_volume(volume) self.cinder.volumes.delete(volume) result = self.check_volume_deleted(volume) if not result: error_msg = (_("Failed to delete volume '%(volume)s. ") % {'volume': volume.id}) raise Exception(error_msg) python-watcher-4.0.0/watcher/common/service.py0000664000175000017500000002452313656752270021473 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 eNovance ## # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import socket from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import _options from oslo_log import log import oslo_messaging as om from oslo_reports import guru_meditation_report as gmr from oslo_reports import opts as gmr_opts from oslo_service import service from oslo_service import wsgi from oslo_messaging.rpc import dispatcher from watcher._i18n import _ from watcher.api import app from watcher.common import config from watcher.common import context from watcher.common import rpc from watcher.common import scheduling from watcher.conf import plugins as plugins_conf from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields from watcher import version NOTIFICATION_OPTS = [ cfg.StrOpt('notification_level', choices=[''] + list(wfields.NotificationPriority.ALL), default=wfields.NotificationPriority.INFO, help=_('Specifies the minimum level for which to send ' 'notifications. If not set, no notifications will ' 'be sent. The default is for this option to be at the ' '`INFO` level.')) ] cfg.CONF.register_opts(NOTIFICATION_OPTS) CONF = cfg.CONF LOG = log.getLogger(__name__) _DEFAULT_LOG_LEVELS = ['amqp=WARN', 'amqplib=WARN', 'qpid.messaging=INFO', 'oslo.messaging=INFO', 'sqlalchemy=WARN', 'keystoneclient=INFO', 'stevedore=INFO', 'eventlet.wsgi.server=WARN', 'iso8601=WARN', 'requests=WARN', 'neutronclient=WARN', 'glanceclient=WARN', 'apscheduler=WARN'] Singleton = service.Singleton class WSGIService(service.ServiceBase): """Provides ability to launch Watcher API from wsgi app.""" def __init__(self, service_name, use_ssl=False): """Initialize, but do not start the WSGI server. :param service_name: The service name of the WSGI server. :param use_ssl: Wraps the socket in an SSL context if True. 
""" self.service_name = service_name self.app = app.VersionSelectorApplication() self.workers = (CONF.api.workers or processutils.get_worker_count()) self.server = wsgi.Server(CONF, self.service_name, self.app, host=CONF.api.host, port=CONF.api.port, use_ssl=use_ssl, logger_name=self.service_name) def start(self): """Start serving this service using loaded configuration""" self.server.start() def stop(self): """Stop serving this API""" self.server.stop() def wait(self): """Wait for the service to stop serving this API""" self.server.wait() def reset(self): """Reset server greenpool size to default""" self.server.reset() class ServiceHeartbeat(scheduling.BackgroundSchedulerService): service_name = None def __init__(self, gconfig=None, service_name=None, **kwargs): gconfig = None or {} super(ServiceHeartbeat, self).__init__(gconfig, **kwargs) ServiceHeartbeat.service_name = service_name self.context = context.make_context() self.send_beat() def send_beat(self): host = CONF.host watcher_list = objects.Service.list( self.context, filters={'name': ServiceHeartbeat.service_name, 'host': host}) if watcher_list: watcher_service = watcher_list[0] watcher_service.last_seen_up = datetime.datetime.utcnow() watcher_service.save() else: watcher_service = objects.Service(self.context) watcher_service.name = ServiceHeartbeat.service_name watcher_service.host = host watcher_service.create() def add_heartbeat_job(self): self.add_job(self.send_beat, 'interval', seconds=60, next_run_time=datetime.datetime.now()) @classmethod def get_service_name(cls): return CONF.host, cls.service_name def start(self): """Start service.""" self.add_heartbeat_job() super(ServiceHeartbeat, self).start() def stop(self): """Stop service.""" self.shutdown() def wait(self): """Wait for service to complete.""" def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. 
""" class Service(service.ServiceBase): API_VERSION = '1.0' def __init__(self, manager_class): super(Service, self).__init__() self.manager = manager_class() self.publisher_id = self.manager.publisher_id self.api_version = self.manager.api_version self.conductor_topic = self.manager.conductor_topic self.notification_topics = self.manager.notification_topics self.heartbeat = None self.service_name = self.manager.service_name if self.service_name: self.heartbeat = ServiceHeartbeat( service_name=self.manager.service_name) self.conductor_endpoints = [ ep(self) for ep in self.manager.conductor_endpoints ] self.notification_endpoints = self.manager.notification_endpoints self.serializer = rpc.RequestContextSerializer( base.WatcherObjectSerializer()) self._transport = None self._notification_transport = None self._conductor_client = None self.conductor_topic_handler = None self.notification_handler = None if self.conductor_topic and self.conductor_endpoints: self.conductor_topic_handler = self.build_topic_handler( self.conductor_topic, self.conductor_endpoints) if self.notification_topics and self.notification_endpoints: self.notification_handler = self.build_notification_handler( self.notification_topics, self.notification_endpoints ) @property def transport(self): if self._transport is None: self._transport = om.get_rpc_transport(CONF) return self._transport @property def notification_transport(self): if self._notification_transport is None: self._notification_transport = om.get_notification_transport(CONF) return self._notification_transport @property def conductor_client(self): if self._conductor_client is None: target = om.Target( topic=self.conductor_topic, version=self.API_VERSION, ) self._conductor_client = om.RPCClient( self.transport, target, serializer=self.serializer) return self._conductor_client @conductor_client.setter def conductor_client(self, c): self.conductor_client = c def build_topic_handler(self, topic_name, endpoints=()): access_policy = 
dispatcher.DefaultRPCAccessPolicy serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer()) target = om.Target( topic=topic_name, # For compatibility, we can override it with 'host' opt server=CONF.host or socket.gethostname(), version=self.api_version, ) return om.get_rpc_server( self.transport, target, endpoints, executor='eventlet', serializer=serializer, access_policy=access_policy) def build_notification_handler(self, topic_names, endpoints=()): serializer = rpc.RequestContextSerializer(rpc.JsonPayloadSerializer()) targets = [] for topic in topic_names: kwargs = {} if '.' in topic: exchange, topic = topic.split('.') kwargs['exchange'] = exchange kwargs['topic'] = topic targets.append(om.Target(**kwargs)) return om.get_notification_listener( self.notification_transport, targets, endpoints, executor='eventlet', serializer=serializer, allow_requeue=False, pool=CONF.host) def start(self): LOG.debug("Connecting to '%s'", CONF.transport_url) if self.conductor_topic_handler: self.conductor_topic_handler.start() if self.notification_handler: self.notification_handler.start() if self.heartbeat: self.heartbeat.start() def stop(self): LOG.debug("Disconnecting from '%s'", CONF.transport_url) if self.conductor_topic_handler: self.conductor_topic_handler.stop() if self.notification_handler: self.notification_handler.stop() if self.heartbeat: self.heartbeat.stop() def reset(self): """Reset a service in case it received a SIGHUP.""" def wait(self): """Wait for service to complete.""" def check_api_version(self, ctx): api_manager_version = self.conductor_client.call( ctx, 'check_api_version', api_version=self.api_version) return api_manager_version def launch(conf, service_, workers=1, restart_method='mutate'): return service.launch(conf, service_, workers, restart_method) def prepare_service(argv=(), conf=cfg.CONF): log.register_options(conf) gmr_opts.set_defaults(conf) config.parse_args(argv) cfg.set_defaults(_options.log_opts, 
default_log_levels=_DEFAULT_LOG_LEVELS) log.setup(conf, 'python-watcher') conf.log_opt_values(LOG, log.DEBUG) objects.register_all() gmr.TextGuruMeditation.register_section( _('Plugins'), plugins_conf.show_plugins) gmr.TextGuruMeditation.setup_autorun(version, conf=conf) python-watcher-4.0.0/watcher/common/ironic_helper.py0000664000175000017500000000311113656752270022643 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Authors:Yumeng Bao # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from oslo_log import log from watcher.common import clients from watcher.common import exception from watcher.common import utils LOG = log.getLogger(__name__) class IronicHelper(object): def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.ironic = self.osc.ironic() def get_ironic_node_list(self): return self.ironic.node.list() def get_ironic_node_by_uuid(self, node_uuid): """Get ironic node by node UUID""" try: node = self.ironic.node.get(utils.Struct(uuid=node_uuid)) if not node: raise exception.IronicNodeNotFound(uuid=node_uuid) except Exception as exc: LOG.exception(exc) raise exception.IronicNodeNotFound(uuid=node_uuid) # We need to pass an object with an 'uuid' attribute to make it work return node python-watcher-4.0.0/watcher/common/service_manager.py0000664000175000017500000000251513656752270023162 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2016 Servionica ## # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import abc import six @six.add_metaclass(abc.ABCMeta) class ServiceManager(object): @abc.abstractproperty def service_name(self): raise NotImplementedError() @abc.abstractproperty def api_version(self): raise NotImplementedError() @abc.abstractproperty def publisher_id(self): raise NotImplementedError() @abc.abstractproperty def conductor_topic(self): raise NotImplementedError() @abc.abstractproperty def notification_topics(self): raise NotImplementedError() @abc.abstractproperty def conductor_endpoints(self): raise NotImplementedError() @abc.abstractproperty def notification_endpoints(self): raise NotImplementedError() python-watcher-4.0.0/watcher/common/context.py0000664000175000017500000001074313656752270021516 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_context import context from oslo_log import log from oslo_utils import timeutils import six LOG = log.getLogger(__name__) class RequestContext(context.RequestContext): """Extends security contexts from the OpenStack common library.""" def __init__(self, user_id=None, project_id=None, is_admin=None, roles=None, timestamp=None, request_id=None, auth_token=None, overwrite=True, user_name=None, project_name=None, domain_name=None, domain_id=None, auth_token_info=None, **kwargs): """Stores several additional request parameters: :param domain_id: The ID of the domain. :param domain_name: The name of the domain. 
:param is_public_api: Specifies whether the request should be processed without authentication. """ user = kwargs.pop('user', None) tenant = kwargs.pop('tenant', None) super(RequestContext, self).__init__( auth_token=auth_token, user_id=user_id or user, project_id=project_id or tenant, domain_id=kwargs.pop('domain', None) or domain_name or domain_id, user_domain_id=kwargs.pop('user_domain', None), project_domain_id=kwargs.pop('project_domain', None), is_admin=is_admin, read_only=kwargs.pop('read_only', False), show_deleted=kwargs.pop('show_deleted', False), request_id=request_id, resource_uuid=kwargs.pop('resource_uuid', None), is_admin_project=kwargs.pop('is_admin_project', True), overwrite=overwrite, roles=roles, global_request_id=kwargs.pop('global_request_id', None), system_scope=kwargs.pop('system_scope', None)) self.remote_address = kwargs.pop('remote_address', None) self.read_deleted = kwargs.pop('read_deleted', None) self.service_catalog = kwargs.pop('service_catalog', None) self.quota_class = kwargs.pop('quota_class', None) # FIXME(dims): user_id and project_id duplicate information that is # already present in the oslo_context's RequestContext. We need to # get rid of them. 
self.domain_name = domain_name self.domain_id = domain_id self.auth_token_info = auth_token_info self.user_id = user_id or user self.project_id = project_id if not timestamp: timestamp = timeutils.utcnow() if isinstance(timestamp, six.string_types): timestamp = timeutils.parse_isotime(timestamp) self.timestamp = timestamp self.user_name = user_name self.project_name = project_name self.is_admin = is_admin # if self.is_admin is None: # self.is_admin = policy.check_is_admin(self) def to_dict(self): values = super(RequestContext, self).to_dict() # FIXME(dims): defensive hasattr() checks need to be # removed once we figure out why we are seeing stack # traces values.update({ 'user_id': getattr(self, 'user_id', None), 'user_name': getattr(self, 'user_name', None), 'project_id': getattr(self, 'project_id', None), 'project_name': getattr(self, 'project_name', None), 'domain_id': getattr(self, 'domain_id', None), 'domain_name': getattr(self, 'domain_name', None), 'auth_token_info': getattr(self, 'auth_token_info', None), 'is_admin': getattr(self, 'is_admin', None), 'timestamp': self.timestamp.isoformat() if hasattr( self, 'timestamp') else None, 'request_id': getattr(self, 'request_id', None), }) return values @classmethod def from_dict(cls, values): return cls(**values) def __str__(self): return "" % self.to_dict() def make_context(*args, **kwargs): return RequestContext(*args, **kwargs) python-watcher-4.0.0/watcher/common/policy.py0000664000175000017500000001143613656752270021331 0ustar zuulzuul00000000000000# Copyright (c) 2011 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Policy Engine For Watcher.""" import sys from oslo_config import cfg from oslo_policy import policy from watcher.common import exception from watcher.common import policies _ENFORCER = None CONF = cfg.CONF # we can get a policy enforcer by this init. # oslo policy support change policy rule dynamically. # at present, policy.enforce will reload the policy rules when it checks # the policy files have been touched. def init(policy_file=None, rules=None, default_rule=None, use_conf=True, overwrite=True): """Init an Enforcer class. :param policy_file: Custom policy file to use, if none is specified, ``conf.policy_file`` will be used. :param rules: Default dictionary / Rules to use. It will be considered just in the first instantiation. If :meth:`load_rules` with ``force_reload=True``, :meth:`clear` or :meth:`set_rules` with ``overwrite=True`` is called this will be overwritten. :param default_rule: Default rule to use, conf.default_rule will be used if none is specified. :param use_conf: Whether to load rules from cache or config file. :param overwrite: Whether to overwrite existing rules when reload rules from config file. 
""" global _ENFORCER if not _ENFORCER: # https://docs.openstack.org/oslo.policy/latest/admin/index.html _ENFORCER = policy.Enforcer(CONF, policy_file=policy_file, rules=rules, default_rule=default_rule, use_conf=use_conf, overwrite=overwrite) _ENFORCER.register_defaults(policies.list_rules()) return _ENFORCER def enforce(context, rule=None, target=None, do_raise=True, exc=None, *args, **kwargs): """Checks authorization of a rule against the target and credentials. :param dict context: As much information about the user performing the action as possible. :param rule: The rule to evaluate. :param dict target: As much information about the object being operated on as possible. :param do_raise: Whether to raise an exception or not if check fails. :param exc: Class of the exception to raise if the check fails. Any remaining arguments passed to :meth:`enforce` (both positional and keyword arguments) will be passed to the exception class. If not specified, :class:`PolicyNotAuthorized` will be used. :return: ``False`` if the policy does not allow the action and `exc` is not provided; otherwise, returns a value that evaluates to ``True``. Note: for rules using the "case" expression, this ``True`` value will be the specified string from the expression. """ enforcer = init() credentials = context.to_dict() if not exc: exc = exception.PolicyNotAuthorized if target is None: target = {'project_id': context.project_id, 'user_id': context.user_id} return enforcer.enforce(rule, target, credentials, do_raise=do_raise, exc=exc, *args, **kwargs) def get_enforcer(): # This method is for use by oslopolicy CLI scripts. Those scripts need the # 'output-file' and 'namespace' options, but having those in sys.argv means # loading the Watcher config options will fail as those are not expected # to be present. So we pass in an arg list with those stripped out. 
conf_args = [] # Start at 1 because cfg.CONF expects the equivalent of sys.argv[1:] i = 1 while i < len(sys.argv): if sys.argv[i].strip('-') in ['namespace', 'output-file']: i += 2 continue conf_args.append(sys.argv[i]) i += 1 cfg.CONF(conf_args, project='watcher') init() return _ENFORCER python-watcher-4.0.0/watcher/hacking/0000775000175000017500000000000013656752352017570 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/hacking/__init__.py0000664000175000017500000000000013656752270021666 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/hacking/checks.py0000664000175000017500000002255613656752270021413 0ustar zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import re def flake8ext(f): """Decorator to indicate flake8 extension. This is borrowed from hacking.core.flake8ext(), but at now it is used only for unit tests to know which are watcher flake8 extensions. """ f.name = __name__ f.version = '0.0.1' f.skip_on_py3 = False return f # Guidelines for writing new hacking checks # # - Use only for Watcher specific tests. OpenStack general tests # should be submitted to the common 'hacking' module. # - Pick numbers in the range N3xx. Find the current test with # the highest allocated number and then pick the next value. # - Keep the test method code in the source file ordered based # on the N3xx value. 
# - List the new rule in the top level HACKING.rst file _all_log_levels = { 'reserved': '_', # this should never be used with a log unless # it is a variable used for a log message and # a exception 'error': '_LE', 'info': '_LI', 'warning': '_LW', 'critical': '_LC', 'exception': '_LE', } _all_hints = set(_all_log_levels.values()) def _regex_for_level(level, hint): return r".*LOG\.%(level)s\(\s*((%(wrong_hints)s)\(|'|\")" % { 'level': level, 'wrong_hints': '|'.join(_all_hints - set([hint])), } log_warn = re.compile( r"(.)*LOG\.(warn)\(\s*('|\"|_)") unittest_imports_dot = re.compile(r"\bimport[\s]+unittest\b") unittest_imports_from = re.compile(r"\bfrom[\s]+unittest\b") re_redundant_import_alias = re.compile(r".*import (.+) as \1$") @flake8ext def use_jsonutils(logical_line, filename): msg = "N321: jsonutils.%(fun)s must be used instead of json.%(fun)s" # Skip list is currently empty. json_check_skipped_patterns = [] for pattern in json_check_skipped_patterns: if pattern in filename: return if "json." in logical_line: json_funcs = ['dumps(', 'dump(', 'loads(', 'load('] for f in json_funcs: pos = logical_line.find('json.%s' % f) if pos != -1: yield (pos, msg % {'fun': f[:-1]}) @flake8ext def no_translate_debug_logs(logical_line, filename): """Check for 'LOG.debug(_(' and 'LOG.debug(_Lx(' As per our translation policy, https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation we shouldn't translate debug level logs. * This check assumes that 'LOG' is a logger. 
N319 """ for hint in _all_hints: if logical_line.startswith("LOG.debug(%s(" % hint): yield(0, "N319 Don't translate debug level logs") @flake8ext def check_assert_called_once_with(logical_line, filename): # Try to detect unintended calls of nonexistent mock methods like: # assert_called_once # assertCalledOnceWith # assert_has_called # called_once_with if 'watcher/tests/' in filename: if '.assert_called_once_with(' in logical_line: return uncased_line = logical_line.lower().replace('_', '') check_calls = ['.assertcalledonce', '.calledoncewith'] if any(x for x in check_calls if x in uncased_line): msg = ("N322: Possible use of no-op mock method. " "please use assert_called_once_with.") yield (0, msg) if '.asserthascalled' in uncased_line: msg = ("N322: Possible use of no-op mock method. " "please use assert_has_calls.") yield (0, msg) @flake8ext def check_python3_xrange(logical_line): if re.search(r"\bxrange\s*\(", logical_line): yield(0, "N325: Do not use xrange. Use range, or six.moves.range for " "large loops.") @flake8ext def check_no_basestring(logical_line): if re.search(r"\bbasestring\b", logical_line): msg = ("N326: basestring is not Python3-compatible, use " "six.string_types instead.") yield(0, msg) @flake8ext def check_python3_no_iteritems(logical_line): if re.search(r".*\.iteritems\(\)", logical_line): msg = ("N327: Use six.iteritems() instead of dict.iteritems().") yield(0, msg) @flake8ext def check_asserttrue(logical_line, filename): if 'watcher/tests/' in filename: if re.search(r"assertEqual\(\s*True,[^,]*(,[^,]*)?\)", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*True(,[^,]*)?\)", logical_line): msg = ("N328: Use assertTrue(observed) instead of " "assertEqual(True, observed)") yield (0, msg) @flake8ext def check_assertfalse(logical_line, filename): if 'watcher/tests/' in filename: if re.search(r"assertEqual\(\s*False,[^,]*(,[^,]*)?\)", 
logical_line): msg = ("N329: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) if re.search(r"assertEqual\([^,]*,\s*False(,[^,]*)?\)", logical_line): msg = ("N329: Use assertFalse(observed) instead of " "assertEqual(False, observed)") yield (0, msg) @flake8ext def check_assertempty(logical_line, filename): if 'watcher/tests/' in filename: msg = ("N330: Use assertEqual(*empty*, observed) instead of " "assertEqual(observed, *empty*). *empty* contains " "{}, [], (), set(), '', \"\"") empties = r"(\[\s*\]|\{\s*\}|\(\s*\)|set\(\s*\)|'\s*'|\"\s*\")" reg = r"assertEqual\(([^,]*,\s*)+?%s\)\s*$" % empties if re.search(reg, logical_line): yield (0, msg) @flake8ext def check_assertisinstance(logical_line, filename): if 'watcher/tests/' in filename: if re.search(r"assertTrue\(\s*isinstance\(\s*[^,]*,\s*[^,]*\)\)", logical_line): msg = ("N331: Use assertIsInstance(observed, type) instead " "of assertTrue(isinstance(observed, type))") yield (0, msg) @flake8ext def check_assertequal_for_httpcode(logical_line, filename): msg = ("N332: Use assertEqual(expected_http_code, observed_http_code) " "instead of assertEqual(observed_http_code, expected_http_code)") if 'watcher/tests/' in filename: if re.search(r"assertEqual\(\s*[^,]*,[^,]*HTTP[^\.]*\.code\s*\)", logical_line): yield (0, msg) @flake8ext def check_log_warn_deprecated(logical_line, filename): msg = "N333: Use LOG.warning due to compatibility with py3" if log_warn.match(logical_line): yield (0, msg) @flake8ext def check_oslo_i18n_wrapper(logical_line, filename, noqa): """Check for watcher.i18n usage. 
N340(watcher/foo/bar.py): from watcher.i18n import _ Okay(watcher/foo/bar.py): from watcher.i18n import _ # noqa """ if noqa: return split_line = logical_line.split() modulename = os.path.normpath(filename).split('/')[0] bad_i18n_module = '%s.i18n' % modulename if (len(split_line) > 1 and split_line[0] in ('import', 'from')): if (split_line[1] == bad_i18n_module or modulename != 'watcher' and split_line[1] in ('watcher.i18n', 'watcher._i18n')): msg = ("N340: %(found)s is found. Use %(module)s._i18n instead." % {'found': split_line[1], 'module': modulename}) yield (0, msg) @flake8ext def check_builtins_gettext(logical_line, tokens, filename, lines, noqa): """Check usage of builtins gettext _(). N341(watcher/foo.py): _('foo') Okay(watcher/i18n.py): _('foo') Okay(watcher/_i18n.py): _('foo') Okay(watcher/foo.py): _('foo') # noqa """ if noqa: return modulename = os.path.normpath(filename).split('/')[0] if '%s/tests' % modulename in filename: return if os.path.basename(filename) in ('i18n.py', '_i18n.py'): return token_values = [t[1] for t in tokens] i18n_wrapper = '%s._i18n' % modulename if '_' in token_values: i18n_import_line_found = False for line in lines: split_line = [elm.rstrip(',') for elm in line.split()] if (len(split_line) > 1 and split_line[0] == 'from' and split_line[1] == i18n_wrapper and '_' in split_line): i18n_import_line_found = True break if not i18n_import_line_found: msg = ("N341: _ from python builtins module is used. " "Use _ from %s instead." % i18n_wrapper) yield (0, msg) @flake8ext def no_redundant_import_alias(logical_line): """Checking no redundant import alias. 
https://bugs.launchpad.net/watcher/+bug/1745527 N342 """ if re.match(re_redundant_import_alias, logical_line): yield(0, "N342: No redundant import alias.") python-watcher-4.0.0/watcher/tests/0000775000175000017500000000000013656752352017326 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/conf_fixture.py0000664000175000017500000000340213656752270022371 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_config import fixture as conf_fixture from watcher.common import config class ConfFixture(conf_fixture.Config): """Fixture to manage conf settings.""" def setUp(self): super(ConfFixture, self).setUp() self.conf.set_default('connection', "sqlite://", group='database') self.conf.set_default('sqlite_synchronous', False, group='database') config.parse_args([], default_config_files=[]) class ConfReloadFixture(ConfFixture): """Fixture to manage reloads of conf settings.""" def __init__(self, conf=cfg.CONF): self.conf = conf self._original_parse_cli_opts = self.conf._parse_cli_opts def _fake_parser(self, *args, **kw): return cfg.ConfigOpts._parse_cli_opts(self.conf, []) def _restore_parser(self): self.conf._parse_cli_opts = self._original_parse_cli_opts def setUp(self): super(ConfReloadFixture, self).setUp() self.conf._parse_cli_opts = self._fake_parser self.addCleanup(self._restore_parser) python-watcher-4.0.0/watcher/tests/__init__.py0000664000175000017500000000170213656752270021436 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from watcher import objects # NOTE(comstud): Make sure we have all of the objects loaded. We do this # at module import time, because we may be using mock decorators in our # tests that run at import time. 
objects.register_all() python-watcher-4.0.0/watcher/tests/common/0000775000175000017500000000000013656752352020616 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/common/test_placement_helper.py0000664000175000017500000002627513656752270025551 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.common import placement_helper from watcher.tests import base from watcher.tests import fakes as fake_requests from keystoneauth1 import loading as ka_loading from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import uuidutils CONF = cfg.CONF @mock.patch('keystoneauth1.session.Session.request') class TestPlacementHelper(base.TestCase): def setUp(self): super(TestPlacementHelper, self).setUp() _AUTH_CONF_GROUP = 'watcher_clients_auth' ka_loading.register_auth_conf_options(CONF, _AUTH_CONF_GROUP) ka_loading.register_session_conf_options(CONF, _AUTH_CONF_GROUP) self.client = placement_helper.PlacementHelper() self.fake_err_msg = { 'errors': [{ 'detail': 'The resource could not be found.', }] } def _add_default_kwargs(self, kwargs): kwargs['endpoint_filter'] = { 'service_type': 'placement', 'interface': CONF.placement_client.interface} kwargs['headers'] = {'accept': 'application/json'} kwargs['microversion'] = CONF.placement_client.api_version kwargs['raise_exc'] = False def _assert_keystone_called_once(self, kss_req, url, method, **kwargs): self._add_default_kwargs(kwargs) # request method has 
added param rate_semaphore since Stein cycle if 'rate_semaphore' in kss_req.call_args[1]: kwargs['rate_semaphore'] = mock.ANY kss_req.assert_called_once_with(url, method, **kwargs) def test_get(self, kss_req): kss_req.return_value = fake_requests.FakeResponse(200) url = '/resource_providers' resp = self.client.get(url) self.assertEqual(200, resp.status_code) self._assert_keystone_called_once(kss_req, url, 'GET') def test_get_resource_providers_OK(self, kss_req): rp_name = 'compute' rp_uuid = uuidutils.generate_uuid() parent_uuid = uuidutils.generate_uuid() fake_rp = [{'uuid': rp_uuid, 'name': rp_name, 'generation': 0, 'parent_provider_uuid': parent_uuid}] mock_json_data = { 'resource_providers': fake_rp } kss_req.return_value = fake_requests.FakeResponse( 200, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_resource_providers(rp_name) expected_url = '/resource_providers?name=compute' self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_rp, result) def test_get_resource_providers_no_rp_OK(self, kss_req): rp_name = None rp_uuid = uuidutils.generate_uuid() parent_uuid = uuidutils.generate_uuid() fake_rp = [{'uuid': rp_uuid, 'name': 'compute', 'generation': 0, 'parent_provider_uuid': parent_uuid}] mock_json_data = { 'resource_providers': fake_rp } kss_req.return_value = fake_requests.FakeResponse( 200, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_resource_providers(rp_name) expected_url = '/resource_providers' self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_rp, result) def test_get_resource_providers_fail(self, kss_req): rp_name = 'compute' kss_req.return_value = fake_requests.FakeResponse( 400, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_resource_providers(rp_name) self.assertIsNone(result) def test_get_inventories_OK(self, kss_req): rp_uuid = uuidutils.generate_uuid() fake_inventories = { "DISK_GB": { 
"allocation_ratio": 1.0, "max_unit": 35, "min_unit": 1, "reserved": 0, "step_size": 1, "total": 35 }, "MEMORY_MB": { "allocation_ratio": 1.5, "max_unit": 5825, "min_unit": 1, "reserved": 512, "step_size": 1, "total": 5825 }, "VCPU": { "allocation_ratio": 16.0, "max_unit": 4, "min_unit": 1, "reserved": 0, "step_size": 1, "total": 4 }, } mock_json_data = { 'inventories': fake_inventories, "resource_provider_generation": 7 } kss_req.return_value = fake_requests.FakeResponse( 200, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_inventories(rp_uuid) expected_url = '/resource_providers/%s/inventories' % rp_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_inventories, result) def test_get_inventories_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( 404, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_inventories(rp_uuid) self.assertIsNone(result) def test_get_provider_traits_OK(self, kss_req): rp_uuid = uuidutils.generate_uuid() fake_traits = ["CUSTOM_HW_FPGA_CLASS1", "CUSTOM_HW_FPGA_CLASS3"] mock_json_data = { 'traits': fake_traits, "resource_provider_generation": 7 } kss_req.return_value = fake_requests.FakeResponse( 200, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_provider_traits(rp_uuid) expected_url = '/resource_providers/%s/traits' % rp_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_traits, result) def test_get_provider_traits_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( 404, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_provider_traits(rp_uuid) self.assertIsNone(result) def test_get_allocations_for_consumer_OK(self, kss_req): c_uuid = uuidutils.generate_uuid() fake_allocations = { "92637880-2d79-43c6-afab-d860886c6391": { "generation": 2, 
"resources": { "DISK_GB": 5 } }, "ba8e1ef8-7fa3-41a4-9bb4-d7cb2019899b": { "generation": 8, "resources": { "MEMORY_MB": 512, "VCPU": 2 } } } mock_json_data = { 'allocations': fake_allocations, "consumer_generation": 1, "project_id": "7e67cbf7-7c38-4a32-b85b-0739c690991a", "user_id": "067f691e-725a-451a-83e2-5c3d13e1dffc" } kss_req.return_value = fake_requests.FakeResponse( 200, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_allocations_for_consumer(c_uuid) expected_url = '/allocations/%s' % c_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_allocations, result) def test_get_allocations_for_consumer_fail(self, kss_req): c_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( 404, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_allocations_for_consumer(c_uuid) self.assertIsNone(result) def test_get_usages_for_resource_provider_OK(self, kss_req): rp_uuid = uuidutils.generate_uuid() fake_usages = { "DISK_GB": 1, "MEMORY_MB": 512, "VCPU": 1 } mock_json_data = { 'usages': fake_usages, "resource_provider_generation": 7 } kss_req.return_value = fake_requests.FakeResponse( 200, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_usages_for_resource_provider(rp_uuid) expected_url = '/resource_providers/%s/usages' % rp_uuid self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_usages, result) def test_get_usages_for_resource_provider_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( 404, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_usages_for_resource_provider(rp_uuid) self.assertIsNone(result) def test_get_candidate_providers_OK(self, kss_req): resources = 'VCPU:4,DISK_GB:64,MEMORY_MB:2048' fake_provider_summaries = { "a99bad54-a275-4c4f-a8a3-ac00d57e5c64": { "resources": { "DISK_GB": { "used": 0, 
"capacity": 1900 }, }, "traits": ["MISC_SHARES_VIA_AGGREGATE"], "parent_provider_uuid": None, "root_provider_uuid": "a99bad54-a275-4c4f-a8a3-ac00d57e5c64" }, "35791f28-fb45-4717-9ea9-435b3ef7c3b3": { "resources": { "VCPU": { "used": 0, "capacity": 384 }, "MEMORY_MB": { "used": 0, "capacity": 196608 }, }, "traits": ["HW_CPU_X86_SSE2", "HW_CPU_X86_AVX2"], "parent_provider_uuid": None, "root_provider_uuid": "35791f28-fb45-4717-9ea9-435b3ef7c3b3" }, } mock_json_data = { 'provider_summaries': fake_provider_summaries, } kss_req.return_value = fake_requests.FakeResponse( 200, content=jsonutils.dump_as_bytes(mock_json_data)) result = self.client.get_candidate_providers(resources) expected_url = "/allocation_candidates?%s" % resources self._assert_keystone_called_once(kss_req, expected_url, 'GET') self.assertEqual(fake_provider_summaries, result) def test_get_candidate_providers_fail(self, kss_req): rp_uuid = uuidutils.generate_uuid() kss_req.return_value = fake_requests.FakeResponse( 404, content=jsonutils.dump_as_bytes(self.fake_err_msg)) result = self.client.get_candidate_providers(rp_uuid) self.assertIsNone(result) python-watcher-4.0.0/watcher/tests/common/__init__.py0000664000175000017500000000000013656752270022714 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/common/test_nova_helper.py0000664000175000017500000006753613656752270024551 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # import time from novaclient import api_versions import mock import glanceclient.exc as glexceptions import novaclient.exceptions as nvexceptions from watcher.common import clients from watcher.common import exception from watcher.common import nova_helper from watcher.common import utils from watcher.tests import base @mock.patch.object(clients.OpenStackClients, 'nova') @mock.patch.object(clients.OpenStackClients, 'neutron') @mock.patch.object(clients.OpenStackClients, 'cinder') @mock.patch.object(clients.OpenStackClients, 'glance') class TestNovaHelper(base.TestCase): def setUp(self): super(TestNovaHelper, self).setUp() self.instance_uuid = "fb5311b7-37f3-457e-9cde-6494a3c59bfe" self.source_node = "ldev-indeedsrv005" self.destination_node = "ldev-indeedsrv006" self.flavor_name = "x1" @staticmethod def fake_server(*args, **kwargs): server = mock.MagicMock() server.id = args[0] server.status = 'ACTIVE' return server @staticmethod def fake_hypervisor(*args, **kwargs): hypervisor = mock.MagicMock() hypervisor.id = args[0] service_dict = {"host": args[1]} hypervisor.service = service_dict hypervisor.hypervisor_hostname = args[1] hypervisor.hypervisor_type = kwargs.pop('hypervisor_type', 'QEMU') return hypervisor @staticmethod def fake_migration(*args, **kwargs): migration = mock.MagicMock() migration.id = args[0] return migration @staticmethod def fake_nova_find_list(nova_util, fake_find=None, fake_list=None): nova_util.nova.servers.get.return_value = fake_find if list is None: nova_util.nova.servers.list.return_value = [] else: nova_util.nova.servers.list.return_value = [fake_list] @staticmethod def fake_nova_hypervisor_list(nova_util, fake_find=None, fake_list=None): nova_util.nova.hypervisors.get.return_value = fake_find nova_util.nova.hypervisors.list.return_value = fake_list @staticmethod def fake_nova_migration_list(nova_util, fake_list=None): if list is None: 
nova_util.nova.server_migrations.list.return_value = None else: nova_util.nova.server_migration.list.return_value = [fake_list] @staticmethod def fake_live_migrate(server, *args, **kwargs): def side_effect(*args, **kwargs): setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2") server.live_migrate.side_effect = side_effect @staticmethod def fake_confirm_resize(server, *args, **kwargs): def side_effect(*args, **kwargs): setattr(server, 'status', 'ACTIVE') server.confirm_resize.side_effect = side_effect @staticmethod def fake_cold_migrate(server, *args, **kwargs): def side_effect(*args, **kwargs): setattr(server, 'OS-EXT-SRV-ATTR:host', "compute-2") setattr(server, 'status', 'VERIFY_RESIZE') server.migrate.side_effect = side_effect def test_get_compute_node_by_hostname( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() hypervisor_id = utils.generate_uuid() hypervisor_name = "fake_hypervisor_1" hypervisor = self.fake_hypervisor(hypervisor_id, hypervisor_name) nova_util.nova.hypervisors.search.return_value = [hypervisor] # verify that the compute node can be obtained normally by name self.assertEqual( nova_util.get_compute_node_by_hostname(hypervisor_name), hypervisor) # verify that getting the compute node with the wrong name # will throw an exception. self.assertRaises( exception.ComputeNodeNotFound, nova_util.get_compute_node_by_hostname, "exception_hypervisor_1") # verify that when the result of getting the compute node is empty # will throw an exception. nova_util.nova.hypervisors.search.return_value = [] self.assertRaises( exception.ComputeNodeNotFound, nova_util.get_compute_node_by_hostname, hypervisor_name) def test_get_compute_node_by_hostname_multiple_matches(self, *mocks): # Tests a scenario where get_compute_node_by_name returns multiple # hypervisors and we have to pick the exact match based on the given # compute service hostname. 
nova_util = nova_helper.NovaHelper() nodes = [] # compute1 is a substring of compute10 to trigger the fuzzy match. for hostname in ('compute1', 'compute10'): node = mock.MagicMock() node.id = utils.generate_uuid() node.hypervisor_hostname = hostname node.service = {'host': hostname} nodes.append(node) # We should get back exact matches based on the service host. nova_util.nova.hypervisors.search.return_value = nodes for index, name in enumerate(['compute1', 'compute10']): result = nova_util.get_compute_node_by_hostname(name) self.assertIs(nodes[index], result) def test_get_compute_node_by_uuid( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() hypervisor_id = utils.generate_uuid() hypervisor_name = "fake_hypervisor_1" hypervisor = self.fake_hypervisor(hypervisor_id, hypervisor_name) nova_util.nova.hypervisors.get.return_value = hypervisor # verify that the compute node can be obtained normally by id self.assertEqual( nova_util.get_compute_node_by_uuid(hypervisor_id), hypervisor) def test_get_instance_list(self, *args): nova_util = nova_helper.NovaHelper() # Call it once with no filters. with mock.patch.object(nova_util, 'nova') as nova_mock: result = nova_util.get_instance_list() nova_mock.servers.list.assert_called_once_with( search_opts={'all_tenants': True}, marker=None, limit=-1) self.assertIs(result, nova_mock.servers.list.return_value) # Call it again with filters. 
with mock.patch.object(nova_util, 'nova') as nova_mock: result = nova_util.get_instance_list(filters={'host': 'fake-host'}) nova_mock.servers.list.assert_called_once_with( search_opts={'all_tenants': True, 'host': 'fake-host'}, marker=None, limit=-1) self.assertIs(result, nova_mock.servers.list.return_value) @mock.patch.object(time, 'sleep', mock.Mock()) def test_stop_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance_id = utils.generate_uuid() server = self.fake_server(instance_id) setattr(server, 'OS-EXT-STS:vm_state', 'stopped') self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) result = nova_util.stop_instance(instance_id) self.assertTrue(result) setattr(server, 'OS-EXT-STS:vm_state', 'active') result = nova_util.stop_instance(instance_id) self.assertFalse(result) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) result = nova_util.stop_instance(instance_id) self.assertFalse(result) # verify that the method will return True when the state of instance # is in the expected state. setattr(server, 'OS-EXT-STS:vm_state', 'active') with mock.patch.object( nova_util, 'wait_for_instance_state', return_value=True ) as mock_instance_state: result = nova_util.stop_instance(instance_id) self.assertTrue(result) mock_instance_state.assert_called_once_with( mock.ANY, "stopped", 8, 10) # verify that the method stop_instance will return False when the # server is not available. nova_util.nova.servers.get.return_value = None result = nova_util.stop_instance(instance_id) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_delete_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance_id = utils.generate_uuid() # verify that the method will return False when the instance does # not exist. 
self.fake_nova_find_list(nova_util, fake_find=None, fake_list=None) result = nova_util.delete_instance(instance_id) self.assertFalse(result) # verify that the method will return True when the instance exists. server = self.fake_server(instance_id) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) result = nova_util.delete_instance(instance_id) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_resize_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'status', 'VERIFY_RESIZE') self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) is_success = nova_util.resize_instance(self.instance_uuid, self.flavor_name) self.assertTrue(is_success) setattr(server, 'status', 'SOMETHING_ELSE') is_success = nova_util.resize_instance(self.instance_uuid, self.flavor_name) self.assertFalse(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_live_migrate_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertTrue(is_success) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertFalse(is_success) # verify that the method will return False when the instance does # not exist. 
setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) self.fake_nova_find_list(nova_util, fake_find=None, fake_list=None) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertFalse(is_success) # verify that the method will return False when the instance status # is in other cases. setattr(server, 'status', 'fake_status') self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) is_success = nova_util.live_migrate_instance( self.instance_uuid, None ) self.assertFalse(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_live_migrate_instance_with_task_state( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) setattr(server, 'OS-EXT-STS:task_state', '') self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) time.sleep.assert_not_called() setattr(server, 'OS-EXT-STS:task_state', 'migrating') self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) time.sleep.assert_called_with(1) @mock.patch.object(time, 'sleep', mock.Mock()) def test_live_migrate_instance_no_destination_node( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) self.destination_node = None self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) self.fake_live_migrate(server) is_success = nova_util.live_migrate_instance( self.instance_uuid, self.destination_node ) self.assertTrue(is_success) def test_watcher_non_live_migrate_instance_not_found( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() self.fake_nova_find_list(nova_util, fake_find=None, 
fake_list=None) is_success = nova_util.watcher_non_live_migrate_instance( self.instance_uuid, self.destination_node) self.assertFalse(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_abort_live_migrate_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) setattr(server, 'OS-EXT-STS:task_state', None) migration = self.fake_migration(2) self.fake_nova_migration_list(nova_util, fake_list=migration) self.fake_nova_find_list( nova_util, fake_find=server, fake_list=server) self.assertTrue(nova_util.abort_live_migrate( self.instance_uuid, self.source_node, self.destination_node)) setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) self.assertFalse(nova_util.abort_live_migrate( self.instance_uuid, self.source_node, self.destination_node)) setattr(server, 'status', 'ERROR') self.assertRaises( Exception, nova_util.abort_live_migrate, self.instance_uuid, self.source_node, self.destination_node) server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-STS:task_state', "fake_task_state") setattr(server, 'OS-EXT-SRV-ATTR:host', self.destination_node) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=None) self.fake_nova_migration_list(nova_util, fake_list=None) self.assertFalse(nova_util.abort_live_migrate( self.instance_uuid, self.source_node, self.destination_node)) def test_non_live_migrate_instance_no_destination_node( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) setattr(server, 'OS-EXT-SRV-ATTR:host', self.source_node) self.destination_node = None self.fake_nova_find_list(nova_util, fake_find=server, fake_list=server) self.fake_cold_migrate(server) self.fake_confirm_resize(server) is_success = nova_util.watcher_non_live_migrate_instance( self.instance_uuid, 
self.destination_node ) self.assertTrue(is_success) @mock.patch.object(time, 'sleep', mock.Mock()) def test_create_image_from_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) image = mock.MagicMock() setattr(instance, 'OS-EXT-SRV-ATTR:host', self.source_node) setattr(instance, 'OS-EXT-STS:vm_state', "stopped") self.fake_nova_find_list( nova_util, fake_find=instance, fake_list=instance) image_uuid = 'fake-image-uuid' nova_util.nova.servers.create_image.return_value = image glance_client = mock.MagicMock() mock_glance.return_value = glance_client glance_client.images = {image_uuid: image} instance = nova_util.create_image_from_instance( self.instance_uuid, "Cirros" ) self.assertIsNotNone(instance) nova_util.glance.images.get.return_value = None instance = nova_util.create_image_from_instance( self.instance_uuid, "Cirros" ) self.assertIsNone(instance) def test_enable_service_nova_compute(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() nova_services = nova_util.nova.services nova_services.enable.return_value = mock.MagicMock( status='enabled') result = nova_util.enable_service_nova_compute('nanjing') self.assertTrue(result) nova_services.enable.return_value = mock.MagicMock( status='disabled') result = nova_util.enable_service_nova_compute('nanjing') self.assertFalse(result) def test_disable_service_nova_compute(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() nova_services = nova_util.nova.services nova_services.disable_log_reason.return_value = mock.MagicMock( status='enabled') result = nova_util.disable_service_nova_compute('nanjing') self.assertFalse(result) nova_services.disable_log_reason.return_value = mock.MagicMock( status='disabled') result = nova_util.disable_service_nova_compute('nanjing') self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def 
test_create_instance(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) nova_util.nova.servers.create.return_value = instance nova_util.nova.servers.get.return_value = instance create_instance = nova_util.create_instance(self.source_node) self.assertIsNotNone(create_instance) self.assertEqual(create_instance, instance) # verify that the method create_instance will return None when # the method findall raises exception. nova_util.nova.keypairs.findall.side_effect = nvexceptions.NotFound( 404) instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) nova_util.nova.keypairs.findall.side_effect = None # verify that the method create_instance will return None when # the method get raises exception. nova_util.glance.images.get.side_effect = glexceptions.NotFound(404) instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) nova_util.glance.images.get.side_effect = None # verify that the method create_instance will return None when # the method find raises exception. nova_util.nova.flavors.find.side_effect = nvexceptions.NotFound(404) instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) nova_util.nova.flavors.find.side_effect = None # verify that the method create_instance will return None when # the method get_security_group_id_from_name return None. with mock.patch.object( nova_util, 'get_security_group_id_from_name', return_value=None ) as mock_security_group_id: instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) mock_security_group_id.assert_called_once_with("default") # verify that the method create_instance will return None when # the method get_network_id_from_name return None. 
with mock.patch.object( nova_util, 'get_network_id_from_name', return_value=None ) as mock_get_network_id: instance = nova_util.create_instance(self.source_node) self.assertIsNone(instance) mock_get_network_id.assert_called_once_with("demo-net") # verify that the method create_instance will not return None when # the method wait_for_instance_status return True. with mock.patch.object( nova_util, 'wait_for_instance_status', return_value=True ) as mock_instance_status: instance = nova_util.create_instance(self.source_node) self.assertIsNotNone(instance) mock_instance_status.assert_called_once_with( mock.ANY, ('ACTIVE', 'ERROR'), 5, 10) @staticmethod def fake_volume(**kwargs): volume = mock.MagicMock() volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba') volume.size = kwargs.get('size', '1') volume.status = kwargs.get('status', 'available') volume.snapshot_id = kwargs.get('snapshot_id', None) volume.availability_zone = kwargs.get('availability_zone', 'nova') return volume @mock.patch.object(time, 'sleep', mock.Mock()) def test_swap_volume(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() server = self.fake_server(self.instance_uuid) self.fake_nova_find_list(nova_util, fake_find=server, fake_list=server) old_volume = self.fake_volume( status='in-use', attachments=[{'server_id': self.instance_uuid}]) new_volume = self.fake_volume( id=utils.generate_uuid(), status='in-use') result = nova_util.swap_volume(old_volume, new_volume) self.assertTrue(result) # verify that the method will return False when the status of # new_volume is 'fake-use'. 
new_volume = self.fake_volume( id=utils.generate_uuid(), status='fake-use') result = nova_util.swap_volume(old_volume, new_volume) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_wait_for_volume_status(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() # verify that the method will return True when the status of volume # is in the expected status. fake_volume_1 = self.fake_volume(status='in-use') nova_util.cinder.volumes.get.return_value = fake_volume_1 result = nova_util.wait_for_volume_status( fake_volume_1, "in-use", timeout=2) self.assertTrue(result) # verify that the method will raise Exception when the status of # volume is not in the expected status. fake_volume_2 = self.fake_volume(status='fake-use') nova_util.cinder.volumes.get.return_value = fake_volume_2 self.assertRaises( Exception, nova_util.wait_for_volume_status, fake_volume_1, "in-use", timeout=2) def test_check_nova_api_version(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() # verify that the method will return True when the version of nova_api # is supported. api_versions.APIVersion = mock.MagicMock() result = nova_util._check_nova_api_version(nova_util.nova, "2.56") self.assertTrue(result) # verify that the method will return False when the version of nova_api # is not supported. side_effect = nvexceptions.UnsupportedVersion() api_versions.discover_version = mock.MagicMock( side_effect=side_effect) result = nova_util._check_nova_api_version(nova_util.nova, "2.56") self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_wait_for_instance_status(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) # verify that the method will return True when the status of instance # is in the expected status. 
result = nova_util.wait_for_instance_status( instance, ('ACTIVE', 'ERROR'), 5, 10) self.assertTrue(result) # verify that the method will return False when the instance is None. result = nova_util.wait_for_instance_status( None, ('ACTIVE', 'ERROR'), 5, 10) self.assertFalse(result) # verify that the method will return False when the status of instance # is not in the expected status. self.fake_nova_find_list(nova_util, fake_find=instance, fake_list=None) result = nova_util.wait_for_instance_status( instance, ('ERROR'), 5, 10) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_confirm_resize(self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() instance = self.fake_server(self.instance_uuid) self.fake_nova_find_list(nova_util, fake_find=instance, fake_list=None) # verify that the method will return True when the status of instance # is not in the expected status. result = nova_util.confirm_resize(instance, instance.status) self.assertTrue(result) # verify that the method will return False when the status of instance # is not in the expected status. 
result = nova_util.confirm_resize(instance, "fake_status") self.assertFalse(result) def test_get_compute_node_list( self, mock_glance, mock_cinder, mock_neutron, mock_nova): nova_util = nova_helper.NovaHelper() hypervisor1_id = utils.generate_uuid() hypervisor1_name = "fake_hypervisor_1" hypervisor1 = self.fake_hypervisor( hypervisor1_id, hypervisor1_name, hypervisor_type="QEMU") hypervisor2_id = utils.generate_uuid() hypervisor2_name = "fake_ironic" hypervisor2 = self.fake_hypervisor( hypervisor2_id, hypervisor2_name, hypervisor_type="ironic") nova_util.nova.hypervisors.list.return_value = [hypervisor1, hypervisor2] compute_nodes = nova_util.get_compute_node_list() # baremetal node should be removed self.assertEqual(1, len(compute_nodes)) self.assertEqual(hypervisor1_name, compute_nodes[0].hypervisor_hostname) python-watcher-4.0.0/watcher/tests/common/loader/0000775000175000017500000000000013656752352022064 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/common/loader/__init__.py0000664000175000017500000000000013656752270024162 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/common/loader/test_loader.py0000664000175000017500000000706713656752270024754 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals import mock from oslo_config import cfg from stevedore import driver as drivermanager from stevedore import extension as stevedore_extension from watcher.common import exception from watcher.common.loader import default from watcher.common.loader import loadable from watcher.tests import base class FakeLoadable(loadable.Loadable): @classmethod def get_config_opts(cls): return [] class FakeLoadableWithOpts(loadable.Loadable): @classmethod def get_config_opts(cls): return [ cfg.StrOpt("test_opt", default="fake_with_opts"), ] class TestLoader(base.TestCase): def setUp(self): super(TestLoader, self).setUp() def _fake_parse(self, *args, **kw): return cfg.ConfigOpts._parse_cli_opts(cfg.CONF, []) cfg.CONF._parse_cli_opts = _fake_parse def test_load_loadable_no_opt(self): fake_driver = drivermanager.DriverManager.make_test_instance( extension=stevedore_extension.Extension( name="fake", entry_point="%s:%s" % (FakeLoadable.__module__, FakeLoadable.__name__), plugin=FakeLoadable, obj=None), namespace="TESTING") loader_manager = default.DefaultLoader(namespace='TESTING') with mock.patch.object(drivermanager, "DriverManager") as m_driver_manager: m_driver_manager.return_value = fake_driver loaded_driver = loader_manager.load(name='fake') self.assertIsInstance(loaded_driver, FakeLoadable) @mock.patch("watcher.common.loader.default.drivermanager.DriverManager") def test_load_loadable_bad_plugin(self, m_driver_manager): m_driver_manager.side_effect = Exception() loader_manager = default.DefaultLoader(namespace='TESTING') self.assertRaises(exception.LoadingError, loader_manager.load, name='bad_driver') def test_load_loadable_with_opts(self): fake_driver = drivermanager.DriverManager.make_test_instance( extension=stevedore_extension.Extension( name="fake", entry_point="%s:%s" % (FakeLoadableWithOpts.__module__, FakeLoadableWithOpts.__name__), plugin=FakeLoadableWithOpts, obj=None), namespace="TESTING") loader_manager = 
default.DefaultLoader(namespace='TESTING') with mock.patch.object(drivermanager, "DriverManager") as m_driver_manager: m_driver_manager.return_value = fake_driver loaded_driver = loader_manager.load(name='fake') self.assertIsInstance(loaded_driver, FakeLoadableWithOpts) self.assertEqual( "fake_with_opts", loaded_driver.config.get("test_opt")) self.assertEqual( "fake_with_opts", loaded_driver.config.test_opt) python-watcher-4.0.0/watcher/tests/common/test_ironic_helper.py0000664000175000017500000000414413656752270025053 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Authors:Yumeng Bao # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from watcher.common import clients from watcher.common import exception from watcher.common import ironic_helper from watcher.common import utils as w_utils from watcher.tests import base class TestIronicHelper(base.TestCase): def setUp(self): super(TestIronicHelper, self).setUp() osc = clients.OpenStackClients() p_ironic = mock.patch.object(osc, 'ironic') p_ironic.start() self.addCleanup(p_ironic.stop) self.ironic_util = ironic_helper.IronicHelper(osc=osc) @staticmethod def fake_ironic_node(): node = mock.MagicMock() node.uuid = w_utils.generate_uuid() return node def test_get_ironic_node_list(self): node1 = self.fake_ironic_node() self.ironic_util.ironic.node.list.return_value = [node1] rt_nodes = self.ironic_util.get_ironic_node_list() self.assertEqual(rt_nodes, [node1]) def test_get_ironic_node_by_uuid_success(self): node1 = self.fake_ironic_node() self.ironic_util.ironic.node.get.return_value = node1 node = self.ironic_util.get_ironic_node_by_uuid(node1.uuid) self.assertEqual(node, node1) def test_get_ironic_node_by_uuid_failure(self): self.ironic_util.ironic.node.get.return_value = None self.assertRaisesRegex( exception.IronicNodeNotFound, "The ironic node node1 could not be found", self.ironic_util.get_ironic_node_by_uuid, 'node1') python-watcher-4.0.0/watcher/tests/common/test_cinder_helper.py0000664000175000017500000004036213656752270025036 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock import time from cinderclient import exceptions as cinder_exception from watcher.common import cinder_helper from watcher.common import clients from watcher.common import exception from watcher.common import utils from watcher.tests import base @mock.patch.object(clients.OpenStackClients, 'cinder') class TestCinderHelper(base.TestCase): @staticmethod def fake_storage_node(**kwargs): node = mock.MagicMock() node.binary = kwargs.get('binary', 'cinder-volume') node.host = kwargs.get('name', 'host@backend') return node def test_get_storage_node_list(self, mock_cinder): node1 = self.fake_storage_node() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [node1] cinder_util.get_storage_node_list() cinder_util.cinder.services.list.assert_called_once_with( binary='cinder-volume') def test_get_storage_node_by_name_success(self, mock_cinder): node1 = self.fake_storage_node() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [node1] node = cinder_util.get_storage_node_by_name('host@backend') self.assertEqual(node, node1) def test_get_storage_node_by_name_failure(self, mock_cinder): node1 = self.fake_storage_node() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [node1] self.assertRaisesRegex( exception.StorageNodeNotFound, "The storage node failure could not be found", cinder_util.get_storage_node_by_name, 'failure') @staticmethod def fake_pool(**kwargs): pool = mock.MagicMock() pool.name = kwargs.get('name', 'host@backend#pool') return pool def test_get_storage_pool_list(self, mock_cinder): pool = self.fake_pool() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.pools.list.return_value = [pool] cinder_util.get_storage_pool_list() cinder_util.cinder.pools.list.assert_called_once_with(detailed=True) def test_get_storage_pool_by_name_success(self, mock_cinder): pool1 = self.fake_pool() cinder_util = cinder_helper.CinderHelper() 
cinder_util.cinder.pools.list.return_value = [pool1] pool = cinder_util.get_storage_pool_by_name('host@backend#pool') self.assertEqual(pool, pool1) def test_get_storage_pool_by_name_failure(self, mock_cinder): pool1 = self.fake_pool() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.services.list.return_value = [pool1] self.assertRaisesRegex( exception.PoolNotFound, "The pool failure could not be found", cinder_util.get_storage_pool_by_name, 'failure') @staticmethod def fake_volume_type(**kwargs): volume_type = mock.MagicMock() volume_type.name = kwargs.get('name', 'fake_type') extra_specs = {'volume_backend_name': 'backend'} volume_type.extra_specs = kwargs.get('extra_specs', extra_specs) return volume_type def test_get_volume_type_list(self, mock_cinder): volume_type1 = self.fake_volume_type() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.volume_types.list.return_value = [volume_type1] cinder_util.get_volume_type_list() cinder_util.cinder.volume_types.list.assert_called_once_with() def test_get_volume_type_by_backendname_with_backend_exist( self, mock_cinder): volume_type1 = self.fake_volume_type() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.volume_types.list.return_value = [volume_type1] volume_type_name = cinder_util.get_volume_type_by_backendname( 'backend') self.assertEqual(volume_type_name[0], volume_type1.name) def test_get_volume_type_by_backendname_with_no_backend_exist( self, mock_cinder): volume_type1 = self.fake_volume_type() cinder_util = cinder_helper.CinderHelper() cinder_util.cinder.volume_types.list.return_value = [volume_type1] volume_type_name = cinder_util.get_volume_type_by_backendname( 'nobackend') self.assertEqual([], volume_type_name) @staticmethod def fake_volume(**kwargs): volume = mock.MagicMock() volume.id = kwargs.get('id', '45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba') volume.name = kwargs.get('name', 'fakename') volume.size = kwargs.get('size', '1') volume.status = kwargs.get('status', 
'available') volume.snapshot_id = kwargs.get('snapshot_id', None) volume.availability_zone = kwargs.get('availability_zone', 'nova') volume.volume_type = kwargs.get('volume_type', 'fake_type') return volume @mock.patch.object(time, 'sleep', mock.Mock()) def test_migrate_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'success') cinder_util.cinder.volumes.get.return_value = volume volume_type = self.fake_volume_type() cinder_util.cinder.volume_types.list.return_value = [volume_type] result = cinder_util.migrate(volume, 'host@backend#pool') self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_migrate_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume volume_type = self.fake_volume_type() volume_type.name = 'notbackend' cinder_util.cinder.volume_types.list.return_value = [volume_type] self.assertRaisesRegex( exception.Invalid, "Volume type must be same for migrating", cinder_util.migrate, volume, 'host@backend#pool') volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'error') cinder_util.cinder.volumes.get.return_value = volume volume_type = self.fake_volume_type() cinder_util.cinder.volume_types.list.return_value = [volume_type] result = cinder_util.migrate(volume, 'host@backend#pool') self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_retype_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'success') cinder_util.cinder.volumes.get.return_value = volume result = cinder_util.retype(volume, 'notfake_type') self.assertTrue(result) @mock.patch.object(time, 'sleep', 
mock.Mock()) def test_retype_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'success') cinder_util.cinder.volumes.get.return_value = volume self.assertRaisesRegex( exception.Invalid, "Volume type must be different for retyping", cinder_util.retype, volume, 'fake_type') volume = self.fake_volume() setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'migration_status', 'error') cinder_util.cinder.volumes.get.return_value = volume result = cinder_util.retype(volume, 'notfake_type') self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_create_volume_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume cinder_util.cinder.volumes.create.return_value = volume new_vloume = cinder_util.create_volume( cinder_util.cinder, volume, 'fake_type') self.assertEqual(new_vloume, volume) @mock.patch.object(time, 'sleep', mock.Mock()) def test_create_volume_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'status', 'fake_status') cinder_util.cinder.volumes.get.return_value = volume cinder_util.cinder.volumes.create.return_value = volume self.assertRaisesRegex( Exception, "Failed to create volume", cinder_util.create_volume, cinder_util.cinder, volume, 'fake_type', retry=2, retry_interval=1) @mock.patch.object(time, 'sleep', mock.Mock()) def test_delete_volume_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume cinder_util.cinder.volumes.create.return_value = volume cinder_util.check_volume_deleted = mock.MagicMock(return_value=True) result = cinder_util.delete_volume(volume) self.assertIsNone(result) @mock.patch.object(time, 'sleep', 
mock.Mock()) def test_delete_volume_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'status', 'fake_status') cinder_util.cinder.volumes.get.return_value = volume cinder_util.cinder.volumes.create.return_value = volume cinder_util.check_volume_deleted = mock.MagicMock(return_value=False) self.assertRaisesRegex( Exception, "Failed to delete volume", cinder_util.delete_volume, volume) @mock.patch.object(time, 'sleep', mock.Mock()) def test_can_get_volume_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.get_volume = mock.MagicMock(return_value=volume) result = cinder_util._can_get_volume(volume.id) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_can_get_volume_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.get_volume = mock.MagicMock() cinder_util.get_volume.side_effect = cinder_exception.NotFound(404) result = cinder_util._can_get_volume(volume.id) self.assertFalse(result) cinder_util.get_volume = mock.MagicMock(return_value=None) self.assertRaises( Exception, cinder_util._can_get_volume, volume.id) @mock.patch.object(time, 'sleep', mock.Mock()) def test_has_snapshot_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() volume.snapshot_id = utils.generate_uuid() cinder_util.get_volume = mock.MagicMock(return_value=volume) result = cinder_util._has_snapshot(volume) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_has_snapshot_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() volume.snapshot_id = None cinder_util.get_volume = mock.MagicMock(return_value=volume) result = cinder_util._has_snapshot(volume) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_get_volume_success(self, mock_cinder): cinder_util = 
cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume result = cinder_util.get_volume(volume) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_get_volume_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() side_effect = cinder_exception.NotFound(404) cinder_util.cinder.volumes.get.side_effect = side_effect cinder_util.cinder.volumes.find.return_value = False result = cinder_util.get_volume(volume) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_volume_deleted_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume cinder_util._can_get_volume = mock.MagicMock(return_value=None) result = cinder_util.check_volume_deleted( volume, retry=2, retry_interval=1) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_volume_deleted_fail(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() cinder_util.cinder.volumes.get.return_value = volume cinder_util._can_get_volume = mock.MagicMock(return_value=volume) result = cinder_util.check_volume_deleted( volume, retry=2, retry_interval=1) self.assertFalse(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_migrated_success(self, mock_cinder): cinder_util = cinder_helper.CinderHelper() volume = self.fake_volume() setattr(volume, 'migration_status', 'success') setattr(volume, 'os-vol-host-attr:host', 'host@backend#pool') cinder_util.cinder.volumes.get.return_value = volume cinder_util.check_volume_deleted = mock.MagicMock(return_value=True) result = cinder_util.check_migrated(volume) self.assertTrue(result) @mock.patch.object(time, 'sleep', mock.Mock()) def test_check_migrated_fail(self, mock_cinder): def side_effect(volume): if isinstance(volume, str): volume = self.fake_volume() 
setattr(volume, 'migration_status', 'error') elif volume.id is None: setattr(volume, 'migration_status', 'fake_status') setattr(volume, 'id', utils.generate_uuid()) return volume cinder_util = cinder_helper.CinderHelper() # verify that the method check_migrated will return False when the # status of migration_status is error. volume = self.fake_volume() setattr(volume, 'migration_status', 'error') setattr(volume, 'os-vol-host-attr:host', 'source_node') cinder_util.cinder.volumes.get.return_value = volume result = cinder_util.check_migrated(volume, retry_interval=1) self.assertFalse(result) # verify that the method check_migrated will return False when the # status of migration_status is in other cases. volume = self.fake_volume() setattr(volume, 'migration_status', 'success') setattr(volume, 'os-vol-host-attr:host', 'source_node') setattr(volume, 'id', None) cinder_util.get_volume = mock.MagicMock() cinder_util.get_volume.side_effect = side_effect result = cinder_util.check_migrated(volume, retry_interval=1) self.assertFalse(result) # verify that the method check_migrated will return False when the # return_value of method check_volume_deleted is False. volume = self.fake_volume() setattr(volume, 'migration_status', 'success') setattr(volume, 'os-vol-host-attr:host', 'source_node') cinder_util.cinder.volumes.get.return_value = volume cinder_util.check_volume_deleted = mock.MagicMock(return_value=False) cinder_util.get_deleting_volume = mock.MagicMock(return_value=volume) result = cinder_util.check_migrated(volume, retry_interval=1) self.assertFalse(result) python-watcher-4.0.0/watcher/tests/common/test_service.py0000664000175000017500000000712213656752270023670 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg import oslo_messaging as om from watcher.common import rpc from watcher.common import service from watcher import objects from watcher.tests import base CONF = cfg.CONF class DummyEndpoint(object): def __init__(self, messaging): self._messaging = messaging class DummyManager(object): API_VERSION = '1.0' conductor_endpoints = [DummyEndpoint] notification_endpoints = [DummyEndpoint] def __init__(self): self.publisher_id = "pub_id" self.conductor_topic = "conductor_topic" self.notification_topics = [] self.api_version = self.API_VERSION self.service_name = None class TestServiceHeartbeat(base.TestCase): @mock.patch.object(objects.Service, 'list') @mock.patch.object(objects.Service, 'create') def test_send_beat_with_creating_service(self, mock_create, mock_list): CONF.set_default('host', 'fake-fqdn') mock_list.return_value = [] service.ServiceHeartbeat(service_name='watcher-service') mock_list.assert_called_once_with(mock.ANY, filters={'name': 'watcher-service', 'host': 'fake-fqdn'}) self.assertEqual(1, mock_create.call_count) @mock.patch.object(objects.Service, 'list') @mock.patch.object(objects.Service, 'save') def test_send_beat_without_creating_service(self, mock_save, mock_list): mock_list.return_value = [objects.Service(mock.Mock(), name='watcher-service', host='controller')] service.ServiceHeartbeat(service_name='watcher-service') self.assertEqual(1, mock_save.call_count) class TestService(base.TestCase): def setUp(self): super(TestService, self).setUp() @mock.patch.object(om.rpc.server, "RPCServer") def test_start(self, 
m_handler): dummy_service = service.Service(DummyManager) dummy_service.start() self.assertEqual(1, m_handler.call_count) @mock.patch.object(om.rpc.server, "RPCServer") def test_stop(self, m_handler): dummy_service = service.Service(DummyManager) dummy_service.stop() self.assertEqual(1, m_handler.call_count) def test_build_topic_handler(self): topic_name = "mytopic" dummy_service = service.Service(DummyManager) handler = dummy_service.build_topic_handler(topic_name) self.assertIsNotNone(handler) self.assertIsInstance(handler, om.rpc.server.RPCServer) self.assertEqual("mytopic", handler._target.topic) def test_init_service(self): dummy_service = service.Service(DummyManager) self.assertIsInstance(dummy_service.serializer, rpc.RequestContextSerializer) self.assertIsInstance( dummy_service.conductor_topic_handler, om.rpc.server.RPCServer) python-watcher-4.0.0/watcher/tests/common/test_clients.py0000775000175000017500000004750013656752270023700 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from ceilometerclient import client as ceclient import ceilometerclient.v2.client as ceclient_v2 from cinderclient import client as ciclient from cinderclient.v3 import client as ciclient_v3 from glanceclient import client as glclient from gnocchiclient import client as gnclient from gnocchiclient.v1 import client as gnclient_v1 from ironicclient import client as irclient from ironicclient.v1 import client as irclient_v1 from keystoneauth1 import adapter as ka_adapter from keystoneauth1 import loading as ka_loading import mock from monascaclient import client as monclient from monascaclient.v2_0 import client as monclient_v2 from neutronclient.neutron import client as netclient from neutronclient.v2_0 import client as netclient_v2 from novaclient import client as nvclient from watcher.common import clients from watcher import conf from watcher.tests import base CONF = conf.CONF class TestClients(base.TestCase): def _register_watcher_clients_auth_opts(self): _AUTH_CONF_GROUP = 'watcher_clients_auth' ka_loading.register_auth_conf_options(CONF, _AUTH_CONF_GROUP) ka_loading.register_session_conf_options(CONF, _AUTH_CONF_GROUP) CONF.set_override('auth_type', 'password', group=_AUTH_CONF_GROUP) # ka_loading.load_auth_from_conf_options(CONF, _AUTH_CONF_GROUP) # ka_loading.load_session_from_conf_options(CONF, _AUTH_CONF_GROUP) # CONF.set_override( # 'auth-url', 'http://server.ip:5000', group=_AUTH_CONF_GROUP) # If we don't clean up the _AUTH_CONF_GROUP conf options, then other # tests that run after this one will fail, complaining about required # options that _AUTH_CONF_GROUP wants. 
def cleanup_conf_from_loading(): # oslo_config doesn't seem to allow unregistering groups through a # single method, so we do this instead CONF.reset() del CONF._groups[_AUTH_CONF_GROUP] self.addCleanup(cleanup_conf_from_loading) def reset_register_opts_mock(conf_obj, original_method): conf_obj.register_opts = original_method original_register_opts = CONF.register_opts self.addCleanup(reset_register_opts_mock, CONF, original_register_opts) expected = {'username': 'foousername', 'password': 'foopassword', 'auth_url': 'http://server.ip:5000', 'cafile': None, 'certfile': None, 'keyfile': None, 'insecure': False, 'user_domain_id': 'foouserdomainid', 'project_domain_id': 'fooprojdomainid'} # Because some of the conf options for auth plugins are not registered # until right before they are loaded, and because the method that does # the actual loading of the conf option values is an anonymous method # (see _getter method of load_from_conf_options in # keystoneauth1.loading.conf.py), we need to manually monkey patch # the register opts method so that we can override the conf values to # our custom values. 
def mock_register_opts(*args, **kwargs): ret = original_register_opts(*args, **kwargs) if 'group' in kwargs and kwargs['group'] == _AUTH_CONF_GROUP: for key, value in expected.items(): CONF.set_override(key, value, group=_AUTH_CONF_GROUP) return ret CONF.register_opts = mock_register_opts def test_get_keystone_session(self): self._register_watcher_clients_auth_opts() osc = clients.OpenStackClients() expected = {'username': 'foousername', 'password': 'foopassword', 'auth_url': 'http://server.ip:5000', 'user_domain_id': 'foouserdomainid', 'project_domain_id': 'fooprojdomainid'} sess = osc.session self.assertEqual(expected['auth_url'], sess.auth.auth_url) self.assertEqual(expected['username'], sess.auth._username) self.assertEqual(expected['password'], sess.auth._password) self.assertEqual(expected['user_domain_id'], sess.auth._user_domain_id) self.assertEqual(expected['project_domain_id'], sess.auth._project_domain_id) @mock.patch.object(nvclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._nova = None osc.nova() mock_call.assert_called_once_with( CONF.nova_client.api_version, endpoint_type=CONF.nova_client.endpoint_type, region_name=CONF.nova_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova_diff_vers(self, mock_session): CONF.set_override('api_version', '2.60', group='nova_client') osc = clients.OpenStackClients() osc._nova = None osc.nova() self.assertEqual('2.60', osc.nova().api_version.get_string()) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova_bad_min_version(self, mock_session): CONF.set_override('api_version', '2.47', group='nova_client') osc = clients.OpenStackClients() osc._nova = None ex = self.assertRaises(ValueError, osc.nova) self.assertIn('Invalid nova_client.api_version 2.47', str(ex)) @mock.patch.object(clients.OpenStackClients, 
'session') def test_clients_nova_diff_endpoint(self, mock_session): CONF.set_override('endpoint_type', 'publicURL', group='nova_client') osc = clients.OpenStackClients() osc._nova = None osc.nova() self.assertEqual('publicURL', osc.nova().client.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_nova_cached(self, mock_session): osc = clients.OpenStackClients() osc._nova = None nova = osc.nova() nova_cached = osc.nova() self.assertEqual(nova, nova_cached) @mock.patch.object(glclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._glance = None osc.glance() mock_call.assert_called_once_with( CONF.glance_client.api_version, interface=CONF.glance_client.endpoint_type, region_name=CONF.glance_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance_diff_vers(self, mock_session): CONF.set_override('api_version', '1', group='glance_client') osc = clients.OpenStackClients() osc._glance = None osc.glance() self.assertEqual(1.0, osc.glance().version) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance_diff_endpoint(self, mock_session): CONF.set_override('endpoint_type', 'internalURL', group='glance_client') osc = clients.OpenStackClients() osc._glance = None osc.glance() self.assertEqual('internalURL', osc.glance().http_client.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_glance_cached(self, mock_session): osc = clients.OpenStackClients() osc._glance = None glance = osc.glance() glance_cached = osc.glance() self.assertEqual(glance, glance_cached) @mock.patch.object(gnclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._gnocchi = None osc.gnocchi() mock_call.assert_called_once_with( 
CONF.gnocchi_client.api_version, adapter_options={ "interface": CONF.gnocchi_client.endpoint_type, "region_name": CONF.gnocchi_client.region_name}, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi_diff_vers(self, mock_session): # gnocchiclient currently only has one version (v1) CONF.set_override('api_version', '1', group='gnocchi_client') osc = clients.OpenStackClients() osc._gnocchi = None osc.gnocchi() self.assertEqual(gnclient_v1.Client, type(osc.gnocchi())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi_diff_endpoint(self, mock_session): # gnocchiclient currently only has one version (v1) CONF.set_override('endpoint_type', 'publicURL', group='gnocchi_client') osc = clients.OpenStackClients() osc._gnocchi = None osc.gnocchi() self.assertEqual('publicURL', osc.gnocchi().api.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_gnocchi_cached(self, mock_session): osc = clients.OpenStackClients() osc._gnocchi = None gnocchi = osc.gnocchi() gnocchi_cached = osc.gnocchi() self.assertEqual(gnocchi, gnocchi_cached) @mock.patch.object(ciclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._cinder = None osc.cinder() mock_call.assert_called_once_with( CONF.cinder_client.api_version, endpoint_type=CONF.cinder_client.endpoint_type, region_name=CONF.cinder_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder_diff_vers(self, mock_session): CONF.set_override('api_version', '3', group='cinder_client') osc = clients.OpenStackClients() osc._cinder = None osc.cinder() self.assertEqual(ciclient_v3.Client, type(osc.cinder())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder_diff_endpoint(self, mock_session): CONF.set_override('endpoint_type', 
'internalURL', group='cinder_client') osc = clients.OpenStackClients() osc._cinder = None osc.cinder() self.assertEqual('internalURL', osc.cinder().client.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_cinder_cached(self, mock_session): osc = clients.OpenStackClients() osc._cinder = None cinder = osc.cinder() cinder_cached = osc.cinder() self.assertEqual(cinder, cinder_cached) @mock.patch.object(ceclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ceilometer(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._ceilometer = None osc.ceilometer() mock_call.assert_called_once_with( CONF.ceilometer_client.api_version, None, endpoint_type=CONF.ceilometer_client.endpoint_type, region_name=CONF.ceilometer_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') def test_clients_ceilometer_diff_vers(self, mock_get_redirect_client, mock_session): '''ceilometerclient currently only has one version (v2)''' mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] CONF.set_override('api_version', '2', group='ceilometer_client') osc = clients.OpenStackClients() osc._ceilometer = None osc.ceilometer() self.assertEqual(ceclient_v2.Client, type(osc.ceilometer())) @mock.patch.object(clients.OpenStackClients, 'session') @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') def test_clients_ceilometer_diff_endpoint(self, mock_get_redirect_client, mock_session): mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] CONF.set_override('endpoint_type', 'publicURL', group='ceilometer_client') osc = clients.OpenStackClients() osc._ceilometer = None osc.ceilometer() self.assertEqual('publicURL', osc.ceilometer().http_client.interface) @mock.patch.object(clients.OpenStackClients, 'session') @mock.patch.object(ceclient_v2.Client, '_get_redirect_client') def 
test_clients_ceilometer_cached(self, mock_get_redirect_client, mock_session): mock_get_redirect_client.return_value = [mock.Mock(), mock.Mock()] osc = clients.OpenStackClients() osc._ceilometer = None ceilometer = osc.ceilometer() ceilometer_cached = osc.ceilometer() self.assertEqual(ceilometer, ceilometer_cached) @mock.patch.object(netclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron(self, mock_session, mock_call): osc = clients.OpenStackClients() osc._neutron = None osc.neutron() mock_call.assert_called_once_with( CONF.neutron_client.api_version, endpoint_type=CONF.neutron_client.endpoint_type, region_name=CONF.neutron_client.region_name, session=mock_session) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron_diff_vers(self, mock_session): '''neutronclient currently only has one version (v2)''' CONF.set_override('api_version', '2.0', group='neutron_client') osc = clients.OpenStackClients() osc._neutron = None osc.neutron() self.assertEqual(netclient_v2.Client, type(osc.neutron())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron_diff_endpoint(self, mock_session): '''neutronclient currently only has one version (v2)''' CONF.set_override('endpoint_type', 'internalURL', group='neutron_client') osc = clients.OpenStackClients() osc._neutron = None osc.neutron() self.assertEqual('internalURL', osc.neutron().httpclient.interface) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_neutron_cached(self, mock_session): osc = clients.OpenStackClients() osc._neutron = None neutron = osc.neutron() neutron_cached = osc.neutron() self.assertEqual(neutron, neutron_cached) @mock.patch.object(monclient, 'Client') @mock.patch.object(ka_loading, 'load_session_from_conf_options') def test_clients_monasca(self, mock_session, mock_call): mock_session.return_value = mock.Mock( get_endpoint=mock.Mock(return_value='test_endpoint'), 
get_token=mock.Mock(return_value='test_token'),) self._register_watcher_clients_auth_opts() osc = clients.OpenStackClients() osc._monasca = None osc.monasca() mock_call.assert_called_once_with( CONF.monasca_client.api_version, 'test_endpoint', auth_url='http://server.ip:5000', cert_file=None, insecure=False, key_file=None, keystone_timeout=None, os_cacert=None, password='foopassword', service_type='monitoring', token='test_token', username='foousername') @mock.patch.object(ka_loading, 'load_session_from_conf_options') def test_clients_monasca_diff_vers(self, mock_session): mock_session.return_value = mock.Mock( get_endpoint=mock.Mock(return_value='test_endpoint'), get_token=mock.Mock(return_value='test_token'),) self._register_watcher_clients_auth_opts() CONF.set_override('api_version', '2_0', group='monasca_client') osc = clients.OpenStackClients() osc._monasca = None osc.monasca() self.assertEqual(monclient_v2.Client, type(osc.monasca())) @mock.patch.object(ka_loading, 'load_session_from_conf_options') def test_clients_monasca_cached(self, mock_session): mock_session.return_value = mock.Mock( get_endpoint=mock.Mock(return_value='test_endpoint'), get_token=mock.Mock(return_value='test_token'),) self._register_watcher_clients_auth_opts() osc = clients.OpenStackClients() osc._monasca = None monasca = osc.monasca() monasca_cached = osc.monasca() self.assertEqual(monasca, monasca_cached) @mock.patch.object(irclient, 'Client') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ironic(self, mock_session, mock_call): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url osc = clients.OpenStackClients() osc._ironic = None osc.ironic() mock_call.assert_called() @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ironic_diff_vers(self, mock_session): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url CONF.set_override('api_version', '1', 
group='ironic_client') osc = clients.OpenStackClients() osc._ironic = None osc.ironic() self.assertEqual(irclient_v1.Client, type(osc.ironic())) @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ironic_diff_endpoint(self, mock_session): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url osc = clients.OpenStackClients() osc._ironic = None osc.ironic() mock_session.get_endpoint.assert_called_with( interface='publicURL', region_name=None, service_type='baremetal') CONF.set_override('endpoint_type', 'internalURL', group='ironic_client') osc._ironic = None osc.ironic() mock_session.get_endpoint.assert_called_with( interface='internalURL', region_name=None, service_type='baremetal') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_ironic_cached(self, mock_session): ironic_url = 'http://localhost:6385/' mock_session.get_endpoint.return_value = ironic_url osc = clients.OpenStackClients() osc._ironic = None ironic = osc.ironic() ironic_cached = osc.ironic() self.assertEqual(ironic, ironic_cached) @mock.patch.object(ka_adapter, 'Adapter') @mock.patch.object(clients.OpenStackClients, 'session') def test_clients_placement(self, mock_session, mock_call): osc = clients.OpenStackClients() osc.placement() headers = {'accept': 'application/json'} mock_call.assert_called_once_with( session=mock_session, service_type='placement', default_microversion=CONF.placement_client.api_version, interface=CONF.placement_client.interface, region_name=CONF.placement_client.region_name, additional_headers=headers) python-watcher-4.0.0/watcher/tests/config.py0000664000175000017500000000205413656752270021145 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.api import hooks # Server Specific Configurations server = { 'port': '9322', 'host': '0.0.0.0' } # Pecan Application Configurations app = { 'root': 'watcher.api.controllers.root.RootController', 'modules': ['watcher.api'], 'hooks': [ hooks.ContextHook(), ], 'acl_public_routes': [ '/' ], } # Custom Configurations must be in Python dictionary format:: # # foo = {'bar':'baz'} # # All configurations are accessible at:: # pecan.conf python-watcher-4.0.0/watcher/tests/api/0000775000175000017500000000000013656752352020077 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/api/utils.py0000664000175000017500000000723213656752270021614 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Utils for testing the API service. 
""" import datetime from oslo_serialization import jsonutils from watcher.api.controllers.v1 import action as action_ctrl from watcher.api.controllers.v1 import action_plan as action_plan_ctrl from watcher.api.controllers.v1 import audit as audit_ctrl from watcher.api.controllers.v1 import audit_template as audit_template_ctrl from watcher.tests.db import utils as db_utils ADMIN_TOKEN = '4562138218392831' MEMBER_TOKEN = '4562138218392832' class FakeMemcache(object): """Fake cache that is used for keystone tokens lookup.""" _cache = { 'tokens/%s' % ADMIN_TOKEN: { 'access': { 'token': {'id': ADMIN_TOKEN, 'expires': '2100-09-11T00:00:00'}, 'user': {'id': 'user_id1', 'name': 'user_name1', 'tenantId': '123i2910', 'tenantName': 'mytenant', 'roles': [{'name': 'admin'}] }, } }, 'tokens/%s' % MEMBER_TOKEN: { 'access': { 'token': {'id': MEMBER_TOKEN, 'expires': '2100-09-11T00:00:00'}, 'user': {'id': 'user_id2', 'name': 'user-good', 'tenantId': 'project-good', 'tenantName': 'goodies', 'roles': [{'name': 'Member'}] } } } } def __init__(self): self.set_key = None self.set_value = None self.token_expiration = None def get(self, key): dt = datetime.datetime.utcnow() + datetime.timedelta(minutes=5) return jsonutils.dumps((self._cache.get(key), dt.isoformat())) def set(self, key, value, time=0, min_compress_len=0): self.set_value = value self.set_key = key def remove_internal(values, internal): # NOTE(yuriyz): internal attributes should not be posted, except uuid int_attr = [attr.lstrip('/') for attr in internal if attr != '/uuid'] return dict( (k, v) for (k, v) in values.items() if k not in int_attr ) def audit_post_data(**kw): audit = db_utils.get_test_audit(**kw) internal = audit_ctrl.AuditPatchType.internal_attrs() return remove_internal(audit, internal) def audit_template_post_data(**kw): attrs = audit_template_ctrl.AuditTemplatePostType._wsme_attributes audit_template = db_utils.get_test_audit_template() fields = [field.key for field in attrs] post_data = {k: v for k, v in 
audit_template.items() if k in fields} post_data.update({k: v for k, v in kw.items() if k in fields}) return post_data def action_post_data(**kw): action = db_utils.get_test_action(**kw) internal = action_ctrl.ActionPatchType.internal_attrs() return remove_internal(action, internal) def action_plan_post_data(**kw): act_plan = db_utils.get_test_action_plan(**kw) internal = action_plan_ctrl.ActionPlanPatchType.internal_attrs() return remove_internal(act_plan, internal) python-watcher-4.0.0/watcher/tests/api/__init__.py0000664000175000017500000000000013656752270022175 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/api/test_hooks.py0000664000175000017500000002445013656752270022637 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for the Pecan API hooks.""" from __future__ import unicode_literals import mock from oslo_config import cfg import oslo_messaging as messaging from oslo_serialization import jsonutils import six from six.moves import http_client from watcher.api.controllers import root from watcher.api import hooks from watcher.common import context from watcher.tests.api import base class FakeRequest(object): def __init__(self, headers, context, environ): self.headers = headers self.context = context self.environ = environ or {} self.version = (1, 0) self.host_url = 'http://127.0.0.1:6385' class FakeRequestState(object): def __init__(self, headers=None, context=None, environ=None): self.request = FakeRequest(headers, context, environ) self.response = FakeRequest(headers, context, environ) def set_context(self): headers = self.request.headers creds = { 'user': headers.get('X-User') or headers.get('X-User-Id'), 'domain_id': headers.get('X-User-Domain-Id'), 'domain_name': headers.get('X-User-Domain-Name'), 'auth_token': headers.get('X-Auth-Token'), 'roles': headers.get('X-Roles', '').split(','), } is_admin = ('admin' in creds['roles'] or 'administrator' in creds['roles']) is_public_api = self.request.environ.get('is_public_api', False) self.request.context = context.RequestContext( is_admin=is_admin, is_public_api=is_public_api, **creds) def fake_headers(admin=False): headers = { 'X-Auth-Token': '8d9f235ca7464dd7ba46f81515797ea0', 'X-Domain-Id': 'None', 'X-Domain-Name': 'None', 'X-Project-Domain-Id': 'default', 'X-Project-Domain-Name': 'Default', 'X-Role': '_member_,admin', 'X-Roles': '_member_,admin', # 'X-Tenant': 'foo', # 'X-Tenant-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', # 'X-Tenant-Name': 'foo', 'X-User': 'foo', 'X-User-Domain-Id': 'default', 'X-User-Domain-Name': 'Default', 'X-User-Id': '604ab2a197c442c2a84aba66708a9e1e', 'X-User-Name': 'foo', } if admin: headers.update({ 'X-Project-Name': 'admin', 'X-Role': '_member_,admin', 'X-Roles': '_member_,admin', 'X-Tenant': 
'admin', # 'X-Tenant-Name': 'admin', # 'X-Tenant': 'admin' 'X-Tenant-Name': 'admin', 'X-Tenant-Id': 'c2a3a69d456a412376efdd9dac38', 'X-Project-Name': 'admin', 'X-Project-Id': 'c2a3a69d456a412376efdd9dac38', }) else: headers.update({ 'X-Role': '_member_', 'X-Roles': '_member_', 'X-Tenant': 'foo', 'X-Tenant-Name': 'foo', 'X-Tenant-Id': 'b4efa69d,4ffa4973863f2eefc094f7f8', 'X-Project-Name': 'foo', 'X-Project-Id': 'b4efa69d4ffa4973863f2eefc094f7f8', }) return headers class TestNoExceptionTracebackHook(base.FunctionalTest): TRACE = ['Traceback (most recent call last):', ' File "/opt/stack/watcher/watcher/common/rpc/amqp.py",' ' line 434, in _process_data\\n **args)', ' File "/opt/stack/watcher/watcher/common/rpc/' 'dispatcher.py", line 172, in dispatch\\n result =' ' getattr(proxyobj, method)(ctxt, **kwargs)'] MSG_WITHOUT_TRACE = "Test exception message." MSG_WITH_TRACE = MSG_WITHOUT_TRACE + "\n" + "\n".join(TRACE) def setUp(self): super(TestNoExceptionTracebackHook, self).setUp() p = mock.patch.object(root.Root, 'convert') self.root_convert_mock = p.start() self.addCleanup(p.stop) cfg.CONF.set_override('debug', False) def test_hook_exception_success(self): self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] self.assertEqual(self.MSG_WITHOUT_TRACE, actual_msg) def test_hook_remote_error_success(self): test_exc_type = 'TestException' self.root_convert_mock.side_effect = messaging.rpc.RemoteError( test_exc_type, self.MSG_WITHOUT_TRACE, self.TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) # NOTE(max_lobur): For RemoteError the client message will still have # some garbage because in RemoteError traceback is serialized as a list # instead of'\n'.join(trace). But since RemoteError is kind of very # rare thing (happens due to wrong deserialization settings etc.) 
# we don't care about this garbage. expected_msg = ("Remote error: %s %s" % (test_exc_type, self.MSG_WITHOUT_TRACE) + ("\n[u'" if six.PY2 else "\n['")) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] self.assertEqual(expected_msg, actual_msg) def _test_hook_without_traceback(self): msg = "Error message without traceback \n but \n multiline" self.root_convert_mock.side_effect = Exception(msg) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] self.assertEqual(msg, actual_msg) def test_hook_without_traceback(self): self._test_hook_without_traceback() def test_hook_without_traceback_debug(self): cfg.CONF.set_override('debug', True) self._test_hook_without_traceback() def _test_hook_on_serverfault(self): self.root_convert_mock.side_effect = Exception(self.MSG_WITH_TRACE) response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] return actual_msg def test_hook_on_serverfault(self): cfg.CONF.set_override('debug', False) msg = self._test_hook_on_serverfault() self.assertEqual(self.MSG_WITHOUT_TRACE, msg) def test_hook_on_serverfault_debug(self): cfg.CONF.set_override('debug', True) msg = self._test_hook_on_serverfault() self.assertEqual(self.MSG_WITH_TRACE, msg) def _test_hook_on_clientfault(self): client_error = Exception(self.MSG_WITH_TRACE) client_error.code = http_client.BAD_REQUEST self.root_convert_mock.side_effect = client_error response = self.get_json('/', path_prefix='', expect_errors=True) actual_msg = jsonutils.loads( response.json['error_message'])['faultstring'] return actual_msg def test_hook_on_clientfault(self): msg = self._test_hook_on_clientfault() self.assertEqual(self.MSG_WITHOUT_TRACE, msg) def test_hook_on_clientfault_debug_tracebacks(self): cfg.CONF.set_override('debug', True) msg = self._test_hook_on_clientfault() 
self.assertEqual(self.MSG_WITH_TRACE, msg) class TestContextHook(base.FunctionalTest): @mock.patch.object(context, 'RequestContext') def test_context_hook_not_admin(self, mock_ctx): cfg.CONF.set_override( 'auth_type', 'password', group='watcher_clients_auth') headers = fake_headers(admin=False) reqstate = FakeRequestState(headers=headers) context_hook = hooks.ContextHook() context_hook.before(reqstate) mock_ctx.assert_called_with( auth_token=headers['X-Auth-Token'], user=headers['X-User'], user_id=headers['X-User-Id'], domain_id=headers['X-User-Domain-Id'], domain_name=headers['X-User-Domain-Name'], project=headers['X-Project-Name'], project_id=headers['X-Project-Id'], show_deleted=None, auth_token_info=self.token_info, roles=headers['X-Roles'].split(',')) @mock.patch.object(context, 'RequestContext') def test_context_hook_admin(self, mock_ctx): cfg.CONF.set_override( 'auth_type', 'password', group='watcher_clients_auth') headers = fake_headers(admin=True) reqstate = FakeRequestState(headers=headers) context_hook = hooks.ContextHook() context_hook.before(reqstate) mock_ctx.assert_called_with( auth_token=headers['X-Auth-Token'], user=headers['X-User'], user_id=headers['X-User-Id'], domain_id=headers['X-User-Domain-Id'], domain_name=headers['X-User-Domain-Name'], project=headers['X-Project-Name'], project_id=headers['X-Project-Id'], show_deleted=None, auth_token_info=self.token_info, roles=headers['X-Roles'].split(',')) @mock.patch.object(context, 'RequestContext') def test_context_hook_public_api(self, mock_ctx): cfg.CONF.set_override( 'auth_type', 'password', group='watcher_clients_auth') headers = fake_headers(admin=True) env = {'is_public_api': True} reqstate = FakeRequestState(headers=headers, environ=env) context_hook = hooks.ContextHook() context_hook.before(reqstate) mock_ctx.assert_called_with( auth_token=headers['X-Auth-Token'], user=headers['X-User'], user_id=headers['X-User-Id'], domain_id=headers['X-User-Domain-Id'], 
domain_name=headers['X-User-Domain-Name'], project=headers['X-Project-Name'], project_id=headers['X-Project-Id'], show_deleted=None, auth_token_info=self.token_info, roles=headers['X-Roles'].split(',')) python-watcher-4.0.0/watcher/tests/api/test_root.py0000664000175000017500000000541613656752270022500 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from watcher.tests.api import base class TestRoot(base.FunctionalTest): def test_get_root(self): data = self.get_json('/', path_prefix='') self.assertEqual('v1', data['default_version']['id']) # Check fields are not empty [self.assertNotIn(f, ['', []]) for f in data.keys()] class TestV1Root(base.FunctionalTest): def test_get_v1_root_all(self): data = self.get_json( '/', headers={'OpenStack-API-Version': 'infra-optim 1.4'}) self.assertEqual('v1', data['id']) # Check fields are not empty for f in data.keys(): self.assertNotIn(f, ['', []]) # Check if all known resources are present and there are no extra ones. 
not_resources = ('id', 'links', 'media_types') actual_resources = tuple(set(data.keys()) - set(not_resources)) expected_resources = ('audit_templates', 'audits', 'actions', 'action_plans', 'data_model', 'scoring_engines', 'services', 'webhooks') self.assertEqual(sorted(expected_resources), sorted(actual_resources)) self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json', 'base': 'application/json'}, data['media_types']) def test_get_v1_root_without_datamodel(self): data = self.get_json( '/', headers={'OpenStack-API-Version': 'infra-optim 1.2'}) self.assertEqual('v1', data['id']) # Check fields are not empty for f in data.keys(): self.assertNotIn(f, ['', []]) # Check if all known resources are present and there are no extra ones. not_resources = ('id', 'links', 'media_types') actual_resources = tuple(set(data.keys()) - set(not_resources)) expected_resources = ('audit_templates', 'audits', 'actions', 'action_plans', 'scoring_engines', 'services') self.assertEqual(sorted(expected_resources), sorted(actual_resources)) self.assertIn({'type': 'application/vnd.openstack.watcher.v1+json', 'base': 'application/json'}, data['media_types']) python-watcher-4.0.0/watcher/tests/api/v1/0000775000175000017500000000000013656752352020425 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/api/v1/test_services.py0000664000175000017500000001747413656752270023675 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_serialization import jsonutils from six.moves.urllib import parse as urlparse from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListService(api_base.FunctionalTest): def _assert_service_fields(self, service): service_fields = ['id', 'name', 'host', 'status'] for field in service_fields: self.assertIn(field, service) def test_one(self): service = obj_utils.create_test_service(self.context) response = self.get_json('/services') self.assertEqual(service.id, response['services'][0]["id"]) self._assert_service_fields(response['services'][0]) def test_get_one_by_id(self): service = obj_utils.create_test_service(self.context) response = self.get_json('/services/%s' % service.id) self.assertEqual(service.id, response["id"]) self.assertEqual(service.name, response["name"]) self._assert_service_fields(response) def test_get_one_by_name(self): service = obj_utils.create_test_service(self.context) response = self.get_json(urlparse.quote( '/services/%s' % service['name'])) self.assertEqual(service.id, response['id']) self._assert_service_fields(response) def test_get_one_soft_deleted(self): service = obj_utils.create_test_service(self.context) service.soft_delete() response = self.get_json( '/services/%s' % service['id'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(service.id, response['id']) self._assert_service_fields(response) response = self.get_json( '/services/%s' % service['id'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): service = obj_utils.create_test_service(self.context) response = self.get_json('/services/detail') self.assertEqual(service.id, response['services'][0]["id"]) self._assert_service_fields(response['services'][0]) for service in response['services']: self.assertTrue( all(val is not None for key, val in service.items() if key in ['id', 'name', 'host', 'status']) ) def test_detail_against_single(self): service 
= obj_utils.create_test_service(self.context) response = self.get_json('/services/%s/detail' % service.id, expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): service_list = [] for idx in range(1, 4): service = obj_utils.create_test_service( self.context, id=idx, host='CONTROLLER1', name='SERVICE_{0}'.format(idx)) service_list.append(service.id) for idx in range(1, 4): service = obj_utils.create_test_service( self.context, id=3+idx, host='CONTROLLER2', name='SERVICE_{0}'.format(idx)) service_list.append(service.id) response = self.get_json('/services') self.assertEqual(6, len(response['services'])) for service in response['services']: self.assertTrue( all(val is not None for key, val in service.items() if key in ['id', 'name', 'host', 'status'])) def test_many_without_soft_deleted(self): service_list = [] for id_ in [1, 2, 3]: service = obj_utils.create_test_service( self.context, id=id_, host='CONTROLLER', name='SERVICE_{0}'.format(id_)) service_list.append(service.id) for id_ in [4, 5]: service = obj_utils.create_test_service( self.context, id=id_, host='CONTROLLER', name='SERVICE_{0}'.format(id_)) service.soft_delete() response = self.get_json('/services') self.assertEqual(3, len(response['services'])) ids = [s['id'] for s in response['services']] self.assertEqual(sorted(service_list), sorted(ids)) def test_services_collection_links(self): for idx in range(1, 6): obj_utils.create_test_service( self.context, id=idx, host='CONTROLLER', name='SERVICE_{0}'.format(idx)) response = self.get_json('/services/?limit=2') self.assertEqual(2, len(response['services'])) def test_services_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_service( self.context, id=idx, host='CONTROLLER', name='SERVICE_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/services') self.assertEqual(3, len(response['services'])) def test_many_with_sort_key_name(self): service_list = [] for id_ 
in range(1, 4): service = obj_utils.create_test_service( self.context, id=id_, host='CONTROLLER', name='SERVICE_{0}'.format(id_)) service_list.append(service.name) response = self.get_json('/services/?sort_key=name') self.assertEqual(3, len(response['services'])) names = [s['name'] for s in response['services']] self.assertEqual(sorted(service_list), names) def test_sort_key_validation(self): response = self.get_json( '/services?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(400, response.status_int) class TestServicePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "service:get_all", self.get_json, '/services', expect_errors=True) def test_policy_disallow_get_one(self): service = obj_utils.create_test_service(self.context) self._common_policy_check( "service:get", self.get_json, '/services/%s' % service.id, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "service:detail", self.get_json, '/services/detail', expect_errors=True) class TestServiceEnforcementWithAdminContext(TestListService, api_base.AdminRoleTest): def setUp(self): super(TestServiceEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "service:detail": "rule:default", "service:get": "rule:default", "service:get_all": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_actions_plans.py0000664000175000017500000007151213656752270024700 0ustar 
zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import itertools import mock from oslo_config import cfg from oslo_serialization import jsonutils from watcher.applier import rpcapi as aapi from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListActionPlan(api_base.FunctionalTest): def setUp(self): super(TestListActionPlan, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) def test_empty(self): response = self.get_json('/action_plans') self.assertEqual([], response['action_plans']) def _assert_action_plans_fields(self, action_plan): action_plan_fields = [ 'uuid', 'audit_uuid', 'strategy_uuid', 'strategy_name', 'state', 'global_efficacy', 'efficacy_indicators'] for field in action_plan_fields: self.assertIn(field, action_plan) def test_one(self): action_plan = obj_utils.create_test_action_plan(self.context) response = self.get_json('/action_plans') self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) def test_one_soft_deleted(self): action_plan = obj_utils.create_test_action_plan(self.context) action_plan.soft_delete() response = self.get_json('/action_plans', headers={'X-Show-Deleted': 'True'}) 
self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) response = self.get_json('/action_plans') self.assertEqual([], response['action_plans']) def test_get_one_ok(self): action_plan = obj_utils.create_test_action_plan(self.context) obj_utils.create_test_efficacy_indicator( self.context, action_plan_id=action_plan['id']) response = self.get_json('/action_plans/%s' % action_plan['uuid']) self.assertEqual(action_plan.uuid, response['uuid']) self._assert_action_plans_fields(response) self.assertEqual( [{'description': 'Test indicator', 'name': 'test_indicator', 'value': 0.0, 'unit': '%'}], response['efficacy_indicators']) def test_get_one_soft_deleted(self): action_plan = obj_utils.create_test_action_plan(self.context) action_plan.soft_delete() response = self.get_json('/action_plans/%s' % action_plan['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(action_plan.uuid, response['uuid']) self._assert_action_plans_fields(response) response = self.get_json('/action_plans/%s' % action_plan['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): action_plan = obj_utils.create_test_action_plan(self.context) response = self.get_json('/action_plans/detail') self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) def test_detail_soft_deleted(self): action_plan = obj_utils.create_test_action_plan(self.context) action_plan.soft_delete() response = self.get_json('/action_plans/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(action_plan.uuid, response['action_plans'][0]["uuid"]) self._assert_action_plans_fields(response['action_plans'][0]) response = self.get_json('/action_plans/detail') self.assertEqual([], response['action_plans']) def test_detail_against_single(self): action_plan = obj_utils.create_test_action_plan(self.context) response = self.get_json( 
'/action_plan/%s/detail' % action_plan['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): action_plan_list = [] for id_ in range(5): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan_list.append(action_plan.uuid) response = self.get_json('/action_plans') self.assertEqual(len(action_plan_list), len(response['action_plans'])) uuids = [s['uuid'] for s in response['action_plans']] self.assertEqual(sorted(action_plan_list), sorted(uuids)) def test_many_with_soft_deleted_audit_uuid(self): action_plan_list = [] audit1 = obj_utils.create_test_audit( self.context, id=2, uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) audit2 = obj_utils.create_test_audit( self.context, id=3, uuid=utils.generate_uuid(), name='My Audit {0}'.format(3)) for id_ in range(0, 2): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit1.id) action_plan_list.append(action_plan.uuid) for id_ in range(2, 4): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit2.id) action_plan_list.append(action_plan.uuid) new_state = objects.audit.State.CANCELLED self.patch_json( '/audits/%s' % audit1.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.delete('/audits/%s' % audit1.uuid) response = self.get_json('/action_plans') self.assertEqual(len(action_plan_list), len(response['action_plans'])) for id_ in range(0, 2): action_plan = response['action_plans'][id_] self.assertIsNone(action_plan['audit_uuid']) for id_ in range(2, 4): action_plan = response['action_plans'][id_] self.assertEqual(audit2.uuid, action_plan['audit_uuid']) def test_many_with_audit_uuid(self): action_plan_list = [] audit = obj_utils.create_test_audit( self.context, id=2, uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) for id_ in range(2, 5): action_plan = 
obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit.id) action_plan_list.append(action_plan.uuid) response = self.get_json('/action_plans') self.assertEqual(len(action_plan_list), len(response['action_plans'])) for action in response['action_plans']: self.assertEqual(audit.uuid, action['audit_uuid']) def test_many_with_audit_uuid_filter(self): action_plan_list1 = [] audit1 = obj_utils.create_test_audit( self.context, id=2, uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) for id_ in range(2, 5): action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit1.id) action_plan_list1.append(action_plan.uuid) audit2 = obj_utils.create_test_audit( self.context, id=3, uuid=utils.generate_uuid(), name='My Audit {0}'.format(3)) action_plan_list2 = [] for id_ in [5, 6, 7]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit2.id) action_plan_list2.append(action_plan.uuid) response = self.get_json('/action_plans?audit_uuid=%s' % audit2.uuid) self.assertEqual(len(action_plan_list2), len(response['action_plans'])) for action in response['action_plans']: self.assertEqual(audit2.uuid, action['audit_uuid']) def test_many_without_soft_deleted(self): action_plan_list = [] for id_ in [1, 2, 3]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan_list.append(action_plan.uuid) for id_ in [4, 5]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan.soft_delete() response = self.get_json('/action_plans') self.assertEqual(3, len(response['action_plans'])) uuids = [s['uuid'] for s in response['action_plans']] self.assertEqual(sorted(action_plan_list), sorted(uuids)) def test_many_with_soft_deleted(self): action_plan_list = [] for id_ in [1, 2, 3]: action_plan = obj_utils.create_test_action_plan( self.context, 
id=id_, uuid=utils.generate_uuid()) action_plan_list.append(action_plan.uuid) for id_ in [4, 5]: action_plan = obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) action_plan.soft_delete() action_plan_list.append(action_plan.uuid) response = self.get_json('/action_plans', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['action_plans'])) uuids = [s['uuid'] for s in response['action_plans']] self.assertEqual(sorted(action_plan_list), sorted(uuids)) def test_many_with_sort_key_audit_uuid(self): audit_list = [] for id_ in range(2, 5): audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid(), audit_id=audit.id) audit_list.append(audit.uuid) response = self.get_json('/action_plans/?sort_key=audit_uuid') self.assertEqual(3, len(response['action_plans'])) uuids = [s['audit_uuid'] for s in response['action_plans']] self.assertEqual(sorted(audit_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/action_plans?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(400, response.status_int) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_action_plan(self.context, id=1, uuid=uuid) response = self.get_json('/action_plans/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) response = self.get_json('/action_plans/?limit=3') self.assertEqual(3, len(response['action_plans'])) next_marker = response['action_plans'][-1]['uuid'] self.assertIn(next_marker, response['next']) 
def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_action_plan( self.context, id=id_, uuid=utils.generate_uuid()) response = self.get_json('/action_plans') self.assertEqual(3, len(response['action_plans'])) next_marker = response['action_plans'][-1]['uuid'] self.assertIn(next_marker, response['next']) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan( self.context) p = mock.patch.object(db_api.BaseConnection, 'destroy_action_plan') self.mock_action_plan_delete = p.start() self.mock_action_plan_delete.side_effect = \ self._simulate_rpc_action_plan_delete self.addCleanup(p.stop) def _simulate_rpc_action_plan_delete(self, audit_uuid): action_plan = objects.ActionPlan.get_by_uuid(self.context, audit_uuid) action_plan.destroy() def test_delete_action_plan_without_action(self): response = self.delete('/action_plans/%s' % self.action_plan.uuid, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.action_plan.state = objects.action_plan.State.SUCCEEDED self.action_plan.save() self.delete('/action_plans/%s' % self.action_plan.uuid) response = self.get_json('/action_plans/%s' % self.action_plan.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_delete_action_plan_with_action(self): action = obj_utils.create_test_action( self.context, id=1) self.action_plan.state = objects.action_plan.State.SUCCEEDED self.action_plan.save() self.delete('/action_plans/%s' % self.action_plan.uuid) ap_response = 
self.get_json('/action_plans/%s' % self.action_plan.uuid, expect_errors=True) acts_response = self.get_json( '/actions/?action_plan_uuid=%s' % self.action_plan.uuid) act_response = self.get_json( '/actions/%s' % action.uuid, expect_errors=True) # The action plan does not exist anymore self.assertEqual(404, ap_response.status_int) self.assertEqual('application/json', ap_response.content_type) self.assertTrue(ap_response.json['error_message']) # Nor does the action self.assertEqual(0, len(acts_response['actions'])) self.assertEqual(404, act_response.status_int) self.assertEqual('application/json', act_response.content_type) self.assertTrue(act_response.json['error_message']) def test_delete_action_plan_not_found(self): uuid = utils.generate_uuid() response = self.delete('/action_plans/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestStart(api_base.FunctionalTest): def setUp(self): super(TestStart, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan( self.context, state=objects.action_plan.State.RECOMMENDED) p = mock.patch.object(db_api.BaseConnection, 'update_action_plan') self.mock_action_plan_update = p.start() self.mock_action_plan_update.side_effect = \ self._simulate_rpc_action_plan_update self.addCleanup(p.stop) def _simulate_rpc_action_plan_update(self, action_plan): action_plan.save() return action_plan @mock.patch('watcher.common.policy.enforce') def test_start_action_plan_not_found(self, mock_policy): mock_policy.return_value = True uuid = utils.generate_uuid() response = self.post('/v1/action_plans/%s/%s' % (uuid, 'start'), expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) 
self.assertTrue(response.json['error_message']) @mock.patch('watcher.common.policy.enforce') def test_start_action_plan(self, mock_policy): mock_policy.return_value = True action = obj_utils.create_test_action( self.context, id=1) self.action_plan.state = objects.action_plan.State.SUCCEEDED response = self.post('/v1/action_plans/%s/%s/' % (self.action_plan.uuid, 'start'), expect_errors=True) self.assertEqual(200, response.status_int) act_response = self.get_json( '/actions/%s' % action.uuid, expect_errors=True) self.assertEqual(200, act_response.status_int) self.assertEqual('PENDING', act_response.json['state']) self.assertEqual('application/json', act_response.content_type) class TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan( self.context, state=objects.action_plan.State.RECOMMENDED) p = mock.patch.object(db_api.BaseConnection, 'update_action_plan') self.mock_action_plan_update = p.start() self.mock_action_plan_update.side_effect = \ self._simulate_rpc_action_plan_update self.addCleanup(p.stop) def _simulate_rpc_action_plan_update(self, action_plan): action_plan.save() return action_plan @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_denied(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = objects.action_plan.State.DELETED response = self.get_json( '/action_plans/%s' % self.action_plan.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def 
test_replace_non_existent_action_plan_denied(self): response = self.patch_json( '/action_plans/%s' % utils.generate_uuid(), [{'path': '/state', 'value': objects.action_plan.State.PENDING, 'op': 'replace'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_add_non_existent_property_denied(self): response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_denied(self): # We should not be able to remove the state of an action plan response = self.get_json( '/action_plans/%s' % self.action_plan.uuid) self.assertIsNotNone(response['state']) response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/state', 'op': 'remove'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_uuid_denied(self): response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_non_existent_property_denied(self): response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan') def test_replace_state_pending_ok(self, applier_mock): new_state = 
objects.action_plan.State.PENDING response = self.get_json( '/action_plans/%s' % self.action_plan.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/action_plans/%s' % self.action_plan.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) applier_mock.assert_called_once_with(mock.ANY, self.action_plan.uuid) ALLOWED_TRANSITIONS = [ {"original_state": objects.action_plan.State.RECOMMENDED, "new_state": objects.action_plan.State.PENDING}, {"original_state": objects.action_plan.State.RECOMMENDED, "new_state": objects.action_plan.State.CANCELLED}, {"original_state": objects.action_plan.State.ONGOING, "new_state": objects.action_plan.State.CANCELLING}, {"original_state": objects.action_plan.State.PENDING, "new_state": objects.action_plan.State.CANCELLED}, ] class TestPatchStateTransitionDenied(api_base.FunctionalTest): STATES = [ ap_state for ap_state in objects.action_plan.State.__dict__ if not ap_state.startswith("_") ] scenarios = [ ( "%s -> %s" % (original_state, new_state), {"original_state": original_state, "new_state": new_state}, ) for original_state, new_state in list(itertools.product(STATES, STATES)) # from DELETED to ... # NOTE: Any state transition from DELETED (To RECOMMENDED, PENDING, # ONGOING, CANCELLED, SUCCEEDED and FAILED) will cause a 404 Not Found # because we cannot retrieve them with a GET (soft_deleted state). 
# This is the reason why they are not listed here but they have a # special test to cover it if original_state != objects.action_plan.State.DELETED and original_state != new_state and {"original_state": original_state, "new_state": new_state} not in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionDenied, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) @mock.patch.object( db_api.BaseConnection, 'update_action_plan', mock.Mock(side_effect=lambda ap: ap.save() or ap)) def test_replace_state_pending_denied(self): action_plan = obj_utils.create_test_action_plan( self.context, state=self.original_state) initial_ap = self.get_json('/action_plans/%s' % action_plan.uuid) response = self.patch_json( '/action_plans/%s' % action_plan.uuid, [{'path': '/state', 'value': self.new_state, 'op': 'replace'}], expect_errors=True) updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) self.assertNotEqual(self.new_state, initial_ap['state']) self.assertEqual(self.original_state, updated_ap['state']) self.assertEqual(400, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestPatchStateTransitionOk(api_base.FunctionalTest): scenarios = [ ( "%s -> %s" % (transition["original_state"], transition["new_state"]), transition ) for transition in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionOk, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) @mock.patch.object( db_api.BaseConnection, 'update_action_plan', mock.Mock(side_effect=lambda ap: ap.save() or ap)) @mock.patch.object(aapi.ApplierAPI, 'launch_action_plan', mock.Mock()) def test_replace_state_pending_ok(self): action_plan = obj_utils.create_test_action_plan( self.context, state=self.original_state) initial_ap = 
self.get_json('/action_plans/%s' % action_plan.uuid) response = self.patch_json( '/action_plans/%s' % action_plan.uuid, [{'path': '/state', 'value': self.new_state, 'op': 'replace'}]) updated_ap = self.get_json('/action_plans/%s' % action_plan.uuid) self.assertNotEqual(self.new_state, initial_ap['state']) self.assertEqual(self.new_state, updated_ap['state']) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) class TestActionPlanPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestActionPlanPolicyEnforcement, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:defaut"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "action_plan:get_all", self.get_json, '/action_plans', expect_errors=True) def test_policy_disallow_get_one(self): action_plan = obj_utils.create_test_action_plan(self.context) self._common_policy_check( "action_plan:get", self.get_json, '/action_plans/%s' % action_plan.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "action_plan:detail", self.get_json, '/action_plans/detail', expect_errors=True) def test_policy_disallow_update(self): action_plan = obj_utils.create_test_action_plan(self.context) self._common_policy_check( "action_plan:update", self.patch_json, '/action_plans/%s' % action_plan.uuid, [{'path': '/state', 'value': objects.action_plan.State.DELETED, 'op': 'replace'}], expect_errors=True) def test_policy_disallow_delete(self): action_plan = obj_utils.create_test_action_plan(self.context) self._common_policy_check( "action_plan:delete", self.delete, '/action_plans/%s' % action_plan.uuid, expect_errors=True) class TestActionPlanPolicyEnforcementWithAdminContext(TestListActionPlan, api_base.AdminRoleTest): def setUp(self): super(TestActionPlanPolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "action_plan:delete": "rule:default", "action_plan:detail": "rule:default", "action_plan:get": "rule:default", "action_plan:get_all": "rule:default", "action_plan:update": "rule:default", "action_plan:start": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/__init__.py0000664000175000017500000000000013656752270022523 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/api/v1/test_root.py0000664000175000017500000000127613656752270023026 0ustar zuulzuul00000000000000# All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from watcher.tests.api import base as api_base class TestV1Routing(api_base.FunctionalTest): pass python-watcher-4.0.0/watcher/tests/api/v1/test_data_model.py0000664000175000017500000000614113656752270024130 0ustar zuulzuul00000000000000# Copyright 2019 ZTE corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_serialization import jsonutils from watcher.decision_engine import rpcapi as deapi from watcher.tests.api import base as api_base class TestListDataModel(api_base.FunctionalTest): def setUp(self): super(TestListDataModel, self).setUp() p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI') self.mock_dcapi = p_dcapi.start() self.mock_dcapi().get_data_model_info.return_value = \ 'fake_response_value' self.addCleanup(p_dcapi.stop) def test_get_all(self): response = self.get_json( '/data_model/?data_model_type=compute', headers={'OpenStack-API-Version': 'infra-optim 1.3'}) self.assertEqual('fake_response_value', response) def test_get_all_not_acceptable(self): response = self.get_json( '/data_model/?data_model_type=compute', headers={'OpenStack-API-Version': 'infra-optim 1.2'}, expect_errors=True) self.assertEqual(406, response.status_int) class TestDataModelPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestDataModelPolicyEnforcement, self).setUp() p_dcapi = mock.patch.object(deapi, 'DecisionEngineAPI') self.mock_dcapi = p_dcapi.start() self.addCleanup(p_dcapi.stop) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:defaut"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "data_model:get_all", self.get_json, "/data_model/?data_model_type=compute", headers={'OpenStack-API-Version': 'infra-optim 1.3'}, expect_errors=True) class TestDataModelEnforcementWithAdminContext( TestListDataModel, api_base.AdminRoleTest): def setUp(self): super(TestDataModelEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "data_model:get_all": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_microversions.py0000664000175000017500000001147513656752270024747 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.api.controllers.v1 import versions from watcher.tests.api import base as api_base SERVICE_TYPE = 'infra-optim' H_MIN_VER = 'openstack-api-minimum-version' H_MAX_VER = 'openstack-api-maximum-version' H_RESP_VER = 'openstack-api-version' MIN_VER = versions.min_version_string() MAX_VER = versions.max_version_string() class TestMicroversions(api_base.FunctionalTest): controller_list_response = [ 'scoring_engines', 'audit_templates', 'audits', 'actions', 'action_plans', 'services'] def setUp(self): super(TestMicroversions, self).setUp() def test_wrong_major_version(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '10'])}, expect_errors=True, return_json=False) self.assertEqual('application/json', response.content_type) self.assertEqual(406, response.status_int) expected_error_msg = ('Invalid value for' ' OpenStack-API-Version header') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) def test_extend_initial_version_with_micro(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '1'])}, return_json=False) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, MIN_VER])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_without_microversion(self): response = self.get_json('/', return_json=False) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, MIN_VER])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_new_client_new_api(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '1.1'])}, return_json=False) 
self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, '1.1'])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_latest_microversion(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, 'latest'])}, return_json=False) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) self.assertEqual(response.headers[H_RESP_VER], ' '.join([SERVICE_TYPE, MAX_VER])) self.assertTrue(all(x in response.json.keys() for x in self.controller_list_response)) def test_unsupported_version(self): response = self.get_json( '/', headers={'OpenStack-API-Version': ' '.join([SERVICE_TYPE, '1.999'])}, expect_errors=True) self.assertEqual(406, response.status_int) self.assertEqual(response.headers[H_MIN_VER], MIN_VER) self.assertEqual(response.headers[H_MAX_VER], MAX_VER) expected_error_msg = ('Version 1.999 was requested but the minor ' 'version is not supported by this service. ' 'The supported version range is') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) python-watcher-4.0.0/watcher/tests/api/v1/test_utils.py0000664000175000017500000000425313656752270023201 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import wsme from oslo_config import cfg from watcher.api.controllers.v1 import utils from watcher.tests import base CONF = cfg.CONF class TestApiUtils(base.TestCase): def test_validate_limit(self): limit = utils.validate_limit(10) self.assertEqual(10, 10) # max limit limit = utils.validate_limit(999999999) self.assertEqual(CONF.api.max_limit, limit) # negative self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, -1) # zero self.assertRaises(wsme.exc.ClientSideError, utils.validate_limit, 0) def test_validate_sort_dir(self): # if sort_dir is valid, nothing should happen try: utils.validate_sort_dir('asc') except Exception as exc: self.fail(exc) # invalid sort_dir parameter self.assertRaises(wsme.exc.ClientSideError, utils.validate_sort_dir, 'fake-sort') def test_validate_search_filters(self): allowed_fields = ["allowed", "authorized"] test_filters = {"allowed": 1, "authorized": 2} try: utils.validate_search_filters(test_filters, allowed_fields) except Exception as exc: self.fail(exc) def test_validate_search_filters_with_invalid_key(self): allowed_fields = ["allowed", "authorized"] test_filters = {"allowed": 1, "unauthorized": 2} self.assertRaises( wsme.exc.ClientSideError, utils.validate_search_filters, test_filters, allowed_fields) python-watcher-4.0.0/watcher/tests/api/v1/test_actions.py0000664000175000017500000005535113656752270023506 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the License for the specific language governing permissions and # limitations under the License. import datetime import itertools import mock from oslo_config import cfg from oslo_serialization import jsonutils from wsme import types as wtypes from watcher.api.controllers.v1 import action as api_action from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.api import utils as api_utils from watcher.tests import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils def post_get_test_action(**kw): action = api_utils.action_post_data(**kw) action_plan = db_utils.get_test_action_plan() del action['action_plan_id'] action['action_plan_uuid'] = kw.get('action_plan_uuid', action_plan['uuid']) action['parents'] = None return action class TestActionObject(base.TestCase): def test_action_init(self): action_dict = api_utils.action_post_data(action_plan_id=None, parents=None) del action_dict['state'] action = api_action.Action(**action_dict) self.assertEqual(wtypes.Unset, action.state) class TestListAction(api_base.FunctionalTest): def setUp(self): super(TestListAction, self).setUp() self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan(self.context) def test_empty(self): response = self.get_json('/actions') self.assertEqual([], response['actions']) def _assert_action_fields(self, action): action_fields = ['uuid', 'state', 'action_plan_uuid', 'action_type'] for field in action_fields: self.assertIn(field, action) def test_one(self): action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions') self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) def 
test_one_soft_deleted(self): action = obj_utils.create_test_action(self.context, parents=None) action.soft_delete() response = self.get_json('/actions', headers={'X-Show-Deleted': 'True'}) self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) response = self.get_json('/actions') self.assertEqual([], response['actions']) def test_get_one(self): action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions/%s' % action['uuid']) self.assertEqual(action.uuid, response['uuid']) self.assertEqual(action.action_type, response['action_type']) self.assertEqual(action.input_parameters, response['input_parameters']) self._assert_action_fields(response) def test_get_one_soft_deleted(self): action = obj_utils.create_test_action(self.context, parents=None) action.soft_delete() response = self.get_json('/actions/%s' % action['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(action.uuid, response['uuid']) self._assert_action_fields(response) response = self.get_json('/actions/%s' % action['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions/detail') self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) def test_detail_soft_deleted(self): action = obj_utils.create_test_action(self.context, parents=None) action.soft_delete() response = self.get_json('/actions/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(action.uuid, response['actions'][0]["uuid"]) self._assert_action_fields(response['actions'][0]) response = self.get_json('/actions/detail') self.assertEqual([], response['actions']) def test_detail_against_single(self): action = obj_utils.create_test_action(self.context, parents=None) response = self.get_json('/actions/%s/detail' % action['uuid'], 
expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): action_list = [] for id_ in range(5): action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action_list.append(action.uuid) response = self.get_json('/actions') self.assertEqual(len(action_list), len(response['actions'])) uuids = [s['uuid'] for s in response['actions']] self.assertEqual(sorted(action_list), sorted(uuids)) def test_many_with_action_plan_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, id=2, uuid=utils.generate_uuid(), audit_id=1) action_list = [] for id_ in range(5): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=2, uuid=utils.generate_uuid()) action_list.append(action.uuid) response = self.get_json('/actions') self.assertEqual(len(action_list), len(response['actions'])) for action in response['actions']: self.assertEqual(action_plan.uuid, action['action_plan_uuid']) def test_filter_by_audit_uuid(self): action_plan_1 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid()) action_list = [] for id_ in range(3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_1.id, uuid=utils.generate_uuid()) action_list.append(action.uuid) audit2 = obj_utils.create_test_audit( self.context, id=2, uuid=utils.generate_uuid(), name='My Audit {0}'.format(2)) action_plan_2 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=audit2.id) for id_ in range(4, 5, 6): obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_2.id, uuid=utils.generate_uuid()) response = self.get_json('/actions?audit_uuid=%s' % self.audit.uuid) self.assertEqual(len(action_list), len(response['actions'])) for action in response['actions']: self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) def test_filter_by_action_plan_uuid(self): action_plan_1 = obj_utils.create_test_action_plan( self.context, 
uuid=utils.generate_uuid(), audit_id=self.audit.id) action_list = [] for id_ in range(3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_1.id, uuid=utils.generate_uuid()) action_list.append(action.uuid) action_plan_2 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) for id_ in range(4, 5, 6): obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_2.id, uuid=utils.generate_uuid()) response = self.get_json( '/actions?action_plan_uuid=%s' % action_plan_1.uuid) self.assertEqual(len(action_list), len(response['actions'])) for action in response['actions']: self.assertEqual(action_plan_1.uuid, action['action_plan_uuid']) response = self.get_json( '/actions?action_plan_uuid=%s' % action_plan_2.uuid) for action in response['actions']: self.assertEqual(action_plan_2.uuid, action['action_plan_uuid']) def test_details_and_filter_by_action_plan_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) for id_ in range(1, 3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan.id, uuid=utils.generate_uuid()) response = self.get_json( '/actions/detail?action_plan_uuid=%s' % action_plan.uuid) for action in response['actions']: self.assertEqual(action_plan.uuid, action['action_plan_uuid']) def test_details_and_filter_by_audit_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) for id_ in range(1, 3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan.id, uuid=utils.generate_uuid()) response = self.get_json( '/actions/detail?audit_uuid=%s' % self.audit.uuid) for action in response['actions']: self.assertEqual(action_plan.uuid, action['action_plan_uuid']) def test_filter_by_action_plan_and_audit_uuids(self): action_plan = 
obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) url = '/actions?action_plan_uuid=%s&audit_uuid=%s' % ( action_plan.uuid, self.audit.uuid) response = self.get_json(url, expect_errors=True) self.assertEqual(400, response.status_int) def test_many_with_sort_key_uuid(self): action_plan = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) actions_list = [] for id_ in range(1, 3): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan.id, uuid=utils.generate_uuid()) actions_list.append(action) response = self.get_json('/actions?sort_key=%s' % 'uuid') names = [s['uuid'] for s in response['actions']] self.assertEqual( sorted([a.uuid for a in actions_list]), names) def test_many_with_sort_key_action_plan_uuid(self): action_plan_1 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) action_plan_2 = obj_utils.create_test_action_plan( self.context, uuid=utils.generate_uuid(), audit_id=self.audit.id) action_plans_uuid_list = [] for id_, action_plan_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(action_plan_1.id, 3), itertools.repeat(action_plan_2.id, 2)]), 1): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan_id, uuid=utils.generate_uuid()) action_plans_uuid_list.append(action.action_plan.uuid) for direction in ['asc', 'desc']: response = self.get_json( '/actions?sort_key={0}&sort_dir={1}' .format('action_plan_uuid', direction)) action_plan_uuids = \ [s['action_plan_uuid'] for s in response['actions']] self.assertEqual( sorted(action_plans_uuid_list, reverse=(direction == 'desc')), action_plan_uuids, message='Failed on %s direction' % direction) def test_sort_key_validation(self): response = self.get_json( '/actions?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(400, response.status_int) def 
test_many_with_soft_deleted_action_plan_uuid(self): action_plan1 = obj_utils.create_test_action_plan( self.context, id=2, uuid=utils.generate_uuid(), audit_id=1) action_plan2 = obj_utils.create_test_action_plan( self.context, id=3, uuid=utils.generate_uuid(), audit_id=1) ap1_action_list = [] ap2_action_list = [] for id_ in range(0, 2): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan1.id, uuid=utils.generate_uuid()) ap1_action_list.append(action) for id_ in range(2, 4): action = obj_utils.create_test_action( self.context, id=id_, action_plan_id=action_plan2.id, uuid=utils.generate_uuid()) ap2_action_list.append(action) action_plan1.state = objects.action_plan.State.CANCELLED action_plan1.save() self.delete('/action_plans/%s' % action_plan1.uuid) response = self.get_json('/actions') # We deleted the actions from the 1st action plan so we've got 2 left self.assertEqual(len(ap2_action_list), len(response['actions'])) # We deleted them so that's normal self.assertEqual([], [act for act in response['actions'] if act['action_plan_uuid'] == action_plan1.uuid]) # Here are the 2 actions left self.assertEqual( set([act.as_dict()['uuid'] for act in ap2_action_list]), set([act['uuid'] for act in response['actions'] if act['action_plan_uuid'] == action_plan2.uuid])) def test_many_with_parents(self): action_list = [] for id_ in range(5): if id_ > 0: action = obj_utils.create_test_action( self.context, id=id_, uuid=utils.generate_uuid(), parents=[action_list[id_ - 1]]) else: action = obj_utils.create_test_action( self.context, id=id_, uuid=utils.generate_uuid(), parents=[]) action_list.append(action.uuid) response = self.get_json('/actions') response_actions = response['actions'] for id_ in range(4): self.assertEqual(response_actions[id_]['uuid'], response_actions[id_ + 1]['parents'][0]) def test_many_without_soft_deleted(self): action_list = [] for id_ in [1, 2, 3]: action = obj_utils.create_test_action(self.context, id=id_, 
uuid=utils.generate_uuid()) action_list.append(action.uuid) for id_ in [4, 5]: action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action.soft_delete() response = self.get_json('/actions') self.assertEqual(3, len(response['actions'])) uuids = [s['uuid'] for s in response['actions']] self.assertEqual(sorted(action_list), sorted(uuids)) def test_many_with_soft_deleted(self): action_list = [] for id_ in [1, 2, 3]: action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action_list.append(action.uuid) for id_ in [4, 5]: action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) action.soft_delete() action_list.append(action.uuid) response = self.get_json('/actions', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['actions'])) uuids = [s['uuid'] for s in response['actions']] self.assertEqual(sorted(action_list), sorted(uuids)) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_action(self.context, id=1, uuid=uuid) response = self.get_json('/actions/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): parents = None for id_ in range(5): action = obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid(), parents=parents) parents = [action.uuid] response = self.get_json('/actions/?limit=3') self.assertEqual(3, len(response['actions'])) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_action(self.context, id=id_, uuid=utils.generate_uuid()) response = self.get_json('/actions') self.assertEqual(3, len(response['actions'])) class TestPatch(api_base.FunctionalTest): def setUp(self): 
super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) obj_utils.create_test_action_plan(self.context) self.action = obj_utils.create_test_action(self.context, parents=None) p = mock.patch.object(db_api.BaseConnection, 'update_action') self.mock_action_update = p.start() self.mock_action_update.side_effect = self._simulate_rpc_action_update self.addCleanup(p.stop) def _simulate_rpc_action_update(self, action): action.save() return action @mock.patch('oslo_utils.timeutils.utcnow') def test_patch_not_allowed(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = objects.audit.State.SUCCEEDED response = self.get_json('/actions/%s' % self.action.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/actions/%s' % self.action.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(403, response.status_int) self.assertTrue(response.json['error_message']) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit(self.context) self.action_plan = obj_utils.create_test_action_plan(self.context) self.action = obj_utils.create_test_action(self.context, parents=None) p = mock.patch.object(db_api.BaseConnection, 'update_action') self.mock_action_update = p.start() self.mock_action_update.side_effect = self._simulate_rpc_action_update self.addCleanup(p.stop) def _simulate_rpc_action_update(self, action): action.save() return action @mock.patch('oslo_utils.timeutils.utcnow') def test_delete_action_not_allowed(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) 
mock_utcnow.return_value = test_time response = self.delete('/actions/%s' % self.action.uuid, expect_errors=True) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestActionPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestActionPolicyEnforcement, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit(self.context) obj_utils.create_test_action_plan(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:defaut"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "action:get_all", self.get_json, '/actions', expect_errors=True) def test_policy_disallow_get_one(self): action = obj_utils.create_test_action(self.context) self._common_policy_check( "action:get", self.get_json, '/actions/%s' % action.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "action:detail", self.get_json, '/actions/detail', expect_errors=True) class TestActionPolicyEnforcementWithAdminContext(TestListAction, api_base.AdminRoleTest): def setUp(self): super(TestActionPolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "action:detail": "rule:default", "action:get": "rule:default", "action:get_all": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_scoring_engines.py0000664000175000017500000001737713656752270025230 0ustar 
zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from oslo_serialization import jsonutils from watcher.common import utils from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListScoringEngine(api_base.FunctionalTest): def _assert_scoring_engine_fields(self, scoring_engine): scoring_engine_fields = ['uuid', 'name', 'description'] for field in scoring_engine_fields: self.assertIn(field, scoring_engine) def test_one(self): scoring_engine = obj_utils.create_test_scoring_engine(self.context) response = self.get_json('/scoring_engines') self.assertEqual( scoring_engine.name, response['scoring_engines'][0]['name']) self._assert_scoring_engine_fields(response['scoring_engines'][0]) def test_get_one_soft_deleted(self): scoring_engine = obj_utils.create_test_scoring_engine(self.context) scoring_engine.soft_delete() response = self.get_json( '/scoring_engines/%s' % scoring_engine['name'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(scoring_engine.name, response['name']) self._assert_scoring_engine_fields(response) response = self.get_json( '/scoring_engines/%s' % scoring_engine['name'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): obj_utils.create_test_goal(self.context) scoring_engine = obj_utils.create_test_scoring_engine(self.context) response = self.get_json('/scoring_engines/detail') self.assertEqual( scoring_engine.name, 
response['scoring_engines'][0]['name']) self._assert_scoring_engine_fields(response['scoring_engines'][0]) for scoring_engine in response['scoring_engines']: self.assertTrue( all(val is not None for key, val in scoring_engine.items() if key in ['uuid', 'name', 'description', 'metainfo'])) def test_detail_against_single(self): scoring_engine = obj_utils.create_test_scoring_engine(self.context) response = self.get_json( '/scoring_engines/%s/detail' % scoring_engine.id, expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): scoring_engine_list = [] for idx in range(1, 6): scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), description='SE_{0}'.format(idx)) scoring_engine_list.append(scoring_engine.name) response = self.get_json('/scoring_engines') self.assertEqual(5, len(response['scoring_engines'])) for scoring_engine in response['scoring_engines']: self.assertTrue( all(val is not None for key, val in scoring_engine.items() if key in ['name', 'description', 'metainfo'])) def test_many_without_soft_deleted(self): scoring_engine_list = [] for id_ in [1, 2, 3]: scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=id_, uuid=utils.generate_uuid(), name=str(id_), description='SE_{0}'.format(id_)) scoring_engine_list.append(scoring_engine.name) for id_ in [4, 5]: scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=id_, uuid=utils.generate_uuid(), name=str(id_), description='SE_{0}'.format(id_)) scoring_engine.soft_delete() response = self.get_json('/scoring_engines') self.assertEqual(3, len(response['scoring_engines'])) names = [s['name'] for s in response['scoring_engines']] self.assertEqual(sorted(scoring_engine_list), sorted(names)) def test_scoring_engines_collection_links(self): for idx in range(1, 6): obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), 
description='SE_{0}'.format(idx)) response = self.get_json('/scoring_engines/?limit=2') self.assertEqual(2, len(response['scoring_engines'])) def test_scoring_engines_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), description='SE_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/scoring_engines') self.assertEqual(3, len(response['scoring_engines'])) def test_many_with_sort_key_uuid(self): scoring_engine_list = [] for idx in range(1, 6): scoring_engine = obj_utils.create_test_scoring_engine( self.context, id=idx, uuid=utils.generate_uuid(), name=str(idx), description='SE_{0}'.format(idx)) scoring_engine_list.append(scoring_engine.uuid) response = self.get_json('/scoring_engines/?sort_key=uuid') self.assertEqual(5, len(response['scoring_engines'])) uuids = [s['uuid'] for s in response['scoring_engines']] self.assertEqual(sorted(scoring_engine_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/goals?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(400, response.status_int) class TestScoringEnginePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "scoring_engine:get_all", self.get_json, '/scoring_engines', expect_errors=True) def test_policy_disallow_get_one(self): se = obj_utils.create_test_scoring_engine(self.context) self._common_policy_check( "scoring_engine:get", self.get_json, '/scoring_engines/%s' % se.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "scoring_engine:detail", self.get_json, '/scoring_engines/detail', expect_errors=True) class TestScoringEnginePolicyEnforcementWithAdminContext( TestListScoringEngine, api_base.AdminRoleTest): def setUp(self): super(TestScoringEnginePolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "scoring_engine:detail": "rule:default", "scoring_engine:get": "rule:default", "scoring_engine:get_all": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_goals.py0000664000175000017500000001611713656752270023150 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_serialization import jsonutils from six.moves.urllib import parse as urlparse from watcher.common import utils from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListGoal(api_base.FunctionalTest): def _assert_goal_fields(self, goal): goal_fields = ['uuid', 'name', 'display_name', 'efficacy_specification'] for field in goal_fields: self.assertIn(field, goal) def test_one(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals') self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) self._assert_goal_fields(response['goals'][0]) def test_get_one_by_uuid(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals/%s' % goal.uuid) self.assertEqual(goal.uuid, response["uuid"]) self.assertEqual(goal.name, response["name"]) self._assert_goal_fields(response) def test_get_one_by_name(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json(urlparse.quote( '/goals/%s' % goal['name'])) self.assertEqual(goal.uuid, response['uuid']) self._assert_goal_fields(response) def test_get_one_soft_deleted(self): goal = obj_utils.create_test_goal(self.context) goal.soft_delete() response = self.get_json( '/goals/%s' % goal['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(goal.uuid, response['uuid']) self._assert_goal_fields(response) response = self.get_json( '/goals/%s' % goal['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals/detail') self.assertEqual(goal.uuid, response['goals'][0]["uuid"]) self._assert_goal_fields(response['goals'][0]) def test_detail_against_single(self): goal = obj_utils.create_test_goal(self.context) response = self.get_json('/goals/%s/detail' % goal.uuid, expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): goal_list 
= [] for idx in range(1, 6): goal = obj_utils.create_test_goal( self.context, id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) goal_list.append(goal.uuid) response = self.get_json('/goals') self.assertGreater(len(response['goals']), 2) def test_many_without_soft_deleted(self): goal_list = [] for id_ in [1, 2, 3]: goal = obj_utils.create_test_goal( self.context, id=id_, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(id_)) goal_list.append(goal.uuid) for id_ in [4, 5]: goal = obj_utils.create_test_goal( self.context, id=id_, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(id_)) goal.soft_delete() response = self.get_json('/goals') self.assertEqual(3, len(response['goals'])) uuids = [s['uuid'] for s in response['goals']] self.assertEqual(sorted(goal_list), sorted(uuids)) def test_goals_collection_links(self): for idx in range(1, 6): obj_utils.create_test_goal( self.context, id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) response = self.get_json('/goals/?limit=2') self.assertEqual(2, len(response['goals'])) def test_goals_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_goal( self.context, id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/goals') self.assertEqual(3, len(response['goals'])) def test_many_with_sort_key_uuid(self): goal_list = [] for idx in range(1, 6): goal = obj_utils.create_test_goal( self.context, id=idx, uuid=utils.generate_uuid(), name='GOAL_{0}'.format(idx)) goal_list.append(goal.uuid) response = self.get_json('/goals/?sort_key=uuid') self.assertEqual(5, len(response['goals'])) uuids = [s['uuid'] for s in response['goals']] self.assertEqual(sorted(goal_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/goals?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(400, response.status_int) class TestGoalPolicyEnforcement(api_base.FunctionalTest): def 
_common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:default"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." % rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "goal:get_all", self.get_json, '/goals', expect_errors=True) def test_policy_disallow_get_one(self): goal = obj_utils.create_test_goal(self.context) self._common_policy_check( "goal:get", self.get_json, '/goals/%s' % goal.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "goal:detail", self.get_json, '/goals/detail', expect_errors=True) class TestGoalPolicyEnforcementWithAdminContext(TestListGoal, api_base.AdminRoleTest): def setUp(self): super(TestGoalPolicyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "goal:detail": "rule:default", "goal:get_all": "rule:default", "goal:get_one": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_audit_templates.py0000664000175000017500000010512613656752270025226 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import itertools import mock from webtest.app import AppError from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils from six.moves.urllib import parse as urlparse from wsme import types as wtypes from watcher.api.controllers.v1 import audit_template as api_audit_template from watcher.common import exception from watcher.common import utils from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.api import utils as api_utils from watcher.tests import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils def post_get_test_audit_template(**kw): goal = db_utils.get_test_goal() strategy = db_utils.get_test_strategy(goal_id=goal['id']) kw['goal'] = kw.get('goal', goal['uuid']) kw['strategy'] = kw.get('strategy', strategy['uuid']) kw['scope'] = kw.get('scope', []) audit_template = api_utils.audit_template_post_data(**kw) return audit_template class TestAuditTemplateObject(base.TestCase): def test_audit_template_init(self): audit_template_dict = post_get_test_audit_template() del audit_template_dict['name'] audit_template = api_audit_template.AuditTemplate( **audit_template_dict) self.assertEqual(wtypes.Unset, audit_template.name) class FunctionalTestWithSetup(api_base.FunctionalTest): def setUp(self): super(FunctionalTestWithSetup, self).setUp() self.fake_goal1 = obj_utils.create_test_goal( self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") self.fake_goal2 = obj_utils.create_test_goal( self.context, id=2, uuid=utils.generate_uuid(), name="dummy_2") self.fake_strategy1 = obj_utils.create_test_strategy( self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", goal_id=self.fake_goal1.id) self.fake_strategy2 = obj_utils.create_test_strategy( self.context, id=2, uuid=utils.generate_uuid(), name="strategy_2", goal_id=self.fake_goal2.id) class TestListAuditTemplate(FunctionalTestWithSetup): def 
test_empty(self): response = self.get_json('/audit_templates') self.assertEqual([], response['audit_templates']) def _assert_audit_template_fields(self, audit_template): audit_template_fields = ['name', 'goal_uuid', 'goal_name', 'strategy_uuid', 'strategy_name'] for field in audit_template_fields: self.assertIn(field, audit_template) def test_one(self): audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=self.fake_strategy1.id) response = self.get_json('/audit_templates') self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) def test_get_one_soft_deleted_ok(self): audit_template = obj_utils.create_test_audit_template(self.context) audit_template.soft_delete() response = self.get_json('/audit_templates', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) response = self.get_json('/audit_templates') self.assertEqual([], response['audit_templates']) def test_get_one_by_uuid(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json( '/audit_templates/%s' % audit_template['uuid']) self.assertEqual(audit_template.uuid, response['uuid']) self._assert_audit_template_fields(response) def test_get_one_by_name(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json(urlparse.quote( '/audit_templates/%s' % audit_template['name'])) self.assertEqual(audit_template.uuid, response['uuid']) self._assert_audit_template_fields(response) def test_get_one_soft_deleted(self): audit_template = obj_utils.create_test_audit_template(self.context) audit_template.soft_delete() response = self.get_json( '/audit_templates/%s' % audit_template['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit_template.uuid, response['uuid']) 
self._assert_audit_template_fields(response) response = self.get_json( '/audit_templates/%s' % audit_template['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json('/audit_templates/detail') self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) def test_detail_soft_deleted(self): audit_template = obj_utils.create_test_audit_template(self.context) audit_template.soft_delete() response = self.get_json('/audit_templates/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit_template.uuid, response['audit_templates'][0]["uuid"]) self._assert_audit_template_fields(response['audit_templates'][0]) response = self.get_json('/audit_templates/detail') self.assertEqual([], response['audit_templates']) def test_detail_against_single(self): audit_template = obj_utils.create_test_audit_template(self.context) response = self.get_json( '/audit_templates/%s/detail' % audit_template['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) response = self.get_json('/audit_templates') self.assertEqual(len(audit_template_list), len(response['audit_templates'])) uuids = [s['uuid'] for s in response['audit_templates']] self.assertEqual( sorted([at.uuid for at in audit_template_list]), sorted(uuids)) def test_many_without_soft_deleted(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) # We soft 
delete the ones with ID 4 and 5 [at.soft_delete() for at in audit_template_list[3:]] response = self.get_json('/audit_templates') self.assertEqual(3, len(response['audit_templates'])) uuids = [s['uuid'] for s in response['audit_templates']] self.assertEqual( sorted([at.uuid for at in audit_template_list[:3]]), sorted(uuids)) def test_many_with_soft_deleted(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) # We soft delete the ones with ID 4 and 5 [at.soft_delete() for at in audit_template_list[3:]] response = self.get_json('/audit_templates', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['audit_templates'])) uuids = [s['uuid'] for s in response['audit_templates']] self.assertEqual( sorted([at.uuid for at in audit_template_list]), sorted(uuids)) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_audit_template(self.context, id=1, uuid=uuid) response = self.get_json('/audit_templates/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) response = self.get_json('/audit_templates/?limit=3') self.assertEqual(3, len(response['audit_templates'])) next_marker = response['audit_templates'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_audit_template( self.context, id=id_, 
uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) response = self.get_json('/audit_templates') self.assertEqual(3, len(response['audit_templates'])) next_marker = response['audit_templates'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_filter_by_goal_uuid(self): for id_, goal_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_goal1.id, 3), itertools.repeat(self.fake_goal2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), goal_id=goal_id) response = self.get_json( '/audit_templates?goal=%s' % self.fake_goal2.uuid) self.assertEqual(2, len(response['audit_templates'])) def test_filter_by_goal_name(self): for id_, goal_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_goal1.id, 3), itertools.repeat(self.fake_goal2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), goal_id=goal_id) response = self.get_json( '/audit_templates?goal=%s' % self.fake_goal2.name) self.assertEqual(2, len(response['audit_templates'])) def test_filter_by_strategy_uuid(self): for id_, strategy_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_strategy1.id, 3), itertools.repeat(self.fake_strategy2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), strategy_id=strategy_id) response = self.get_json( '/audit_templates?strategy=%s' % self.fake_strategy2.uuid) self.assertEqual(2, len(response['audit_templates'])) def test_filter_by_strategy_name(self): for id_, strategy_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_strategy1.id, 3), itertools.repeat(self.fake_strategy2.id, 2)]), 1): obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit 
Template {0}'.format(id_), strategy_id=strategy_id) response = self.get_json( '/audit_templates?strategy=%s' % self.fake_strategy2.name) self.assertEqual(2, len(response['audit_templates'])) def test_many_with_sort_key_name(self): audit_template_list = [] for id_ in range(1, 6): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_)) audit_template_list.append(audit_template) response = self.get_json('/audit_templates?sort_key=%s' % 'name') names = [s['name'] for s in response['audit_templates']] self.assertEqual( sorted([at.name for at in audit_template_list]), names) def test_many_with_sort_key_goal_name(self): goal_names_list = [] for id_, goal_id in enumerate(itertools.chain.from_iterable([ itertools.repeat(self.fake_goal1.id, 3), itertools.repeat(self.fake_goal2.id, 2)]), 1): audit_template = obj_utils.create_test_audit_template( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit Template {0}'.format(id_), goal_id=goal_id) goal_names_list.append(audit_template.goal.name) for direction in ['asc', 'desc']: response = self.get_json( '/audit_templates?sort_key={0}&sort_dir={1}' .format('goal_name', direction)) goal_names = [s['goal_name'] for s in response['audit_templates']] self.assertEqual( sorted(goal_names_list, reverse=(direction == 'desc')), goal_names) def test_sort_key_validation(self): response = self.get_json( '/audit_templates?sort_key=%s' % 'goal_bad_name', expect_errors=True) self.assertEqual(400, response.status_int) class TestPatch(FunctionalTestWithSetup): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) self.audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=None) @mock.patch.object(timeutils, 'utcnow') def test_replace_goal_uuid(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_goal_uuid = self.fake_goal2.uuid 
response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertNotEqual(new_goal_uuid, response['goal_uuid']) response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/goal', 'value': new_goal_uuid, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual(new_goal_uuid, response['goal_uuid']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) @mock.patch.object(timeutils, 'utcnow') def test_replace_goal_uuid_by_name(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_goal_uuid = self.fake_goal2.uuid response = self.get_json(urlparse.quote( '/audit_templates/%s' % self.audit_template.name)) self.assertNotEqual(new_goal_uuid, response['goal_uuid']) response = self.patch_json( '/audit_templates/%s' % self.audit_template.name, [{'path': '/goal', 'value': new_goal_uuid, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json( '/audit_templates/%s' % self.audit_template.name) self.assertEqual(new_goal_uuid, response['goal_uuid']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) def test_replace_non_existent_audit_template(self): response = self.patch_json( '/audit_templates/%s' % utils.generate_uuid(), [{'path': '/goal', 'value': self.fake_goal1.uuid, 'op': 'replace'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_replace_invalid_goal(self): with mock.patch.object( self.dbapi, 'update_audit_template', 
wraps=self.dbapi.update_audit_template ) as cn_mock: response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/goal', 'value': utils.generate_uuid(), 'op': 'replace'}], expect_errors=True) self.assertEqual(400, response.status_int) assert not cn_mock.called def test_add_goal_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/goal', 'value': self.fake_goal2.uuid, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_int) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual(self.fake_goal2.uuid, response['goal_uuid']) def test_add_strategy_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/strategy', 'value': self.fake_strategy1.uuid, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_int) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual(self.fake_strategy1.uuid, response['strategy_uuid']) def test_replace_strategy_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/strategy', 'value': self.fake_strategy2['uuid'], 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_int) response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertEqual( self.fake_strategy2['uuid'], response['strategy_uuid']) def test_replace_invalid_strategy(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/strategy', 'value': utils.generate_uuid(), # Does not exist 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def 
test_add_non_existent_property(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_strategy(self): audit_template = obj_utils.create_test_audit_template( self.context, uuid=utils.generate_uuid(), name="AT_%s" % utils.generate_uuid(), goal_id=self.fake_goal1.id, strategy_id=self.fake_strategy1.id) response = self.get_json( '/audit_templates/%s' % audit_template.uuid) self.assertIsNotNone(response['strategy_uuid']) response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/strategy', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) def test_remove_goal(self): response = self.get_json( '/audit_templates/%s' % self.audit_template.uuid) self.assertIsNotNone(response['goal_uuid']) response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/goal', 'op': 'remove'}], expect_errors=True) self.assertEqual(403, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_uuid(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_non_existent_property(self): response = self.patch_json( '/audit_templates/%s' % self.audit_template.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) 
class TestPost(FunctionalTestWithSetup): @mock.patch.object(timeutils, 'utcnow') def test_create_audit_template(self, mock_utcnow): audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.uuid) test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.post_json('/audit_templates', audit_template_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) # Check location header self.assertIsNotNone(response.location) expected_location = \ '/v1/audit_templates/%s' % response.json['uuid'] self.assertEqual(urlparse.urlparse(response.location).path, expected_location) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) self.assertNotIn('updated_at', response.json.keys) self.assertNotIn('deleted_at', response.json.keys) self.assertEqual(self.fake_goal1.uuid, response.json['goal_uuid']) self.assertEqual(self.fake_strategy1.uuid, response.json['strategy_uuid']) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) def test_create_audit_template_validation_with_aggregates(self): scope = [{'compute': [{'host_aggregates': [{'id': '*'}]}, {'availability_zones': [{'name': 'AZ1'}, {'name': 'AZ2'}]}, {'exclude': [ {'instances': [ {'uuid': 'INSTANCE_1'}, {'uuid': 'INSTANCE_2'}]}, {'compute_nodes': [ {'name': 'Node_1'}, {'name': 'Node_2'}]}, {'host_aggregates': [{'id': '*'}]} ]} ] } ] audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.uuid, scope=scope) with self.assertRaisesRegex(AppError, "be included and excluded together"): self.post_json('/audit_templates', audit_template_dict) scope = [{'host_aggregates': [{'id1': '*'}]}] audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=self.fake_strategy1.uuid, scope=scope) response = 
self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(500, response.status_int) def test_create_audit_template_does_autogenerate_id(self): audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=None) with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: response = self.post_json('/audit_templates', audit_template_dict) self.assertEqual(audit_template_dict['goal'], response.json['goal_uuid']) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cn_mock.call_args[0][0]) def test_create_audit_template_generate_uuid(self): audit_template_dict = post_get_test_audit_template( goal=self.fake_goal1.uuid, strategy=None) response = self.post_json('/audit_templates', audit_template_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) def test_create_audit_template_with_invalid_goal(self): with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template( goal_uuid=utils.generate_uuid()) response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(400, response.status_int) assert not cn_mock.called def test_create_audit_template_with_invalid_strategy(self): with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template( goal_uuid=self.fake_goal1['uuid'], strategy_uuid=utils.generate_uuid()) response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(400, response.status_int) assert not cn_mock.called def test_create_audit_template_with_unrelated_strategy(self): with mock.patch.object( self.dbapi, 'create_audit_template', 
wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template( goal_uuid=self.fake_goal1['uuid'], strategy=self.fake_strategy2['uuid']) response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual(400, response.status_int) assert not cn_mock.called def test_create_audit_template_with_uuid(self): with mock.patch.object( self.dbapi, 'create_audit_template', wraps=self.dbapi.create_audit_template ) as cn_mock: audit_template_dict = post_get_test_audit_template() response = self.post_json('/audit_templates', audit_template_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) assert not cn_mock.called class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() obj_utils.create_test_goal(self.context) self.audit_template = obj_utils.create_test_audit_template( self.context) @mock.patch.object(timeutils, 'utcnow') def test_delete_audit_template_by_uuid(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time self.delete(urlparse.quote('/audit_templates/%s' % self.audit_template.uuid)) response = self.get_json( urlparse.quote('/audit_templates/%s' % self.audit_template.uuid), expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.assertRaises(exception.AuditTemplateNotFound, objects.AuditTemplate.get_by_uuid, self.context, self.audit_template.uuid) self.context.show_deleted = True at = objects.AuditTemplate.get_by_uuid(self.context, self.audit_template.uuid) self.assertEqual(self.audit_template.name, at.name) @mock.patch.object(timeutils, 'utcnow') def test_delete_audit_template_by_name(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time 
self.delete(urlparse.quote('/audit_templates/%s' % self.audit_template.name)) response = self.get_json( urlparse.quote('/audit_templates/%s' % self.audit_template.name), expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.assertRaises(exception.AuditTemplateNotFound, objects.AuditTemplate.get_by_name, self.context, self.audit_template.name) self.context.show_deleted = True at = objects.AuditTemplate.get_by_name(self.context, self.audit_template.name) self.assertEqual(self.audit_template.uuid, at.uuid) def test_delete_audit_template_not_found(self): uuid = utils.generate_uuid() response = self.delete( '/audit_templates/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestAuditTemplatePolicyEnforcement(api_base.FunctionalTest): def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:defaut"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "audit_template:get_all", self.get_json, '/audit_templates', expect_errors=True) def test_policy_disallow_get_one(self): obj_utils.create_test_goal(self.context) audit_template = obj_utils.create_test_audit_template(self.context) self._common_policy_check( "audit_template:get", self.get_json, '/audit_templates/%s' % audit_template.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "audit_template:detail", self.get_json, '/audit_templates/detail', expect_errors=True) def test_policy_disallow_update(self): obj_utils.create_test_goal(self.context) audit_template = obj_utils.create_test_audit_template(self.context) self._common_policy_check( "audit_template:update", self.patch_json, '/audit_templates/%s' % audit_template.uuid, [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): fake_goal1 = obj_utils.get_test_goal( self.context, id=1, uuid=utils.generate_uuid(), name="dummy_1") fake_goal1.create() fake_strategy1 = obj_utils.get_test_strategy( self.context, id=1, uuid=utils.generate_uuid(), name="strategy_1", goal_id=fake_goal1.id) fake_strategy1.create() audit_template_dict = post_get_test_audit_template( goal=fake_goal1.uuid, strategy=fake_strategy1.uuid) self._common_policy_check( "audit_template:create", self.post_json, '/audit_templates', audit_template_dict, expect_errors=True) def test_policy_disallow_delete(self): obj_utils.create_test_goal(self.context) audit_template = obj_utils.create_test_audit_template(self.context) self._common_policy_check( "audit_template:delete", self.delete, '/audit_templates/%s' % audit_template.uuid, expect_errors=True) class TestAuditTemplatePolicyWithAdminContext(TestListAuditTemplate, api_base.AdminRoleTest): def setUp(self): 
super(TestAuditTemplatePolicyWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "audit_template:create": "rule:default", "audit_template:delete": "rule:default", "audit_template:detail": "rule:default", "audit_template:get": "rule:default", "audit_template:get_all": "rule:default", "audit_template:update": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_audits.py0000664000175000017500000013716013656752270023336 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime from dateutil import tz import itertools import mock from oslo_config import cfg from oslo_serialization import jsonutils from oslo_utils import timeutils from wsme import types as wtypes from six.moves.urllib import parse as urlparse from watcher.api.controllers.v1 import audit as api_audit from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine import rpcapi as deapi from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.api import utils as api_utils from watcher.tests import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils def post_get_test_audit(**kw): audit = api_utils.audit_post_data(**kw) audit_template = db_utils.get_test_audit_template() goal = db_utils.get_test_goal() del_keys = ['goal_id', 'strategy_id'] del_keys.extend(kw.get('params_to_exclude', [])) add_keys = {'audit_template_uuid': audit_template['uuid'], 'goal': goal['uuid'], } if kw.get('use_named_goal'): add_keys['goal'] = 'TEST' for k in add_keys: audit[k] = kw.get(k, add_keys[k]) for k in del_keys: del audit[k] return audit def post_get_test_audit_with_predefined_strategy(**kw): spec = kw.pop('strategy_parameters_spec', {}) strategy_id = 2 strategy = db_utils.get_test_strategy(parameters_spec=spec, id=strategy_id) audit = api_utils.audit_post_data(**kw) audit_template = db_utils.get_test_audit_template( strategy_id=strategy['id']) del_keys = ['goal_id', 'strategy_id'] add_keys = {'audit_template_uuid': audit_template['uuid'], } for k in del_keys: del audit[k] for k in add_keys: audit[k] = kw.get(k, add_keys[k]) return audit class TestAuditObject(base.TestCase): def test_audit_init(self): audit_dict = api_utils.audit_post_data(audit_template_id=None, goal_id=None, strategy_id=None) del audit_dict['state'] audit = api_audit.Audit(**audit_dict) self.assertEqual(wtypes.Unset, audit.state) class TestListAudit(api_base.FunctionalTest): def 
setUp(self): super(TestListAudit, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) def test_empty(self): response = self.get_json('/audits') self.assertEqual([], response['audits']) def _assert_audit_fields(self, audit): audit_fields = ['audit_type', 'scope', 'state', 'goal_uuid', 'strategy_uuid'] for field in audit_fields: self.assertIn(field, audit) def test_one(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits') self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) def test_one_soft_deleted(self): audit = obj_utils.create_test_audit(self.context) audit.soft_delete() response = self.get_json('/audits', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) response = self.get_json('/audits') self.assertEqual([], response['audits']) def test_get_one(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits/%s' % audit['uuid']) self.assertEqual(audit.uuid, response['uuid']) self._assert_audit_fields(response) def test_get_one_soft_deleted(self): audit = obj_utils.create_test_audit(self.context) audit.soft_delete() response = self.get_json('/audits/%s' % audit['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit.uuid, response['uuid']) self._assert_audit_fields(response) response = self.get_json('/audits/%s' % audit['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits/detail') self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) def test_detail_soft_deleted(self): audit = obj_utils.create_test_audit(self.context) audit.soft_delete() response = 
self.get_json('/audits/detail', headers={'X-Show-Deleted': 'True'}) self.assertEqual(audit.uuid, response['audits'][0]["uuid"]) self._assert_audit_fields(response['audits'][0]) response = self.get_json('/audits/detail') self.assertEqual([], response['audits']) def test_detail_against_single(self): audit = obj_utils.create_test_audit(self.context) response = self.get_json('/audits/%s/detail' % audit['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): audit_list = [] for id_ in range(5): audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit_list.append(audit.uuid) response = self.get_json('/audits') self.assertEqual(len(audit_list), len(response['audits'])) uuids = [s['uuid'] for s in response['audits']] self.assertEqual(sorted(audit_list), sorted(uuids)) def test_many_without_soft_deleted(self): audit_list = [] for id_ in [1, 2, 3]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit_list.append(audit.uuid) for id_ in [4, 5]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit.soft_delete() response = self.get_json('/audits') self.assertEqual(3, len(response['audits'])) uuids = [s['uuid'] for s in response['audits']] self.assertEqual(sorted(audit_list), sorted(uuids)) def test_many_with_soft_deleted(self): audit_list = [] for id_ in [1, 2, 3]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit_list.append(audit.uuid) for id_ in [4, 5]: audit = obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) audit.soft_delete() audit_list.append(audit.uuid) response = self.get_json('/audits', headers={'X-Show-Deleted': 'True'}) self.assertEqual(5, len(response['audits'])) uuids = [s['uuid'] 
for s in response['audits']] self.assertEqual(sorted(audit_list), sorted(uuids)) def test_many_with_sort_key_goal_uuid(self): goal_list = [] for id_ in range(5): goal = obj_utils.create_test_goal( self.context, name='gl{0}'.format(id_), uuid=utils.generate_uuid()) obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), goal_id=goal.id, name='My Audit {0}'.format(id_)) goal_list.append(goal.uuid) response = self.get_json('/audits/?sort_key=goal_uuid') self.assertEqual(5, len(response['audits'])) uuids = [s['goal_uuid'] for s in response['audits']] self.assertEqual(sorted(goal_list), uuids) def test_sort_key_validation(self): response = self.get_json( '/audits?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(400, response.status_int) def test_links(self): uuid = utils.generate_uuid() obj_utils.create_test_audit( self.context, id=1, uuid=uuid, name='My Audit {0}'.format(1)) response = self.get_json('/audits/%s' % uuid) self.assertIn('links', response.keys()) self.assertEqual(2, len(response['links'])) self.assertIn(uuid, response['links'][0]['href']) for l in response['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark)) def test_collection_links(self): for id_ in range(5): obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) response = self.get_json('/audits/?limit=3') self.assertEqual(3, len(response['audits'])) next_marker = response['audits'][-1]['uuid'] self.assertIn(next_marker, response['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') for id_ in range(5): obj_utils.create_test_audit( self.context, id=id_, uuid=utils.generate_uuid(), name='My Audit {0}'.format(id_)) response = self.get_json('/audits') self.assertEqual(3, len(response['audits'])) next_marker = response['audits'][-1]['uuid'] self.assertIn(next_marker, response['next']) class 
TestPatch(api_base.FunctionalTest): def setUp(self): super(TestPatch, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = objects.audit.State.CANCELLED response = self.get_json('/audits/%s' % self.audit.uuid) self.assertNotEqual(new_state, response['state']) response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertEqual(new_state, response['state']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) def test_replace_non_existent_audit(self): response = self.patch_json( '/audits/%s' % utils.generate_uuid(), [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, 'op': 'replace'}], expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_add_ok(self): new_state = objects.audit.State.SUCCEEDED response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'add'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_int) response = self.get_json('/audits/%s' % 
self.audit.uuid) self.assertEqual(new_state, response['state']) def test_add_non_existent_property(self): response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/foo', 'value': 'bar', 'op': 'add'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) self.assertTrue(response.json['error_message']) def test_remove_ok(self): response = self.get_json('/audits/%s' % self.audit.uuid) self.assertIsNotNone(response['interval']) response = self.patch_json('/audits/%s' % self.audit.uuid, [{'path': '/interval', 'op': 'remove'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertIsNone(response['interval']) def test_remove_uuid(self): response = self.patch_json('/audits/%s' % self.audit.uuid, [{'path': '/uuid', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_remove_non_existent_property(self): response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/non-existent', 'op': 'remove'}], expect_errors=True) self.assertEqual(400, response.status_code) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) ALLOWED_TRANSITIONS = [ {"original_state": key, "new_state": value} for key, values in ( objects.audit.AuditStateTransitionManager.TRANSITIONS.items()) for value in values] class TestPatchStateTransitionDenied(api_base.FunctionalTest): STATES = [ ap_state for ap_state in objects.audit.State.__dict__ if not ap_state.startswith("_") ] scenarios = [ ( "%s -> %s" % (original_state, new_state), {"original_state": original_state, "new_state": new_state}, ) for original_state, new_state in list(itertools.product(STATES, STATES)) if original_state != new_state and 
{"original_state": original_state, "new_state": new_state} not in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionDenied, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context, state=self.original_state) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit def test_replace_denied(self): response = self.get_json('/audits/%s' % self.audit.uuid) self.assertNotEqual(self.new_state, response['state']) response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': self.new_state, 'op': 'replace'}], expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_code) self.assertTrue(response.json['error_message']) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertEqual(self.original_state, response['state']) class TestPatchStateTransitionOk(api_base.FunctionalTest): scenarios = [ ( "%s -> %s" % (transition["original_state"], transition["new_state"]), transition ) for transition in ALLOWED_TRANSITIONS ] def setUp(self): super(TestPatchStateTransitionOk, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context, state=self.original_state) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit @mock.patch('oslo_utils.timeutils.utcnow') def test_replace_ok(self, 
mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time response = self.get_json('/audits/%s' % self.audit.uuid) self.assertNotEqual(self.new_state, response['state']) response = self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': self.new_state, 'op': 'replace'}]) self.assertEqual('application/json', response.content_type) self.assertEqual(200, response.status_code) response = self.get_json('/audits/%s' % self.audit.uuid) self.assertEqual(self.new_state, response['state']) return_updated_at = timeutils.parse_isotime( response['updated_at']).replace(tzinfo=None) self.assertEqual(test_time, return_updated_at) class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) p = mock.patch.object(db_api.BaseConnection, 'create_audit') self.mock_create_audit = p.start() self.mock_create_audit.side_effect = ( self._simulate_rpc_audit_create) self.addCleanup(p.stop) def _simulate_rpc_audit_create(self, audit): audit.create() return audit @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') @mock.patch('oslo_utils.timeutils.utcnow') def test_create_audit(self, mock_utcnow, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) # Check location header self.assertIsNotNone(response.location) expected_location = '/v1/audits/%s' % response.json['uuid'] self.assertEqual(urlparse.urlparse(response.location).path, expected_location) 
self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertNotIn('updated_at', response.json.keys) self.assertNotIn('deleted_at', response.json.keys) return_created_at = timeutils.parse_isotime( response.json['created_at']).replace(tzinfo=None) self.assertEqual(test_time, return_created_at) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') @mock.patch('oslo_utils.timeutils.utcnow') def test_create_audit_with_state_not_allowed(self, mock_utcnow, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time audit_dict = post_get_test_audit(state=objects.audit.State.SUCCEEDED) response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) @mock.patch('oslo_utils.timeutils.utcnow') def test_create_audit_with_at_uuid_and_goal_specified(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname']) response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_goal(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'audit_template_uuid']) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) 
self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_goal_without_strategy(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'audit_template_uuid', 'strategy']) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_named_goal(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'audit_template_uuid'], use_named_goal=True) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch('oslo_utils.timeutils.utcnow') def test_create_audit_invalid_audit_template_uuid(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) # Make the audit template UUID some garbage value audit_dict['audit_template_uuid'] = ( '01234567-8910-1112-1314-151617181920') response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual("application/json", response.content_type) 
expected_error_msg = ('The audit template UUID or name specified is ' 'invalid') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_doesnt_contain_id(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) state = audit_dict['state'] del audit_dict['state'] with mock.patch.object(self.dbapi, 'create_audit', wraps=self.dbapi.create_audit) as cn_mock: response = self.post_json('/audits', audit_dict) self.assertEqual(state, response.json['state']) cn_mock.assert_called_once_with(mock.ANY) # Check that 'id' is not in first arg of positional args self.assertNotIn('id', cn_mock.call_args[0][0]) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_generate_uuid(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_with_interval(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value audit_dict['interval'] = '1200' response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) 
self.assertEqual(201, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertEqual(audit_dict['interval'], response.json['interval']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_with_cron_interval(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value audit_dict['interval'] = '* * * * *' response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertEqual(audit_dict['interval'], response.json['interval']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_with_wrong_interval(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value audit_dict['interval'] = 'zxc' response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(500, response.status_int) expected_error_msg = ('Exactly 5 or 6 columns has to be ' 'specified for iteratorexpression.') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_without_period(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( 
params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) expected_error_msg = ('Interval of audit must be specified ' 'for CONTINUOUS.') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_oneshot_audit_with_period(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['audit_type'] = objects.audit.AuditType.ONESHOT.value response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) expected_error_msg = 'Interval of audit must not be set for ONESHOT.' 
self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) def test_create_audit_trigger_decision_engine(self): with mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') as de_mock: audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json('/audits', audit_dict) de_mock.assert_called_once_with(mock.ANY, response.json['uuid']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_uuid(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit(state=objects.audit.State.PENDING) del audit_dict['scope'] response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_parameters_no_predefined_strategy( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( parameters={'name': 'Tom'}, params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) expected_error_msg = ('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_parameters_no_schema( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = 
post_get_test_audit_with_predefined_strategy( parameters={'name': 'Tom'}) del audit_dict['uuid'] del audit_dict['state'] del audit_dict['interval'] del audit_dict['scope'] del audit_dict['next_run_time'] del audit_dict['hostname'] response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(400, response.status_int) expected_error_msg = ('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy') self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_parameter_not_allowed( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_template = self.prepare_audit_template_strategy_with_parameter() audit_dict = api_utils.audit_post_data( parameters={'fake1': 1, 'fake2': "hello"}) audit_dict['audit_template_uuid'] = audit_template['uuid'] del_keys = ['uuid', 'goal_id', 'strategy_id', 'state', 'interval', 'scope', 'next_run_time', 'hostname'] for k in del_keys: del audit_dict[k] response = self.post_json('/audits', audit_dict, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual("application/json", response.content_type) expected_error_msg = 'Audit parameter fake2 are not allowed' self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called def prepare_audit_template_strategy_with_parameter(self): fake_spec = { "properties": { "fake1": { "description": "number parameter example", "type": "number", "default": 3.2, "minimum": 1.0, "maximum": 10.2, } } } template_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a67' strategy_uuid = 'e74c40e0-d825-11e2-a28f-0800200c9a68' template_name = 'my template' strategy_name = 'my strategy' 
strategy_id = 3 strategy = db_utils.get_test_strategy(parameters_spec=fake_spec, id=strategy_id, uuid=strategy_uuid, name=strategy_name) obj_utils.create_test_strategy(self.context, parameters_spec=fake_spec, id=strategy_id, uuid=strategy_uuid, name=strategy_name) obj_utils.create_test_audit_template(self.context, strategy_id=strategy_id, uuid=template_uuid, name='name') audit_template = db_utils.get_test_audit_template( strategy_id=strategy['id'], uuid=template_uuid, name=template_name) return audit_template @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') @mock.patch('oslo_utils.timeutils.utcnow') def test_create_audit_with_name(self, mock_utcnow, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time audit_dict = post_get_test_audit( params_to_exclude=['state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) normal_name = 'this audit name is just for test' # long_name length exceeds 63 characters long_name = normal_name + audit_dict['uuid'] del audit_dict['uuid'] audit_dict['name'] = normal_name response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(normal_name, response.json['name']) audit_dict['name'] = long_name response = self.post_json('/audits', audit_dict) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertNotEqual(long_name, response.json['name']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_with_start_end_time( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY start_time = datetime.datetime(2018, 3, 1, 0, 0) end_time = datetime.datetime(2018, 4, 1, 0, 0) audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal'] ) audit_dict['audit_type'] = 
objects.audit.AuditType.CONTINUOUS.value audit_dict['interval'] = '1200' audit_dict['start_time'] = str(start_time) audit_dict['end_time'] = str(end_time) response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.1'}) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertEqual(objects.audit.State.PENDING, response.json['state']) self.assertEqual(audit_dict['interval'], response.json['interval']) self.assertTrue(utils.is_uuid_like(response.json['uuid'])) return_start_time = timeutils.parse_isotime( response.json['start_time']) return_end_time = timeutils.parse_isotime( response.json['end_time']) iso_start_time = start_time.replace( tzinfo=tz.tzlocal()).astimezone(tz.tzutc()) iso_end_time = end_time.replace( tzinfo=tz.tzlocal()).astimezone(tz.tzutc()) self.assertEqual(iso_start_time, return_start_time) self.assertEqual(iso_end_time, return_end_time) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_continuous_audit_with_start_end_time_incompatible_version( self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY start_time = datetime.datetime(2018, 3, 1, 0, 0) end_time = datetime.datetime(2018, 4, 1, 0, 0) audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal'] ) audit_dict['audit_type'] = objects.audit.AuditType.CONTINUOUS.value audit_dict['interval'] = '1200' audit_dict['start_time'] = str(start_time) audit_dict['end_time'] = str(end_time) response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.0'}, expect_errors=True) self.assertEqual('application/json', response.content_type) self.assertEqual(406, response.status_int) expected_error_msg = 'Request not acceptable.' 
self.assertTrue(response.json['error_message']) self.assertIn(expected_error_msg, response.json['error_message']) assert not mock_trigger_audit.called @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_force_false(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.2'}) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertFalse(response.json['force']) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_create_audit_with_force_true(self, mock_trigger_audit): mock_trigger_audit.return_value = mock.ANY audit_dict = post_get_test_audit( params_to_exclude=['uuid', 'state', 'interval', 'scope', 'next_run_time', 'hostname', 'goal']) audit_dict['force'] = True response = self.post_json( '/audits', audit_dict, headers={'OpenStack-API-Version': 'infra-optim 1.2'}) self.assertEqual('application/json', response.content_type) self.assertEqual(201, response.status_int) self.assertTrue(response.json['force']) class TestDelete(api_base.FunctionalTest): def setUp(self): super(TestDelete, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) self.audit = obj_utils.create_test_audit(self.context) p = mock.patch.object(db_api.BaseConnection, 'update_audit') self.mock_audit_update = p.start() self.mock_audit_update.side_effect = self._simulate_rpc_audit_update self.addCleanup(p.stop) def _simulate_rpc_audit_update(self, audit): audit.save() return audit @mock.patch('oslo_utils.timeutils.utcnow') def test_delete_audit(self, mock_utcnow): test_time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = test_time new_state = 
objects.audit.State.ONGOING self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) response = self.delete('/audits/%s' % self.audit.uuid, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) new_state = objects.audit.State.CANCELLED self.patch_json( '/audits/%s' % self.audit.uuid, [{'path': '/state', 'value': new_state, 'op': 'replace'}]) self.delete('/audits/%s' % self.audit.uuid) response = self.get_json('/audits/%s' % self.audit.uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) self.context.show_deleted = True audit = objects.Audit.get_by_uuid(self.context, self.audit.uuid) return_deleted_at = \ audit['deleted_at'].strftime('%Y-%m-%dT%H:%M:%S.%f') self.assertEqual(test_time.strftime('%Y-%m-%dT%H:%M:%S.%f'), return_deleted_at) self.assertEqual(objects.audit.State.DELETED, audit['state']) def test_delete_audit_not_found(self): uuid = utils.generate_uuid() response = self.delete('/audits/%s' % uuid, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) class TestAuditPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestAuditPolicyEnforcement, self).setUp() obj_utils.create_test_goal(self.context) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:defaut"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "audit:get_all", self.get_json, '/audits', expect_errors=True) def test_policy_disallow_get_one(self): audit = obj_utils.create_test_audit(self.context) self._common_policy_check( "audit:get", self.get_json, '/audits/%s' % audit.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "audit:detail", self.get_json, '/audits/detail', expect_errors=True) def test_policy_disallow_update(self): audit = obj_utils.create_test_audit(self.context) self._common_policy_check( "audit:update", self.patch_json, '/audits/%s' % audit.uuid, [{'path': '/state', 'value': objects.audit.State.SUCCEEDED, 'op': 'replace'}], expect_errors=True) def test_policy_disallow_create(self): audit_dict = post_get_test_audit( state=objects.audit.State.PENDING, params_to_exclude=['uuid', 'state', 'scope', 'next_run_time', 'hostname', 'goal']) self._common_policy_check( "audit:create", self.post_json, '/audits', audit_dict, expect_errors=True) def test_policy_disallow_delete(self): audit = obj_utils.create_test_audit(self.context) self._common_policy_check( "audit:delete", self.delete, '/audits/%s' % audit.uuid, expect_errors=True) class TestAuditEnforcementWithAdminContext(TestListAudit, api_base.AdminRoleTest): def setUp(self): super(TestAuditEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "audit:create": "rule:default", "audit:delete": "rule:default", "audit:detail": "rule:default", "audit:get": "rule:default", "audit:get_all": "rule:default", "audit:update": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_webhooks.py0000664000175000017500000000573213656752270023665 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with 
the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.decision_engine import rpcapi as deapi from watcher import objects from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestPost(api_base.FunctionalTest): def setUp(self): super(TestPost, self).setUp() obj_utils.create_test_goal(self.context) obj_utils.create_test_strategy(self.context) obj_utils.create_test_audit_template(self.context) @mock.patch.object(deapi.DecisionEngineAPI, 'trigger_audit') def test_trigger_audit(self, mock_trigger_audit): audit = obj_utils.create_test_audit( self.context, audit_type=objects.audit.AuditType.EVENT.value) response = self.post_json( '/webhooks/%s' % audit['uuid'], {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}) self.assertEqual(202, response.status_int) mock_trigger_audit.assert_called_once_with( mock.ANY, audit['uuid']) def test_trigger_audit_with_no_audit(self): response = self.post_json( '/webhooks/no-audit', {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}, expect_errors=True) self.assertEqual(404, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def test_trigger_audit_with_not_allowed_audittype(self): audit = obj_utils.create_test_audit(self.context) response = self.post_json( '/webhooks/%s' % audit['uuid'], {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) def 
test_trigger_audit_with_not_allowed_audit_state(self): audit = obj_utils.create_test_audit( self.context, audit_type=objects.audit.AuditType.EVENT.value, state=objects.audit.State.FAILED) response = self.post_json( '/webhooks/%s' % audit['uuid'], {}, headers={'OpenStack-API-Version': 'infra-optim 1.4'}, expect_errors=True) self.assertEqual(400, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue(response.json['error_message']) python-watcher-4.0.0/watcher/tests/api/v1/test_strategies.py0000664000175000017500000002752013656752270024215 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_config import cfg from oslo_serialization import jsonutils from six.moves.urllib import parse as urlparse from watcher.common import utils from watcher.decision_engine import rpcapi as deapi from watcher.tests.api import base as api_base from watcher.tests.objects import utils as obj_utils class TestListStrategy(api_base.FunctionalTest): def setUp(self): super(TestListStrategy, self).setUp() self.fake_goal = obj_utils.create_test_goal( self.context, uuid=utils.generate_uuid()) def _assert_strategy_fields(self, strategy): strategy_fields = ['uuid', 'name', 'display_name', 'goal_uuid'] for field in strategy_fields: self.assertIn(field, strategy) @mock.patch.object(deapi.DecisionEngineAPI, 'get_strategy_info') def test_state(self, mock_strategy_info): strategy = obj_utils.create_test_strategy(self.context) mock_state = [ {"type": "Datasource", "mandatory": True, "comment": "", "state": "gnocchi: True"}, {"type": "Metrics", "mandatory": False, "comment": "", "state": [{"compute.node.cpu.percent": "available"}, {"cpu_util": "available"}]}, {"type": "CDM", "mandatory": True, "comment": "", "state": [{"compute_model": "available"}, {"storage_model": "not available"}]}, {"type": "Name", "mandatory": "", "comment": "", "state": strategy.name} ] mock_strategy_info.return_value = mock_state response = self.get_json('/strategies/%s/state' % strategy.uuid) strategy_name = [requirement["state"] for requirement in response if requirement["type"] == "Name"][0] self.assertEqual(strategy.name, strategy_name) def test_one(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies') self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) self._assert_strategy_fields(response['strategies'][0]) def test_get_one_by_uuid(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies/%s' % strategy.uuid) self.assertEqual(strategy.uuid, response["uuid"]) 
self.assertEqual(strategy.name, response["name"]) self._assert_strategy_fields(response) def test_get_one_by_name(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json(urlparse.quote( '/strategies/%s' % strategy['name'])) self.assertEqual(strategy.uuid, response['uuid']) self._assert_strategy_fields(response) def test_get_one_soft_deleted(self): strategy = obj_utils.create_test_strategy(self.context) strategy.soft_delete() response = self.get_json( '/strategies/%s' % strategy['uuid'], headers={'X-Show-Deleted': 'True'}) self.assertEqual(strategy.uuid, response['uuid']) self._assert_strategy_fields(response) response = self.get_json( '/strategies/%s' % strategy['uuid'], expect_errors=True) self.assertEqual(404, response.status_int) def test_detail(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies/detail') self.assertEqual(strategy.uuid, response['strategies'][0]["uuid"]) self._assert_strategy_fields(response['strategies'][0]) for strategy in response['strategies']: self.assertTrue( all(val is not None for key, val in strategy.items() if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) def test_detail_against_single(self): strategy = obj_utils.create_test_strategy(self.context) response = self.get_json('/strategies/%s/detail' % strategy.uuid, expect_errors=True) self.assertEqual(404, response.status_int) def test_many(self): strategy_list = [] for idx in range(1, 6): strategy = obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) strategy_list.append(strategy.uuid) response = self.get_json('/strategies') self.assertEqual(5, len(response['strategies'])) for strategy in response['strategies']: self.assertTrue( all(val is not None for key, val in strategy.items() if key in ['uuid', 'name', 'display_name', 'goal_uuid'])) def test_many_without_soft_deleted(self): strategy_list = [] for id_ in [1, 2, 3]: strategy = 
obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(id_)) strategy_list.append(strategy.uuid) for id_ in [4, 5]: strategy = obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(id_)) strategy.soft_delete() response = self.get_json('/strategies') self.assertEqual(3, len(response['strategies'])) uuids = [s['uuid'] for s in response['strategies']] self.assertEqual(sorted(strategy_list), sorted(uuids)) def test_strategies_collection_links(self): for idx in range(1, 6): obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) response = self.get_json('/strategies/?limit=2') self.assertEqual(2, len(response['strategies'])) def test_strategies_collection_links_default_limit(self): for idx in range(1, 6): obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) cfg.CONF.set_override('max_limit', 3, 'api') response = self.get_json('/strategies') self.assertEqual(3, len(response['strategies'])) def test_filter_by_goal_uuid(self): goal1 = obj_utils.create_test_goal( self.context, id=2, uuid=utils.generate_uuid(), name='My_Goal 1') goal2 = obj_utils.create_test_goal( self.context, id=3, uuid=utils.generate_uuid(), name='My Goal 2') for id_ in range(1, 3): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal1['id']) for id_ in range(3, 5): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal2['id']) response = self.get_json('/strategies/?goal=%s' % goal1['uuid']) strategies = response['strategies'] self.assertEqual(2, len(strategies)) for strategy in strategies: self.assertEqual(goal1['uuid'], strategy['goal_uuid']) def test_filter_by_goal_name(self): goal1 = obj_utils.create_test_goal( self.context, id=2, 
uuid=utils.generate_uuid(), name='My_Goal 1') goal2 = obj_utils.create_test_goal( self.context, id=3, uuid=utils.generate_uuid(), name='My Goal 2') for id_ in range(1, 3): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal1['id']) for id_ in range(3, 5): obj_utils.create_test_strategy( self.context, id=id_, uuid=utils.generate_uuid(), name='Goal %s' % id_, goal_id=goal2['id']) response = self.get_json('/strategies/?goal=%s' % goal1['name']) strategies = response['strategies'] self.assertEqual(2, len(strategies)) for strategy in strategies: self.assertEqual(goal1['uuid'], strategy['goal_uuid']) def test_many_with_sort_key_goal_uuid(self): goals_uuid_list = [] for idx in range(1, 6): strategy = obj_utils.create_test_strategy( self.context, id=idx, uuid=utils.generate_uuid(), name='STRATEGY_{0}'.format(idx)) goals_uuid_list.append(strategy.goal.uuid) response = self.get_json('/strategies/?sort_key=goal_uuid') self.assertEqual(5, len(response['strategies'])) goal_uuids = [s['goal_uuid'] for s in response['strategies']] self.assertEqual(sorted(goals_uuid_list), goal_uuids) def test_sort_key_validation(self): response = self.get_json( '/strategies?sort_key=%s' % 'bad_name', expect_errors=True) self.assertEqual(400, response.status_int) class TestStrategyPolicyEnforcement(api_base.FunctionalTest): def setUp(self): super(TestStrategyPolicyEnforcement, self).setUp() self.fake_goal = obj_utils.create_test_goal( self.context, uuid=utils.generate_uuid()) def _common_policy_check(self, rule, func, *arg, **kwarg): self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", rule: "rule:defaut"}) response = func(*arg, **kwarg) self.assertEqual(403, response.status_int) self.assertEqual('application/json', response.content_type) self.assertTrue( "Policy doesn't allow %s to be performed." 
% rule, jsonutils.loads(response.json['error_message'])['faultstring']) def test_policy_disallow_get_all(self): self._common_policy_check( "strategy:get_all", self.get_json, '/strategies', expect_errors=True) def test_policy_disallow_get_one(self): strategy = obj_utils.create_test_strategy(self.context) self._common_policy_check( "strategy:get", self.get_json, '/strategies/%s' % strategy.uuid, expect_errors=True) def test_policy_disallow_detail(self): self._common_policy_check( "strategy:detail", self.get_json, '/strategies/detail', expect_errors=True) def test_policy_disallow_state(self): strategy = obj_utils.create_test_strategy(self.context) self._common_policy_check( "strategy:get", self.get_json, '/strategies/%s/state' % strategy.uuid, expect_errors=True) class TestStrategyEnforcementWithAdminContext( TestListStrategy, api_base.AdminRoleTest): def setUp(self): super(TestStrategyEnforcementWithAdminContext, self).setUp() self.policy.set_rules({ "admin_api": "(role:admin or role:administrator)", "default": "rule:admin_api", "strategy:detail": "rule:default", "strategy:get": "rule:default", "strategy:get_all": "rule:default", "strategy:state": "rule:default"}) python-watcher-4.0.0/watcher/tests/api/v1/test_types.py0000664000175000017500000002156713656752270023214 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import webtest import wsme from wsme import types as wtypes from watcher.api.controllers.v1 import types from watcher.common import exception from watcher.common import utils from watcher.tests import base class TestUuidType(base.TestCase): def test_valid_uuid(self): test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' self.assertEqual(test_uuid, types.UuidType.validate(test_uuid)) def test_invalid_uuid(self): self.assertRaises(exception.InvalidUUID, types.UuidType.validate, 'invalid-uuid') class TestNameType(base.TestCase): def test_valid_name(self): test_name = 'hal-9000' self.assertEqual(test_name, types.NameType.validate(test_name)) def test_invalid_name(self): self.assertRaises(exception.InvalidName, types.NameType.validate, '-this is not valid-') class TestUuidOrNameType(base.TestCase): @mock.patch.object(utils, 'is_uuid_like') @mock.patch.object(utils, 'is_hostname_safe') def test_valid_uuid(self, host_mock, uuid_mock): test_uuid = '1a1a1a1a-2b2b-3c3c-4d4d-5e5e5e5e5e5e' host_mock.return_value = False uuid_mock.return_value = True self.assertTrue(types.UuidOrNameType.validate(test_uuid)) uuid_mock.assert_called_once_with(test_uuid) @mock.patch.object(utils, 'is_uuid_like') @mock.patch.object(utils, 'is_hostname_safe') def test_valid_name(self, host_mock, uuid_mock): test_name = 'dc16-database5' uuid_mock.return_value = False host_mock.return_value = True self.assertTrue(types.UuidOrNameType.validate(test_name)) host_mock.assert_called_once_with(test_name) def test_invalid_uuid_or_name(self): self.assertRaises(exception.InvalidUuidOrName, types.UuidOrNameType.validate, 'inval#uuid%or*name') class MyPatchType(types.JsonPatchType): """Helper class for TestJsonPatchType tests.""" @staticmethod def mandatory_attrs(): return ['/mandatory'] @staticmethod def internal_attrs(): return ['/internal'] class MyRoot(wsme.WSRoot): """Helper class for TestJsonPatchType tests.""" @wsme.expose([wsme.types.text], body=[MyPatchType]) @wsme.validate([MyPatchType]) def 
test(self, patch): return patch class TestJsonPatchType(base.TestCase): def setUp(self): super(TestJsonPatchType, self).setUp() self.app = webtest.TestApp(MyRoot(['restjson']).wsgiapp()) def _patch_json(self, params, expect_errors=False): return self.app.patch_json( '/test', params=params, headers={'Accept': 'application/json'}, expect_errors=expect_errors ) def test_valid_patches(self): valid_patches = [{'path': '/extra/foo', 'op': 'remove'}, {'path': '/extra/foo', 'op': 'add', 'value': 'bar'}, {'path': '/str', 'op': 'replace', 'value': 'bar'}, {'path': '/bool', 'op': 'add', 'value': True}, {'path': '/int', 'op': 'add', 'value': 1}, {'path': '/float', 'op': 'add', 'value': 0.123}, {'path': '/list', 'op': 'add', 'value': [1, 2]}, {'path': '/none', 'op': 'add', 'value': None}, {'path': '/empty_dict', 'op': 'add', 'value': {}}, {'path': '/empty_list', 'op': 'add', 'value': []}, {'path': '/dict', 'op': 'add', 'value': {'cat': 'meow'}}] ret = self._patch_json(valid_patches, False) self.assertEqual(200, ret.status_int) self.assertEqual(valid_patches, ret.json) def test_cannot_update_internal_attr(self): patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_cannot_update_internal_dict_attr(self): patch = [{'path': '/internal', 'op': 'replace', 'value': 'foo'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_mandatory_attr(self): patch = [{'op': 'replace', 'path': '/mandatory', 'value': 'foo'}] ret = self._patch_json(patch, False) self.assertEqual(200, ret.status_int) self.assertEqual(patch, ret.json) def test_cannot_remove_mandatory_attr(self): patch = [{'op': 'remove', 'path': '/mandatory'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_missing_required_fields_path(self): missing_path = 
[{'op': 'remove'}] ret = self._patch_json(missing_path, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_missing_required_fields_op(self): missing_op = [{'path': '/foo'}] ret = self._patch_json(missing_op, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_invalid_op(self): patch = [{'path': '/foo', 'op': 'invalid'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_invalid_path(self): patch = [{'path': 'invalid-path', 'op': 'remove'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_cannot_add_with_no_value(self): patch = [{'path': '/extra/foo', 'op': 'add'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) def test_cannot_replace_with_no_value(self): patch = [{'path': '/foo', 'op': 'replace'}] ret = self._patch_json(patch, True) self.assertEqual(400, ret.status_int) self.assertTrue(ret.json['faultstring']) class TestBooleanType(base.TestCase): def test_valid_true_values(self): v = types.BooleanType() self.assertTrue(v.validate("true")) self.assertTrue(v.validate("TRUE")) self.assertTrue(v.validate("True")) self.assertTrue(v.validate("t")) self.assertTrue(v.validate("1")) self.assertTrue(v.validate("y")) self.assertTrue(v.validate("yes")) self.assertTrue(v.validate("on")) def test_valid_false_values(self): v = types.BooleanType() self.assertFalse(v.validate("false")) self.assertFalse(v.validate("FALSE")) self.assertFalse(v.validate("False")) self.assertFalse(v.validate("f")) self.assertFalse(v.validate("0")) self.assertFalse(v.validate("n")) self.assertFalse(v.validate("no")) self.assertFalse(v.validate("off")) def test_invalid_value(self): v = types.BooleanType() self.assertRaises(exception.Invalid, v.validate, "invalid-value") self.assertRaises(exception.Invalid, 
v.validate, "01") class TestJsonType(base.TestCase): def test_valid_values(self): vt = types.jsontype value = vt.validate("hello") self.assertEqual("hello", value) value = vt.validate(10) self.assertEqual(10, value) value = vt.validate(0.123) self.assertEqual(0.123, value) value = vt.validate(True) self.assertTrue(value) value = vt.validate([1, 2, 3]) self.assertEqual([1, 2, 3], value) value = vt.validate({'foo': 'bar'}) self.assertEqual({'foo': 'bar'}, value) value = vt.validate(None) self.assertIsNone(value) def test_invalid_values(self): vt = types.jsontype self.assertRaises(exception.Invalid, vt.validate, object()) def test_apimultitype_tostring(self): vts = str(types.jsontype) self.assertIn(str(wtypes.text), vts) self.assertIn(str(int), vts) self.assertIn(str(float), vts) self.assertIn(str(types.BooleanType), vts) self.assertIn(str(list), vts) self.assertIn(str(dict), vts) self.assertIn(str(None), vts) python-watcher-4.0.0/watcher/tests/api/test_utils.py0000664000175000017500000000377013656752270022656 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals from oslo_config import cfg import wsme from watcher.api.controllers.v1 import utils as v1_utils from watcher.tests import base class TestApiUtilsValidScenarios(base.TestCase): scenarios = [ ("limit=None + max_limit=None", {"limit": None, "max_limit": None, "expected": None}), ("limit=None + max_limit=1", {"limit": None, "max_limit": 1, "expected": 1}), # ("limit=0 + max_limit=None", # {"limit": 0, "max_limit": None, "expected": 0}), ("limit=1 + max_limit=None", {"limit": 1, "max_limit": None, "expected": 1}), ("limit=1 + max_limit=1", {"limit": 1, "max_limit": 1, "expected": 1}), ("limit=2 + max_limit=1", {"limit": 2, "max_limit": 1, "expected": 1}), ] def test_validate_limit(self): cfg.CONF.set_override("max_limit", self.max_limit, group="api") actual_limit = v1_utils.validate_limit(self.limit) self.assertEqual(self.expected, actual_limit) class TestApiUtilsInvalidScenarios(base.TestCase): scenarios = [ ("limit=0 + max_limit=None", {"limit": 0, "max_limit": None}), ] def test_validate_limit_invalid_cases(self): cfg.CONF.set_override("max_limit", self.max_limit, group="api") self.assertRaises( wsme.exc.ClientSideError, v1_utils.validate_limit, self.limit ) python-watcher-4.0.0/watcher/tests/api/test_scheduling.py0000664000175000017500000001177213656752270023644 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from apscheduler.schedulers import background import datetime import freezegun import mock from watcher.api import scheduling from watcher.notifications import service from watcher import objects from watcher.tests import base from watcher.tests.db import base as db_base from watcher.tests.db import utils class TestSchedulingService(base.TestCase): @mock.patch.object(background.BackgroundScheduler, 'start') def test_start_scheduling_service(self, m_start): scheduler = scheduling.APISchedulingService() scheduler.start() m_start.assert_called_once_with(scheduler) jobs = scheduler.get_jobs() self.assertEqual(1, len(jobs)) class TestSchedulingServiceFunctions(db_base.DbTestCase): def setUp(self): super(TestSchedulingServiceFunctions, self).setUp() fake_service = utils.get_test_service( created_at=datetime.datetime.utcnow()) self.fake_service = objects.Service(**fake_service) @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') @mock.patch.object(objects.Service, 'list') @mock.patch.object(service, 'send_service_update') def test_get_services_status_without_services_in_list( self, mock_service_update, mock_get_list, mock_service_status): scheduler = scheduling.APISchedulingService() mock_get_list.return_value = [self.fake_service] mock_service_status.return_value = 'ACTIVE' scheduler.get_services_status(mock.ANY) mock_service_status.assert_called_once_with(mock.ANY, self.fake_service.id) mock_service_update.assert_not_called() @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') @mock.patch.object(objects.Service, 'list') @mock.patch.object(service, 'send_service_update') def test_get_services_status_with_services_in_list_same_status( self, mock_service_update, mock_get_list, mock_service_status): scheduler = scheduling.APISchedulingService() mock_get_list.return_value = [self.fake_service] scheduler.services_status = {1: 'ACTIVE'} mock_service_status.return_value = 'ACTIVE' scheduler.get_services_status(mock.ANY) 
mock_service_status.assert_called_once_with(mock.ANY, self.fake_service.id) mock_service_update.assert_not_called() @mock.patch.object(scheduling.APISchedulingService, 'get_service_status') @mock.patch.object(objects.Service, 'list') @mock.patch.object(service, 'send_service_update') def test_get_services_status_with_services_in_list_diff_status( self, mock_service_update, mock_get_list, mock_service_status): scheduler = scheduling.APISchedulingService() mock_get_list.return_value = [self.fake_service] scheduler.services_status = {1: 'FAILED'} mock_service_status.return_value = 'ACTIVE' scheduler.get_services_status(mock.ANY) mock_service_status.assert_called_once_with(mock.ANY, self.fake_service.id) mock_service_update.assert_called_once_with(mock.ANY, self.fake_service, state='ACTIVE') @mock.patch.object(objects.Service, 'get') def test_get_service_status_failed_service( self, mock_get): scheduler = scheduling.APISchedulingService() mock_get.return_value = self.fake_service service_status = scheduler.get_service_status(mock.ANY, self.fake_service.id) mock_get.assert_called_once_with(mock.ANY, self.fake_service.id) self.assertEqual('FAILED', service_status) @freezegun.freeze_time('2016-09-22T08:32:26.219414') @mock.patch.object(objects.Service, 'get') def test_get_service_status_failed_active( self, mock_get): scheduler = scheduling.APISchedulingService() mock_get.return_value = self.fake_service service_status = scheduler.get_service_status(mock.ANY, self.fake_service.id) mock_get.assert_called_once_with(mock.ANY, self.fake_service.id) self.assertEqual('ACTIVE', service_status) python-watcher-4.0.0/watcher/tests/api/test_base.py0000664000175000017500000000217713656752270022430 0ustar zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from watcher.tests.api import base class TestBase(base.FunctionalTest): def test_api_setup(self): pass def test_bad_uri(self): response = self.get_json('/bad/path', expect_errors=True, headers={"Accept": "application/json"}) self.assertEqual(404, response.status_int) self.assertEqual("application/json", response.content_type) self.assertTrue(response.json['error_message']) python-watcher-4.0.0/watcher/tests/api/test_config.py0000664000175000017500000000250713656752270022760 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import imp from oslo_config import cfg from watcher.api import config as api_config from watcher.tests.api import base class TestRoot(base.FunctionalTest): def test_config_enable_webhooks_auth(self): acl_public_routes = ['/'] cfg.CONF.set_override('enable_webhooks_auth', True, 'api') imp.reload(api_config) self.assertEqual(acl_public_routes, api_config.app['acl_public_routes']) def test_config_disable_webhooks_auth(self): acl_public_routes = ['/', '/v1/webhooks/.*'] cfg.CONF.set_override('enable_webhooks_auth', False, 'api') imp.reload(api_config) self.assertEqual(acl_public_routes, api_config.app['acl_public_routes']) python-watcher-4.0.0/watcher/tests/api/base.py0000664000175000017500000002727713656752270021401 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Base classes for API tests.""" # NOTE: Ported from ceilometer/tests/api.py (subsequently moved to # ceilometer/tests/api/__init__.py). This should be oslo'ified: # https://bugs.launchpad.net/watcher/+bug/1255115. 
# NOTE(deva): import auth_token so we can override a config option import copy import mock from oslo_config import cfg import pecan import pecan.testing from six.moves.urllib import parse as urlparse from watcher.api import hooks from watcher.common import context as watcher_context from watcher.notifications import service as n_service from watcher.tests.db import base PATH_PREFIX = '/v1' class FunctionalTest(base.DbTestCase): """Pecan controller functional testing class. Used for functional tests of Pecan controllers where you need to test your literal application and its integration with the framework. """ SOURCE_DATA = {'test_source': {'somekey': '666'}} def setUp(self): super(FunctionalTest, self).setUp() cfg.CONF.set_override("auth_version", "v2.0", group='keystone_authtoken') cfg.CONF.set_override("admin_user", "admin", group='keystone_authtoken') p_services = mock.patch.object(n_service, "send_service_update", new_callable=mock.PropertyMock) self.m_services = p_services.start() self.addCleanup(p_services.stop) self.app = self._make_app() def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def _make_app(self, enable_acl=False): # Determine where we are so we can set up paths in the config root_dir = self.get_path() self.config = { 'app': { 'root': 'watcher.api.controllers.root.RootController', 'modules': ['watcher.api'], 'hooks': [ hooks.ContextHook(), hooks.NoExceptionTracebackHook() ], 'template_path': '%s/api/templates' % root_dir, 'enable_acl': enable_acl, 'acl_public_routes': ['/', '/v1'], }, } return pecan.testing.load_test_app(self.config) def _request_json(self, path, params, expect_errors=False, headers=None, method="post", extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP request to Pecan test app. 
:param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param method: Request method type. Appropriate method function call should be used rather than passing attribute in. :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path response = getattr(self.app, "%s_json" % method)( str(full_path), params=params, headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors ) return response def put_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PUT request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="put") def post(self, *args, **kwargs): headers = kwargs.pop('headers', {}) headers.setdefault('Accept', 'application/json') kwargs['headers'] = headers return self.app.post(*args, **kwargs) def post_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP POST request to Pecan test app. 
:param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="post") def patch_json(self, path, params, expect_errors=False, headers=None, extra_environ=None, status=None): """Sends simulated HTTP PATCH request to Pecan test app. :param path: url path of target service :param params: content for wsgi.input of request :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response """ return self._request_json(path=path, params=params, expect_errors=expect_errors, headers=headers, extra_environ=extra_environ, status=status, method="patch") def delete(self, path, expect_errors=False, headers=None, extra_environ=None, status=None, path_prefix=PATH_PREFIX): """Sends simulated HTTP DELETE request to Pecan test app. 
:param path: url path of target service :param expect_errors: Boolean value; whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param status: expected status code of response :param path_prefix: prefix of the url path """ full_path = path_prefix + path response = self.app.delete(str(full_path), headers=headers, status=status, extra_environ=extra_environ, expect_errors=expect_errors) return response def get_json(self, path, expect_errors=False, headers=None, extra_environ=None, q=[], path_prefix=PATH_PREFIX, return_json=True, **params): """Sends simulated HTTP GET request to Pecan test app. :param path: url path of target service :param expect_errors: Boolean value;whether an error is expected based on request :param headers: a dictionary of headers to send along with the request :param extra_environ: a dictionary of environ variables to send along with the request :param q: list of queries consisting of: field, value, op, and type keys :param path_prefix: prefix of the url path :param params: content for wsgi.input of request """ full_path = path_prefix + path query_params = {'q.field': [], 'q.value': [], 'q.op': [], } for query in q: for name in ['field', 'op', 'value']: query_params['q.%s' % name].append(query.get(name, '')) all_params = {} all_params.update(params) if q: all_params.update(query_params) response = self.app.get(full_path, params=all_params, headers=headers, extra_environ=extra_environ, expect_errors=expect_errors) if return_json and not expect_errors: response = response.json return response def validate_link(self, link, bookmark=False): """Checks if the given link can get correct data.""" # removes the scheme and net location parts of the link url_parts = list(urlparse.urlparse(link)) url_parts[0] = url_parts[1] = '' # bookmark link should not have the version in the URL if bookmark and 
url_parts[2].startswith(PATH_PREFIX): return False full_path = urlparse.urlunparse(url_parts) try: self.get_json(full_path, path_prefix='') return True except Exception: return False class AdminRoleTest(base.DbTestCase): def setUp(self): super(AdminRoleTest, self).setUp() token_info = { 'token': { 'project': { 'id': 'admin' }, 'user': { 'id': 'admin' } } } self.context = watcher_context.RequestContext( auth_token_info=token_info, project_id='admin', user_id='admin') def make_context(*args, **kwargs): # If context hasn't been constructed with token_info if not kwargs.get('auth_token_info'): kwargs['auth_token_info'] = copy.deepcopy(token_info) if not kwargs.get('project_id'): kwargs['project_id'] = 'admin' if not kwargs.get('user_id'): kwargs['user_id'] = 'admin' if not kwargs.get('roles'): kwargs['roles'] = ['admin'] context = watcher_context.RequestContext(*args, **kwargs) return watcher_context.RequestContext.from_dict(context.to_dict()) p = mock.patch.object(watcher_context, 'make_context', side_effect=make_context) self.mock_make_context = p.start() self.addCleanup(p.stop) python-watcher-4.0.0/watcher/tests/fake_policy.py0000664000175000017500000000400013656752270022156 0ustar zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
policy_data = """ { "admin_api": "role:admin or role:administrator", "show_password": "!", "default": "rule:admin_api", "action:detail": "", "action:get": "", "action:get_all": "", "action_plan:delete": "", "action_plan:detail": "", "action_plan:get": "", "action_plan:get_all": "", "action_plan:update": "", "audit:create": "", "audit:delete": "", "audit:detail": "", "audit:get": "", "audit:get_all": "", "audit:update": "", "audit_template:create": "", "audit_template:delete": "", "audit_template:detail": "", "audit_template:get": "", "audit_template:get_all": "", "audit_template:update": "", "goal:detail": "", "goal:get": "", "goal:get_all": "", "scoring_engine:detail": "", "scoring_engine:get": "", "scoring_engine:get_all": "", "strategy:detail": "", "strategy:get": "", "strategy:get_all": "", "strategy:state": "", "service:detail": "", "service:get": "", "service:get_all": "", "data_model:get_all": "" } """ policy_data_compat_juno = """ { "admin": "role:admin or role:administrator", "admin_api": "is_admin:True", "default": "rule:admin_api" } """ def get_policy_data(compat): if not compat: return policy_data elif compat == 'juno': return policy_data_compat_juno else: raise Exception('Policy data for %s not available' % compat) python-watcher-4.0.0/watcher/tests/notifications/0000775000175000017500000000000013656752352022177 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/notifications/test_notification.py0000664000175000017500000003606413656752270026306 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import collections import mock from oslo_versionedobjects import fixture from watcher.common import exception from watcher.common import rpc from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields from watcher.tests import base as testbase from watcher.tests.objects import test_objects class TestNotificationBase(testbase.TestCase): @base.WatcherObjectRegistry.register_if(False) class TestObject(base.WatcherObject): VERSION = '1.0' fields = { 'field_1': wfields.StringField(), 'field_2': wfields.IntegerField(), 'not_important_field': wfields.IntegerField(), } @base.WatcherObjectRegistry.register_if(False) class TestNotificationPayload(notificationbase.NotificationPayloadBase): VERSION = '1.0' SCHEMA = { 'field_1': ('source_field', 'field_1'), 'field_2': ('source_field', 'field_2'), } fields = { 'extra_field': wfields.StringField(), # filled by ctor 'field_1': wfields.StringField(), # filled by the schema 'field_2': wfields.IntegerField(), # filled by the schema } def populate_schema(self, source_field): super(TestNotificationBase.TestNotificationPayload, self).populate_schema(source_field=source_field) @base.WatcherObjectRegistry.register_if(False) class TestNotificationPayloadEmptySchema( notificationbase.NotificationPayloadBase): VERSION = '1.0' fields = { 'extra_field': wfields.StringField(), # filled by ctor } @notificationbase.notification_sample('test-update-1.json') @notificationbase.notification_sample('test-update-2.json') @base.WatcherObjectRegistry.register_if(False) class TestNotification(notificationbase.NotificationBase): VERSION = '1.0' fields = { 'payload': wfields.ObjectField('TestNotificationPayload') } @base.WatcherObjectRegistry.register_if(False) class TestNotificationEmptySchema(notificationbase.NotificationBase): VERSION = '1.0' fields = { 'payload': 
wfields.ObjectField( 'TestNotificationPayloadEmptySchema') } expected_payload = { 'watcher_object.name': 'TestNotificationPayload', 'watcher_object.data': { 'extra_field': 'test string', 'field_1': 'test1', 'field_2': 42}, 'watcher_object.version': '1.0', 'watcher_object.namespace': 'watcher'} def setUp(self): super(TestNotificationBase, self).setUp() self.my_obj = self.TestObject(field_1='test1', field_2=42, not_important_field=13) self.payload = self.TestNotificationPayload( extra_field='test string') self.payload.populate_schema(source_field=self.my_obj) self.notification = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE, phase=wfields.NotificationPhase.START), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) def _verify_notification(self, mock_notifier, mock_context, expected_event_type, expected_payload): mock_notifier.prepare.assert_called_once_with( publisher_id='watcher-fake:fake-host') mock_notify = mock_notifier.prepare.return_value.info self.assertTrue(mock_notify.called) self.assertEqual(mock_notify.call_args[0][0], mock_context) self.assertEqual(mock_notify.call_args[1]['event_type'], expected_event_type) actual_payload = mock_notify.call_args[1]['payload'] self.assertEqual(expected_payload, actual_payload) @mock.patch.object(rpc, 'NOTIFIER') def test_emit_notification(self, mock_notifier): mock_context = mock.Mock() mock_context.to_dict.return_value = {} self.notification.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update.start', expected_payload=self.expected_payload) @mock.patch.object(rpc, 'NOTIFIER') def test_no_emit_notifs_disabled(self, mock_notifier): # Make sure notifications aren't emitted when notification_level # isn't defined, indicating notifications should be disabled 
self.config(notification_level=None) notif = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE, phase=wfields.NotificationPhase.START), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() notif.emit(mock_context) self.assertFalse(mock_notifier.called) @mock.patch.object(rpc, 'NOTIFIER') def test_no_emit_level_too_low(self, mock_notifier): # Make sure notification doesn't emit when set notification # level < config level self.config(notification_level='warning') notif = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE, phase=wfields.NotificationPhase.START), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() notif.emit(mock_context) self.assertFalse(mock_notifier.called) @mock.patch.object(rpc, 'NOTIFIER') def test_emit_event_type_without_phase(self, mock_notifier): noti = self.TestNotification( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=self.payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload=self.expected_payload) @mock.patch.object(rpc, 'NOTIFIER') def test_not_possible_to_emit_if_not_populated(self, mock_notifier): non_populated_payload = self.TestNotificationPayload( extra_field='test string') noti = self.TestNotification( event_type=notificationbase.EventType( object='test_object', 
action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=non_populated_payload) mock_context = mock.Mock() self.assertRaises(exception.NotificationPayloadError, noti.emit, mock_context) self.assertFalse(mock_notifier.called) @mock.patch.object(rpc, 'NOTIFIER') def test_empty_schema(self, mock_notifier): non_populated_payload = self.TestNotificationPayloadEmptySchema( extra_field='test string') noti = self.TestNotificationEmptySchema( event_type=notificationbase.EventType( object='test_object', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host='fake-host', binary='watcher-fake'), priority=wfields.NotificationPriority.INFO, payload=non_populated_payload) mock_context = mock.Mock() mock_context.to_dict.return_value = {} noti.emit(mock_context) self._verify_notification( mock_notifier, mock_context, expected_event_type='test_object.update', expected_payload={ 'watcher_object.name': 'TestNotificationPayloadEmptySchema', 'watcher_object.data': {'extra_field': 'test string'}, 'watcher_object.version': '1.0', 'watcher_object.namespace': 'watcher'}) def test_sample_decorator(self): self.assertEqual(2, len(self.TestNotification.samples)) self.assertIn('test-update-1.json', self.TestNotification.samples) self.assertIn('test-update-2.json', self.TestNotification.samples) expected_notification_fingerprints = { 'EventType': '1.3-bc4f4bc4a497d789e5a3c30f921edae1', 'ExceptionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ExceptionPayload': '1.0-4516ae282a55fe2fd5c754967ee6248b', 'NotificationPublisher': '1.0-bbbc1402fb0e443a3eb227cc52b61545', 'TerseAuditPayload': '1.2-0fda1751c39f29b539944c2b44690f65', 'AuditPayload': '1.2-d30cc1639404ed380b0742b781db690e', 'AuditStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'AuditUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 
'AuditUpdatePayload': '1.1-e32c3f69c353d47948afa44359246828', 'AuditCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'AuditCreatePayload': '1.1-d30cc1639404ed380b0742b781db690e', 'AuditDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', 'AuditDeletePayload': '1.1-d30cc1639404ed380b0742b781db690e', 'AuditActionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'AuditActionPayload': '1.1-3d19c75dd9cdf2a833d0367b234e20d2', 'GoalPayload': '1.0-fa1fecb8b01dd047eef808ded4d50d1a', 'StrategyPayload': '1.0-94f01c137b083ac236ae82573c1fcfc1', 'ActionPlanActionPayload': '1.1-5be9fa7ca9e544322bdded5593e36edb', 'ActionPlanCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanCreatePayload': '1.1-6a3c3bf1d1f822e33633c49088699d4e', 'ActionPlanDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanDeletePayload': '1.1-6a3c3bf1d1f822e33633c49088699d4e', 'ActionPlanPayload': '1.1-6a3c3bf1d1f822e33633c49088699d4e', 'ActionPlanStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'ActionPlanUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanUpdatePayload': '1.1-4ecd6571784cec2656725003ce431fdd', 'ActionPlanActionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionPlanCancelNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionCancelNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionCreateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionCreatePayload': '1.0-519b93b7450319d8928b4b6e6362df31', 'ActionDeleteNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionDeletePayload': '1.0-519b93b7450319d8928b4b6e6362df31', 'ActionExecutionNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ActionExecutionPayload': '1.0-bff9f820a2abf7bb6d7027b7450157df', 'ActionPayload': '1.0-519b93b7450319d8928b4b6e6362df31', 'ActionStateUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'ActionUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 
'ActionUpdatePayload': '1.0-03306c7e7f4d49ac328c261eff6b30b8', 'ActionPlanCancelPayload': '1.1-5be9fa7ca9e544322bdded5593e36edb', 'ActionCancelPayload': '1.0-bff9f820a2abf7bb6d7027b7450157df', 'TerseActionPlanPayload': '1.1-63008f013817407df9194c2a59fda6b0', 'ServiceUpdateNotification': '1.0-9b69de0724fda8310d05e18418178866', 'ServicePayload': '1.0-9c5a9bc51e6606e0ec3cf95baf698f4f', 'ServiceStatusUpdatePayload': '1.0-1a1b606bf14a2c468800c2b010801ce5', 'ServiceUpdatePayload': '1.0-e0e9812a45958974693a723a2c820c3f' } class TestNotificationObjectVersions(testbase.TestCase): def setUp(self): super(TestNotificationObjectVersions, self).setUp() base.WatcherObjectRegistry.register_notification_objects() def test_versions(self): checker = fixture.ObjectVersionChecker( test_objects.get_watcher_objects()) expected_notification_fingerprints.update( test_objects.expected_object_fingerprints) expected, actual = checker.test_hashes( expected_notification_fingerprints) self.assertEqual(expected, actual, 'Some notification objects have changed; please make ' 'sure the versions have been bumped, and then update ' 'their hashes here.') def test_notification_payload_version_depends_on_the_schema(self): @base.WatcherObjectRegistry.register_if(False) class TestNotificationPayload( notificationbase.NotificationPayloadBase): VERSION = '1.0' SCHEMA = { 'field_1': ('source_field', 'field_1'), 'field_2': ('source_field', 'field_2'), } fields = { 'extra_field': wfields.StringField(), # filled by ctor 'field_1': wfields.StringField(), # filled by the schema 'field_2': wfields.IntegerField(), # filled by the schema } checker = fixture.ObjectVersionChecker( {'TestNotificationPayload': (TestNotificationPayload,)}) old_hash = checker.get_hashes(extra_data_func=get_extra_data) TestNotificationPayload.SCHEMA['field_3'] = ('source_field', 'field_3') new_hash = checker.get_hashes(extra_data_func=get_extra_data) self.assertNotEqual(old_hash, new_hash) def get_extra_data(obj_class): extra_data = 
tuple() # Get the SCHEMA items to add to the fingerprint # if we are looking at a notification if issubclass(obj_class, notificationbase.NotificationPayloadBase): schema_data = collections.OrderedDict( sorted(obj_class.SCHEMA.items())) extra_data += (schema_data,) return extra_data python-watcher-4.0.0/watcher/tests/notifications/__init__.py0000664000175000017500000000000013656752270024275 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/notifications/test_action_notification.py0000664000175000017500000005365213656752270027645 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import freezegun import mock import oslo_messaging as om from watcher.common import exception from watcher.common import rpc from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestActionNotification(base.DbTestCase): def setUp(self): super(TestActionNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier self.goal = utils.create_test_goal(mock.Mock()) self.strategy = utils.create_test_strategy(mock.Mock()) self.audit = utils.create_test_audit(mock.Mock(), strategy_id=self.strategy.id) self.action_plan = utils.create_test_action_plan(mock.Mock()) def test_send_invalid_action_plan(self): action_plan = utils.get_test_action_plan( mock.Mock(), state='DOESNOTMATTER', audit_id=1) self.assertRaises( exception.InvalidActionPlan, notifications.action_plan.send_update, mock.MagicMock(), action_plan, host='node0') def test_send_action_update(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.ONGOING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_update( mock.MagicMock(), action, host='node0', old_state=objects.action.State.PENDING) # The 1st notification is because we created the object. # The 2nd notification is because we created the action plan object. 
self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionUpdatePayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state_update': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionStateUpdatePayload', 'watcher_object.data': { 'old_state': 'PENDING', 'state': 'ONGOING' } }, 'state': 'ONGOING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } }, payload ) def test_send_action_plan_create(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.PENDING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_create(mock.MagicMock(), action, host='node0') self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionCreatePayload', 'watcher_object.data': { 'uuid': 
'10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'PENDING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } }, payload ) def test_send_action_delete(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.DELETED, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_delete(mock.MagicMock(), action, host='node0') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. 
self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionDeletePayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'DELETED', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } }, payload ) def test_send_action_execution(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.PENDING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_execution_notification( mock.MagicMock(), action, 'execution', phase='start', host='node0') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. 
self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.execution.start', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionExecutionPayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': None, 'updated_at': None, 'state': 'PENDING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) def test_send_action_execution_with_error(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.FAILED, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action.send_execution_notification( mock.MagicMock(), action, 'execution', phase='error', host='node0', priority='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.execution.error', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionExecutionPayload', 
'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': { 'watcher_object.data': { 'exception': u'WatcherException', 'exception_message': u'TEST', 'function_name': ( 'test_send_action_execution_with_error'), 'module_name': ( 'watcher.tests.notifications.' 'test_action_notification') }, 'watcher_object.name': 'ExceptionPayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' }, 'updated_at': None, 'state': 'FAILED', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) def test_send_action_cancel(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.PENDING, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) notifications.action.send_cancel_notification( mock.MagicMock(), action, 'cancel', phase='start', host='node0') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. 
self.assertEqual(4, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.cancel.start', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionCancelPayload', 'watcher_object.data': { 'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': None, 'updated_at': None, 'state': 'PENDING', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) def test_send_action_cancel_with_error(self): action = utils.create_test_action( mock.Mock(), state=objects.action.State.FAILED, action_type='nop', input_parameters={'param1': 1, 'param2': 2}, parents=[], action_plan_id=self.action_plan.id) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action.send_cancel_notification( mock.MagicMock(), action, 'cancel', phase='error', host='node0', priority='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { 'event_type': 'action.cancel.error', 'payload': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0', 'watcher_object.name': 'ActionCancelPayload', 'watcher_object.data': { 
'uuid': '10a47dd1-4874-4298-91cf-eff046dbdb8d', 'input_parameters': { 'param2': 2, 'param1': 1 }, 'created_at': '2016-10-18T09:52:05Z', 'fault': { 'watcher_object.data': { 'exception': u'WatcherException', 'exception_message': u'TEST', 'function_name': ( 'test_send_action_cancel_with_error'), 'module_name': ( 'watcher.tests.notifications.' 'test_action_notification') }, 'watcher_object.name': 'ExceptionPayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' }, 'updated_at': None, 'state': 'FAILED', 'action_plan': { 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.1', 'watcher_object.name': 'TerseActionPlanPayload', 'watcher_object.data': { 'uuid': '76be87bd-3422-43f9-93a0-e85a577e3061', 'global_efficacy': [], 'created_at': '2016-10-18T09:52:05Z', 'updated_at': None, 'state': 'ONGOING', 'audit_uuid': '10a47dd1-4874-4298' '-91cf-eff046dbdb8d', 'strategy_uuid': 'cb3d0b58-4415-4d90' '-b75b-1e96878730e3', 'deleted_at': None } }, 'parents': [], 'action_type': 'nop', 'deleted_at': None } } }, notification ) python-watcher-4.0.0/watcher/tests/notifications/test_action_plan_notification.py0000664000175000017500000007024113656752270030650 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import freezegun import mock import oslo_messaging as om from watcher.common import exception from watcher.common import rpc from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestActionPlanNotification(base.DbTestCase): def setUp(self): super(TestActionPlanNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier self.goal = utils.create_test_goal(mock.Mock()) self.audit = utils.create_test_audit(mock.Mock(), interval=None) self.strategy = utils.create_test_strategy(mock.Mock()) def test_send_invalid_action_plan(self): action_plan = utils.get_test_action_plan( mock.Mock(), state='DOESNOTMATTER', audit_id=1) self.assertRaises( exception.InvalidActionPlan, notifications.action_plan.send_update, mock.MagicMock(), action_plan, host='node0') def test_send_action_plan_update(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) notifications.action_plan.send_update( mock.MagicMock(), action_plan, host='node0', old_state=objects.action_plan.State.PENDING) # The 1st notification is because we created the object. # The 2nd notification is because we created the action plan object. 
self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "deleted_at": None, "state": "ONGOING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.name": "ActionPlanStateUpdatePayload" }, }, "watcher_object.name": "ActionPlanUpdatePayload" }, payload ) def test_send_action_plan_create(self): action_plan = utils.get_test_action_plan( mock.Mock(), state=objects.action_plan.State.PENDING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit.as_dict(), 
strategy=self.strategy.as_dict()) notifications.action_plan.send_create( mock.MagicMock(), action_plan, host='node0') self.assertEqual(2, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "deleted_at": None, "state": "PENDING", "updated_at": None, "created_at": None, }, "watcher_object.name": "ActionPlanCreatePayload" }, payload ) def test_send_action_plan_delete(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.DELETED, audit_id=self.audit.id, strategy_id=self.strategy.id) notifications.action_plan.send_delete( mock.MagicMock(), action_plan, host='node0') # The 1st notification is because we created the audit object. 
# The 2nd notification is because we created the action plan object. self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "global_efficacy": [], "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061", "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "deleted_at": None, "state": "DELETED", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", }, "watcher_object.name": "ActionPlanDeletePayload" }, payload ) def test_send_action_plan_action(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) notifications.action_plan.send_action_notification( mock.MagicMock(), action_plan, host='node0', action='execution', phase='start') # The 1st 
notification is because we created the audit object. # The 2nd notification is because we created the action plan object. self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.execution.start", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": None, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.2", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" } }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanActionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) def test_send_action_plan_action_with_error(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) try: # This is to load the exception 
in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action_plan.send_action_notification( mock.MagicMock(), action_plan, host='node0', action='execution', priority='error', phase='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.execution.error", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": ( "test_send_action_plan_action_with_error"), "module_name": "watcher.tests.notifications." "test_action_plan_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, 
"uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanActionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) def test_send_action_plan_cancel(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) notifications.action_plan.send_cancel_notification( mock.MagicMock(), action_plan, host='node0', action='cancel', phase='start') # The 1st notification is because we created the audit object. # The 2nd notification is because we created the action plan object. self.assertEqual(3, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.cancel.start", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": None, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.namespace": "watcher", "watcher_object.name": "TerseAuditPayload", "watcher_object.version": "1.2", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", 'name': 'My Audit', "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" } }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", 
"watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) def test_send_action_plan_cancel_with_error(self): action_plan = utils.create_test_action_plan( mock.Mock(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.action_plan.send_cancel_notification( mock.MagicMock(), action_plan, host='node0', action='cancel', priority='error', phase='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "action_plan.cancel.error", "payload": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": ( "test_send_action_plan_cancel_with_error"), "module_name": "watcher.tests.notifications." 
"test_action_plan_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "audit_uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "audit": { "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", 'name': 'My Audit', "strategy_uuid": None, "goal_uuid": ( "f7ad87ae-4298-91cf-93a0-f35a852e3652"), "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "TerseAuditPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.2" }, "global_efficacy": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "76be87bd-3422-43f9-93a0-e85a577e3061" }, "watcher_object.name": "ActionPlanCancelPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) python-watcher-4.0.0/watcher/tests/notifications/test_audit_notification.py0000664000175000017500000005274113656752270027474 0ustar zuulzuul00000000000000# All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import freezegun import mock import oslo_messaging as om from watcher.common import exception from watcher.common import rpc from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestAuditNotification(base.DbTestCase): def setUp(self): super(TestAuditNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier self.goal = utils.create_test_goal(mock.Mock()) self.strategy = utils.create_test_strategy(mock.Mock()) def test_send_invalid_audit(self): audit = utils.get_test_audit( mock.Mock(), interval=None, state='DOESNOTMATTER', goal_id=1) self.assertRaises( exception.InvalidAudit, notifications.audit.send_update, mock.MagicMock(), audit, host='node0') def test_send_audit_update_with_strategy(self): audit = utils.create_test_audit( mock.Mock(), interval=None, state=objects.audit.State.ONGOING, goal_id=self.goal.id, strategy_id=self.strategy.id, goal=self.goal, strategy=self.strategy) notifications.audit.send_update( mock.MagicMock(), audit, host='node0', old_state=objects.audit.State.PENDING) # The 1st notification is because we created the object. 
self.assertEqual(2, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "deleted_at": None, "scope": [], "state": "ONGOING", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.name": "AuditStateUpdatePayload" }, "audit_type": "ONESHOT" }, "watcher_object.name": "AuditUpdatePayload" }, payload ) def test_send_audit_update_without_strategy(self): audit = utils.get_test_audit( mock.Mock(), interval=None, state=objects.audit.State.ONGOING, goal_id=self.goal.id, goal=self.goal) notifications.audit.send_update( mock.MagicMock(), audit, host='node0', old_state=objects.audit.State.PENDING) 
self.assertEqual(1, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "strategy_uuid": None, "strategy": None, "deleted_at": None, "scope": [], "state": "ONGOING", "updated_at": None, "created_at": None, "state_update": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "old_state": "PENDING", "state": "ONGOING" }, "watcher_object.name": "AuditStateUpdatePayload" }, "audit_type": "ONESHOT" }, "watcher_object.name": "AuditUpdatePayload" }, payload ) def test_send_audit_create(self): audit = utils.get_test_audit( mock.Mock(), interval=None, state=objects.audit.State.PENDING, goal_id=self.goal.id, strategy_id=self.strategy.id, goal=self.goal.as_dict(), strategy=self.strategy.as_dict()) notifications.audit.send_create( mock.MagicMock(), audit, host='node0') self.assertEqual(1, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, 
"auto_trigger": False, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "deleted_at": None, "scope": [], "state": "PENDING", "updated_at": None, "created_at": None, "audit_type": "ONESHOT" }, "watcher_object.name": "AuditCreatePayload" }, payload ) def test_send_audit_delete(self): audit = utils.create_test_audit( mock.Mock(), interval=None, state=objects.audit.State.DELETED, goal_id=self.goal.id, strategy_id=self.strategy.id) notifications.audit.send_delete( mock.MagicMock(), audit, host='node0') # The 1st notification is because we created the object. 
self.assertEqual(2, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "watcher_object.namespace": "watcher", "watcher_object.version": "1.1", "watcher_object.data": { "interval": None, "next_run_time": None, "auto_trigger": False, "strategy_uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "strategy": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3", "name": "TEST", "parameters_spec": {}, "created_at": "2016-10-18T09:52:05Z", "display_name": "test strategy", "deleted_at": None }, "watcher_object.name": "StrategyPayload" }, "parameters": {}, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d", "name": "My Audit", "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.namespace": "watcher", "watcher_object.version": "1.0", "watcher_object.data": { "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "name": "TEST", "efficacy_specification": [], "created_at": "2016-10-18T09:52:05Z", "display_name": "test goal", "deleted_at": None }, "watcher_object.name": "GoalPayload" }, "deleted_at": None, "scope": [], "state": "DELETED", "updated_at": None, "created_at": "2016-10-18T09:52:05Z", "audit_type": "ONESHOT" }, "watcher_object.name": "AuditDeletePayload" }, payload ) def test_send_audit_action(self): audit = utils.create_test_audit( mock.Mock(), interval=None, state=objects.audit.State.ONGOING, goal_id=self.goal.id, strategy_id=self.strategy.id, goal=self.goal, strategy=self.strategy) notifications.audit.send_action_notification( mock.MagicMock(), audit, host='node0', action='strategy', phase='start') # The 1st notification is because we created the object. 
self.assertEqual(2, self.m_notifier.info.call_count) notification = self.m_notifier.info.call_args[1] notification = self.m_notifier.info.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "audit.strategy.start", "payload": { "watcher_object.data": { "audit_type": "ONESHOT", "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": None, "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test goal", "efficacy_specification": [], "name": "TEST", "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652" }, "watcher_object.name": "GoalPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "interval": None, "next_run_time": None, "auto_trigger": False, "name": "My Audit", "parameters": {}, "scope": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d" }, "watcher_object.name": "AuditActionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) def test_send_audit_action_with_error(self): audit = utils.create_test_audit( mock.Mock(), interval=None, state=objects.audit.State.ONGOING, goal_id=self.goal.id, strategy_id=self.strategy.id, goal=self.goal, strategy=self.strategy) try: # This is to load the exception in sys.exc_info() raise exception.WatcherException("TEST") except exception.WatcherException: notifications.audit.send_action_notification( 
mock.MagicMock(), audit, host='node0', action='strategy', priority='error', phase='error') self.assertEqual(1, self.m_notifier.error.call_count) notification = self.m_notifier.error.call_args[1] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual( { "event_type": "audit.strategy.error", "payload": { "watcher_object.data": { "audit_type": "ONESHOT", "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "fault": { "watcher_object.data": { "exception": "WatcherException", "exception_message": "TEST", "function_name": ( "test_send_audit_action_with_error"), "module_name": "watcher.tests.notifications." "test_audit_notification" }, "watcher_object.name": "ExceptionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "goal_uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652", "goal": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test goal", "efficacy_specification": [], "name": "TEST", "updated_at": None, "uuid": "f7ad87ae-4298-91cf-93a0-f35a852e3652" }, "watcher_object.name": "GoalPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "interval": None, "next_run_time": None, "auto_trigger": False, "name": "My Audit", "parameters": {}, "scope": [], "state": "ONGOING", "strategy_uuid": ( "cb3d0b58-4415-4d90-b75b-1e96878730e3"), "strategy": { "watcher_object.data": { "created_at": "2016-10-18T09:52:05Z", "deleted_at": None, "display_name": "test strategy", "name": "TEST", "parameters_spec": {}, "updated_at": None, "uuid": "cb3d0b58-4415-4d90-b75b-1e96878730e3" }, "watcher_object.name": "StrategyPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.0" }, "updated_at": None, "uuid": "10a47dd1-4874-4298-91cf-eff046dbdb8d" }, "watcher_object.name": "AuditActionPayload", "watcher_object.namespace": "watcher", "watcher_object.version": "1.1" } }, notification ) 
python-watcher-4.0.0/watcher/tests/notifications/test_service_notifications.py0000664000175000017500000000554413656752270030210 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import freezegun import mock import oslo_messaging as om from watcher.common import rpc from watcher import notifications from watcher.objects import service as w_service from watcher.tests.db import base from watcher.tests.objects import utils @freezegun.freeze_time('2016-10-18T09:52:05.219414') class TestActionPlanNotification(base.DbTestCase): def setUp(self): super(TestActionPlanNotification, self).setUp() p_get_notifier = mock.patch.object(rpc, 'get_notifier') m_get_notifier = p_get_notifier.start() self.addCleanup(p_get_notifier.stop) self.m_notifier = mock.Mock(spec=om.Notifier) def fake_get_notifier(publisher_id): self.m_notifier.publisher_id = publisher_id return self.m_notifier m_get_notifier.side_effect = fake_get_notifier def test_service_failed(self): service = utils.get_test_service(mock.Mock(), created_at=datetime.datetime.utcnow()) state = w_service.ServiceStatus.FAILED notifications.service.send_service_update(mock.MagicMock(), service, state, host='node0') notification = self.m_notifier.warning.call_args[1] payload = notification['payload'] self.assertEqual("infra-optim:node0", self.m_notifier.publisher_id) self.assertDictEqual({ 'watcher_object.data': { 'last_seen_up': 
'2016-09-22T08:32:06Z', 'name': 'watcher-service', 'sevice_host': 'controller', 'status_update': { 'watcher_object.data': { 'old_state': 'ACTIVE', 'state': 'FAILED' }, 'watcher_object.name': 'ServiceStatusUpdatePayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' } }, 'watcher_object.name': 'ServiceUpdatePayload', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.0' }, payload ) python-watcher-4.0.0/watcher/tests/fakes.py0000664000175000017500000001002613656752270020767 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock import requests fakeAuthTokenHeaders = {'X-User-Id': u'773a902f022949619b5c2f32cd89d419', 'X-Roles': u'admin, ResellerAdmin, _member_', 'X-Project-Id': u'5588aebbcdc24e17a061595f80574376', 'X-Project-Name': 'test', 'X-User-Name': 'test', 'X-Auth-Token': u'5588aebbcdc24e17a061595f80574376', 'X-Forwarded-For': u'10.10.10.10, 11.11.11.11', 'X-Service-Catalog': u'{test: 12345}', 'X-Identity-Status': 'Confirmed', 'X-User-Domain-Name': 'domain', 'X-Project-Domain-Id': 'project_domain_id', 'X-User-Domain-Id': 'user_domain_id', } class FakePecanRequest(mock.Mock): def __init__(self, **kwargs): super(FakePecanRequest, self).__init__(**kwargs) self.host_url = 'http://test_url:8080/test' self.context = {} self.body = '' self.content_type = 'text/unicode' self.params = {} self.path = '/v1/services' self.headers = fakeAuthTokenHeaders self.environ = {} def __setitem__(self, index, value): setattr(self, index, value) class FakePecanResponse(mock.Mock): def __init__(self, **kwargs): super(FakePecanResponse, self).__init__(**kwargs) self.status = None class FakeApp(object): pass class FakeService(mock.Mock): def __init__(self, **kwargs): super(FakeService, self).__init__(**kwargs) self.__tablename__ = 'service' self.__resource__ = 'services' self.user_id = 'fake user id' self.project_id = 'fake project id' self.uuid = 'test_uuid' self.id = 8 self.name = 'james' self.service_type = 'not_this' self.description = 'amazing' self.tags = ['this', 'and that'] self.read_only = True def as_dict(self): return dict(service_type=self.service_type, user_id=self.user_id, project_id=self.project_id, uuid=self.uuid, id=self.id, name=self.name, tags=self.tags, read_only=self.read_only, description=self.description) class FakeAuthProtocol(mock.Mock): def __init__(self, **kwargs): super(FakeAuthProtocol, self).__init__(**kwargs) self.app = FakeApp() self.config = '' class FakeResponse(requests.Response): def __init__(self, status_code, content=None, headers=None): """A requests.Response 
that can be used as a mock return_value. A key feature is that the instance will evaluate to True or False like a real Response, based on the status_code. Properties like ok, status_code, text, and content, and methods like json(), work as expected based on the inputs. :param status_code: Integer HTTP response code (200, 404, etc.) :param content: String supplying the payload content of the response. Using a json-encoded string will make the json() method behave as expected. :param headers: Dict of HTTP header values to set. """ super(FakeResponse, self).__init__() self.status_code = status_code if content: self._content = content if headers: self.headers = headers python-watcher-4.0.0/watcher/tests/decision_engine/0000775000175000017500000000000013656752352022450 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/fake_goals.py0000664000175000017500000000423313656752270025116 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.goal import base as base_goal from watcher.decision_engine.goal.efficacy import base as efficacy_base from watcher.decision_engine.goal.efficacy import indicators from watcher.decision_engine.goal.efficacy import specs class FakeGoal(base_goal.Goal): NAME = NotImplemented DISPLAY_NAME = NotImplemented @classmethod def get_name(cls): return cls.NAME @classmethod def get_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_translatable_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class DummyIndicator(indicators.IndicatorSpecification): def __init__(self): super(DummyIndicator, self).__init__( name="dummy", description="Dummy indicator", unit="%", ) @property def schema(self): return { "type": "integer", "minimum": 0 } class DummySpec1(efficacy_base.EfficacySpecification): def get_indicators_specifications(self): return [DummyIndicator()] def get_global_efficacy_indicator(self, indicators_map): return None class FakeDummy1(FakeGoal): NAME = "dummy_1" DISPLAY_NAME = "Dummy 1" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return DummySpec1() class FakeDummy2(FakeGoal): NAME = "dummy_2" DISPLAY_NAME = "Dummy 2" python-watcher-4.0.0/watcher/tests/decision_engine/__init__.py0000664000175000017500000000010113656752270024550 0ustar zuulzuul00000000000000__author__ = 'Jean-Emile DARTOIS ' python-watcher-4.0.0/watcher/tests/decision_engine/scoring/0000775000175000017500000000000013656752352024114 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/scoring/__init__.py0000664000175000017500000000000013656752270026212 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/scoring/test_dummy_scorer.py0000664000175000017500000000372213656752270030240 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 
2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_serialization import jsonutils from watcher.decision_engine.scoring import dummy_scorer from watcher.tests import base class TestDummyScorer(base.TestCase): def test_metadata(self): scorer = dummy_scorer.DummyScorer(config=None) self.assertEqual('dummy_scorer', scorer.get_name()) self.assertIn('Dummy', scorer.get_description()) metainfo = scorer.get_metainfo() self.assertIn('feature_columns', metainfo) self.assertIn('result_columns', metainfo) self.assertIn('workloads', metainfo) def test_calculate_score(self): scorer = dummy_scorer.DummyScorer(config=None) self._assert_result(scorer, 0, '[0, 0, 0, 0, 0, 0, 0, 0, 0]') self._assert_result(scorer, 0, '[50, 0, 0, 600, 0, 0, 0, 0, 0]') self._assert_result(scorer, 0, '[0, 0, 0, 0, 600, 0, 0, 0, 0]') self._assert_result(scorer, 1, '[85, 0, 0, 0, 0, 0, 0, 0, 0]') self._assert_result(scorer, 2, '[0, 0, 0, 1100, 1100, 0, 0, 0, 0]') self._assert_result(scorer, 3, '[0, 0, 0, 0, 0, 70000000, 70000000, 0, 0]') def _assert_result(self, scorer, expected, features): result_str = scorer.calculate_score(features) actual_result = jsonutils.loads(result_str)[0] self.assertEqual(expected, actual_result) python-watcher-4.0.0/watcher/tests/decision_engine/scoring/test_dummy_scoring_container.py0000664000175000017500000000355513656752270032455 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_serialization import jsonutils from watcher.decision_engine.scoring import dummy_scoring_container from watcher.tests import base class TestDummyScoringContainer(base.TestCase): def test_get_scoring_engine_list(self): scorers = (dummy_scoring_container.DummyScoringContainer .get_scoring_engine_list()) self.assertEqual(3, len(scorers)) self.assertEqual('dummy_min_scorer', scorers[0].get_name()) self.assertEqual('dummy_max_scorer', scorers[1].get_name()) self.assertEqual('dummy_avg_scorer', scorers[2].get_name()) def test_scorers(self): scorers = (dummy_scoring_container.DummyScoringContainer .get_scoring_engine_list()) self._assert_result(scorers[0], 1.1, '[1.1, 2.2, 4, 8]') self._assert_result(scorers[1], 8, '[1.1, 2.2, 4, 8]') # float(1 + 2 + 4 + 8) / 4 = 15.0 / 4 = 3.75 self._assert_result(scorers[2], 3.75, '[1, 2, 4, 8]') def _assert_result(self, scorer, expected, features): result_str = scorer.calculate_score(features) actual_result = jsonutils.loads(result_str)[0] self.assertEqual(expected, actual_result) python-watcher-4.0.0/watcher/tests/decision_engine/scoring/test_scoring_factory.py0000664000175000017500000000346013656752270030722 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.decision_engine.scoring import scoring_factory from watcher.tests import base class TestScoringFactory(base.TestCase): def test_get_scoring_engine(self): scorer = scoring_factory.get_scoring_engine('dummy_scorer') self.assertEqual('dummy_scorer', scorer.get_name()) scorer = scoring_factory.get_scoring_engine('dummy_min_scorer') self.assertEqual('dummy_min_scorer', scorer.get_name()) scorer = scoring_factory.get_scoring_engine('dummy_max_scorer') self.assertEqual('dummy_max_scorer', scorer.get_name()) scorer = scoring_factory.get_scoring_engine('dummy_avg_scorer') self.assertEqual('dummy_avg_scorer', scorer.get_name()) self.assertRaises( KeyError, scoring_factory.get_scoring_engine, 'non_existing_scorer') def test_get_scoring_engine_list(self): scoring_engines = scoring_factory.get_scoring_engine_list() engine_names = {'dummy_scorer', 'dummy_min_scorer', 'dummy_max_scorer', 'dummy_avg_scorer'} for scorer in scoring_engines: self.assertIn(scorer.get_name(), engine_names) python-watcher-4.0.0/watcher/tests/decision_engine/messaging/0000775000175000017500000000000013656752352024425 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/messaging/__init__.py0000664000175000017500000000000013656752270026523 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/messaging/test_audit_endpoint.py0000664000175000017500000000564013656752270031050 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.decision_engine.audit import continuous as continuous_handler from watcher.decision_engine.audit import oneshot as oneshot_handler from watcher.decision_engine.messaging import audit_endpoint from watcher.decision_engine.model.collector import manager from watcher.tests.db import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.objects import utils as obj_utils class TestAuditEndpoint(base.DbTestCase): def setUp(self): super(TestAuditEndpoint, self).setUp() self.goal = obj_utils.create_test_goal(self.context) self.audit_template = obj_utils.create_test_audit_template( self.context) self.audit = obj_utils.create_test_audit( self.context, audit_template_id=self.audit_template.id) @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") def test_do_trigger_audit(self, mock_collector, mock_handler): mock_collector.return_value = faker_cluster_state.FakerModelCollector() audit_handler = oneshot_handler.OneShotAuditHandler endpoint = audit_endpoint.AuditEndpoint(audit_handler) with mock.patch.object(oneshot_handler.OneShotAuditHandler, 'execute') as mock_call: mock_call.return_value = 0 endpoint.do_trigger_audit(self.context, self.audit.uuid) self.assertEqual(mock_call.call_count, 1) @mock.patch.object(continuous_handler.ContinuousAuditHandler, 'start') @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") def test_trigger_audit(self, 
mock_collector, mock_handler): mock_collector.return_value = faker_cluster_state.FakerModelCollector() audit_handler = oneshot_handler.OneShotAuditHandler endpoint = audit_endpoint.AuditEndpoint(audit_handler) with mock.patch.object(endpoint.executor, 'submit') as mock_call: mock_execute = mock.call(endpoint.do_trigger_audit, self.context, self.audit.uuid) endpoint.trigger_audit(self.context, self.audit.uuid) mock_call.assert_has_calls([mock_execute]) self.assertEqual(mock_call.call_count, 1) python-watcher-4.0.0/watcher/tests/decision_engine/messaging/test_data_model_endpoint.py0000664000175000017500000000371213656752270032031 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2019 ZTE Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock import unittest from watcher.common import exception from watcher.common import utils from watcher.decision_engine.messaging import data_model_endpoint from watcher.decision_engine.model.collector import manager from watcher.objects import audit class TestDataModelEndpoint(unittest.TestCase): def setUp(self): self.endpoint_instance = data_model_endpoint.DataModelEndpoint('fake') @mock.patch.object(audit.Audit, 'get') def test_get_audit_scope(self, mock_get): mock_get.return_value = mock.Mock(scope='fake_scope') audit_uuid = utils.generate_uuid() result = self.endpoint_instance.get_audit_scope( context=None, audit=audit_uuid) self.assertEqual('fake_scope', result) @mock.patch.object(audit.Audit, 'get_by_name') def test_get_audit_scope_with_error_name(self, mock_get_by_name): mock_get_by_name.side_effect = exception.AuditNotFound() audit_name = 'error_audit_name' self.assertRaises( exception.InvalidIdentity, self.endpoint_instance.get_audit_scope, context=None, audit=audit_name) @mock.patch.object(manager, 'CollectorManager', mock.Mock()) def test_get_data_model_info(self): result = self.endpoint_instance.get_data_model_info(context='fake') self.assertIn('context', result) python-watcher-4.0.0/watcher/tests/decision_engine/planner/0000775000175000017500000000000013656752352024107 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/planner/__init__.py0000664000175000017500000000000013656752270026205 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/planner/test_planner_manager.py0000664000175000017500000000215613656752270030654 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.decision_engine.planner import manager as planner from watcher.decision_engine.planner import weight from watcher.tests import base class TestPlannerManager(base.TestCase): def test_load(self): cfg.CONF.set_override('planner', "weight", group='watcher_planner') manager = planner.PlannerManager() selected_planner = cfg.CONF.watcher_planner.planner self.assertIsInstance(manager.load(selected_planner), weight.WeightPlanner) python-watcher-4.0.0/watcher/tests/decision_engine/planner/test_weight_planner.py0000664000175000017500000012502613656752270030533 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from watcher.common import nova_helper from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine.planner import weight as pbase from watcher.decision_engine.solution import default as dsol from watcher.decision_engine.strategy import strategies from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils as db_utils from watcher.tests.decision_engine.model import ceilometer_metrics as fake from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.objects import utils as obj_utils class SolutionFaker(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon.compute_model = current_state_cluster.generate_scenario_1() sercon.ceilometer = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return sercon.execute() class SolutionFakerSingleHyp(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon.compute_model = ( current_state_cluster.generate_scenario_3_with_2_nodes()) sercon.ceilometer = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return sercon.execute() class TestActionScheduling(base.DbTestCase): def setUp(self): super(TestActionScheduling, self).setUp() self.goal = db_utils.create_test_goal(name="dummy") self.strategy = db_utils.create_test_strategy(name="dummy") self.audit = db_utils.create_test_audit( uuid=utils.generate_uuid(), strategy_id=self.strategy.id) self.planner = pbase.WeightPlanner( mock.Mock( weights={ 'turn_host_to_acpi_s3_state': 10, 'resize': 20, 'migrate': 30, 'sleep': 40, 'change_nova_service_state': 50, 'nop': 60, 'new_action_type': 70, }, parallelization={ 'turn_host_to_acpi_s3_state': 2, 'resize': 2, 
'migrate': 2, 'sleep': 1, 'change_nova_service_state': 1, 'nop': 1, 'new_action_type': 70, })) @mock.patch.object(utils, "generate_uuid") def test_schedule_actions(self, m_generate_uuid): m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", "33333333-3333-3333-3333-333333333333", # "44444444-4444-4444-4444-444444444444", # "55555555-5555-5555-5555-555555555555", # "66666666-6666-6666-6666-666666666666", # "77777777-7777-7777-7777-777777777777", # "88888888-8888-8888-8888-888888888888", # "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) self.planner.config.weights = {'migrate': 3} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = [] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_two_actions(self, m_generate_uuid): m_generate_uuid.side_effect = [ 
"00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", "22222222-2222-2222-2222-222222222222", "33333333-3333-3333-3333-333333333333", "44444444-4444-4444-4444-444444444444", # Migrate 1 "55555555-5555-5555-5555-555555555555", # Nop 1 ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) # We create the migrate action before but we then schedule # after the nop action solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="nop", input_parameters={"message": "Hello world"}) self.planner.config.weights = {'migrate': 3, 'nop': 5} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'nop', 'parents': [], 'uuid': '55555555-5555-5555-5555-555555555555'}, {'action_type': 'migrate', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_actions_with_unknown_action(self, m_generate_uuid): m_generate_uuid.side_effect = [ 
"00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # new_action_type "33333333-3333-3333-3333-333333333333", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "server1", "dst_uuid_node": "server2", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'new_action_type', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['22222222-2222-2222-2222-222222222222'], 'uuid': '11111111-1111-1111-1111-111111111111'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') def test_schedule_migrate_resize_actions(self, m_nova, m_generate_uuid): m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 
"44444444-4444-4444-4444-444444444444", # Migrate 4 "55555555-5555-5555-5555-555555555555", # Migrate 5 "66666666-6666-6666-6666-666666666666", # Resize 1 "77777777-7777-7777-7777-777777777777", # Resize 2 "88888888-8888-8888-8888-888888888888", # Nop "99999999-9999-9999-9999-999999999999", ] m_nova.return_value = 'server1' solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={"flavor": "x1"}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111'], 'uuid': '22222222-2222-2222-2222-222222222222'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_3_migrate_1_resize_1_acpi_actions_1_swimlane( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 1 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan 
"11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111'], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': 
['11111111-1111-1111-1111-111111111111'], 'uuid': '22222222-2222-2222-2222-222222222222'}), ({'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': ['22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_migrate_resize_acpi_actions_2_swimlanes( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 2 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", 
resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), 
dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_migrate_resize_acpi_actions_3_swimlanes( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 3 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( 
self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '44444444-4444-4444-4444-444444444444'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) @mock.patch.object(utils, "generate_uuid") def test_schedule_three_migrate_two_resize_actions( self, m_generate_uuid): 
self.planner.config.parallelization["migrate"] = 3 self.planner.config.parallelization["resize"] = 2 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Resize "55555555-5555-5555-5555-555555555555", # ACPI "66666666-6666-6666-6666-666666666666", "77777777-7777-7777-7777-777777777777", "88888888-8888-8888-8888-888888888888", "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server0", "destination_node": "server1", } solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters=parameters) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="resize", resource_id="b189db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={'flavor': 'x1'}) with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', 
'33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'resize', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222', '33333333-3333-3333-3333-333333333333'], 'uuid': '55555555-5555-5555-5555-555555555555'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) 
@mock.patch.object(utils, "generate_uuid") def test_schedule_5_migrate_2_resize_actions_for_2_swimlanes( self, m_generate_uuid): self.planner.config.parallelization["migrate"] = 2 self.planner.config.parallelization["resize"] = 2 m_generate_uuid.side_effect = [ "00000000-0000-0000-0000-000000000000", # Action plan "11111111-1111-1111-1111-111111111111", # Migrate 1 "22222222-2222-2222-2222-222222222222", # Migrate 2 "33333333-3333-3333-3333-333333333333", # Migrate 3 "44444444-4444-4444-4444-444444444444", # Migrate 4 "55555555-5555-5555-5555-555555555555", # Migrate 5 "66666666-6666-6666-6666-666666666666", # Resize 1 "77777777-7777-7777-7777-777777777777", # Resize 2 "88888888-8888-8888-8888-888888888888", # Nop "99999999-9999-9999-9999-999999999999", ] solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server1", "destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server2", "destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server3", "destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server4", "destination_node": "server6"}) solution.add_action(action_type="migrate", resource_id="DOESNOTMATTER", input_parameters={"source_node": "server5", "destination_node": "server6"}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="resize", resource_id="DOESNOTMATTER", input_parameters={'flavor': 'x2'}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="DOESNOTMATTER") with mock.patch.object( pbase.WeightPlanner, "create_scheduled_actions", 
wraps=self.planner.create_scheduled_actions ) as m_create_scheduled_actions: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_scheduled_actions.call_count) action_graph = m_create_scheduled_actions.call_args[0][0] expected_edges = \ [({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '11111111-1111-1111-1111-111111111111'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}), ({'action_type': 'migrate', 'parents': [], 'uuid': '22222222-2222-2222-2222-222222222222'}, {'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '44444444-4444-4444-4444-444444444444'}), ({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '33333333-3333-3333-3333-333333333333'}, {'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': ['11111111-1111-1111-1111-111111111111', '22222222-2222-2222-2222-222222222222'], 'uuid': '44444444-4444-4444-4444-444444444444'}, {'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', 
'44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}), ({'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}, {'action_type': 'resize', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '66666666-6666-6666-6666-666666666666'}), ({'action_type': 'migrate', 'parents': ['33333333-3333-3333-3333-333333333333', '44444444-4444-4444-4444-444444444444'], 'uuid': '55555555-5555-5555-5555-555555555555'}, {'action_type': 'resize', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '77777777-7777-7777-7777-777777777777'}), ({'action_type': 'resize', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '66666666-6666-6666-6666-666666666666'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['66666666-6666-6666-6666-666666666666', '77777777-7777-7777-7777-777777777777'], 'uuid': '88888888-8888-8888-8888-888888888888'}), ({'action_type': 'resize', 'parents': ['55555555-5555-5555-5555-555555555555'], 'uuid': '77777777-7777-7777-7777-777777777777'}, {'action_type': 'turn_host_to_acpi_s3_state', 'parents': ['66666666-6666-6666-6666-666666666666', '77777777-7777-7777-7777-777777777777'], 'uuid': '88888888-8888-8888-8888-888888888888'})] edges = sorted([(src.as_dict(), dst.as_dict()) for src, dst in action_graph.edges()], key=lambda pair: pair[0]['uuid']) for src, dst in edges: for key in ('id', 'action_plan', 'action_plan_id', 'created_at', 'input_parameters', 'deleted_at', 'updated_at', 'state'): del src[key] del dst[key] self.assertEqual(len(expected_edges), len(edges)) for pair in expected_edges: self.assertIn(pair, edges) class TestWeightPlanner(base.DbTestCase): def setUp(self): super(TestWeightPlanner, self).setUp() self.planner = pbase.WeightPlanner(mock.Mock()) self.planner.config.weights = { 'nop': 0, 'sleep': 1, 'change_nova_service_state': 2, 'migrate': 3 } self.goal = 
obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy( self.context, goal_id=self.goal.id) obj_utils.create_test_audit_template( self.context, goal_id=self.goal.id, strategy_id=self.strategy.id) p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') self.mock_create_action_plan = p.start() self.mock_create_action_plan.side_effect = ( self._simulate_action_plan_create) self.addCleanup(p.stop) q = mock.patch.object(db_api.BaseConnection, 'create_action') self.mock_create_action = q.start() self.mock_create_action.side_effect = ( self._simulate_action_create) self.addCleanup(q.stop) def _simulate_action_plan_create(self, action_plan): action_plan.create() return action_plan def _simulate_action_create(self, action): action.create() return action @mock.patch.object(objects.Strategy, 'get_by_name') def test_scheduler_warning_empty_action_plan(self, m_get_by_name): m_get_by_name.return_value = self.strategy audit = db_utils.create_test_audit( goal_id=self.goal.id, strategy_id=self.strategy.id) fake_solution = mock.MagicMock(efficacy_indicators=[], actions=[]) action_plan = self.planner.schedule( self.context, audit.id, fake_solution) self.assertIsNotNone(action_plan.uuid) python-watcher-4.0.0/watcher/tests/decision_engine/planner/test_node_resource_consolidation.py0000664000175000017500000002444713656752270033313 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine.planner import \ node_resource_consolidation as pbase from watcher.decision_engine.solution import default as dsol from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils as db_utils from watcher.tests.objects import utils as obj_utils class TestActionScheduling(base.DbTestCase): def setUp(self): super(TestActionScheduling, self).setUp() self.goal = db_utils.create_test_goal(name="server_consolidation") self.strategy = db_utils.create_test_strategy( name="node_resource_consolidation") self.audit = db_utils.create_test_audit( uuid=utils.generate_uuid(), strategy_id=self.strategy.id) self.planner = pbase.NodeResourceConsolidationPlanner(mock.Mock()) def test_schedule_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "host1", "destination_node": "host2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_action.call_count) filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) def test_schedule_two_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) server1_uuid = "b199db0c-1408-4d52-b5a5-5ca14de0ff36" server2_uuid = "b199db0c-1408-4d52-b5a5-5ca14de0ff37" solution.add_action(action_type="migrate", 
resource_id=server1_uuid, input_parameters={ "source_node": "host1", "destination_node": "host2", }) solution.add_action(action_type="migrate", resource_id=server2_uuid, input_parameters={ "source_node": "host1", "destination_node": "host3", }) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(2, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual( server1_uuid, actions[0]['input_parameters'].get('resource_id')) self.assertEqual( server2_uuid, actions[1]['input_parameters'].get('resource_id')) self.assertIn(actions[0]['uuid'], actions[1]['parents']) def test_schedule_actions_with_unknown_action(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "host1", "dst_uuid_node": "host2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: self.assertRaises( exception.UnsupportedActionType, self.planner.schedule, self.context, self.audit.id, solution) self.assertEqual(2, m_create_action.call_count) def test_schedule_migrate_change_state_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={"state": "disabled"}) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff37", 
input_parameters={"state": "disabled"}) solution.add_action(action_type="migrate", resource_id="f6416850-da28-4047-a547-8c49f53e95fe", input_parameters={"source_node": "host1"}) solution.add_action(action_type="migrate", resource_id="bb404e74-2caf-447b-bd1e-9234db386ca5", input_parameters={"source_node": "host2"}) solution.add_action(action_type="migrate", resource_id="f6416850-da28-4047-a547-8c49f53e95ff", input_parameters={"source_node": "host1"}) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={"state": "enabled"}) solution.add_action(action_type="change_nova_service_state", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff37", input_parameters={"state": "enabled"}) with mock.patch.object( pbase.NodeResourceConsolidationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(7, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("change_nova_service_state", actions[0].action_type) self.assertEqual("change_nova_service_state", actions[1].action_type) self.assertEqual("migrate", actions[2].action_type) self.assertEqual("migrate", actions[3].action_type) self.assertEqual("migrate", actions[4].action_type) self.assertEqual("change_nova_service_state", actions[5].action_type) self.assertEqual("change_nova_service_state", actions[6].action_type) action0_uuid = actions[0]['uuid'] action1_uuid = actions[1]['uuid'] action2_uuid = actions[2]['uuid'] action3_uuid = actions[3]['uuid'] action4_uuid = actions[4]['uuid'] action5_uuid = actions[5]['uuid'] action6_uuid = actions[6]['uuid'] # parents of action3,4,5 are action0,1 # resource2 and 4 have the same source, # so action about resource4 depends on # action about 
resource2 parents = [] for action in actions: if action.parents: parents.extend(action.parents) self.assertIn(action0_uuid, parents) self.assertIn(action1_uuid, parents) self.assertIn(action2_uuid, parents) self.assertIn(action3_uuid, parents) self.assertIn(action4_uuid, parents) self.assertNotIn(action5_uuid, parents) self.assertNotIn(action6_uuid, parents) class TestDefaultPlanner(base.DbTestCase): def setUp(self): super(TestDefaultPlanner, self).setUp() self.planner = pbase.NodeResourceConsolidationPlanner(mock.Mock()) self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy( self.context, goal_id=self.goal.id) obj_utils.create_test_audit_template( self.context, goal_id=self.goal.id, strategy_id=self.strategy.id) p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') self.mock_create_action_plan = p.start() self.mock_create_action_plan.side_effect = ( self._simulate_action_plan_create) self.addCleanup(p.stop) q = mock.patch.object(db_api.BaseConnection, 'create_action') self.mock_create_action = q.start() self.mock_create_action.side_effect = ( self._simulate_action_create) self.addCleanup(q.stop) def _simulate_action_plan_create(self, action_plan): action_plan.create() return action_plan def _simulate_action_create(self, action): action.create() return action @mock.patch.object(objects.Strategy, 'get_by_name') def test_scheduler_warning_empty_action_plan(self, m_get_by_name): m_get_by_name.return_value = self.strategy audit = db_utils.create_test_audit( goal_id=self.goal.id, strategy_id=self.strategy.id) fake_solution = mock.MagicMock(efficacy_indicators=[], actions=[]) action_plan = self.planner.schedule( self.context, audit.id, fake_solution) self.assertIsNotNone(action_plan.uuid) python-watcher-4.0.0/watcher/tests/decision_engine/planner/test_workload_stabilization_planner.py0000664000175000017500000003760713656752270034031 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 
b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.common import exception from watcher.common import nova_helper from watcher.common import utils from watcher.db import api as db_api from watcher.decision_engine.planner import workload_stabilization as pbase from watcher.decision_engine.solution import default as dsol from watcher.decision_engine.strategy import strategies from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils as db_utils from watcher.tests.decision_engine.model import ceilometer_metrics as fake from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.objects import utils as obj_utils class SolutionFaker(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon._compute_model = current_state_cluster.generate_scenario_1() sercon.ceilometer = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return sercon.execute() class SolutionFakerSingleHyp(object): @staticmethod def build(): metrics = fake.FakerMetricsCollector() current_state_cluster = faker_cluster_state.FakerModelCollector() sercon = strategies.BasicConsolidation(config=mock.Mock()) sercon._compute_model = ( current_state_cluster.generate_scenario_3_with_2_nodes()) sercon.ceilometer = mock.MagicMock( get_statistics=metrics.mock_get_statistics) return 
sercon.execute() class TestActionScheduling(base.DbTestCase): def setUp(self): super(TestActionScheduling, self).setUp() self.goal = db_utils.create_test_goal(name="dummy") self.strategy = db_utils.create_test_strategy(name="dummy") self.audit = db_utils.create_test_audit( uuid=utils.generate_uuid(), strategy_id=self.strategy.id) self.planner = pbase.WorkloadStabilizationPlanner(mock.Mock()) self.nova_helper = nova_helper.NovaHelper(mock.Mock()) def test_schedule_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: self.planner.config.weights = {'migrate': 3} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(1, m_create_action.call_count) filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) def test_schedule_two_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="nop", input_parameters={"message": "Hello world"}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: self.planner.config.weights = {'migrate': 3, 'nop': 5} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertIsNotNone(action_plan.uuid) self.assertEqual(2, 
m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("nop", actions[0].action_type) self.assertEqual("migrate", actions[1].action_type) def test_schedule_actions_with_unknown_action(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "server1", "dst_uuid_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: self.planner.config.weights = {'migrate': 0} self.assertRaises(KeyError, self.planner.schedule, self.context, self.audit.id, solution) assert not m_nova.called self.assertEqual(2, m_create_action.call_count) def test_schedule_actions_with_unsupported_action(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "src_uuid_node": "server1", "dst_uuid_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="new_action_type", resource_id="", input_parameters={}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: self.planner.config.weights = { 'turn_host_to_acpi_s3_state': 0, 'resize': 1, 'migrate': 2, 'sleep': 3, 'change_nova_service_state': 4, 'nop': 5, 'new_action_type': 6} self.assertRaises(exception.UnsupportedActionType, self.planner.schedule, self.context, self.audit.id, solution) assert not m_nova.called 
self.assertEqual(2, m_create_action.call_count) @mock.patch.object(nova_helper.NovaHelper, 'get_instance_by_uuid') def test_schedule_migrate_resize_actions(self, mock_nova): mock_nova.return_value = 'server1' solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="resize", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={"flavor": "x1"}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object(nova_helper, 'NovaHelper') as m_nova: self.planner.config.weights = {'migrate': 3, 'resize': 2} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertEqual(1, m_nova.call_count) self.assertIsNotNone(action_plan.uuid) self.assertEqual(2, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) self.assertEqual("resize", actions[1].action_type) self.assertEqual(actions[0].uuid, actions[1].parents[0]) def test_schedule_migrate_resize_acpi_s3_actions(self): solution = dsol.DefaultSolution( goal=mock.Mock(), strategy=self.strategy) parameters = { "source_node": "server1", "destination_node": "server2", } parent_migration = "b199db0c-1408-4d52-b5a5-5ca14de0ff36" solution.add_action(action_type="migrate", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) solution.add_action(action_type="resize", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters={'flavor': 'x1'}) solution.add_action(action_type="migrate", resource_id="f6416850-da28-4047-a547-8c49f53e95fe", input_parameters={"source_node": 
"server1", "destination_node": "server2"}) solution.add_action(action_type="migrate", resource_id="bb404e74-2caf-447b-bd1e-9234db386ca5", input_parameters={"source_node": "server2", "destination_node": "server3"}) solution.add_action(action_type="turn_host_to_acpi_s3_state", resource_id="server1", input_parameters={}) with mock.patch.object( pbase.WorkloadStabilizationPlanner, "create_action", wraps=self.planner.create_action ) as m_create_action: with mock.patch.object( nova_helper, 'NovaHelper') as m_nova: m_nova().get_hostname.return_value = 'server1' m_nova().get_instance_by_uuid.return_value = ['uuid1'] self.planner.config.weights = { 'turn_host_to_acpi_s3_state': 0, 'resize': 1, 'migrate': 2, 'sleep': 3, 'change_nova_service_state': 4, 'nop': 5} action_plan = self.planner.schedule( self.context, self.audit.id, solution) self.assertEqual(3, m_nova.call_count) self.assertIsNotNone(action_plan.uuid) self.assertEqual(5, m_create_action.call_count) # check order filters = {'action_plan_id': action_plan.id} actions = objects.Action.dbapi.get_action_list(self.context, filters) self.assertEqual("migrate", actions[0].action_type) self.assertEqual("migrate", actions[1].action_type) self.assertEqual("migrate", actions[2].action_type) self.assertEqual("resize", actions[3].action_type) self.assertEqual("turn_host_to_acpi_s3_state", actions[4].action_type) for action in actions: if action.input_parameters['resource_id'] == parent_migration: parent_migration = action break self.assertEqual(parent_migration.uuid, actions[3].parents[0]) class TestDefaultPlanner(base.DbTestCase): def setUp(self): super(TestDefaultPlanner, self).setUp() self.planner = pbase.WorkloadStabilizationPlanner(mock.Mock()) self.planner.config.weights = { 'nop': 0, 'sleep': 1, 'change_nova_service_state': 2, 'migrate': 3 } self.goal = obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy( self.context, goal_id=self.goal.id) obj_utils.create_test_audit_template( 
self.context, goal_id=self.goal.id, strategy_id=self.strategy.id) p = mock.patch.object(db_api.BaseConnection, 'create_action_plan') self.mock_create_action_plan = p.start() self.mock_create_action_plan.side_effect = ( self._simulate_action_plan_create) self.addCleanup(p.stop) q = mock.patch.object(db_api.BaseConnection, 'create_action') self.mock_create_action = q.start() self.mock_create_action.side_effect = ( self._simulate_action_create) self.addCleanup(q.stop) def _simulate_action_plan_create(self, action_plan): action_plan.create() return action_plan def _simulate_action_create(self, action): action.create() return action @mock.patch.object(objects.Strategy, 'get_by_name') def test_scheduler_warning_empty_action_plan(self, m_get_by_name): m_get_by_name.return_value = self.strategy audit = db_utils.create_test_audit( goal_id=self.goal.id, strategy_id=self.strategy.id) fake_solution = mock.MagicMock(efficacy_indicators=[], actions=[]) action_plan = self.planner.schedule( self.context, audit.id, fake_solution) self.assertIsNotNone(action_plan.uuid) class TestActionValidator(base.DbTestCase): INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" def setUp(self): super(TestActionValidator, self).setUp() self.r_osc_cls = mock.Mock() self.r_helper_cls = mock.Mock() self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) self.r_helper_cls.return_value = self.r_helper r_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.r_helper_cls) r_nova_helper.start() self.addCleanup(r_nova_helper.stop) def test_resize_validate_parents(self): resize_object = pbase.ResizeActionValidator() action = {'uuid': 'fcec56cd-74c1-406b-a7c1-81ef9f0c1393', 'input_parameters': {'resource_id': self.INSTANCE_UUID}} resource_action_map = {self.INSTANCE_UUID: [ ('action_uuid', 'migrate')]} self.r_helper.get_hostname.return_value = 'server1' self.r_helper.get_instance_by_uuid.return_value = ['instance'] result = resize_object.validate_parents(resource_action_map, action) 
self.assertEqual('action_uuid', result[0]) def test_migrate_validate_parents(self): migrate_object = pbase.MigrationActionValidator() action = {'uuid': '712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'input_parameters': {'source_node': 'server1', 'resource_id': self.INSTANCE_UUID}} resource_action_map = {} expected_map = { '94ae2f92-b7fd-4da7-9e97-f13504ae98c4': [ ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')], 'server1': [ ('712f1701-4c1b-4076-bfcf-3f23cfec6c3b', 'migrate')]} migrate_object.validate_parents(resource_action_map, action) self.assertEqual(resource_action_map, expected_map) python-watcher-4.0.0/watcher/tests/decision_engine/test_rpcapi.py0000664000175000017500000000472713656752270025350 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock import oslo_messaging as om from watcher.common import exception from watcher.common import utils from watcher.decision_engine import rpcapi from watcher.tests import base class TestDecisionEngineAPI(base.TestCase): api = rpcapi.DecisionEngineAPI() def test_get_api_version(self): with mock.patch.object(om.RPCClient, 'call') as mock_call: expected_context = self.context self.api.check_api_version(expected_context) mock_call.assert_called_once_with( expected_context, 'check_api_version', api_version=rpcapi.DecisionEngineAPI().api_version) def test_execute_audit_throw_exception(self): audit_uuid = "uuid" self.assertRaises(exception.InvalidUuidOrName, self.api.trigger_audit, audit_uuid) def test_execute_audit_without_error(self): with mock.patch.object(om.RPCClient, 'cast') as mock_cast: audit_uuid = utils.generate_uuid() self.api.trigger_audit(self.context, audit_uuid) mock_cast.assert_called_once_with( self.context, 'trigger_audit', audit_uuid=audit_uuid) def test_get_strategy_info(self): with mock.patch.object(om.RPCClient, 'call') as mock_call: self.api.get_strategy_info(self.context, "dummy") mock_call.assert_called_once_with( self.context, 'get_strategy_info', strategy_name="dummy") def test_get_data_model_info(self): with mock.patch.object(om.RPCClient, 'call') as mock_call: self.api.get_data_model_info( self.context, data_model_type='compute', audit=None) mock_call.assert_called_once_with( self.context, 'get_data_model_info', data_model_type='compute', audit=None) python-watcher-4.0.0/watcher/tests/decision_engine/solution/0000775000175000017500000000000013656752352024324 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/solution/__init__.py0000664000175000017500000000000013656752270026422 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/solution/test_default_solution.py0000664000175000017500000000514113656752270031315 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 
2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.decision_engine.solution import default from watcher.decision_engine.strategy import strategies from watcher.tests import base class TestDefaultSolution(base.TestCase): def test_default_solution(self): solution = default.DefaultSolution( goal=mock.Mock(), strategy=strategies.DummyStrategy(config=mock.Mock())) parameters = { "source_node": "server1", "destination_node": "server2", } solution.add_action(action_type="nop", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36", input_parameters=parameters) self.assertEqual(1, len(solution.actions)) expected_action_type = "nop" expected_parameters = { "source_node": "server1", "destination_node": "server2", "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" } self.assertEqual(expected_action_type, solution.actions[0].get('action_type')) self.assertEqual(expected_parameters, solution.actions[0].get('input_parameters')) self.assertEqual('weight', solution.strategy.planner) def test_default_solution_with_no_input_parameters(self): solution = default.DefaultSolution( goal=mock.Mock(), strategy=strategies.DummyStrategy(config=mock.Mock())) solution.add_action(action_type="nop", resource_id="b199db0c-1408-4d52-b5a5-5ca14de0ff36") self.assertEqual(1, len(solution.actions)) expected_action_type = "nop" expected_parameters = { "resource_id": "b199db0c-1408-4d52-b5a5-5ca14de0ff36" } self.assertEqual(expected_action_type, solution.actions[0].get('action_type')) 
self.assertEqual(expected_parameters, solution.actions[0].get('input_parameters')) self.assertEqual('weight', solution.strategy.planner) python-watcher-4.0.0/watcher/tests/decision_engine/scope/0000775000175000017500000000000013656752352023561 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/scope/__init__.py0000664000175000017500000000000013656752270025657 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/scope/fake_scopes.py0000664000175000017500000000516213656752270026420 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.tests.decision_engine.model import faker_cluster_state vum = faker_cluster_state.volume_uuid_mapping fake_scope_1 = [{'compute': [{'availability_zones': [{'name': 'AZ1'}, {'name': 'AZ3'}]}, {'exclude': [ {'instances': [ {'uuid': 'INSTANCE_6'}]}, ]}] } ] compute_scope = [{'compute': [{'host_aggregates': [{'id': '*'}]}, {'availability_zones': [{'name': 'AZ1'}, {'name': 'AZ2'}]}, {'exclude': [ {'instances': [ {'uuid': 'INSTANCE_1'}, {'uuid': 'INSTANCE_2'}]}, {'compute_nodes': [ {'name': 'Node_1'}, {'name': 'Node_2'}]} ]}] } ] fake_scope_2 = [{'storage': [{'availability_zones': [{'name': 'zone_0'}]}, {'exclude': [ {'volumes': [ {'uuid': vum['volume_1']}]}, {'storage_pools': [ {'name': 'host_0@backend_0#pool_1'}]} ]}] } ] fake_scope_3 = [{'compute': [{'host_aggregates': [{'id': '1'}]}, {'exclude': [] }] } ] baremetal_scope = [ {'baremetal': [ {'exclude': [ {'ironic_nodes': [ {'uuid': 'c5941348-5a87-4016-94d4-4f9e0ce2b87a'}, {'uuid': 'c5941348-5a87-4016-94d4-4f9e0ce2b87c'} ] } ] } ] } ] python-watcher-4.0.0/watcher/tests/decision_engine/scope/test_storage.py0000664000175000017500000002323713656752270026644 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 NEC Corportion # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.scope import storage from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.scope import fake_scopes class TestStorageScope(base.TestCase): def setUp(self): super(TestStorageScope, self).setUp() self.fake_cluster = faker_cluster_state.FakerStorageModelCollector() @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_get_scoped_model_with_zones_pools_volumes(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() audit_scope = fake_scopes.fake_scope_2 mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] model = storage.StorageScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) expected_edges = [(faker_cluster_state.volume_uuid_mapping['volume_0'], 'host_0@backend_0#pool_0'), ('host_0@backend_0#pool_0', 'host_0@backend_0')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_get_scoped_model_without_scope(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() storage.StorageScope([], mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) assert not mock_zone_list.called @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_collect_zones(self, mock_zone_list): allowed_nodes = [] az_scope = [{'name': 'zone_1'}] mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] storage.StorageScope([{'availability _zones': az_scope}], mock.Mock(), osc=mock.Mock())._collect_zones( az_scope, allowed_nodes) self.assertEqual(['host_1@backend_1'], sorted(allowed_nodes)) # storage scope with az wildcard az_scope = [{'name': '*'}] del 
allowed_nodes[:] storage.StorageScope([{'availability _zones': az_scope}], mock.Mock(), osc=mock.Mock())._collect_zones( az_scope, allowed_nodes) self.assertEqual(['host_0@backend_0', 'host_1@backend_1'], sorted(allowed_nodes)) # storage scope with az wildcard and other az_scope = [{'name': '*'}, {'name': 'zone_0'}] del allowed_nodes[:] scope_handler = storage.StorageScope( [{'availability _zones': az_scope}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_zones, az_scope, allowed_nodes) @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') @mock.patch.object(cinder_helper.CinderHelper, 'get_volume_type_by_backendname') def test_collect_vtype(self, mock_vt_list, mock_zone_list): allowed_nodes = [] mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] def side_effect(arg): if arg == 'backend_0': return ['type_0'] else: return ['type_1'] mock_vt_list.side_effect = side_effect vt_scope = [{'name': 'type_1'}] storage.StorageScope([{'volume_types': vt_scope}], mock.Mock(), osc=mock.Mock())._collect_vtype( vt_scope, allowed_nodes) self.assertEqual(['host_1@backend_1'], sorted(allowed_nodes)) # storage scope with vt wildcard vt_scope = [{'name': '*'}] del allowed_nodes[:] storage.StorageScope([{'volume_types': vt_scope}], mock.Mock(), osc=mock.Mock())._collect_vtype( vt_scope, allowed_nodes) self.assertEqual(['host_0@backend_0', 'host_1@backend_1'], sorted(allowed_nodes)) # storage scope with vt wildcard and other vt_scope = [{'name': '*'}, {'name': 'type_0'}] del allowed_nodes[:] scope_handler = storage.StorageScope([{'volume_types': vt_scope}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_vtype, vt_scope, allowed_nodes) def test_exclude_resources(self): pools_to_exclude = [] projects_to_exclude = [] volumes_to_exclude = [] resources = [{'volumes': [{'uuid': 
'VOLUME_1'}, {'uuid': 'VOLUME_2'}] }, {'storage_pools': [{'name': 'host_0@backend_0#pool_1'}, {'name': 'host_1@backend_1#pool_1'}] }, {'projects': [{'uuid': 'PROJECT_1'}, {'uuid': 'PROJECT_2'}, {'uuid': 'PROJECT_3'}] } ] storage.StorageScope(resources, mock.Mock(), osc=mock.Mock()).exclude_resources( resources, pools=pools_to_exclude, projects=projects_to_exclude, volumes=volumes_to_exclude) self.assertEqual(['VOLUME_1', 'VOLUME_2'], volumes_to_exclude) self.assertEqual(['PROJECT_1', 'PROJECT_2', 'PROJECT_3'], projects_to_exclude) self.assertEqual(['host_0@backend_0#pool_1', 'host_1@backend_1#pool_1'], pools_to_exclude) def test_exclude_volumes(self): cluster = self.fake_cluster.generate_scenario_1() exclude = [faker_cluster_state.volume_uuid_mapping['volume_0'], faker_cluster_state.volume_uuid_mapping['volume_3'], ] storage.StorageScope([], mock.Mock(), osc=mock.Mock()).exclude_volumes(exclude, cluster) self.assertNotIn(exclude[0], cluster.get_all_volumes().keys()) self.assertNotIn(exclude[1], cluster.get_all_volumes().keys()) def test_exclude_pools(self): cluster = self.fake_cluster.generate_scenario_1() exclude = ['host_0@backend_0#pool_0'] node_name = (exclude[0].split('#'))[0] storage.StorageScope([], mock.Mock(), osc=mock.Mock()).exclude_pools(exclude, cluster) node = cluster.get_node_by_name(node_name) self.assertNotIn(exclude, cluster.get_node_pools(node)) def test_exclude_projects(self): cluster = self.fake_cluster.generate_scenario_1() exclude = ['project_1', 'project_2'] storage.StorageScope([], mock.Mock(), osc=mock.Mock()).exclude_projects(exclude, cluster) projects = [] volumes = cluster.get_all_volumes() for volume_id in volumes: volume = volumes.get(volume_id) projects.append(volume.get('project_id')) self.assertNotIn(exclude[0], projects) self.assertNotIn(exclude[1], projects) def test_remove_nodes_from_model(self): cluster = self.fake_cluster.generate_scenario_1() nodes_to_remove = ['host_0@backend_0'] storage.StorageScope([], mock.Mock(), 
osc=mock.Mock()).remove_nodes_from_model( nodes_to_remove, cluster) self.assertEqual(['host_1@backend_1'], list(cluster.get_all_storage_nodes())) @mock.patch.object(cinder_helper.CinderHelper, 'get_storage_node_list') def test_get_scoped_model_with_multi_scopes(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() # includes storage and compute scope audit_scope = [] audit_scope.extend(fake_scopes.fake_scope_2) audit_scope.extend(fake_scopes.fake_scope_1) mock_zone_list.return_value = [ mock.Mock(zone='zone_{0}'.format(i), host='host_{0}@backend_{1}'.format(i, i)) for i in range(2)] model = storage.StorageScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) expected_edges = [(faker_cluster_state.volume_uuid_mapping['volume_0'], 'host_0@backend_0#pool_0'), ('host_0@backend_0#pool_0', 'host_0@backend_0')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) python-watcher-4.0.0/watcher/tests/decision_engine/scope/test_baremetal.py0000664000175000017500000000440013656752270027123 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 SBCloud # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from watcher.decision_engine.scope import baremetal from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.scope import fake_scopes class TestBaremetalScope(base.TestCase): def setUp(self): super(TestBaremetalScope, self).setUp() self.fake_cluster = faker_cluster_state.FakerBaremetalModelCollector() self.audit_scope = fake_scopes.baremetal_scope def test_exclude_all_ironic_nodes(self): cluster = self.fake_cluster.generate_scenario_1() baremetal.BaremetalScope( self.audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) self.assertEqual({}, cluster.get_all_ironic_nodes()) def test_exclude_resources(self): nodes_to_exclude = [] resources = fake_scopes.baremetal_scope[0]['baremetal'][0]['exclude'] baremetal.BaremetalScope( self.audit_scope, mock.Mock(), osc=mock.Mock()).exclude_resources( resources, nodes=nodes_to_exclude) self.assertEqual(sorted(nodes_to_exclude), sorted(['c5941348-5a87-4016-94d4-4f9e0ce2b87a', 'c5941348-5a87-4016-94d4-4f9e0ce2b87c'])) def test_remove_nodes_from_model(self): cluster = self.fake_cluster.generate_scenario_1() baremetal.BaremetalScope( self.audit_scope, mock.Mock(), osc=mock.Mock()).remove_nodes_from_model( ['c5941348-5a87-4016-94d4-4f9e0ce2b87a'], cluster) self.assertEqual(len(cluster.get_all_ironic_nodes()), 1) python-watcher-4.0.0/watcher/tests/decision_engine/scope/test_compute.py0000664000175000017500000003373513656752270026660 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from jsonschema import validators import mock from watcher.api.controllers.v1 import audit_template from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.scope import compute from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.scope import fake_scopes class TestComputeScope(base.TestCase): def setUp(self): super(TestComputeScope, self).setUp() self.fake_cluster = faker_cluster_state.FakerModelCollector() @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_get_scoped_model_with_zones_and_instances(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() audit_scope = fake_scopes.fake_scope_1 mock_zone_list.return_value = [ mock.Mock(zone='AZ{0}'.format(i), host={'hostname_{0}'.format(i): {}}) for i in range(4)] model = compute.ComputeScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) # NOTE(adisky):INSTANCE_6 is not excluded from model it will be tagged # as 'exclude' TRUE, blueprint compute-cdm-include-all-instances expected_edges = [('INSTANCE_2', 'Node_1'), (u'INSTANCE_6', u'Node_3')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_get_scoped_model_without_scope(self, mock_zone_list): model = self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).get_scoped_model(model) assert not mock_zone_list.called def test_remove_instance(self): model = 
self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).remove_instance( model, model.get_instance_by_uuid('INSTANCE_2'), 'Node_1') expected_edges = [ ('INSTANCE_0', 'Node_0'), ('INSTANCE_1', 'Node_0'), ('INSTANCE_3', 'Node_2'), ('INSTANCE_4', 'Node_2'), ('INSTANCE_5', 'Node_2'), ('INSTANCE_6', 'Node_3'), ('INSTANCE_7', 'Node_4'), ] self.assertEqual(sorted(expected_edges), sorted(model.edges())) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_collect_aggregates(self, mock_aggregate): allowed_nodes = [] mock_aggregate.return_value = [ mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] compute.ComputeScope([{'host_aggregates': [{'id': 1}, {'id': 2}]}], mock.Mock(), osc=mock.Mock())._collect_aggregates( [{'id': 1}, {'id': 2}], allowed_nodes) self.assertEqual(['Node_1'], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_aggregates_wildcard_is_used(self, mock_aggregate): allowed_nodes = [] mock_aggregate.return_value = [ mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] compute.ComputeScope([{'host_aggregates': [{'id': '*'}]}], mock.Mock(), osc=mock.Mock())._collect_aggregates( [{'id': '*'}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_aggregates_wildcard_with_other_ids(self, mock_aggregate): allowed_nodes = [] mock_aggregate.return_value = [mock.Mock(id=i) for i in range(2)] scope_handler = compute.ComputeScope( [{'host_aggregates': [{'id': '*'}, {'id': 1}]}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_aggregates, [{'id': '*'}, {'id': 1}], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_aggregates_with_names_and_ids(self, mock_aggregate): allowed_nodes = [] mock_collection = [mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] 
mock_collection[0].name = 'HA_0' mock_collection[1].name = 'HA_1' mock_aggregate.return_value = mock_collection compute.ComputeScope([{'host_aggregates': [{'name': 'HA_1'}, {'id': 0}]}], mock.Mock(), osc=mock.Mock())._collect_aggregates( [{'name': 'HA_1'}, {'id': 0}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1'], allowed_nodes) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_collect_zones(self, mock_zone_list): allowed_nodes = [] mock_zone_list.return_value = [ mock.Mock(zone="AZ{0}".format(i + 1), host={'Node_{0}'.format(2 * i): 1, 'Node_{0}'.format(2 * i + 1): 2}) for i in range(2)] compute.ComputeScope([{'availability_zones': [{'name': "AZ1"}]}], mock.Mock(), osc=mock.Mock())._collect_zones( [{'name': "AZ1"}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1'], sorted(allowed_nodes)) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_zones_wildcard_is_used(self, mock_zone_list): allowed_nodes = [] mock_zone_list.return_value = [ mock.Mock(zone="AZ{0}".format(i + 1), host={'Node_{0}'.format(2 * i): 1, 'Node_{0}'.format(2 * i + 1): 2}) for i in range(2)] compute.ComputeScope([{'availability_zones': [{'name': "*"}]}], mock.Mock(), osc=mock.Mock())._collect_zones( [{'name': "*"}], allowed_nodes) self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], sorted(allowed_nodes)) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_zones_wildcard_with_other_ids(self, mock_zone_list): allowed_nodes = [] mock_zone_list.return_value = [ mock.Mock(zone="AZ{0}".format(i + 1), host={'Node_{0}'.format(2 * i): 1, 'Node_{0}'.format(2 * i + 1): 2}) for i in range(2)] scope_handler = compute.ComputeScope( [{'availability_zones': [{'name': "*"}, {'name': 'AZ1'}]}], mock.Mock(), osc=mock.Mock()) self.assertRaises(exception.WildcardCharacterIsUsed, scope_handler._collect_zones, [{'name': "*"}, {'name': 'AZ1'}], allowed_nodes) def test_compute_schema(self): test_scope = fake_scopes.compute_scope 
validators.Draft4Validator( audit_template.AuditTemplatePostType._build_schema() ).validate(test_scope) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_exclude_resource(self, mock_aggregate): mock_collection = [mock.Mock(id=i, hosts=['Node_{0}'.format(i)]) for i in range(2)] mock_collection[0].name = 'HA_0' mock_collection[1].name = 'HA_1' mock_aggregate.return_value = mock_collection resources_to_exclude = [{'host_aggregates': [{'name': 'HA_1'}, {'id': 0}]}, {'instances': [{'uuid': 'INSTANCE_1'}, {'uuid': 'INSTANCE_2'}]}, {'compute_nodes': [{'name': 'Node_2'}, {'name': 'Node_3'}]}, {'instance_metadata': [{'optimize': True}, {'optimize1': False}]}, {'projects': [{'uuid': 'PROJECT_1'}, {'uuid': 'PROJECT_2'}]}] instances_to_exclude = [] nodes_to_exclude = [] instance_metadata = [] projects_to_exclude = [] compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).exclude_resources( resources_to_exclude, instances=instances_to_exclude, nodes=nodes_to_exclude, instance_metadata=instance_metadata, projects=projects_to_exclude) self.assertEqual(['Node_0', 'Node_1', 'Node_2', 'Node_3'], sorted(nodes_to_exclude)) self.assertEqual(['INSTANCE_1', 'INSTANCE_2'], sorted(instances_to_exclude)) self.assertEqual([{'optimize': True}, {'optimize1': False}], instance_metadata) self.assertEqual(['PROJECT_1', 'PROJECT_2'], sorted(projects_to_exclude)) def test_exclude_instances_with_given_metadata(self): cluster = self.fake_cluster.generate_scenario_1() instance_metadata = [{'optimize': True}] instances_to_remove = set() compute.ComputeScope( [], mock.Mock(), osc=mock.Mock()).exclude_instances_with_given_metadata( instance_metadata, cluster, instances_to_remove) self.assertEqual(sorted(['INSTANCE_' + str(i) for i in range(35)]), sorted(instances_to_remove)) instance_metadata = [{'optimize': False}] instances_to_remove = set() compute.ComputeScope( [], mock.Mock(), osc=mock.Mock()).exclude_instances_with_given_metadata( instance_metadata, cluster, 
instances_to_remove) self.assertEqual(set(), instances_to_remove) def test_exclude_instances_with_given_project(self): cluster = self.fake_cluster.generate_scenario_1() instances_to_exclude = set() projects_to_exclude = ['26F03131-32CB-4697-9D61-9123F87A8147', '109F7909-0607-4712-B32C-5CC6D49D2F15'] compute.ComputeScope( [], mock.Mock(), osc=mock.Mock()).exclude_instances_with_given_project( projects_to_exclude, cluster, instances_to_exclude) self.assertEqual(['INSTANCE_1', 'INSTANCE_2'], sorted(instances_to_exclude)) def test_remove_nodes_from_model(self): model = self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).remove_nodes_from_model( ['hostname_1', 'hostname_2'], model) expected_edges = [ ('INSTANCE_0', 'Node_0'), ('INSTANCE_1', 'Node_0'), ('INSTANCE_6', 'Node_3'), ('INSTANCE_7', 'Node_4')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) def test_update_exclude_instances_in_model(self): model = self.fake_cluster.generate_scenario_1() compute.ComputeScope([], mock.Mock(), osc=mock.Mock()).update_exclude_instance_in_model( ['INSTANCE_1', 'INSTANCE_2'], model) expected_edges = [ ('INSTANCE_0', 'Node_0'), ('INSTANCE_1', 'Node_0'), ('INSTANCE_2', 'Node_1'), ('INSTANCE_3', 'Node_2'), ('INSTANCE_4', 'Node_2'), ('INSTANCE_5', 'Node_2'), ('INSTANCE_6', 'Node_3'), ('INSTANCE_7', 'Node_4')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) self.assertFalse( model.get_instance_by_uuid('INSTANCE_0').watcher_exclude) self.assertTrue( model.get_instance_by_uuid('INSTANCE_1').watcher_exclude) @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_detail') @mock.patch.object(nova_helper.NovaHelper, 'get_aggregate_list') def test_get_scoped_model_with_hostaggregate_null( self, mock_list, mock_detail): cluster = self.fake_cluster.generate_scenario_1() audit_scope = fake_scopes.fake_scope_3 mock_list.return_value = [mock.Mock(id=i, name="HA_{0}".format(i)) for i in range(2)] model = 
compute.ComputeScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) self.assertEqual(0, len(model.edges())) @mock.patch.object(nova_helper.NovaHelper, 'get_service_list') def test_get_scoped_model_with_multi_scopes(self, mock_zone_list): cluster = self.fake_cluster.generate_scenario_1() # includes compute and storage scope audit_scope = [] audit_scope.extend(fake_scopes.fake_scope_1) audit_scope.extend(fake_scopes.fake_scope_2) mock_zone_list.return_value = [ mock.Mock(zone='AZ{0}'.format(i), host={'hostname_{0}'.format(i): {}}) for i in range(4)] model = compute.ComputeScope(audit_scope, mock.Mock(), osc=mock.Mock()).get_scoped_model(cluster) # NOTE(adisky):INSTANCE_6 is not excluded from model it will be tagged # as 'exclude' TRUE, blueprint compute-cdm-include-all-instances expected_edges = [('INSTANCE_2', 'Node_1'), (u'INSTANCE_6', u'Node_3')] self.assertEqual(sorted(expected_edges), sorted(model.edges())) python-watcher-4.0.0/watcher/tests/decision_engine/model/0000775000175000017500000000000013656752352023550 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/__init__.py0000664000175000017500000000000013656752270025646 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/0000775000175000017500000000000013656752352026236 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/__init__.py0000664000175000017500000000000013656752270030334 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/test_cinder_notifications.py0000664000175000017500000006165113656752270034054 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import os import mock from oslo_serialization import jsonutils from watcher.common import cinder_helper from watcher.common import context from watcher.common import exception from watcher.common import service as watcher_service from watcher.db.sqlalchemy import api as db_api from watcher.decision_engine.model.notification import cinder as cnotification from watcher.tests import base as base_test from watcher.tests.db import utils from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.model.notification import fake_managers class NotificationTestCase(base_test.TestCase): @staticmethod def load_message(filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as json_file: json_data = jsonutils.load(json_file) return json_data class TestReceiveCinderNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} def setUp(self): super(TestReceiveCinderNotifications, self).setUp() p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') m_from_dict = p_from_dict.start() m_from_dict.return_value = self.context self.addCleanup(p_from_dict.stop) p_get_service_list = mock.patch.object( db_api.Connection, 'get_service_list') p_update_service = mock.patch.object( db_api.Connection, 'update_service') m_get_service_list = p_get_service_list.start() m_update_service = p_update_service.start() fake_service = utils.get_test_service( created_at=datetime.datetime.utcnow()) 
m_get_service_list.return_value = [fake_service] m_update_service.return_value = fake_service.copy() self.addCleanup(p_get_service_list.stop) self.addCleanup(p_update_service.stop) @mock.patch.object(cnotification.CapacityNotificationEndpoint, 'info') def test_cinder_receive_capacity(self, m_info): message = self.load_message('capacity.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'capacity.host1@backend1#pool1', 'capacity.pool', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeCreateEnd, 'info') def test_cinder_receive_volume_create_end(self, m_info): message = self.load_message('scenario_1_volume-create.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.create.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeUpdateEnd, 'info') def test_cinder_receive_volume_update_end(self, m_info): message = self.load_message('scenario_1_volume-update.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.update.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeAttachEnd, 'info') def test_cinder_receive_volume_attach_end(self, m_info): message = self.load_message('scenario_1_volume-attach.json') 
expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.attach.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeDetachEnd, 'info') def test_cinder_receive_volume_detach_end(self, m_info): message = self.load_message('scenario_1_volume-detach.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.detach.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeResizeEnd, 'info') def test_cinder_receive_volume_resize_end(self, m_info): message = self.load_message('scenario_1_volume-resize.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.resize.end', expected_message, self.FAKE_METADATA) @mock.patch.object(cnotification.VolumeDeleteEnd, 'info') def test_cinder_receive_volume_delete_end(self, m_info): message = self.load_message('scenario_1_volume-delete.json') expected_message = message['payload'] de_service = watcher_service.Service(fake_managers.FakeStorageManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'volume.host_0@backend_0#pool_0', 'volume.delete.end', 
expected_message, self.FAKE_METADATA) class TestCinderNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} def setUp(self): super(TestCinderNotifications, self).setUp() # fake cluster self.fake_cdmc = faker_cluster_state.FakerStorageModelCollector() def test_cinder_capacity(self): """test consuming capacity""" storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) pool_0_name = 'host_0@backend_0#pool_0' pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) # before self.assertEqual(pool_0_name, pool_0.name) self.assertEqual(420, pool_0.free_capacity_gb) self.assertEqual(420, pool_0.virtual_free) self.assertEqual(80, pool_0.allocated_capacity_gb) self.assertEqual(80, pool_0.provisioned_capacity_gb) message = self.load_message('scenario_1_capacity.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # after self.assertEqual(pool_0_name, pool_0.name) self.assertEqual(460, pool_0.free_capacity_gb) self.assertEqual(460, pool_0.virtual_free) self.assertEqual(40, pool_0.allocated_capacity_gb) self.assertEqual(40, pool_0.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_capacity_pool_notfound(self, m_cinder_helper): """test consuming capacity, new pool in existing node""" # storage_pool_by_name mock return_mock = mock.Mock() return_mock.configure_mock( name='host_0@backend_0#pool_2', total_volumes='2', total_capacity_gb='500', free_capacity_gb='380', provisioned_capacity_gb='120', allocated_capacity_gb='120') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() 
self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) message = self.load_message('scenario_1_capacity_pool_notfound.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # after consuming message, still pool_0 exists pool_0_name = 'host_0@backend_0#pool_0' pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0_name, pool_0.name) self.assertEqual(420, pool_0.free_capacity_gb) self.assertEqual(420, pool_0.virtual_free) self.assertEqual(80, pool_0.allocated_capacity_gb) self.assertEqual(80, pool_0.provisioned_capacity_gb) # new pool was added pool_1_name = 'host_0@backend_0#pool_2' m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) storage_node = storage_model.get_node_by_pool_name(pool_1_name) self.assertEqual('host_0@backend_0', storage_node.host) pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) self.assertEqual(pool_1_name, pool_1.name) self.assertEqual(500, pool_1.total_capacity_gb) self.assertEqual(380, pool_1.free_capacity_gb) self.assertEqual(120, pool_1.allocated_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_capacity_node_notfound(self, m_cinder_helper): """test consuming capacity, new pool in new node""" return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_2@backend_2#pool_0', total_volumes='2', total_capacity_gb='500', free_capacity_gb='460', provisioned_capacity_gb='40', allocated_capacity_gb='40') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) # storage_node_by_name mock return_node_mock = mock.Mock() return_node_mock.configure_mock( host='host_2@backend_2', zone='nova', state='up', status='enabled') m_get_storage_node_by_name = mock.Mock( side_effect=lambda name: return_node_mock) m_get_volume_type_by_backendname = mock.Mock( 
side_effect=lambda name: [mock.Mock('backend_2')]) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name, get_storage_node_by_name=m_get_storage_node_by_name, get_volume_type_by_backendname=m_get_volume_type_by_backendname) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.CapacityNotificationEndpoint(self.fake_cdmc) message = self.load_message('scenario_1_capacity_node_notfound.json') # self.assertRaises(exception.StorageNodeNotFound, handler.info, handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # new pool and new node was added node_1_name = 'host_2@backend_2' pool_1_name = node_1_name + '#pool_0' volume_type = 'backend_2' m_get_storage_pool_by_name.assert_called_once_with(pool_1_name) m_get_storage_node_by_name.assert_called_once_with(node_1_name) m_get_volume_type_by_backendname.assert_called_once_with(volume_type) # new node was added storage_node = storage_model.get_node_by_pool_name(pool_1_name) self.assertEqual('host_2@backend_2', storage_node.host) # new pool was added pool_1 = storage_model.get_pool_by_pool_name(pool_1_name) self.assertEqual(pool_1_name, pool_1.name) self.assertEqual(500, pool_1.total_capacity_gb) self.assertEqual(460, pool_1.free_capacity_gb) self.assertEqual(40, pool_1.allocated_capacity_gb) self.assertEqual(40, pool_1.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_create(self, m_cinder_helper): """test creating volume in existing pool and node""" # create storage_pool_by_name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='3', total_capacity_gb='500', free_capacity_gb='380', provisioned_capacity_gb='120', allocated_capacity_gb='120') m_get_storage_pool_by_name = mock.Mock( 
side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = self.load_message('scenario_1_volume-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that volume00 was added to the model volume_00_name = '990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) self.assertFalse(volume_00.bootable) # check that capacity was updated pool_0_name = 'host_0@backend_0#pool_0' m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(3, pool_0.total_volumes) self.assertEqual(380, pool_0.free_capacity_gb) self.assertEqual(120, pool_0.allocated_capacity_gb) self.assertEqual(120, pool_0.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_bootable_volume_create(self, m_cinder_helper): """test creating bootable volume in existing pool and node""" # create storage_pool_by_name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='3', total_capacity_gb='500', free_capacity_gb='380', provisioned_capacity_gb='120', allocated_capacity_gb='120') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = 
self.load_message('scenario_1_bootable-volume-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that volume00 was added to the model volume_00_name = '990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) self.assertTrue(volume_00.bootable) # check that capacity was updated pool_0_name = 'host_0@backend_0#pool_0' m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(3, pool_0.total_volumes) self.assertEqual(380, pool_0.free_capacity_gb) self.assertEqual(120, pool_0.allocated_capacity_gb) self.assertEqual(120, pool_0.provisioned_capacity_gb) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_create_pool_notfound(self, m_cinder_helper): """check creating volume in not existing pool and node""" # get_storage_pool_by_name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_2@backend_2#pool_0', total_volumes='1', total_capacity_gb='500', free_capacity_gb='460', provisioned_capacity_gb='40', allocated_capacity_gb='40') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) # create storage_node_by_name mock return_node_mock = mock.Mock() return_node_mock.configure_mock( host='host_2@backend_2', zone='nova', state='up', status='enabled') m_get_storage_node_by_name = mock.Mock( side_effect=lambda name: return_node_mock) m_get_volume_type_by_backendname = mock.Mock( side_effect=lambda name: [mock.Mock('backend_2')]) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name, get_storage_node_by_name=m_get_storage_node_by_name, get_volume_type_by_backendname=m_get_volume_type_by_backendname) storage_model = 
self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = self.load_message( 'scenario_1_volume-create_pool_notfound.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that volume00 was added to the model volume_00_name = '990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) # check that capacity was updated node_2_name = 'host_2@backend_2' pool_0_name = node_2_name + '#pool_0' pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(1, pool_0.total_volumes) self.assertEqual(460, pool_0.free_capacity_gb) self.assertEqual(40, pool_0.allocated_capacity_gb) self.assertEqual(40, pool_0.provisioned_capacity_gb) # check that node was added m_get_storage_node_by_name.assert_called_once_with(node_2_name) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_error_volume_unmapped(self, m_cinder_helper): """test creating error volume unmapped""" m_get_storage_pool_by_name = mock.Mock( side_effect=exception.PoolNotFound(name="TEST")) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeCreateEnd(self.fake_cdmc) message = self.load_message('scenario_1_error-volume-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # we do not call get_storage_pool_by_name m_get_storage_pool_by_name.assert_not_called() # check that volume00 was added to the model volume_00_name = 
'990a723f-6c19-4f83-8526-6383c9e9389f' volume_00 = storage_model.get_volume_by_uuid(volume_00_name) self.assertEqual(volume_00_name, volume_00.uuid) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_update(self, m_cinder_helper): """test updating volume in existing pool and node""" storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeUpdateEnd(self.fake_cdmc) volume_0_name = faker_cluster_state.volume_uuid_mapping['volume_0'] volume_0 = storage_model.get_volume_by_uuid(volume_0_name) self.assertEqual('name_0', volume_0.name) # create storage_pool_by name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='2', total_capacity_gb='500', free_capacity_gb='420', provisioned_capacity_gb='80', allocated_capacity_gb='80') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( get_storage_pool_by_name=m_get_storage_pool_by_name) message = self.load_message('scenario_1_volume-update.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # check that name of volume_0 was updated in the model volume_0 = storage_model.get_volume_by_uuid(volume_0_name) self.assertEqual('name_01', volume_0.name) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_volume_delete(self, m_cinder_helper): """test deleting volume""" # create storage_pool_by name mock return_pool_mock = mock.Mock() return_pool_mock.configure_mock( name='host_0@backend_0#pool_0', total_volumes='1', total_capacity_gb='500', free_capacity_gb='460', provisioned_capacity_gb='40', allocated_capacity_gb='40') m_get_storage_pool_by_name = mock.Mock( side_effect=lambda name: return_pool_mock) m_cinder_helper.return_value = mock.Mock( 
get_storage_pool_by_name=m_get_storage_pool_by_name) storage_model = self.fake_cdmc.generate_scenario_1() self.fake_cdmc.cluster_data_model = storage_model handler = cnotification.VolumeDeleteEnd(self.fake_cdmc) # volume exists before consuming volume_0_uuid = faker_cluster_state.volume_uuid_mapping['volume_0'] volume_0 = storage_model.get_volume_by_uuid(volume_0_uuid) self.assertEqual(volume_0_uuid, volume_0.uuid) message = self.load_message('scenario_1_volume-delete.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # volume does not exists after consuming self.assertRaises( exception.VolumeNotFound, storage_model.get_volume_by_uuid, volume_0_uuid) # check that capacity was updated pool_0_name = 'host_0@backend_0#pool_0' m_get_storage_pool_by_name.assert_called_once_with(pool_0_name) pool_0 = storage_model.get_pool_by_pool_name(pool_0_name) self.assertEqual(pool_0.name, pool_0_name) self.assertEqual(1, pool_0.total_volumes) self.assertEqual(460, pool_0.free_capacity_gb) self.assertEqual(40, pool_0.allocated_capacity_gb) self.assertEqual(40, pool_0.provisioned_capacity_gb) python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/0000775000175000017500000000000013656752352027147 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-attach.0000664000175000017500000000101413656752270034017 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.attach.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "40", "status": "in-use", 
"volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-soft_delete-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-soft_delete-end.0000664000175000017500000000730413656752270034016 0ustar zuulzuul00000000000000{ "event_type": "instance.soft_delete.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": "2012-10-29T13:42:11Z", "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": 
"fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "soft-delete", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:fake-mini" } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-enabled.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-0000664000175000017500000000115013656752270034031 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.name": "ServiceStatusPayload", "nova_object.version": "1.0", "nova_object.data": { "host": "hostname_0", "disabled": false, "last_seen_up": "2012-10-29T13:42:05Z", "binary": "nova-compute", "topic": "compute", "disabled_reason": null, "report_count": 1, "forced_down": false, "version": 15 } }, "event_type": "service.update", "publisher_id": "nova-compute:Node_0" } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 
00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_notfound.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_node_0000664000175000017500000000050313656752270034133 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "capacity.host_2@backend_2#pool_0", "event_type": "capacity.pool", "payload": { "name_to_id": "host_2@backend_2#pool_0", "total": 500, "free": 460, "allocated": 40, "provisioned": 40, "virtual_free": 460, "reported_at": "2017-05-15T13:42:11Z" } } ././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_force_complete-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_f0000664000175000017500000000730513656752270034215 0ustar zuulzuul00000000000000{ "event_type": "instance.live_migration_force_complete.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "admin", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, 
"rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": "migrating", "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-create-end.json0000664000175000017500000001011313656752270033646 0ustar zuulzuul00000000000000{ "event_type": "instance.create.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, 
"extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "keypairs": [ { "nova_object.data": { "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", "name": "my-key", "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated-by-Nova", "type": "ssh", "user_id": "fake" }, "nova_object.name": "KeypairPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "tags": [ "tag" ], "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "trusted_image_certificates": [ "cert-id-1", "cert-id-2" ], "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "c03c0bf9-f46e-4e4f-93f1-817568567ee2" }, "nova_object.name": "InstanceCreatePayload", "nova_object.namespace": "nova", 
"nova_object.version": "1.10" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity.json0000664000175000017500000000050313656752270033737 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "capacity.host_0@backend_0#pool_0", "event_type": "capacity.pool", "payload": { "name_to_id": "host_0@backend_0#pool_0", "total": 500, "free": 460, "allocated": 40, "provisioned": 40, "virtual_free": 460, "reported_at": "2017-05-15T13:42:11Z" } } ././@LongLink0000000000000000000000000000015400000000000011215 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-resize_confirm-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-resize_confirm-e0000664000175000017500000000721113656752270034134 0ustar zuulzuul00000000000000{ "event_type": "instance.resize_confirm.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "2", "is_public": true, "memory_mb": 2048, "name": "m1.small", "projects": null, "root_gb": 20, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", 
"nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-rescue-end.json0000664000175000017500000000736613656752270033711 0ustar zuulzuul00000000000000{ "event_type": "instance.rescue.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, 
"display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "shutdown", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "rescue_image_ref": "a2459075-d96c-40d5-893e-577ff92e721c", "reservation_id": "r-npxv0e40", "state": "rescued", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionRescuePayload", "nova_object.namespace": "nova", "nova_object.version": "1.2" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-shelve-end.json0000664000175000017500000000725013656752270033701 0ustar zuulzuul00000000000000{ "event_type": "instance.shelve.end", "payload": { "nova_object.data": { 
"action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "shutdown", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "shelved", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": 
"73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_instance-update0000664000175000017500000000471613656752270034133 0ustar zuulzuul00000000000000{ "event_type": "instance.update", "payload": { "nova_object.data": { "architecture": "x86_64", "audit_period": { "nova_object.data": { "audit_period_beginning": "2012-10-01T00:00:00Z", "audit_period_ending": "2012-10-29T13:42:11Z"}, "nova_object.name": "AuditPeriodPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "availability_zone": null, "bandwidth": [], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_name": "NEW_INSTANCE0", "host": "Node_0", "host_name": "NEW_INSTANCE0", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "kernel_id": "", "launched_at": null, "metadata": {}, "node": "hostname_0", "old_display_name": null, "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-sd3ygfjj", "state": "paused", "task_state": "scheduling", "power_state": "pending", "ip_addresses": [], "state_update": { "nova_object.data": { "old_task_state": null, "new_task_state": null, "old_state": "paused", "state": "paused"}, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": 
"nova" }, "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc"}, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "priority": "INFO", "publisher_id": "nova-compute:Node_0" } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-detach.0000664000175000017500000000066113656752270034012 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.detach.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "40", "status": "available", "volume_attachment": [], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {} } } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-unpause-end.json0000664000175000017500000000724713656752270034101 0ustar zuulzuul00000000000000{ "event_type": "instance.unpause.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { 
"hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-restore-end.json0000664000175000017500000000724713656752270034104 0ustar zuulzuul00000000000000{ "event_type": "instance.restore.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": 
false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000014600000000000011216 
Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-shutdown-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-shutdown-end.jso0000664000175000017500000000607113656752270034110 0ustar zuulzuul00000000000000{ "event_type": "instance.shutdown.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "stopped", "task_state": "deleting", "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, 
"updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create.0000664000175000017500000000102013656752270034013 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.create.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "available", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {"readonly": false, "attached_mode": "rw"} } } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-lock.json0000664000175000017500000000723513656752270032602 0ustar zuulzuul00000000000000{ "event_type": "instance.lock", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": 
{ "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": true, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-api:fake-mini" } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-delete.0000664000175000017500000000101613656752270034017 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": 
"volume.host_0@backend_0#pool_0", "event_type": "volume.delete.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "40", "status": "deleting", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {"readonly": false, "attached_mode": "rw"} } } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/service-update.json0000664000175000017500000000130513656752270032760 0ustar zuulzuul00000000000000{ "event_type": "service.update", "payload": { "nova_object.data": { "availability_zone": null, "binary": "nova-compute", "disabled": false, "disabled_reason": null, "forced_down": false, "host": "host1", "last_seen_up": "2012-10-29T13:42:05Z", "report_count": 1, "topic": "compute", "uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73", "version": 23 }, "nova_object.name": "ServiceStatusPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "priority": "INFO", "publisher_id": "nova-compute:host1" } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_on-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_on-end.jso0000664000175000017500000000725013656752270034065 0ustar zuulzuul00000000000000{ "event_type": "instance.power_on.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", 
"nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 
00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volume-create.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_bootable-volum0000664000175000017500000000105313656752270034122 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.create.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "available", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {"readonly": false, "attached_mode": "rw"}, "glance_metadata": {} } } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-suspend-end.json0000664000175000017500000000725213656752270034076 0ustar zuulzuul00000000000000{ "event_type": "instance.suspend.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, 
"swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "suspended", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-update.0000664000175000017500000000101613656752270034037 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.update.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_01", "size": "40", "status": "enabled", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], 
"snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@LongLink0000000000000000000000000000015700000000000011220 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-create.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_error-volume-c0000664000175000017500000000062713656752270034057 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.create.end", "payload": { "host": "", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "error", "volume_attachment": [], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {} } } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-pause-end.json0000664000175000017500000000724513656752270033534 0ustar zuulzuul00000000000000{ "event_type": "instance.pause.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", 
"projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "paused", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-update.json0000664000175000017500000000470713656752270033135 0ustar zuulzuul00000000000000{ "event_type": "instance.update", "payload": { "nova_object.data": { "architecture": "x86_64", "audit_period": { "nova_object.data": { "audit_period_beginning": "2012-10-01T00:00:00Z", "audit_period_ending": "2012-10-29T13:42:11Z"}, "nova_object.name": "AuditPeriodPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "availability_zone": null, "bandwidth": [], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_name": "some-server", "host": "compute", 
"host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "kernel_id": "", "launched_at": null, "metadata": {}, "node": "fake-mini", "old_display_name": null, "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-sd3ygfjj", "state": "paused", "task_state": "scheduling", "power_state": "pending", "ip_addresses": [], "state_update": { "nova_object.data": { "new_task_state": null, "old_state": null, "old_task_state": null, "state": "active"}, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": "nova" }, "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc"}, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-resume-end.json0000664000175000017500000000724613656752270033720 0ustar zuulzuul00000000000000{ "event_type": "instance.resume.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", 
"display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000017200000000000011215 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy_instance-update.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_legacy0000664000175000017500000000313213656752270034216 0ustar 
zuulzuul00000000000000{ "publisher_id": "compute:Node_2", "event_type": "compute.instance.update", "payload": { "access_ip_v4": null, "access_ip_v6": null, "architecture": null, "audit_period_beginning": "2016-08-17T13:00:00.000000", "audit_period_ending": "2016-08-17T13:56:05.262440", "availability_zone": "nova", "bandwidth": {}, "cell_name": "", "created_at": "2016-08-17 13:53:23+00:00", "deleted_at": "", "disk_gb": 1, "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "ephemeral_gb": 0, "host": "Node_2", "hostname": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "image_meta": { "base_image_ref": "205f96f5-91f9-42eb-9138-03fffcea2b97", "container_format": "bare", "disk_format": "qcow2", "min_disk": "1", "min_ram": "0" }, "image_ref_url": "http://10.50.0.222:9292/images/205f96f5-91f9-42eb-9138-03fffcea2b97", "instance_flavor_id": "1", "instance_id": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "instance_type": "m1.tiny", "instance_type_id": 2, "kernel_id": "", "launched_at": "2016-08-17T13:53:35.000000", "memory_mb": 512, "metadata": {}, "new_task_state": null, "node": "hostname_0", "old_state": "paused", "old_task_state": null, "os_type": null, "progress": "", "ramdisk_id": "", "reservation_id": "r-0822ymml", "root_gb": 1, "state": "paused", "state_description": "paused", "tenant_id": "a4b4772d93c74d5e8b7c68cdd2a014e1", "terminated_at": "", "user_id": "ce64facc93354bbfa90f4f9f9a3e1e75", "vcpus": 1 } } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-unlock.json0000664000175000017500000000724013656752270033141 0ustar zuulzuul00000000000000{ "event_type": "instance.unlock", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", 
"tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-api:fake-mini" } 
python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-delete-end.json0000664000175000017500000000327613656752270033661 0ustar zuulzuul00000000000000{ "event_type":"instance.delete.end", "payload":{ "nova_object.data":{ "architecture":"x86_64", "availability_zone":null, "created_at":"2012-10-29T13:42:11Z", "deleted_at":"2012-10-29T13:42:11Z", "display_name":"some-server", "fault":null, "host":"compute", "host_name":"some-server", "ip_addresses":[], "kernel_id":"", "launched_at":"2012-10-29T13:42:11Z", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "metadata":{}, "node":"fake-mini", "os_type":null, "progress":0, "ramdisk_id":"", "reservation_id":"r-npxv0e40", "state":"deleted", "task_state":null, "power_state":"pending", "tenant_id":"6f70656e737461636b20342065766572", "terminated_at":"2012-10-29T13:42:11Z", "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": "nova" }, "user_id":"fake", "uuid":"73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name":"InstanceActionPayload", "nova_object.namespace":"nova", "nova_object.version":"1.0" }, "priority":"INFO", "publisher_id":"nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/capacity.json0000664000175000017500000000047613656752270031645 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "capacity.host1@backend1#pool1", "event_type": "capacity.pool", "payload": { "name_to_id": "capacity.host1@backend1#pool1", "total": 3, "free": 1, "allocated": 2, "provisioned": 2, "virtual_free": 1, "reported_at": "2017-05-15T13:42:11Z" } } ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 
00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-resize.0000664000175000017500000000101413656752270034054 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_0@backend_0#pool_0", "event_type": "volume.resize.end", "payload": { "host": "host_0@backend_0#pool_0", "volume_id": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "display_name": "name_0", "size": "20", "status": "in-use", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-unrescue-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-unrescue-end.jso0000664000175000017500000000725013656752270034066 0ustar zuulzuul00000000000000{ "event_type": "instance.unrescue.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": 
"a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_post-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-live_migration_p0000664000175000017500000000725513656752270034233 0ustar zuulzuul00000000000000{ "event_type": "instance.live_migration_post.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "admin", "architecture": 
"x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "host2", "os_type": null, "power_state": "pending", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", 
"nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:host2" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-rebuild-end.json0000664000175000017500000000745513656752270034050 0ustar zuulzuul00000000000000{ "event_type": "instance.rebuild.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": null, "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "Node_1", "host_name": "some-server", "image_uuid": "a2459075-d96c-40d5-893e-577ff92e721c", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": 
null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "trusted_image_certificates": [ "rebuild-cert-id-1", "rebuild-cert-id-2" ], "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionRebuildPayload", "nova_object.namespace": "nova", "nova_object.version": "1.8" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/service-delete.json0000664000175000017500000000127113656752270032742 0ustar zuulzuul00000000000000{ "event_type": "service.delete", "payload": { "nova_object.data": { "availability_zone": null, "binary": "nova-compute", "disabled": false, "disabled_reason": null, "forced_down": false, "host": "hostname_0", "last_seen_up": null, "report_count": 0, "topic": "compute", "uuid": "fa69c544-906b-4a6a-a9c6-c1f7a8078c73", "version": 23 }, "nova_object.name": "ServiceStatusPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "priority": "INFO", "publisher_id": "nova-compute:host2" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/service-create.json0000664000175000017500000000126413656752270032745 0ustar zuulzuul00000000000000{ "event_type": "service.create", "payload": { "nova_object.data": { "availability_zone": null, "binary": "nova-compute", "disabled": false, "disabled_reason": null, "forced_down": false, "host": "host2", "last_seen_up": null, "report_count": 0, "topic": "compute", "uuid": "fafac544-906b-4a6a-a9c6-c1f7a8078c73", "version": 23 }, "nova_object.name": "ServiceStatusPayload", "nova_object.namespace": "nova", "nova_object.version": "1.1" }, "priority": "INFO", "publisher_id": "nova-compute:host2" } 
././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-disabled.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_service-update-0000664000175000017500000000116413656752270034036 0ustar zuulzuul00000000000000{ "priority": "INFO", "payload": { "nova_object.namespace": "nova", "nova_object.name": "ServiceStatusPayload", "nova_object.version": "1.0", "nova_object.data": { "host": "hostname_0", "disabled": true, "last_seen_up": "2012-10-29T13:42:05Z", "binary": "nova-compute", "topic": "compute", "disabled_reason": "watcher_disabled", "report_count": 1, "forced_down": true, "version": 15 } }, "event_type": "service.update", "publisher_id": "nova-compute:Node_0" } ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-unshelve-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-unshelve-end.jso0000664000175000017500000000725013656752270034066 0ustar zuulzuul00000000000000{ "event_type": "instance.unshelve.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, 
"extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", "version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "running", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "active", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_notfound.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_capacity_pool_0000664000175000017500000000050513656752270034161 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "capacity.host_0@backend_0#pool_2", "event_type": "capacity.pool", "payload": { "name_to_id": 
"host_0@backend_0#pool_2", "total": 500, "free": 380, "allocated": 120, "provisioned": 120, "virtual_free": 380, "reported_at": "2017-05-15T13:42:11Z" } } ././@LongLink0000000000000000000000000000014700000000000011217 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_off-end.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/instance-power_off-end.js0000664000175000017500000000725313656752270034047 0ustar zuulzuul00000000000000{ "event_type": "instance.power_off.end", "payload": { "nova_object.data": { "action_initiator_project": "6f70656e737461636b20342065766572", "action_initiator_user": "fake", "architecture": "x86_64", "auto_disk_config": "MANUAL", "availability_zone": "nova", "block_devices": [ { "nova_object.data": { "boot_index": null, "delete_on_termination": false, "device_name": "/dev/sdb", "tag": null, "volume_id": "a07f71dc-8151-4e7d-a0cc-cd24a3f11113" }, "nova_object.name": "BlockDevicePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_description": "some-server", "display_name": "some-server", "fault": null, "flavor": { "nova_object.data": { "description": null, "disabled": false, "ephemeral_gb": 0, "extra_specs": { "hw:watchdog_action": "disabled" }, "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "is_public": true, "memory_mb": 512, "name": "test_flavor", "projects": null, "root_gb": 1, "rxtx_factor": 1.0, "swap": 0, "vcpu_weight": 0, "vcpus": 1 }, "nova_object.name": "FlavorPayload", "nova_object.namespace": "nova", "nova_object.version": "1.4" }, "host": "compute", "host_name": "some-server", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "ip_addresses": [ { "nova_object.data": { "address": "192.168.1.3", "device_name": "tapce531f90-19", "label": "private-network", "mac": "fa:16:3e:4c:2c:30", "meta": {}, "port_uuid": "ce531f90-199f-48c0-816c-13e38010b442", 
"version": 4 }, "nova_object.name": "IpPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" } ], "kernel_id": "", "key_name": "my-key", "launched_at": "2012-10-29T13:42:11Z", "locked": false, "metadata": {}, "node": "fake-mini", "os_type": null, "power_state": "shutdown", "progress": 0, "ramdisk_id": "", "request_id": "req-5b6c791d-5709-4f36-8fbe-c3e02869e35d", "reservation_id": "r-npxv0e40", "state": "stopped", "task_state": null, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "updated_at": "2012-10-29T13:42:11Z", "user_id": "fake", "uuid": "73b09e16-35b7-4922-804e-e8f5d9b740fc" }, "nova_object.name": "InstanceActionPayload", "nova_object.namespace": "nova", "nova_object.version": "1.7" }, "priority": "INFO", "publisher_id": "nova-compute:compute" } ././@LongLink0000000000000000000000000000016700000000000011221 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_pool_notfound.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario_1_volume-create_0000664000175000017500000000102013656752270034074 0ustar zuulzuul00000000000000{ "priority": "INFO", "publisher_id": "volume.host_2@backend_2#pool_0", "event_type": "volume.create.end", "payload": { "host": "host_2@backend_2#pool_0", "volume_id": "990a723f-6c19-4f83-8526-6383c9e9389f", "display_name": "name_00", "size": "40", "status": "available", "volume_attachment": [{"server_id": "server", "attachment_id": "attachment"}], "snapshot_id": "", "tenant_id": "52044438-21f2-4a48-add4-d48bab20f7e1", "metadata": {"readonly": false, "attached_mode": "rw"} } } ././@LongLink0000000000000000000000000000016300000000000011215 Lustar 
00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instance-update.jsonpython-watcher-4.0.0/watcher/tests/decision_engine/model/notification/data/scenario3_notfound_instan0000664000175000017500000000502613656752270034252 0ustar zuulzuul00000000000000{ "event_type": "instance.update", "payload": { "nova_object.data": { "architecture": "x86_64", "audit_period": { "nova_object.data": { "audit_period_beginning": "2012-10-01T00:00:00Z", "audit_period_ending": "2012-10-29T13:42:11Z"}, "nova_object.name": "AuditPeriodPayload", "nova_object.namespace": "nova", "nova_object.version": "1.0" }, "availability_zone": null, "bandwidth": [], "created_at": "2012-10-29T13:42:11Z", "deleted_at": null, "display_name": "NEW INSTANCE 9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "host": "Node_2", "host_name": "NEW_INSTANCE_9966d6bd-a45c-4e1c-9d57-3054899a3ec7", "image_uuid": "155d900f-4e14-4e4c-a73d-069cbf4541e6", "kernel_id": "", "launched_at": null, "metadata": {}, "node": "hostname_0", "old_display_name": null, "os_type": null, "progress": 0, "ramdisk_id": "", "reservation_id": "r-sd3ygfjj", "state": "paused", "task_state": "scheduling", "power_state": "pending", "ip_addresses": [], "state_update": { "nova_object.data": { "old_task_state": null, "new_task_state": null, "old_state": "paused", "state": "paused"}, "nova_object.name": "InstanceStateUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "tenant_id": "6f70656e737461636b20342065766572", "terminated_at": null, "flavor": { "nova_object.name": "FlavorPayload", "nova_object.data": { "flavorid": "a22d5517-147c-4147-a0d1-e698df5cd4e3", "root_gb": 1, "vcpus": 1, "ephemeral_gb": 0, "memory_mb": 512 }, "nova_object.version": "1.0", "nova_object.namespace": "nova" }, "user_id": "fake", "uuid": "9966d6bd-a45c-4e1c-9d57-3054899a3ec7"}, "nova_object.name": "InstanceUpdatePayload", "nova_object.namespace": "nova", "nova_object.version": "1.0"}, "priority": 
"INFO", "publisher_id": "nova-compute:Node_2" } python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/fake_managers.py0000664000175000017500000000440413656752270031374 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import service_manager from watcher.decision_engine.model.notification import cinder as cnotification from watcher.decision_engine.model.notification import nova as novanotification from watcher.tests.decision_engine.model import faker_cluster_state class FakeManager(service_manager.ServiceManager): API_VERSION = '1.0' fake_cdmc = faker_cluster_state.FakerModelCollector() @property def service_name(self): return 'watcher-fake' @property def api_version(self): return self.API_VERSION @property def publisher_id(self): return 'test_publisher_id' @property def conductor_topic(self): return 'test_conductor_topic' @property def notification_topics(self): return ['nova'] @property def conductor_endpoints(self): return [] # Disable audit endpoint @property def notification_endpoints(self): return [ novanotification.VersionedNotification(self.fake_cdmc), ] class FakeStorageManager(FakeManager): fake_cdmc = faker_cluster_state.FakerStorageModelCollector() @property def notification_endpoints(self): return [ cnotification.CapacityNotificationEndpoint(self.fake_cdmc), cnotification.VolumeCreateEnd(self.fake_cdmc), 
cnotification.VolumeUpdateEnd(self.fake_cdmc), cnotification.VolumeDeleteEnd(self.fake_cdmc), cnotification.VolumeAttachEnd(self.fake_cdmc), cnotification.VolumeDetachEnd(self.fake_cdmc), cnotification.VolumeResizeEnd(self.fake_cdmc), ] python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/test_nova_notifications.py0000664000175000017500000010462213656752270033547 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import os_resource_classes as orc import mock from oslo_serialization import jsonutils from watcher.common import context from watcher.common import exception from watcher.common import nova_helper from watcher.common import placement_helper from watcher.common import service as watcher_service from watcher.decision_engine.model import element from watcher.decision_engine.model.notification import nova as novanotification from watcher.tests import base as base_test from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.model.notification import fake_managers class NotificationTestCase(base_test.TestCase): @staticmethod def load_message(filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as json_file: json_data = jsonutils.load(json_file) return json_data class TestReceiveNovaNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} FAKE_NOTIFICATIONS = { 'instance.create.end': 'instance-create-end.json', 'instance.lock': 'instance-lock.json', 'instance.unlock': 'instance-unlock.json', 'instance.pause.end': 'instance-pause-end.json', 'instance.power_off.end': 'instance-power_off-end.json', 'instance.power_on.end': 'instance-power_on-end.json', 'instance.resize_confirm.end': 'instance-resize_confirm-end.json', 'instance.restore.end': 'instance-restore-end.json', 'instance.resume.end': 'instance-resume-end.json', 'instance.shelve.end': 'instance-shelve-end.json', 'instance.shutdown.end': 'instance-shutdown-end.json', 'instance.suspend.end': 'instance-suspend-end.json', 'instance.unpause.end': 'instance-unpause-end.json', 'instance.unrescue.end': 'instance-unrescue-end.json', 'instance.unshelve.end': 'instance-unshelve-end.json', 'instance.rebuild.end': 'instance-rebuild-end.json', 'instance.rescue.end': 'instance-rescue-end.json', 'instance.update': 'instance-update.json', 
'instance.live_migration_force_complete.end': 'instance-live_migration_force_complete-end.json', 'instance.live_migration_post.end': 'instance-live_migration_post-end.json', 'instance.delete.end': 'instance-delete-end.json', 'instance.soft_delete.end': 'instance-soft_delete-end.json', 'service.create': 'service-create.json', 'service.delete': 'service-delete.json', 'service.update': 'service-update.json', } def setUp(self): super(TestReceiveNovaNotifications, self).setUp() p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') m_from_dict = p_from_dict.start() m_from_dict.return_value = self.context self.addCleanup(p_from_dict.stop) p_heartbeat = mock.patch.object( watcher_service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) @mock.patch.object(novanotification.VersionedNotification, 'info') def test_receive_nova_notifications(self, m_info): de_service = watcher_service.Service(fake_managers.FakeManager) n_dicts = novanotification.VersionedNotification.notification_mapping for n_type in n_dicts.keys(): n_json = self.FAKE_NOTIFICATIONS[n_type] message = self.load_message(n_json) expected_message = message['payload'] publisher_id = message['publisher_id'] incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_with( self.context, publisher_id, n_type, expected_message, self.FAKE_METADATA) class TestNovaNotifications(NotificationTestCase): FAKE_METADATA = {'message_id': None, 'timestamp': None} def setUp(self): super(TestNovaNotifications, self).setUp() # fake cluster self.fake_cdmc = faker_cluster_state.FakerModelCollector() def test_nova_service_update(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) node0_name = "hostname_0" node0 = 
compute_model.get_node_by_name(node0_name) message = self.load_message('scenario3_service-update-disabled.json') self.assertEqual('hostname_0', node0.hostname) self.assertEqual(element.ServiceState.ONLINE.value, node0.state) self.assertEqual(element.ServiceState.ENABLED.value, node0.status) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual("hostname_0", node0.hostname) self.assertEqual(element.ServiceState.OFFLINE.value, node0.state) self.assertEqual(element.ServiceState.DISABLED.value, node0.status) message = self.load_message('scenario3_service-update-enabled.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual("hostname_0", node0.hostname) self.assertEqual(element.ServiceState.ONLINE.value, node0.state) self.assertEqual(element.ServiceState.ENABLED.value, node0.status) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, "NovaHelper") def test_nova_service_create(self, m_nova_helper_cls, m_placement_helper): mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper.return_value = mock_placement m_get_compute_node_by_hostname = mock.Mock( side_effect=lambda uuid: mock.Mock( name='m_get_compute_node_by_uuid', id="fafac544-906b-4a6a-a9c6-c1f7a8078c73", hypervisor_hostname="host2", state='up', status='enabled', memory_mb=7777, vcpus=42, free_disk_gb=974, local_gb=1337, service={'id': 123, 'host': 'host2', 'disabled_reason': ''},)) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = 
self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) new_node_name = "host2" self.assertRaises( exception.ComputeNodeNotFound, compute_model.get_node_by_name, new_node_name) message = self.load_message('service-create.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) new_node = compute_model.get_node_by_name(new_node_name) self.assertEqual('host2', new_node.hostname) self.assertEqual(element.ServiceState.ONLINE.value, new_node.state) self.assertEqual(element.ServiceState.ENABLED.value, new_node.status) def test_nova_service_delete(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) node0_name = "hostname_0" # Before self.assertTrue(compute_model.get_node_by_name(node0_name)) message = self.load_message('service-delete.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # After self.assertRaises( exception.ComputeNodeNotFound, compute_model.get_node_by_name, node0_name) def test_nova_instance_update(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-update.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) 
self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) def test_nova_instance_state_building(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) message = self.load_message('instance-update.json') message['payload']['nova_object.data']['state'] = 'building' handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # Assert that the instance state in the model is unchanged # since the 'building' state is ignored. self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, "NovaHelper") def test_nova_instance_update_notfound_still_creates( self, m_nova_helper_cls, m_placement_helper): mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper.return_value = mock_placement m_get_compute_node_by_hostname = mock.Mock( side_effect=lambda uuid: mock.Mock( name='m_get_compute_node_by_hostname', id='669966bd-a45c-4e1c-9d57-3054899a3ec7', hypervisor_hostname="Node_2", state='up', status='enabled', memory_mb=7777, vcpus=42, free_disk_gb=974, local_gb=1337, service={'id': 123, 'host': 'Node_2', 'disabled_reason': ''},)) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler 
= novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' message = self.load_message('scenario3_notfound_instance-update.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) instance0 = compute_model.get_instance_by_uuid(instance0_uuid) self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) self.assertEqual(1, instance0.vcpus) self.assertEqual(1, instance0.disk) self.assertEqual(512, instance0.memory) m_get_compute_node_by_hostname.assert_called_once_with('Node_2') node_2 = compute_model.get_node_by_name('Node_2') self.assertEqual(7777, node_2.memory) self.assertEqual(42, node_2.vcpus) self.assertEqual(1337, node_2.disk) @mock.patch.object(nova_helper, "NovaHelper") def test_instance_update_node_notfound_set_unmapped( self, m_nova_helper_cls): m_get_compute_node_by_hostname = mock.Mock( side_effect=exception.ComputeNodeNotFound(name="TEST")) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '9966d6bd-a45c-4e1c-9d57-3054899a3ec7' message = self.load_message( 'scenario3_notfound_instance-update.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) instance0 = compute_model.get_instance_by_uuid(instance0_uuid) self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) self.assertEqual(1, instance0.vcpus) self.assertEqual(1, instance0.disk) self.assertEqual(512, instance0.memory) m_get_compute_node_by_hostname.assert_any_call('Node_2') self.assertRaises( exception.ComputeNodeNotFound, 
compute_model.get_node_by_uuid, 'Node_2') @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_nova_instance_create(self, m_nova_helper_cls, m_placement_helper): mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper.return_value = mock_placement m_get_compute_node_by_hostname = mock.Mock( side_effect=lambda uuid: mock.Mock( name='m_get_compute_node_by_hostname', id=3, hypervisor_hostname="compute", state='up', status='enabled', uuid=uuid, memory_mb=7777, vcpus=42, free_disk_gb=974, local_gb=1337, service={'id': 123, 'host': 'compute', 'disabled_reason': ''},)) m_nova_helper_cls.return_value = mock.Mock( get_compute_node_by_hostname=m_get_compute_node_by_hostname, name='m_nova_helper') compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = 'c03c0bf9-f46e-4e4f-93f1-817568567ee2' self.assertRaises( exception.InstanceNotFound, compute_model.get_instance_by_uuid, instance0_uuid) message = self.load_message('instance-create-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) hostname = message['payload']['nova_object.data']['host'] node = self.fake_cdmc.cluster_data_model.get_node_by_instance_uuid( instance0_uuid) self.assertEqual(hostname, node.hostname) m_get_compute_node_by_hostname.assert_called_once_with(hostname) instance0 = self.fake_cdmc.cluster_data_model.get_instance_by_uuid( instance0_uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) self.assertEqual(1, instance0.vcpus) self.assertEqual(1, instance0.disk) self.assertEqual(512, 
instance0.memory) def test_nova_instance_delete_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' # Before self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) message = self.load_message('instance-delete-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # After self.assertRaises( exception.InstanceNotFound, compute_model.get_instance_by_uuid, instance0_uuid) def test_nova_instance_soft_delete_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' # Before self.assertTrue(compute_model.get_instance_by_uuid(instance0_uuid)) message = self.load_message('instance-soft_delete-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) # After self.assertRaises( exception.InstanceNotFound, compute_model.get_instance_by_uuid, instance0_uuid) def test_live_migrated_force_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) message = self.load_message( 'instance-live_migration_force_complete-end.json') handler.info( ctxt=self.context, 
publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_live_migrated_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) message = self.load_message( 'instance-live_migration_post-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_lock(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-lock.json') self.assertFalse(instance0.locked) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertTrue(instance0.locked) message = self.load_message('instance-unlock.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], 
event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertFalse(instance0.locked) def test_nova_instance_pause(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-pause-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.PAUSED.value, instance0.state) message = self.load_message('instance-unpause-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_power_on_off(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-power_off-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.STOPPED.value, instance0.state) message = self.load_message('instance-power_on-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], 
payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_instance_rebuild_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", node.uuid) message = self.load_message('instance-rebuild-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('hostname_0', node.hostname) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_rescue(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-rescue-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.RESCUED.value, instance0.state) message = self.load_message('instance-unrescue-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def 
test_instance_resize_confirm_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) message = self.load_message( 'instance-resize_confirm-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) node = compute_model.get_node_by_instance_uuid(instance0_uuid) self.assertEqual('fa69c544-906b-4a6a-a9c6-c1f7a8078c73', node.uuid) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_restore_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-restore-end.json') instance0.state = element.InstanceState.ERROR.value self.assertEqual(element.InstanceState.ERROR.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_resume_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) 
message = self.load_message('instance-resume-end.json') instance0.state = element.InstanceState.ERROR.value self.assertEqual(element.InstanceState.ERROR.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_shelve(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-shelve-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.SHELVED.value, instance0.state) message = self.load_message('instance-unshelve-end.json') handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) def test_nova_instance_shutdown_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-shutdown-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], 
payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual(element.InstanceState.STOPPED.value, instance0.state) def test_nova_instance_suspend_end(self): compute_model = self.fake_cdmc.generate_scenario_3_with_2_nodes() self.fake_cdmc.cluster_data_model = compute_model handler = novanotification.VersionedNotification(self.fake_cdmc) instance0_uuid = '73b09e16-35b7-4922-804e-e8f5d9b740fc' instance0 = compute_model.get_instance_by_uuid(instance0_uuid) message = self.load_message('instance-suspend-end.json') self.assertEqual(element.InstanceState.ACTIVE.value, instance0.state) handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) self.assertEqual( element.InstanceState.SUSPENDED.value, instance0.state) def test_info_no_cdm(self): # Tests that a notification is received before an audit has been # performed which would create the nova CDM. mock_collector = mock.Mock(cluster_data_model=None) handler = novanotification.VersionedNotification(mock_collector) payload = { 'nova_object.data': { 'uuid': '9966d6bd-a45c-4e1c-9d57-3054899a3ec7', 'host': None } } with mock.patch.object(handler, 'update_instance') as update_instance: handler.info(mock.sentinel.ctxt, 'publisher_id', 'instance.update', payload, metadata={}) # update_instance should not be called since we did not add an # Instance object to the CDM since the CDM does not exist yet. 
update_instance.assert_not_called() def test_fake_instance_create(self): self.fake_cdmc.cluster_data_model = mock.Mock() handler = novanotification.VersionedNotification(self.fake_cdmc) message = self.load_message('instance-create-end.json') # get_instance_by_uuid should not be called when creating instance with mock.patch.object(self.fake_cdmc.cluster_data_model, 'get_instance_by_uuid') as mock_get: handler.info( ctxt=self.context, publisher_id=message['publisher_id'], event_type=message['event_type'], payload=message['payload'], metadata=self.FAKE_METADATA, ) mock_get.assert_not_called() python-watcher-4.0.0/watcher/tests/decision_engine/model/notification/test_notifications.py0000664000175000017500000000716513656752270032530 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import mock from oslo_serialization import jsonutils from watcher.common import context from watcher.common import service as watcher_service from watcher.decision_engine.model.notification import base from watcher.decision_engine.model.notification import filtering from watcher.tests import base as base_test from watcher.tests.decision_engine.model.notification import fake_managers class DummyManager(fake_managers.FakeManager): @property def notification_endpoints(self): return [DummyNotification(self.fake_cdmc)] class DummyNotification(base.NotificationEndpoint): @property def filter_rule(self): return filtering.NotificationFilter( publisher_id=r'.*', event_type=r'compute.dummy', payload={'data': {'nested': r'^T.*'}}, ) def info(self, ctxt, publisher_id, event_type, payload, metadata): pass class NotificationTestCase(base_test.TestCase): def load_message(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as json_file: json_data = jsonutils.load(json_file) return json_data class TestReceiveNotifications(NotificationTestCase): def setUp(self): super(TestReceiveNotifications, self).setUp() p_from_dict = mock.patch.object(context.RequestContext, 'from_dict') m_from_dict = p_from_dict.start() m_from_dict.return_value = self.context self.addCleanup(p_from_dict.stop) @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') @mock.patch.object(DummyNotification, 'info') def test_receive_dummy_notification(self, m_info, m_heartbeat): message = { 'publisher_id': 'nova-compute', 'event_type': 'compute.dummy', 'payload': {'data': {'nested': 'TEST'}}, 'priority': 'INFO', } de_service = watcher_service.Service(DummyManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) m_info.assert_called_once_with( self.context, 'nova-compute', 'compute.dummy', {'data': {'nested': 
'TEST'}}, {'message_id': None, 'timestamp': None}) @mock.patch.object(watcher_service.ServiceHeartbeat, 'send_beat') @mock.patch.object(DummyNotification, 'info') def test_skip_unwanted_notification(self, m_info, m_heartbeat): message = { 'publisher_id': 'nova-compute', 'event_type': 'compute.dummy', 'payload': {'data': {'nested': 'unwanted'}}, 'priority': 'INFO', } de_service = watcher_service.Service(DummyManager) incoming = mock.Mock(ctxt=self.context.to_dict(), message=message) de_service.notification_handler.dispatcher.dispatch(incoming) self.assertEqual(0, m_info.call_count) python-watcher-4.0.0/watcher/tests/decision_engine/model/test_model.py0000664000175000017500000005026113656752270026264 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock import os from oslo_utils import uuidutils from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state class TestModel(base.TestCase): def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return model_root.ModelRoot.from_xml(self.load_data(filename)) def test_model_structure(self): fake_cluster = faker_cluster_state.FakerModelCollector() model1 = fake_cluster.build_scenario_1() self.assertEqual(5, len(model1.get_all_compute_nodes())) self.assertEqual(35, len(model1.get_all_instances())) self.assertEqual(8, len(model1.edges())) expected_struct_str = self.load_data('scenario_1.xml') model2 = model_root.ModelRoot.from_xml(expected_struct_str) self.assertTrue(model_root.ModelRoot.is_isomorphic(model2, model1)) def test_build_model_from_xml(self): fake_cluster = faker_cluster_state.FakerModelCollector() expected_model = fake_cluster.generate_scenario_1() struct_str = self.load_data('scenario_1.xml') model = model_root.ModelRoot.from_xml(struct_str) self.assertEqual(expected_model.to_string(), model.to_string()) @mock.patch.object(model_root.ModelRoot, 'get_all_compute_nodes') @mock.patch.object(model_root.ModelRoot, 'get_node_instances') def test_get_model_to_list(self, mock_instances, mock_nodes): fake_compute_node = mock.MagicMock( uuid='fake_node_uuid', fields=['uuid']) fake_instance = mock.MagicMock( uuid='fake_instance_uuid', fields=['uuid']) mock_nodes.return_value = {'fake_node_uuid': fake_compute_node} mock_instances.return_value = [fake_instance] expected_keys = ['server_uuid', 'node_uuid'] result = model_root.ModelRoot().to_list() self.assertEqual(1, len(result)) 
result_keys = result[0].keys() self.assertEqual(sorted(expected_keys), sorted(result_keys)) # test compute node has no instance mock_instances.return_value = [] expected_keys = ['node_uuid'] result = model_root.ModelRoot().to_list() self.assertEqual(1, len(result)) result_keys = result[0].keys() self.assertEqual(expected_keys, list(result_keys)) def test_get_node_by_instance_uuid(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertEqual(node, model.get_node_by_uuid(uuid_)) uuid_ = "{0}".format(uuidutils.generate_uuid()) instance = element.Instance(id=1) instance.uuid = uuid_ model.add_instance(instance) self.assertEqual(instance, model.get_instance_by_uuid(uuid_)) model.map_instance(instance, node) self.assertEqual(node, model.get_node_by_instance_uuid(instance.uuid)) def test_add_node(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertEqual(node, model.get_node_by_uuid(uuid_)) def test_delete_node(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertEqual(node, model.get_node_by_uuid(uuid_)) model.remove_node(node) self.assertRaises(exception.ComputeNodeNotFound, model.get_node_by_uuid, uuid_) def test_get_all_compute_nodes(self): model = model_root.ModelRoot() for id_ in range(10): uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id_) node.uuid = uuid_ model.add_node(node) all_nodes = model.get_all_compute_nodes() for uuid_ in all_nodes: node = model.get_node_by_uuid(uuid_) model.assert_node(node) def test_set_get_state_nodes(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) 
self.assertIn(node.state, [el.value for el in element.ServiceState]) node = model.get_node_by_uuid(uuid_) node.state = element.ServiceState.OFFLINE.value self.assertIn(node.state, [el.value for el in element.ServiceState]) def test_get_node_by_name(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) name = 'test_node' node = element.ComputeNode() node.uuid = uuid_ node.hostname = name model.add_node(node) compute_node = model.get_node_by_name(name) model.assert_node(compute_node) self.assertEqual(name, compute_node['hostname']) self.assertEqual(uuid_, compute_node['uuid']) def test_node_from_name_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) name = 'test_node' node = element.ComputeNode() node.uuid = uuid_ node.hostname = name model.add_node(node) fake_name = 'fake_node' self.assertRaises(exception.ComputeNodeNotFound, model.get_node_by_name, fake_name) def test_node_from_uuid_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) uuid2 = "{0}".format(uuidutils.generate_uuid()) self.assertRaises(exception.ComputeNodeNotFound, model.get_node_by_uuid, uuid2) def test_remove_node_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) uuid2 = "{0}".format(uuidutils.generate_uuid()) node2 = element.ComputeNode(id=2) node2.uuid = uuid2 self.assertRaises(exception.ComputeNodeNotFound, model.remove_node, node2) def test_assert_node_raise(self): model = model_root.ModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) node = element.ComputeNode(id=1) node.uuid = uuid_ model.add_node(node) self.assertRaises(exception.IllegalArgumentException, model.assert_node, "objet_qcq") def test_instance_from_uuid_raise(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = 
fake_cluster.generate_scenario_1() self.assertRaises(exception.InstanceNotFound, model.get_instance_by_uuid, "valeur_qcq") def test_assert_instance_raise(self): model = model_root.ModelRoot() self.assertRaises(exception.IllegalArgumentException, model.assert_instance, "valeur_qcq") def test_get_node_instances(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = fake_cluster.generate_scenario_1() node = element.ComputeNode(uuid="Node_0") instance0 = model.get_instance_by_uuid("INSTANCE_0") instance1 = model.get_instance_by_uuid("INSTANCE_1") instances = model.get_node_instances(node) self.assertEqual(2, len(instances)) self.assertIn(instance0, instances) self.assertIn(instance1, instances) def test_get_node_used_resources(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = fake_cluster.generate_scenario_1() node = element.ComputeNode(uuid="Node_0") resources_used = model.get_node_used_resources(node) self.assertEqual(20, resources_used.get('vcpu')) self.assertEqual(4, resources_used.get('memory')) self.assertEqual(40, resources_used.get('disk')) def test_get_node_free_resources(self): fake_cluster = faker_cluster_state.FakerModelCollector() model = fake_cluster.generate_scenario_1() node = model.get_node_by_uuid("Node_0") resources_free = model.get_node_free_resources(node) self.assertEqual(20, resources_free.get('vcpu')) self.assertEqual(128, resources_free.get('memory')) self.assertEqual(210, resources_free.get('disk')) class TestStorageModel(base.TestCase): def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return model_root.StorageModelRoot.from_xml(self.load_data(filename)) def test_model_structure(self): fake_cluster = faker_cluster_state.FakerStorageModelCollector() model1 = fake_cluster.build_scenario_1() 
self.assertEqual(2, len(model1.get_all_storage_nodes())) self.assertEqual(9, len(model1.get_all_volumes())) self.assertEqual(12, len(model1.edges())) expected_struct_str = self.load_data('storage_scenario_1.xml') model2 = model_root.StorageModelRoot.from_xml(expected_struct_str) self.assertTrue( model_root.StorageModelRoot.is_isomorphic(model2, model1)) def test_build_model_from_xml(self): fake_cluster = faker_cluster_state.FakerStorageModelCollector() expected_model = fake_cluster.generate_scenario_1() struct_str = self.load_data('storage_scenario_1.xml') model = model_root.StorageModelRoot.from_xml(struct_str) self.assertEqual(expected_model.to_string(), model.to_string()) def test_assert_node_raise(self): model = model_root.StorageModelRoot() node = element.StorageNode(host="host@backend") model.add_node(node) self.assertRaises(exception.IllegalArgumentException, model.assert_node, "obj") def test_assert_pool_raise(self): model = model_root.StorageModelRoot() pool = element.Pool(name="host@backend#pool") model.add_pool(pool) self.assertRaises(exception.IllegalArgumentException, model.assert_pool, "obj") def test_assert_volume_raise(self): model = model_root.StorageModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertRaises(exception.IllegalArgumentException, model.assert_volume, "obj") def test_add_node(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, model.get_node_by_name(hostname)) def test_add_pool(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) def test_remove_node(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, 
model.get_node_by_name(hostname)) model.remove_node(node) self.assertRaises(exception.StorageNodeNotFound, model.get_node_by_name, hostname) def test_remove_pool(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) model.remove_pool(pool) self.assertRaises(exception.PoolNotFound, model.get_pool_by_pool_name, pool_name) def test_map_unmap_pool(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, model.get_node_by_name(hostname)) pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) model.map_pool(pool, node) self.assertTrue(pool.name in model.predecessors(node.host)) model.unmap_pool(pool, node) self.assertFalse(pool.name in model.predecessors(node.host)) def test_add_volume(self): model = model_root.StorageModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) def test_remove_volume(self): model = model_root.StorageModelRoot() uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) model.remove_volume(volume) self.assertRaises(exception.VolumeNotFound, model.get_volume_by_uuid, uuid_) def test_map_unmap_volume(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) model.map_volume(volume, 
pool) self.assertTrue(volume.uuid in model.predecessors(pool.name)) model.unmap_volume(volume, pool) self.assertFalse(volume.uuid in model.predecessors(pool.name)) def test_get_all_storage_nodes(self): model = model_root.StorageModelRoot() for i in range(10): hostname = "host_{0}".format(i) node = element.StorageNode(host=hostname) model.add_node(node) all_nodes = model.get_all_storage_nodes() for hostname in all_nodes: node = model.get_node_by_name(hostname) model.assert_node(node) def test_get_all_volumes(self): model = model_root.StorageModelRoot() for id_ in range(10): uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) all_volumes = model.get_all_volumes() for vol in all_volumes: volume = model.get_volume_by_uuid(vol) model.assert_volume(volume) def test_get_node_pools(self): model = model_root.StorageModelRoot() hostname = "host@backend" node = element.StorageNode(host=hostname) model.add_node(node) self.assertEqual(node, model.get_node_by_name(hostname)) pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) model.map_pool(pool, node) self.assertEqual([pool], model.get_node_pools(node)) def test_get_pool_by_volume(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) uuid_ = "{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) model.map_volume(volume, pool) self.assertEqual(pool, model.get_pool_by_volume(volume)) def test_get_pool_volumes(self): model = model_root.StorageModelRoot() pool_name = "host@backend#pool" pool = element.Pool(name=pool_name) model.add_pool(pool) self.assertEqual(pool, model.get_pool_by_pool_name(pool_name)) uuid_ = 
"{0}".format(uuidutils.generate_uuid()) volume = element.Volume(uuid=uuid_) model.add_volume(volume) self.assertEqual(volume, model.get_volume_by_uuid(uuid_)) model.map_volume(volume, pool) self.assertEqual([volume], model.get_pool_volumes(pool)) class TestBaremetalModel(base.TestCase): def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return model_root.StorageModelRoot.from_xml(self.load_data(filename)) def test_model_structure(self): fake_cluster = faker_cluster_state.FakerBaremetalModelCollector() model1 = fake_cluster.build_scenario_1() self.assertEqual(2, len(model1.get_all_ironic_nodes())) expected_struct_str = self.load_data('ironic_scenario_1.xml') model2 = model_root.BaremetalModelRoot.from_xml(expected_struct_str) self.assertTrue( model_root.BaremetalModelRoot.is_isomorphic(model2, model1)) def test_build_model_from_xml(self): fake_cluster = faker_cluster_state.FakerBaremetalModelCollector() expected_model = fake_cluster.generate_scenario_1() struct_str = self.load_data('ironic_scenario_1.xml') model = model_root.BaremetalModelRoot.from_xml(struct_str) self.assertEqual(expected_model.to_string(), model.to_string()) def test_assert_node_raise(self): model = model_root.BaremetalModelRoot() node_uuid = uuidutils.generate_uuid() node = element.IronicNode(uuid=node_uuid) model.add_node(node) self.assertRaises(exception.IllegalArgumentException, model.assert_node, "obj") def test_add_node(self): model = model_root.BaremetalModelRoot() node_uuid = uuidutils.generate_uuid() node = element.IronicNode(uuid=node_uuid) model.add_node(node) self.assertEqual(node, model.get_node_by_uuid(node_uuid)) def test_remove_node(self): model = model_root.BaremetalModelRoot() node_uuid = uuidutils.generate_uuid() node = element.IronicNode(uuid=node_uuid) 
model.add_node(node) self.assertEqual(node, model.get_node_by_uuid(node_uuid)) model.remove_node(node) self.assertRaises(exception.IronicNodeNotFound, model.get_node_by_uuid, node_uuid) def test_get_all_ironic_nodes(self): model = model_root.BaremetalModelRoot() for i in range(10): node_uuid = uuidutils.generate_uuid() node = element.IronicNode(uuid=node_uuid) model.add_node(node) all_nodes = model.get_all_ironic_nodes() for node_uuid in all_nodes: node = model.get_node_by_uuid(node_uuid) model.assert_node(node) python-watcher-4.0.0/watcher/tests/decision_engine/model/data/0000775000175000017500000000000013656752352024461 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_4_with_1_node_no_instance.xml0000664000175000017500000000042613656752270034072 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_3_with_2_nodes.xml0000664000175000017500000000225313656752270031675 0ustar zuulzuul00000000000000 ././@LongLink0000000000000000000000000000016100000000000011213 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabled_nodes.xmlpython-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_9_with_3_active_plus_1_disabl0000664000175000017500000000507113656752270034052 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_6_with_2_nodes.xml0000664000175000017500000000306313656752270031700 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/storage_scenario_1.xml0000664000175000017500000001006313656752270030751 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_1_node_unavailable.xml0000664000175000017500000002330013656752270034046 0ustar zuulzuul00000000000000 
python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_5_with_instance_disk_0.xml0000664000175000017500000000064713656752270033410 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_7_with_2_nodes.xml0000664000175000017500000000325513656752270031704 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/ironic_scenario_1.xml0000664000175000017500000000070313656752270030570 0ustar zuulzuul00000000000000 1 2 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_2_with_metrics.xml0000664000175000017500000000475313656752270032020 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_8_with_4_nodes.xml0000664000175000017500000000502513656752270031704 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_10.xml0000664000175000017500000001157013656752270027311 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_metrics.xml0000664000175000017500000000204113656752270032003 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_all_nodes_disable.xml0000664000175000017500000000215213656752270033763 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_1.xml0000664000175000017500000002427213656752270027234 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_3_with_metrics.xml0000664000175000017500000000277713656752270032025 0ustar zuulzuul00000000000000 ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 
00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_all_instances_exclude.xmlpython-watcher-4.0.0/watcher/tests/decision_engine/model/data/scenario_1_with_all_instances_exclude.0000664000175000017500000001705113656752270034153 0ustar zuulzuul00000000000000 python-watcher-4.0.0/watcher/tests/decision_engine/model/test_element.py0000664000175000017500000001300513656752270026610 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.model import element from watcher.tests import base class TestElement(base.TestCase): scenarios = [ ("ComputeNode_with_all_fields", dict( cls=element.Instance, data={ 'uuid': 'FAKE_UUID', 'state': 'state', 'hostname': 'hostname', 'memory': 111, 'vcpus': 222, 'disk': 333, })), ("ComputeNode_with_some_fields", dict( cls=element.Instance, data={ 'uuid': 'FAKE_UUID', 'state': 'state', 'vcpus': 222, 'disk': 333, })), ("Instance_with_all_fields", dict( cls=element.Instance, data={ 'uuid': 'FAKE_UUID', 'state': 'state', 'hostname': 'hostname', 'name': 'name', 'memory': 111, 'vcpus': 222, 'disk': 333, })), ("Instance_with_some_fields", dict( cls=element.Instance, data={ 'uuid': 'FAKE_UUID', 'state': 'state', 'vcpus': 222, 'disk': 333, })), ] def test_as_xml_element(self): el = self.cls(**self.data) el.as_xml_element() class TestStorageElement(base.TestCase): scenarios = [ ("StorageNode_with_all_fields", dict( cls=element.StorageNode, data={ 'host': 'host@backend', 'zone': 'zone', 'status': 'enabled', 'state': 'up', 'volume_type': ['volume_type'], })), ("Pool_with_all_fields", dict( cls=element.Pool, data={ 'name': 'host@backend#pool', 'total_volumes': 1, 'total_capacity_gb': 500, 'free_capacity_gb': 420, 'provisioned_capacity_gb': 80, 'allocated_capacity_gb': 80, 'virtual_free': 420, })), ("Pool_without_virtual_free_fields", dict( cls=element.Pool, data={ 'name': 'host@backend#pool', 'total_volumes': 1, 'total_capacity_gb': 500, 'free_capacity_gb': 420, 'provisioned_capacity_gb': 80, 'allocated_capacity_gb': 80, })), ("Volume_with_all_fields", dict( cls=element.Volume, data={ 'uuid': 'FAKE_UUID', 'size': 1, 'status': 'in-use', 'attachments': '[{"key": "value"}]', 'name': 'name', 'multiattach': 'false', 'snapshot_id': '', 'project_id': '8ea272ec-52d2-475e-9151-0f3ed8c674d1', 'metadata': '{"key": "value"}', 'bootable': 'false', 'human_id': 'human_id', })), ("Volume_without_bootable_fields", dict( cls=element.Volume, data={ 'uuid': 'FAKE_UUID', 
'size': 1, 'status': 'in-use', 'attachments': '[]', 'name': 'name', 'multiattach': 'false', 'snapshot_id': '', 'project_id': '777d7968-9b61-4cc0-844d-a95a6fc22d8c', 'metadata': '{"key": "value"}', 'human_id': 'human_id', })), ("Volume_without_human_id_fields", dict( cls=element.Volume, data={ 'uuid': 'FAKE_UUID', 'size': 1, 'status': 'in-use', 'attachments': '[]', 'name': 'name', 'multiattach': 'false', 'snapshot_id': '', 'project_id': '2e65af64-1898-4cee-bfee-af3fc7f76d16', 'metadata': '{"key": "value"}', })), ] def test_as_xml_element(self): el = self.cls(**self.data) el.as_xml_element() class TestIronicElement(base.TestCase): scenarios = [ ("IronicNode_with_all_fields", dict( cls=element.IronicNode, data={ "uuid": 'FAKE_UUID', "power_state": 'up', "maintenance": "false", "maintenance_reason": "null", "extra": {"compute_node_id": 1} })), ("IronicNode_with_some_fields", dict( cls=element.IronicNode, data={ "uuid": 'FAKE_UUID', "power_state": 'up', "maintenance": "false", "extra": {"compute_node_id": 1} })), ] def test_as_xml_element(self): el = self.cls(**self.data) el.as_xml_element() python-watcher-4.0.0/watcher/tests/decision_engine/model/faker_cluster_and_metrics.py0000664000175000017500000002366113656752270031332 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Authors: Vojtech CIMA # Bruno GRAZIOLI # Sean MURPHY # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os import mock from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import model_root as modelroot class FakerModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None, audit_scope=None): if config is None: config = mock.Mock() super(FakerModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def get_audit_scope_handler(self, audit_scope): return None def execute(self): return self.generate_scenario_1() def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.ModelRoot.from_xml(self.load_data(filename)) def generate_scenario_1(self): """Simulates cluster with 2 nodes and 2 instances using 1:1 mapping""" return self.load_model('scenario_1_with_metrics.xml') def generate_scenario_2(self): """Simulates a cluster With 4 nodes and 6 instances all mapped to a single node """ return self.load_model('scenario_2_with_metrics.xml') def generate_scenario_3(self): """Simulates a cluster With 4 nodes and 6 instances all mapped to one node """ return self.load_model('scenario_3_with_metrics.xml') def generate_scenario_4(self): """Simulates a cluster With 4 nodes and 6 instances spread on all nodes """ return self.load_model('scenario_4_with_metrics.xml') class FakeCeilometerMetrics(object): def __init__(self, model): self.model = model def mock_get_statistics(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): if meter_name == 'host_cpu_usage': return self.get_compute_node_cpu_util( resource, period, aggregate, granularity) elif meter_name == 'instance_cpu_usage': return self.get_instance_cpu_util( resource, period, aggregate, granularity) elif meter_name == 'instance_ram_usage': return 
self.get_instance_ram_util( resource, period, aggregate, granularity) elif meter_name == 'instance_root_disk_size': return self.get_instance_disk_root_size( resource, period, aggregate, granularity) def get_compute_node_cpu_util(self, resource, period, aggregate, granularity): """Calculates node utilization dynamicaly. node CPU utilization should consider and corelate with actual instance-node mappings provided within a cluster model. Returns relative node CPU utilization <0, 100>. :param r_id: resource id """ node_uuid = '%s_%s' % (resource.uuid, resource.hostname) node = self.model.get_node_by_uuid(node_uuid) instances = self.model.get_node_instances(node) util_sum = 0.0 for instance_uuid in instances: instance = self.model.get_instance_by_uuid(instance_uuid) total_cpu_util = instance.vcpus * self.get_instance_cpu_util( instance.uuid) util_sum += total_cpu_util / 100.0 util_sum /= node.vcpus return util_sum * 100.0 @staticmethod def get_instance_cpu_util(resource, period, aggregate, granularity): instance_cpu_util = dict() instance_cpu_util['INSTANCE_0'] = 10 instance_cpu_util['INSTANCE_1'] = 30 instance_cpu_util['INSTANCE_2'] = 60 instance_cpu_util['INSTANCE_3'] = 20 instance_cpu_util['INSTANCE_4'] = 40 instance_cpu_util['INSTANCE_5'] = 50 instance_cpu_util['INSTANCE_6'] = 100 instance_cpu_util['INSTANCE_7'] = 100 instance_cpu_util['INSTANCE_8'] = 100 instance_cpu_util['INSTANCE_9'] = 100 return instance_cpu_util[str(resource.uuid)] @staticmethod def get_instance_ram_util(resource, period, aggregate, granularity): instance_ram_util = dict() instance_ram_util['INSTANCE_0'] = 1 instance_ram_util['INSTANCE_1'] = 2 instance_ram_util['INSTANCE_2'] = 4 instance_ram_util['INSTANCE_3'] = 8 instance_ram_util['INSTANCE_4'] = 3 instance_ram_util['INSTANCE_5'] = 2 instance_ram_util['INSTANCE_6'] = 1 instance_ram_util['INSTANCE_7'] = 2 instance_ram_util['INSTANCE_8'] = 4 instance_ram_util['INSTANCE_9'] = 8 return instance_ram_util[str(resource.uuid)] @staticmethod def 
get_instance_disk_root_size(resource, period, aggregate, granularity): instance_disk_util = dict() instance_disk_util['INSTANCE_0'] = 10 instance_disk_util['INSTANCE_1'] = 15 instance_disk_util['INSTANCE_2'] = 30 instance_disk_util['INSTANCE_3'] = 35 instance_disk_util['INSTANCE_4'] = 20 instance_disk_util['INSTANCE_5'] = 25 instance_disk_util['INSTANCE_6'] = 25 instance_disk_util['INSTANCE_7'] = 25 instance_disk_util['INSTANCE_8'] = 25 instance_disk_util['INSTANCE_9'] = 25 return instance_disk_util[str(resource.uuid)] class FakeGnocchiMetrics(object): def __init__(self, model): self.model = model def mock_get_statistics(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): if meter_name == 'host_cpu_usage': return self.get_compute_node_cpu_util( resource, period, aggregate, granularity) elif meter_name == 'instance_cpu_usage': return self.get_instance_cpu_util( resource, period, aggregate, granularity) elif meter_name == 'instance_ram_usage': return self.get_instance_ram_util( resource, period, aggregate, granularity) elif meter_name == 'instance_root_disk_size': return self.get_instance_disk_root_size( resource, period, aggregate, granularity) def get_compute_node_cpu_util(self, resource, period, aggregate, granularity): """Calculates node utilization dynamicaly. node CPU utilization should consider and corelate with actual instance-node mappings provided within a cluster model. Returns relative node CPU utilization <0, 100>. 
:param r_id: resource id """ node_uuid = "%s_%s" % (resource.uuid, resource.hostname) node = self.model.get_node_by_uuid(node_uuid) instances = self.model.get_node_instances(node) util_sum = 0.0 for instance_uuid in instances: instance = self.model.get_instance_by_uuid(instance_uuid) total_cpu_util = instance.vcpus * self.get_instance_cpu_util( instance.uuid) util_sum += total_cpu_util / 100.0 util_sum /= node.vcpus return util_sum * 100.0 @staticmethod def get_instance_cpu_util(resource, period, aggregate, granularity): instance_cpu_util = dict() instance_cpu_util['INSTANCE_0'] = 10 instance_cpu_util['INSTANCE_1'] = 30 instance_cpu_util['INSTANCE_2'] = 60 instance_cpu_util['INSTANCE_3'] = 20 instance_cpu_util['INSTANCE_4'] = 40 instance_cpu_util['INSTANCE_5'] = 50 instance_cpu_util['INSTANCE_6'] = 100 instance_cpu_util['INSTANCE_7'] = 100 instance_cpu_util['INSTANCE_8'] = 100 instance_cpu_util['INSTANCE_9'] = 100 return instance_cpu_util[str(resource.uuid)] @staticmethod def get_instance_ram_util(resource, period, aggregate, granularity): instance_ram_util = dict() instance_ram_util['INSTANCE_0'] = 1 instance_ram_util['INSTANCE_1'] = 2 instance_ram_util['INSTANCE_2'] = 4 instance_ram_util['INSTANCE_3'] = 8 instance_ram_util['INSTANCE_4'] = 3 instance_ram_util['INSTANCE_5'] = 2 instance_ram_util['INSTANCE_6'] = 1 instance_ram_util['INSTANCE_7'] = 2 instance_ram_util['INSTANCE_8'] = 4 instance_ram_util['INSTANCE_9'] = 8 return instance_ram_util[str(resource.uuid)] @staticmethod def get_instance_disk_root_size(resource, period, aggregate, granularity): instance_disk_util = dict() instance_disk_util['INSTANCE_0'] = 10 instance_disk_util['INSTANCE_1'] = 15 instance_disk_util['INSTANCE_2'] = 30 instance_disk_util['INSTANCE_3'] = 35 instance_disk_util['INSTANCE_4'] = 20 instance_disk_util['INSTANCE_5'] = 25 instance_disk_util['INSTANCE_6'] = 25 instance_disk_util['INSTANCE_7'] = 25 instance_disk_util['INSTANCE_8'] = 25 instance_disk_util['INSTANCE_9'] = 25 return 
instance_disk_util[str(resource.uuid)] python-watcher-4.0.0/watcher/tests/decision_engine/model/monasca_metrics.py0000664000175000017500000001003113656752270027263 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. class FakeMonascaMetrics(object): def __init__(self): self.emptytype = "" def empty_one_metric(self, emptytype): self.emptytype = emptytype def mock_get_statistics(self, resource=None, resource_type=None, meter_name=None, period=None, aggregate='mean', granularity=None): result = 0.0 if meter_name == 'host_cpu_usage': result = self.get_usage_compute_node_cpu(resource) elif meter_name == 'instance_cpu_usage': result = self.get_average_usage_instance_cpu(resource) return result def mock_get_statistics_wb(self, resource=None, resource_type=None, meter_name=None, period=None, aggregate='mean', granularity=None): """Statistics for workload balance strategy""" result = 0.0 if meter_name == 'instance_cpu_usage': result = self.get_average_usage_instance_cpu_wb(resource) return result @staticmethod def get_usage_compute_node_cpu(*args, **kwargs): """The last VM CPU usage values to average :param uuid:00 :return: """ resource = args[0] uuid = resource.uuid measurements = {} # node 0 measurements['Node_0'] = 7 measurements['Node_1'] = 7 # node 1 measurements['Node_2'] = 80 # node 2 measurements['Node_3'] = 5 measurements['Node_4'] = 5 measurements['Node_5'] = 10 # node 
3 measurements['Node_6'] = 8 measurements['Node_19'] = 10 # node 4 measurements['INSTANCE_7'] = 4 if uuid not in measurements.keys(): # measurements[uuid] = random.randint(1, 4) measurements[uuid] = 8 statistics = [ {'columns': ['avg'], 'statistics': [[float(measurements[str(uuid)])]]}] cpu_usage = None for stat in statistics: avg_col_idx = stat['columns'].index('avg') values = [r[avg_col_idx] for r in stat['statistics']] value = float(sum(values)) / len(values) cpu_usage = value return cpu_usage @staticmethod def get_average_usage_instance_cpu(*args, **kwargs): """The last VM CPU usage values to average :param uuid:00 :return: """ resource = args[0] uuid = resource.uuid measurements = {} # node 0 measurements['INSTANCE_0'] = 7 measurements['INSTANCE_1'] = 7 # node 1 measurements['INSTANCE_2'] = 10 # node 2 measurements['INSTANCE_3'] = 5 measurements['INSTANCE_4'] = 5 measurements['INSTANCE_5'] = 10 # node 3 measurements['INSTANCE_6'] = 8 # node 4 measurements['INSTANCE_7'] = 4 if uuid not in measurements.keys(): # measurements[uuid] = random.randint(1, 4) measurements[uuid] = 8 statistics = [ {'columns': ['avg'], 'statistics': [[float(measurements[str(uuid)])]]}] cpu_usage = None for stat in statistics: avg_col_idx = stat['columns'].index('avg') values = [r[avg_col_idx] for r in stat['statistics']] value = float(sum(values)) / len(values) cpu_usage = value return cpu_usage python-watcher-4.0.0/watcher/tests/decision_engine/model/ceilometer_metrics.py0000664000175000017500000002277313656752270030012 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import oslo_utils class FakeCeilometerMetrics(object): NAME = 'ceilometer' def __init__(self): self.emptytype = "" def empty_one_metric(self, emptytype): self.emptytype = emptytype def mock_get_statistics(self, resource=None, resource_type=None, meter_name=None, period=None, aggregate='mean', granularity=None): result = 0 if meter_name == 'host_cpu_usage': result = self.get_usage_compute_node_cpu(resource) elif meter_name == 'host_ram_usage': result = self.get_usage_compute_node_ram(resource) elif meter_name == 'host_outlet_temp': result = self.get_average_outlet_temp(resource) elif meter_name == 'host_inlet_temp': result = self.get_average_inlet_temp(resource) elif meter_name == 'host_airflow': result = self.get_average_airflow(resource) elif meter_name == 'host_power': result = self.get_average_power(resource) elif meter_name == 'instance_cpu_usage': result = self.get_average_usage_instance_cpu(resource) elif meter_name == 'instance_ram_usage': result = self.get_average_usage_instance_memory(resource) return result def mock_get_statistics_nn(self, resource=None, meter_name=None, period=None, aggregate='mean', granularity=300): """Statistics for noisy neighbor strategy Signature should match DataSourceBase.get_instance_l3_cache_usage """ result = 0.0 if period == 100: result = self.get_average_l3_cache_current(resource) if period == 200: result = self.get_average_l3_cache_previous(resource) return result def mock_get_statistics_wb(self, resource=None, resource_type=None, meter_name=None, period=None, aggregate='mean', granularity=None): """Statistics for workload 
balance strategy""" result = 0.0 if meter_name == 'instance_cpu_usage': result = self.get_average_usage_instance_cpu_wb(resource) elif meter_name == 'instance_ram_usage': result = self.get_average_usage_instance_memory_wb(resource) return result @staticmethod def get_average_l3_cache_current(resource): """The average l3 cache used by instance""" uuid = resource.uuid mock = {} mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 35 * oslo_utils.units.Ki mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30 * oslo_utils.units.Ki mock['INSTANCE_3'] = 40 * oslo_utils.units.Ki mock['INSTANCE_4'] = 35 * oslo_utils.units.Ki return mock[str(uuid)] @staticmethod def get_average_l3_cache_previous(resource): """The average l3 cache used by instance""" uuid = resource.uuid mock = {} mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 34.5 * ( oslo_utils.units.Ki) mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30.5 * ( oslo_utils.units.Ki) mock['INSTANCE_3'] = 60 * oslo_utils.units.Ki mock['INSTANCE_4'] = 22.5 * oslo_utils.units.Ki return mock[str(uuid)] @staticmethod def get_average_outlet_temp(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock["fa69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 30 # use a big value to make sure it exceeds threshold mock["af69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 100 if uuid not in mock.keys(): mock[uuid] = 100 return float(mock[str(uuid)]) @staticmethod def get_usage_compute_node_ram(resource): uuid = resource.uuid mock = {} # Ceilometer returns hardware.memory.used samples in KB. 
mock['Node_0'] = 7 * oslo_utils.units.Ki mock['Node_1'] = 5 * oslo_utils.units.Ki mock['Node_2'] = 29 * oslo_utils.units.Ki mock['Node_3'] = 8 * oslo_utils.units.Ki mock['Node_4'] = 4 * oslo_utils.units.Ki if uuid not in mock.keys(): # mock[uuid] = random.randint(1, 4) mock[uuid] = 8 return float(mock[str(uuid)]) @staticmethod def get_average_airflow(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock['Node_0'] = 400 # use a big value to make sure it exceeds threshold mock['Node_1'] = 100 if uuid not in mock.keys(): mock[uuid] = 200 return mock[str(uuid)] @staticmethod def get_average_inlet_temp(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock['Node_0'] = 24 mock['Node_1'] = 26 if uuid not in mock.keys(): mock[uuid] = 28 return mock[str(uuid)] @staticmethod def get_average_power(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock['Node_0'] = 260 mock['Node_1'] = 240 if uuid not in mock.keys(): mock[uuid] = 200 return mock[str(uuid)] @staticmethod def get_usage_compute_node_cpu(*args, **kwargs): """The last VM CPU usage values to average :param uuid:00 :return: """ resource = args[0] uuid = "%s_%s" % (resource.uuid, resource.hostname) measurements = {} # node 0 measurements['Node_0_hostname_0'] = 7 measurements['Node_1_hostname_1'] = 7 measurements['fa69c544-906b-4a6a-a9c6-c1f7a8078c73_hostname_0'] = 7 measurements['af69c544-906b-4a6a-a9c6-c1f7a8078c73_hostname_1'] = 7 # node 1 measurements['Node_2_hostname_2'] = 80 # node 2 measurements['Node_3_hostname_3'] = 5 measurements['Node_4_hostname_4'] = 5 measurements['Node_5_hostname_5'] = 10 # node 3 measurements['Node_6_hostname_6'] = 8 # This node doesn't send metrics measurements['LOST_NODE_hostname_7'] = None measurements['Node_19_hostname_19'] = 10 # node 4 measurements['INSTANCE_7_hostname_7'] = 4 result = measurements[uuid] return float(result) if result is not None else None @staticmethod 
def get_average_usage_instance_cpu_wb(resource): """The last VM CPU usage values to average :param resource: :return: """ uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_1'] = 80 mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 # node 1 mock['INSTANCE_3'] = 20 mock['INSTANCE_4'] = 10 return float(mock[str(uuid)]) @staticmethod def get_average_usage_instance_memory_wb(resource): uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_1'] = 30 mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 12 # node 1 mock['INSTANCE_3'] = 12 mock['INSTANCE_4'] = 12 return mock[str(uuid)] @staticmethod def get_average_usage_instance_cpu(*args, **kwargs): """The last VM CPU usage values to average :param uuid:00 :return: """ resource = args[0] uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_0'] = 7 mock['INSTANCE_1'] = 7 # node 1 mock['INSTANCE_2'] = 10 # node 2 mock['INSTANCE_3'] = 5 mock['INSTANCE_4'] = 5 mock['INSTANCE_5'] = 10 # node 3 mock['INSTANCE_6'] = 8 # node 4 mock['INSTANCE_7'] = 4 mock['LOST_INSTANCE'] = None # metrics might be missing in scenarios which do not do computations if uuid not in mock.keys(): mock[uuid] = 0 return mock[str(uuid)] @staticmethod def get_average_usage_instance_memory(resource): uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_0'] = 2 mock['INSTANCE_1'] = 5 # node 1 mock['INSTANCE_2'] = 5 # node 2 mock['INSTANCE_3'] = 8 mock['INSTANCE_4'] = 5 mock['INSTANCE_5'] = 16 # node 3 mock['INSTANCE_6'] = 8 # node 4 mock['INSTANCE_7'] = 4 return mock[str(uuid)] @staticmethod def get_average_usage_instance_disk(resource): uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_0'] = 2 mock['INSTANCE_1'] = 2 # node 1 mock['INSTANCE_2'] = 2 # node 2 mock['INSTANCE_3'] = 10 mock['INSTANCE_4'] = 15 mock['INSTANCE_5'] = 20 # node 3 mock['INSTANCE_6'] = 8 # node 4 mock['INSTANCE_7'] = 4 return mock[str(uuid)] python-watcher-4.0.0/watcher/tests/decision_engine/model/faker_cluster_state.py0000664000175000017500000003014713656752270030157 0ustar 
zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import mock from watcher.common import utils from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root as modelroot volume_uuid_mapping = { "volume_0": "5028b1eb-8749-48ae-a42c-5bdd1323976f", "volume_1": "74454247-a064-4b34-8f43-89337987720e", "volume_2": "a16c811e-2521-4fd3-8779-6a94ccb3be73", "volume_3": "37856b95-5be4-4864-8a49-c83f55c66780", } class FakerModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None, audit_scope=None): if config is None: config = mock.Mock(period=777) super(FakerModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.ModelRoot.from_xml(self.load_data(filename)) def get_audit_scope_handler(self, audit_scope): return None def execute(self): return self._cluster_data_model or self.build_scenario_1() def build_scenario_1(self): instances = [] model = modelroot.ModelRoot() # number of nodes node_count = 5 # number max of instance per node 
node_instance_count = 7 # total number of virtual machine instance_count = (node_count * node_instance_count) for id_ in range(0, node_count): node_uuid = "Node_{0}".format(id_) hostname = "hostname_{0}".format(id_) node_attributes = { "id": id_, "uuid": node_uuid, "hostname": hostname, "memory": 132, "memory_mb_reserved": 0, "memory_ratio": 1, "disk": 250, "disk_capacity": 250, "disk_gb_reserved": 0, "disk_ratio": 1, "vcpus": 40, "vcpu_reserved": 0, "vcpu_ratio": 1, } node = element.ComputeNode(**node_attributes) model.add_node(node) for i in range(0, instance_count): instance_uuid = "INSTANCE_{0}".format(i) if instance_uuid == "INSTANCE_1": project_id = "26F03131-32CB-4697-9D61-9123F87A8147" elif instance_uuid == "INSTANCE_2": project_id = "109F7909-0607-4712-B32C-5CC6D49D2F15" else: project_id = "91FFFE30-78A0-4152-ACD2-8310FF274DC9" instance_attributes = { "uuid": instance_uuid, "name": instance_uuid, "memory": 2, "disk": 20, "disk_capacity": 20, "vcpus": 10, "metadata": '{"optimize": true,"top": "floor","nested": {"x": "y"}}', "project_id": project_id } instance = element.Instance(**instance_attributes) instances.append(instance) model.add_instance(instance) mappings = [ ("INSTANCE_0", "Node_0"), ("INSTANCE_1", "Node_0"), ("INSTANCE_2", "Node_1"), ("INSTANCE_3", "Node_2"), ("INSTANCE_4", "Node_2"), ("INSTANCE_5", "Node_2"), ("INSTANCE_6", "Node_3"), ("INSTANCE_7", "Node_4"), ] for instance_uuid, node_uuid in mappings: model.map_instance( model.get_instance_by_uuid(instance_uuid), model.get_node_by_uuid(node_uuid), ) return model def generate_scenario_1(self): return self.load_model('scenario_1.xml') def generate_scenario_1_with_1_node_unavailable(self): return self.load_model('scenario_1_with_1_node_unavailable.xml') def generate_scenario_1_with_all_nodes_disable(self): return self.load_model('scenario_1_with_all_nodes_disable.xml') def generate_scenario_1_with_all_instances_exclude(self): return self.load_model('scenario_1_with_all_instances_exclude.xml') def 
generate_scenario_3_with_2_nodes(self): return self.load_model('scenario_3_with_2_nodes.xml') def generate_scenario_4_with_1_node_no_instance(self): return self.load_model('scenario_4_with_1_node_no_instance.xml') def generate_scenario_5_with_instance_disk_0(self): return self.load_model('scenario_5_with_instance_disk_0.xml') def generate_scenario_6_with_2_nodes(self): return self.load_model('scenario_6_with_2_nodes.xml') def generate_scenario_7_with_2_nodes(self): return self.load_model('scenario_7_with_2_nodes.xml') def generate_scenario_8_with_4_nodes(self): return self.load_model('scenario_8_with_4_nodes.xml') def generate_scenario_9_with_3_active_plus_1_disabled_nodes(self): return self.load_model( 'scenario_9_with_3_active_plus_1_disabled_nodes.xml') def generate_scenario_10(self): return self.load_model('scenario_10.xml') class FakerStorageModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None, audit_scope=None): if config is None: config = mock.Mock(period=777) super(FakerStorageModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.StorageModelRoot.from_xml(self.load_data(filename)) def get_audit_scope_handler(self, audit_scope): return None def execute(self): return self._cluster_data_model or self.build_scenario_1() def build_scenario_1(self): model = modelroot.StorageModelRoot() # number of nodes node_count = 2 # number of pools per node pool_count = 2 # number of volumes volume_count = 9 for i in range(0, node_count): host = "host_{0}@backend_{0}".format(i) zone = "zone_{0}".format(i) volume_type = ["type_{0}".format(i)] node_attributes = { "host": host, "zone": zone, "status": 'enabled', "state": 'up', 
"volume_type": volume_type, } node = element.StorageNode(**node_attributes) model.add_node(node) for j in range(0, pool_count): name = "host_{0}@backend_{0}#pool_{1}".format(i, j) pool_attributes = { "name": name, "total_volumes": 2, "total_capacity_gb": 500, "free_capacity_gb": 420, "provisioned_capacity_gb": 80, "allocated_capacity_gb": 80, "virtual_free": 420, } pool = element.Pool(**pool_attributes) model.add_pool(pool) mappings = [ ("host_0@backend_0#pool_0", "host_0@backend_0"), ("host_0@backend_0#pool_1", "host_0@backend_0"), ("host_1@backend_1#pool_0", "host_1@backend_1"), ("host_1@backend_1#pool_1", "host_1@backend_1"), ] for pool_name, node_name in mappings: model.map_pool( model.get_pool_by_pool_name(pool_name), model.get_node_by_name(node_name), ) volume_uuid_mapping = [ "5028b1eb-8749-48ae-a42c-5bdd1323976f", "74454247-a064-4b34-8f43-89337987720e", "a16c811e-2521-4fd3-8779-6a94ccb3be73", "37856b95-5be4-4864-8a49-c83f55c66780", "694f8fb1-df96-46be-b67d-49f2c14a495e", "66b094b0-8fc3-4a94-913f-a5f9312b11a5", "e9013810-4b4c-4b94-a056-4c36702d51a3", "07976191-6a57-4c35-9f3c-55b3b5ecd6d5", "4d1c952d-95d0-4aac-82aa-c3cb509af9f3", ] for k in range(volume_count): uuid = volume_uuid_mapping[k] name = "name_{0}".format(k) volume_attributes = { "size": 40, "status": "in-use", "uuid": uuid, "attachments": '[{"server_id": "server","attachment_id": "attachment"}]', "name": name, "multiattach": 'True', "snapshot_id": uuid, "project_id": "91FFFE30-78A0-4152-ACD2-8310FF274DC9", "metadata": '{"readonly": false,"attached_mode": "rw"}', "bootable": 'False' } volume = element.Volume(**volume_attributes) model.add_volume(volume) mappings = [ (volume_uuid_mapping[0], "host_0@backend_0#pool_0"), (volume_uuid_mapping[1], "host_0@backend_0#pool_0"), (volume_uuid_mapping[2], "host_0@backend_0#pool_1"), (volume_uuid_mapping[3], "host_0@backend_0#pool_1"), (volume_uuid_mapping[4], "host_1@backend_1#pool_0"), (volume_uuid_mapping[5], "host_1@backend_1#pool_0"), 
(volume_uuid_mapping[6], "host_1@backend_1#pool_1"), (volume_uuid_mapping[7], "host_1@backend_1#pool_1"), ] for volume_uuid, pool_name in mappings: model.map_volume( model.get_volume_by_uuid(volume_uuid), model.get_pool_by_pool_name(pool_name), ) return model def generate_scenario_1(self): return self.load_model('storage_scenario_1.xml') class FakerBaremetalModelCollector(base.BaseClusterDataModelCollector): def __init__(self, config=None, osc=None): if config is None: config = mock.Mock(period=777) super(FakerBaremetalModelCollector, self).__init__(config) @property def notification_endpoints(self): return [] def get_audit_scope_handler(self, audit_scope): return None def load_data(self, filename): cwd = os.path.abspath(os.path.dirname(__file__)) data_folder = os.path.join(cwd, "data") with open(os.path.join(data_folder, filename), 'rb') as xml_file: xml_data = xml_file.read() return xml_data def load_model(self, filename): return modelroot.BaremetalModelRoot.from_xml(self.load_data(filename)) def execute(self): return self._cluster_data_model or self.build_scenario_1() def build_scenario_1(self): model = modelroot.BaremetalModelRoot() # number of nodes node_count = 2 for i in range(0, node_count): uuid = utils.generate_uuid() node_attributes = { "uuid": uuid, "power_state": "power on", "maintenance": "false", "maintenance_reason": "null", "extra": {"compute_node_id": i} } node = element.IronicNode(**node_attributes) model.add_node(node) return model def generate_scenario_1(self): return self.load_model('ironic_scenario_1.xml') python-watcher-4.0.0/watcher/tests/decision_engine/model/gnocchi_metrics.py0000664000175000017500000002221613656752270027264 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import oslo_utils class FakeGnocchiMetrics(object): NAME = 'gnocchi' def __init__(self): self.emptytype = "" def empty_one_metric(self, emptytype): self.emptytype = emptytype def mock_get_statistics(self, resource=None, resource_type=None, meter_name=None, period=None, aggregate='mean', granularity=None): result = 0 if meter_name == 'host_cpu_usage': result = self.get_usage_compute_node_cpu(resource) elif meter_name == 'host_ram_usage': result = self.get_usage_compute_node_ram(resource) elif meter_name == 'host_outlet_temp': result = self.get_average_outlet_temperature(resource) elif meter_name == 'host_inlet_temp': result = self.get_average_inlet_temp(resource) elif meter_name == 'host_airflow': result = self.get_average_airflow(resource) elif meter_name == 'host_power': result = self.get_average_power(resource) elif meter_name == 'instance_cpu_usage': result = self.get_average_usage_instance_cpu(resource) elif meter_name == 'instance_ram_usage': result = self.get_average_usage_instance_memory(resource) return result def mock_get_statistics_nn(self, resource=None, meter_name=None, period=None, aggregate='mean', granularity=300): """Statistics for noisy neighbor strategy Signature should match DataSourceBase.get_instance_l3_cache_usage """ result = 0.0 if period == 100: result = self.get_average_l3_cache_current(resource) if period == 200: result = self.get_average_l3_cache_previous(resource) return result def mock_get_statistics_wb(self, resource=None, resource_type=None, meter_name=None, period=None, aggregate='mean', granularity=300): """Statistics for workload 
balance strategy""" result = 0.0 if meter_name == 'instance_cpu_usage': result = self.get_average_usage_instance_cpu_wb(resource) elif meter_name == 'instance_ram_usage': result = self.get_average_usage_instance_memory_wb(resource) return result @staticmethod def get_average_l3_cache_current(resource): """The average l3 cache used by instance""" uuid = resource.uuid mock = {} mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 35 * oslo_utils.units.Ki mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30 * oslo_utils.units.Ki mock['INSTANCE_3'] = 40 * oslo_utils.units.Ki mock['INSTANCE_4'] = 35 * oslo_utils.units.Ki return mock[str(uuid)] @staticmethod def get_average_l3_cache_previous(resource): """The average l3 cache used by instance""" uuid = resource.uuid mock = {} mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 34.5 * ( oslo_utils.units.Ki) mock['cae81432-1631-4d4e-b29c-6f3acdcde906'] = 30.5 * ( oslo_utils.units.Ki) mock['INSTANCE_3'] = 60 * oslo_utils.units.Ki mock['INSTANCE_4'] = 22.5 * oslo_utils.units.Ki return mock[str(uuid)] @staticmethod def get_average_outlet_temperature(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock["Node_0"] = 30 mock["fa69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 30 # use a big value to make sure it exceeds threshold mock["af69c544-906b-4a6a-a9c6-c1f7a8078c73"] = 100 return mock[str(uuid)] @staticmethod def get_usage_compute_node_ram(resource): uuid = resource.uuid mock = {} # Gnocchi returns hardware.memory.used samples in KB. 
mock['Node_0'] = 7 * oslo_utils.units.Ki mock['Node_1'] = 5 * oslo_utils.units.Ki mock['Node_2'] = 29 * oslo_utils.units.Ki mock['Node_3'] = 8 * oslo_utils.units.Ki mock['Node_4'] = 4 * oslo_utils.units.Ki return float(mock[str(uuid)]) @staticmethod def get_average_airflow(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock['Node_0'] = 400 # use a big value to make sure it exceeds threshold mock['Node_1'] = 100 return mock[str(uuid)] @staticmethod def get_average_inlet_temp(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock['Node_0'] = 24 mock['Node_1'] = 26 return mock[str(uuid)] @staticmethod def get_average_power(resource): """The average outlet temperature for host""" uuid = resource.uuid mock = {} mock['Node_0'] = 260 mock['Node_1'] = 240 return mock[str(uuid)] @staticmethod def get_usage_compute_node_cpu(*args, **kwargs): """The last VM CPU usage values to average :param uuid: instance UUID :return: float value """ resource = args[0] uuid = "%s_%s" % (resource.uuid, resource.hostname) # Normalize measurements = {} # node 0 measurements['Node_0_hostname_0'] = 7 measurements['Node_1_hostname_1'] = 7 # node 1 measurements['Node_2_hostname_2'] = 80 # node 2 measurements['Node_3_hostname_3'] = 5 measurements['Node_4_hostname_4'] = 5 measurements['Node_5_hostname_5'] = 10 # node 3 measurements['Node_6_hostname_6'] = 8 # This node doesn't send metrics measurements['LOST_NODE_hostname_7'] = None measurements['Node_19_hostname_19'] = 10 # node 4 measurements['INSTANCE_7_hostname_7'] = 4 # metrics might be missing in scenarios which do not do computations if uuid not in measurements.keys(): measurements[uuid] = 0 result = measurements[uuid] return float(result) if result is not None else None @staticmethod def get_average_usage_instance_cpu(*args, **kwargs): """The last VM CPU usage values to average :param uuid: instance UUID :return: int value """ resource = args[0] uuid = 
resource.uuid mock = {} # node 0 mock['INSTANCE_0'] = 7 mock['INSTANCE_1'] = 7 # node 1 mock['INSTANCE_2'] = 10 # node 2 mock['INSTANCE_3'] = 5 mock['INSTANCE_4'] = 5 mock['INSTANCE_5'] = 10 # node 3 mock['INSTANCE_6'] = 8 # node 4 mock['INSTANCE_7'] = 4 mock['LOST_INSTANCE'] = None # metrics might be missing in scenarios which do not do computations if uuid not in mock.keys(): mock[uuid] = 0 return mock[str(uuid)] @staticmethod def get_average_usage_instance_memory(resource): uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_0'] = 2 mock['INSTANCE_1'] = 5 # node 1 mock['INSTANCE_2'] = 5 # node 2 mock['INSTANCE_3'] = 8 mock['INSTANCE_4'] = 5 mock['INSTANCE_5'] = 16 # node 3 mock['INSTANCE_6'] = 8 # node 4 mock['INSTANCE_7'] = 4 return mock[str(uuid)] @staticmethod def get_average_usage_instance_disk(resource): uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_0'] = 2 mock['INSTANCE_1'] = 2 # node 1 mock['INSTANCE_2'] = 2 # node 2 mock['INSTANCE_3'] = 10 mock['INSTANCE_4'] = 15 mock['INSTANCE_5'] = 20 # node 3 mock['INSTANCE_6'] = 8 # node 4 mock['INSTANCE_7'] = 4 return mock[str(uuid)] @staticmethod def get_average_usage_instance_cpu_wb(resource): """The last VM CPU usage values to average :param uuid: instance UUID :return: float value """ uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_1'] = 80 mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 50 # node 1 mock['INSTANCE_3'] = 20 mock['INSTANCE_4'] = 10 return float(mock[str(uuid)]) @staticmethod def get_average_usage_instance_memory_wb(resource): uuid = resource.uuid mock = {} # node 0 mock['INSTANCE_1'] = 30 mock['73b09e16-35b7-4922-804e-e8f5d9b740fc'] = 12 # node 1 mock['INSTANCE_3'] = 12 mock['INSTANCE_4'] = 12 return mock[str(uuid)] python-watcher-4.0.0/watcher/tests/decision_engine/strategy/0000775000175000017500000000000013656752352024312 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/__init__.py0000664000175000017500000000000013656752270026410 0ustar 
zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/context/0000775000175000017500000000000013656752352025776 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/context/__init__.py0000664000175000017500000000000013656752270030074 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/context/test_strategy_context.py0000664000175000017500000001022713656752270033016 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from watcher.common import utils from watcher.decision_engine.model.collector import manager from watcher.decision_engine.solution import default from watcher.decision_engine.strategy.context import default as d_strategy_ctx from watcher.decision_engine.strategy.selection import default as d_selector from watcher.decision_engine.strategy import strategies from watcher.tests.db import base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.objects import utils as obj_utils class TestStrategyContext(base.DbTestCase): def setUp(self): super(TestStrategyContext, self).setUp() obj_utils.create_test_goal(self.context, id=1, name="DUMMY") audit_template = obj_utils.create_test_audit_template( self.context, uuid=utils.generate_uuid()) self.audit = obj_utils.create_test_audit( self.context, audit_template_id=audit_template.id) self.fake_cluster = faker_cluster_state.FakerModelCollector() p_model = mock.patch.object( strategies.DummyStrategy, "compute_model", new_callable=mock.PropertyMock) self.m_model = p_model.start() self.addCleanup(p_model.stop) self.m_model.return_value = self.fake_cluster.build_scenario_1() strategy_context = d_strategy_ctx.DefaultStrategyContext() @mock.patch.object(d_selector.DefaultStrategySelector, 'select') def test_execute_strategy(self, mock_call): mock_call.return_value = strategies.DummyStrategy( config=mock.Mock()) solution = self.strategy_context.execute_strategy( self.audit, self.context) self.assertIsInstance(solution, default.DefaultSolution) @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", mock.Mock()) def test_execute_force_dummy(self): goal = obj_utils.create_test_goal( self.context, id=50, uuid=utils.generate_uuid(), name="my_goal") strategy = obj_utils.create_test_strategy( self.context, id=42, uuid=utils.generate_uuid(), name="dummy", goal_id=goal.id) audit = obj_utils.create_test_audit( self.context, id=2, name='My Audit {0}'.format(2), goal_id=goal.id, 
strategy_id=strategy.id, uuid=utils.generate_uuid(), ) solution = self.strategy_context.execute_strategy(audit, self.context) self.assertEqual(len(solution.actions), 3) @mock.patch.object(strategies.BasicConsolidation, "execute") @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector", mock.Mock()) def test_execute_force_basic(self, mock_call): expected_strategy = "basic" mock_call.return_value = expected_strategy obj_utils.create_test_goal(self.context, id=50, uuid=utils.generate_uuid(), name="my_goal") strategy = obj_utils.create_test_strategy(self.context, id=42, uuid=utils.generate_uuid(), name=expected_strategy) audit = obj_utils.create_test_audit( self.context, id=2, name='My Audit {0}'.format(2), strategy_id=strategy.id, uuid=utils.generate_uuid(), ) solution = self.strategy_context.execute_strategy(audit, self.context) self.assertEqual(solution, expected_strategy) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/0000775000175000017500000000000013656752352026464 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/__init__.py0000664000175000017500000000000013656752270030562 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_actuator.py0000664000175000017500000000246013656752270031720 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestActuator(TestBaseStrategy): def setUp(self): super(TestActuator, self).setUp() self.strategy = strategies.Actuator(config=mock.Mock()) def test_actuator_strategy(self): fake_action = {"action_type": "TEST", "input_parameters": {"a": "b"}} self.strategy.input_parameters = utils.Struct( {"actions": [fake_action]}) solution = self.strategy.execute() self.assertEqual(1, len(solution.actions)) self.assertEqual([fake_action], solution.actions) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_outlet_temp_control.py0000664000175000017500000001143013656752270034174 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 Intel Corp # # Authors: Zhenzan Zhou # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import collections import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import ceilometer_metrics from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestOutletTempControl(TestBaseStrategy): scenarios = [ ("Ceilometer", {"datasource": "ceilometer", "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestOutletTempControl, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_datasource = mock.patch.object( strategies.OutletTempControl, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( statistic_aggregation=self.fake_metrics.mock_get_statistics, NAME=self.fake_metrics.NAME) self.strategy = strategies.OutletTempControl( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'threshold': 34.3}) self.strategy.threshold = 34.3 def test_group_hosts_by_outlet_temp(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model n1, n2 = self.strategy.group_hosts_by_outlet_temp() self.assertEqual("af69c544-906b-4a6a-a9c6-c1f7a8078c73", n1[0]['compute_node'].uuid) self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", n2[0]['compute_node'].uuid) def test_choose_instance_to_migrate(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model n1, n2 = self.strategy.group_hosts_by_outlet_temp() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) 
self.assertEqual('af69c544-906b-4a6a-a9c6-c1f7a8078c73', instance_to_mig[0].uuid) self.assertEqual('a4cab39b-9828-413a-bf88-f76921bf1517', instance_to_mig[1].uuid) def test_filter_dest_servers(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model n1, n2 = self.strategy.group_hosts_by_outlet_temp() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) dest_hosts = self.strategy.filter_dest_servers(n2, instance_to_mig[1]) self.assertEqual(1, len(dest_hosts)) self.assertEqual("fa69c544-906b-4a6a-a9c6-c1f7a8078c73", dest_hosts[0]['compute_node'].uuid) def test_execute_no_workload(self): model = self.fake_c_cluster.\ generate_scenario_4_with_1_node_no_instance() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual([], solution.actions) def test_execute(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) num_migrations = actions_counter.get("migrate", 0) self.assertEqual(1, num_migrations) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_dummy_strategy.py0000664000175000017500000000360213656752270033152 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestDummyStrategy(TestBaseStrategy): def setUp(self): super(TestDummyStrategy, self).setUp() self.strategy = strategies.DummyStrategy(config=mock.Mock()) def test_dummy_strategy(self): dummy = strategies.DummyStrategy(config=mock.Mock()) dummy.input_parameters = utils.Struct() dummy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) solution = dummy.execute() self.assertEqual(3, len(solution.actions)) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'para1': 4.0, 'para2': 'Hi'}) solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_noisy_neighbor.py0000664000175000017500000001273313656752270033120 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import collections import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import ceilometer_metrics from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestNoisyNeighbor(TestBaseStrategy): scenarios = [ ("Ceilometer", {"datasource": "ceilometer", "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestNoisyNeighbor, self).setUp() # fake metrics self.f_metrics = self.fake_datasource_cls() p_datasource = mock.patch.object( strategies.NoisyNeighbor, "datasource_backend", new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( get_instance_l3_cache_usage=self.f_metrics.mock_get_statistics_nn) self.strategy = strategies.NoisyNeighbor(config=mock.Mock()) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'cache_threshold': 35}) self.strategy.threshold = 35 self.strategy.input_parameters.update({'period': 100}) self.strategy.threshold = 100 def test_group_hosts(self): self.strategy.cache_threshold = 35 self.strategy.period = 100 model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model node_uuid = 'Node_1' n1, n2 = 
self.strategy.group_hosts() self.assertTrue(node_uuid in n1) self.assertEqual(n1[node_uuid]['priority_vm'].uuid, 'INSTANCE_3') self.assertEqual(n1[node_uuid]['noisy_vm'].uuid, 'INSTANCE_4') self.assertEqual('Node_0', n2[0].uuid) def test_find_priority_instance(self): self.strategy.cache_threshold = 35 self.strategy.period = 100 model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model potential_prio_inst = model.get_instance_by_uuid('INSTANCE_3') inst_res = self.strategy.find_priority_instance(potential_prio_inst) self.assertEqual('INSTANCE_3', inst_res.uuid) def test_find_noisy_instance(self): self.strategy.cache_threshold = 35 self.strategy.period = 100 model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model potential_noisy_inst = model.get_instance_by_uuid('INSTANCE_4') inst_res = self.strategy.find_noisy_instance(potential_noisy_inst) self.assertEqual('INSTANCE_4', inst_res.uuid) def test_filter_destination_hosts(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.cache_threshold = 35 self.strategy.period = 100 n1, n2 = self.strategy.group_hosts() mig_source_node = max(n1.keys(), key=lambda a: n1[a]['priority_vm']) instance_to_mig = n1[mig_source_node]['noisy_vm'] dest_hosts = self.strategy.filter_dest_servers( n2, instance_to_mig) self.assertEqual(1, len(dest_hosts)) self.assertEqual('Node_0', dest_hosts[0].uuid) def test_execute_no_workload(self): self.strategy.cache_threshold = 35 self.strategy.period = 100 model = self.fake_c_cluster.\ generate_scenario_4_with_1_node_no_instance() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual([], solution.actions) def test_execute(self): self.strategy.cache_threshold = 35 self.strategy.period = 100 model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() 
actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) num_migrations = actions_counter.get("migrate", 0) self.assertEqual(1, num_migrations) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_strategy_endpoint.py0000664000175000017500000000547013656752270033644 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from watcher.decision_engine.strategy.strategies import base as strategy_base from watcher.tests import base class TestStrategyEndpoint(base.BaseTestCase): def test_collect_metrics(self): datasource = mock.MagicMock() datasource.list_metrics.return_value = ["m1", "m2"] datasource.METRIC_MAP = {"metric1": "m1", "metric2": "m2", "metric3": "m3"} strategy = mock.MagicMock() strategy.DATASOURCE_METRICS = ["metric1", "metric2", "metric3"] strategy.config.datasource = "gnocchi" se = strategy_base.StrategyEndpoint(mock.MagicMock()) result = se._collect_metrics(strategy, datasource) expected_result = {'type': 'Metrics', 'state': [{"m1": "available"}, {"m2": "available"}, {"m3": "not available"}], 'mandatory': False, 'comment': ''} self.assertEqual(expected_result, result) def test_get_datasource_status(self): strategy = mock.MagicMock() datasource = mock.MagicMock() datasource.NAME = 'gnocchi' datasource.check_availability.return_value = "available" se = strategy_base.StrategyEndpoint(mock.MagicMock()) result = se._get_datasource_status(strategy, datasource) expected_result = {'type': 'Datasource', 'state': "gnocchi: available", 'mandatory': True, 'comment': ''} self.assertEqual(expected_result, result) def test_get_cdm(self): strategy = mock.MagicMock() strategy.compute_model = mock.MagicMock() del strategy.storage_model strategy.baremetal_model = mock.MagicMock() se = strategy_base.StrategyEndpoint(mock.MagicMock()) result = se._get_cdm(strategy) expected_result = {'type': 'CDM', 'state': [{"compute_model": "available"}, {"storage_model": "not available"}, {"baremetal_model": "available"}], 'mandatory': True, 'comment': ''} self.assertEqual(expected_result, result) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_basic_consolidation.py0000664000175000017500000002341313656752270034105 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache 
License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import collections import copy import mock from watcher.applier.loading import default from watcher.common import clients from watcher.decision_engine.model import model_root from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import ceilometer_metrics from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.model import monasca_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestBasicConsolidation(TestBaseStrategy): scenarios = [ ("Ceilometer", {"datasource": "ceilometer", "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), ("Monasca", {"datasource": "monasca", "fake_datasource_cls": monasca_metrics.FakeMonascaMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestBasicConsolidation, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_osc = mock.patch.object( clients, "OpenStackClients") self.m_osc = p_osc.start() self.addCleanup(p_osc.stop) p_datasource = mock.patch.object( strategies.BasicConsolidation, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( get_host_cpu_usage=self.fake_metrics.get_usage_compute_node_cpu, get_instance_cpu_usage=self.fake_metrics. 
get_average_usage_instance_cpu ) self.strategy = strategies.BasicConsolidation( config=mock.Mock(datasource=self.datasource)) def test_cluster_size(self): size_cluster = len( self.fake_c_cluster.generate_scenario_1().get_all_compute_nodes()) size_cluster_assert = 5 self.assertEqual(size_cluster_assert, size_cluster) def test_basic_consolidation_score_comute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_1_score = 0.023333333333333317 self.assertEqual(node_1_score, self.strategy.calculate_score_node( model.get_node_by_uuid("Node_1"))) node_2_score = 0.26666666666666666 self.assertEqual(node_2_score, self.strategy.calculate_score_node( model.get_node_by_uuid("Node_2"))) node_0_score = 0.023333333333333317 self.assertEqual(node_0_score, self.strategy.calculate_score_node( model.get_node_by_uuid("Node_0"))) def test_basic_consolidation_score_instance(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_0_score = 0.023333333333333317 self.assertEqual( instance_0_score, self.strategy.calculate_score_instance(instance_0)) instance_1 = model.get_instance_by_uuid("INSTANCE_1") instance_1_score = 0.023333333333333317 self.assertEqual( instance_1_score, self.strategy.calculate_score_instance(instance_1)) instance_2 = model.get_instance_by_uuid("INSTANCE_2") instance_2_score = 0.033333333333333326 self.assertEqual( instance_2_score, self.strategy.calculate_score_instance(instance_2)) instance_6 = model.get_instance_by_uuid("INSTANCE_6") instance_6_score = 0.02666666666666669 self.assertEqual( instance_6_score, self.strategy.calculate_score_instance(instance_6)) instance_7 = model.get_instance_by_uuid("INSTANCE_7") instance_7_score = 0.013333333333333345 self.assertEqual( instance_7_score, self.strategy.calculate_score_instance(instance_7)) def test_basic_consolidation_score_instance_disk(self): model = 
self.fake_c_cluster.generate_scenario_5_with_instance_disk_0() self.m_c_model.return_value = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_0_score = 0.023333333333333355 self.assertEqual( instance_0_score, self.strategy.calculate_score_instance(instance_0)) def test_basic_consolidation_weight(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") cores = 16 # 80 Go disk = 80 # mem 8 Go mem = 8 instance_0_weight_assert = 3.1999999999999997 self.assertEqual( instance_0_weight_assert, self.strategy.calculate_weight(instance_0, cores, disk, mem)) def test_check_migration(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model all_instances = model.get_all_instances() all_nodes = model.get_all_compute_nodes() instance0 = all_instances[list(all_instances.keys())[0]] node0 = all_nodes[list(all_nodes.keys())[0]] self.strategy.check_migration(node0, node0, instance0) def test_threshold(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model all_nodes = model.get_all_compute_nodes() node0 = all_nodes[list(all_nodes.keys())[0]] self.assertFalse(self.strategy.check_threshold( node0, 1000, 1000, 1000)) def test_basic_consolidation_works_on_model_copy(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = copy.deepcopy(model) self.assertTrue(model_root.ModelRoot.is_isomorphic( model, self.strategy.compute_model)) self.assertIsNot(model, self.strategy.compute_model) def test_basic_consolidation_migration(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) expected_num_migrations = 1 expected_power_state = 1 num_migrations = 
actions_counter.get("migrate", 0) num_node_state_change = actions_counter.get( "change_nova_service_state", 0) self.assertEqual(expected_num_migrations, num_migrations) self.assertEqual(expected_power_state, num_node_state_change) def test_basic_consolidation_execute_scenario_8_with_4_nodes(self): model = self.fake_c_cluster.generate_scenario_8_with_4_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) expected_num_migrations = 5 expected_power_state = 3 expected_global_efficacy = 75 num_migrations = actions_counter.get("migrate", 0) num_node_state_change = actions_counter.get( "change_nova_service_state", 0) global_efficacy_value = solution.global_efficacy[0].get('value', 0) self.assertEqual(expected_num_migrations, num_migrations) self.assertEqual(expected_power_state, num_node_state_change) self.assertEqual(expected_global_efficacy, global_efficacy_value) # calculate_weight def test_execute_no_workload(self): model = ( self.fake_c_cluster .generate_scenario_4_with_1_node_no_instance()) self.m_c_model.return_value = model with mock.patch.object( strategies.BasicConsolidation, 'calculate_weight' ) as mock_score_call: mock_score_call.return_value = 0 solution = self.strategy.execute() self.assertEqual(0, solution.efficacy.global_efficacy[0].get('value')) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() def test_parameter_backwards_compat(self): # Set the deprecated node values to a none default value self.strategy.input_parameters.update( {'aggregation_method': { "instance": "mean", "compute_node": "mean", 
"node": 'min'}}) # Pre execute method handles backwards compatibility of parameters self.strategy.pre_execute() # assert that the compute_node values are updated to the those of node self.assertEqual( 'min', self.strategy.aggregation_method['compute_node']) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_dummy_with_scorer.py0000664000175000017500000000362113656752270033641 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestDummyWithScorer(TestBaseStrategy): def setUp(self): super(TestDummyWithScorer, self).setUp() self.strategy = strategies.DummyWithScorer(config=mock.Mock()) def test_dummy_with_scorer(self): dummy = strategies.DummyWithScorer(config=mock.Mock()) dummy.input_parameters = utils.Struct() dummy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) solution = dummy.execute() self.assertEqual(4, len(solution.actions)) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_3_with_2_nodes() self.m_c_model.return_value = model self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'param1': 4.0, 'param2': 'Hi'}) solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() ././@LongLink0000000000000000000000000000014600000000000011216 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.pypython-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_workload_stabilization.p0000664000175000017500000002733113656752270034467 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica LLC # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock from watcher.common import clients from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import ceilometer_metrics from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestWorkloadStabilization(TestBaseStrategy): scenarios = [ ("Ceilometer", {"datasource": "ceilometer", "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestWorkloadStabilization, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() self.hosts_load_assert = { 'Node_0': {'instance_cpu_usage': 0.07, 'instance_ram_usage': 7.0, 'vcpus': 40}, 'Node_1': {'instance_cpu_usage': 0.07, 'instance_ram_usage': 5, 'vcpus': 40}, 'Node_2': {'instance_cpu_usage': 0.8, 'instance_ram_usage': 29, 'vcpus': 40}, 'Node_3': {'instance_cpu_usage': 0.05, 'instance_ram_usage': 8, 'vcpus': 40}, 'Node_4': {'instance_cpu_usage': 0.05, 'instance_ram_usage': 4, 'vcpus': 40}} p_osc = mock.patch.object( clients, "OpenStackClients") self.m_osc = p_osc.start() self.addCleanup(p_osc.stop) p_datasource = mock.patch.object( strategies.WorkloadStabilization, "datasource_backend", new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( 
statistic_aggregation=self.fake_metrics.mock_get_statistics) self.strategy = strategies.WorkloadStabilization( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update( {'metrics': ["instance_cpu_usage", "instance_ram_usage"], 'thresholds': {"instance_cpu_usage": 0.2, "instance_ram_usage": 0.2}, 'weights': {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0}, 'instance_metrics': {"instance_cpu_usage": "host_cpu_usage", "instance_ram_usage": "host_ram_usage"}, 'host_choice': 'retry', 'retry_count': 1, 'periods': { "instance": 720, "compute_node": 600, "node": 0}, 'aggregation_method': { "instance": "mean", "compute_node": "mean", "node": ''}}) self.strategy.metrics = ["instance_cpu_usage", "instance_ram_usage"] self.strategy.thresholds = {"instance_cpu_usage": 0.2, "instance_ram_usage": 0.2} self.strategy.weights = {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0} self.strategy.instance_metrics = { "instance_cpu_usage": "host_cpu_usage", "instance_ram_usage": "host_ram_usage"} self.strategy.host_choice = 'retry' self.strategy.retry_count = 1 self.strategy.periods = { "instance": 720, "compute_node": 600, # node is deprecated "node": 0, } self.strategy.aggregation_method = { "instance": "mean", "compute_node": "mean", # node is deprecated "node": '', } def test_get_instance_load(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model instance0 = model.get_instance_by_uuid("INSTANCE_0") instance_0_dict = { 'uuid': 'INSTANCE_0', 'vcpus': 10, 'instance_cpu_usage': 0.07, 'instance_ram_usage': 2} self.assertEqual( instance_0_dict, self.strategy.get_instance_load(instance0)) def test_get_instance_load_with_no_metrics(self): model = self.fake_c_cluster.\ generate_scenario_1_with_1_node_unavailable() self.m_c_model.return_value = model lost_instance = model.get_instance_by_uuid("LOST_INSTANCE") 
self.assertIsNone(self.strategy.get_instance_load(lost_instance)) def test_normalize_hosts_load(self): self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() fake_hosts = {'Node_0': {'instance_cpu_usage': 0.07, 'instance_ram_usage': 7}, 'Node_1': {'instance_cpu_usage': 0.05, 'instance_ram_usage': 5}} normalized_hosts = {'Node_0': {'instance_cpu_usage': 0.07, 'instance_ram_usage': 0.05303030303030303}, 'Node_1': {'instance_cpu_usage': 0.05, 'instance_ram_usage': 0.03787878787878788}} self.assertEqual( normalized_hosts, self.strategy.normalize_hosts_load(fake_hosts)) def test_get_available_nodes(self): self.m_c_model.return_value = self.fake_c_cluster. \ generate_scenario_9_with_3_active_plus_1_disabled_nodes() self.assertEqual(3, len(self.strategy.get_available_nodes())) def test_get_hosts_load(self): self.m_c_model.return_value = self.fake_c_cluster.\ generate_scenario_1() self.assertEqual(self.strategy.get_hosts_load(), self.hosts_load_assert) def test_get_hosts_load_with_node_missing(self): self.m_c_model.return_value = \ self.fake_c_cluster.\ generate_scenario_1_with_1_node_unavailable() self.assertEqual(self.hosts_load_assert, self.strategy.get_hosts_load()) def test_get_sd(self): test_cpu_sd = 0.296 test_ram_sd = 9.3 self.assertEqual( round(self.strategy.get_sd( self.hosts_load_assert, 'instance_cpu_usage'), 3), test_cpu_sd) self.assertEqual( round(self.strategy.get_sd( self.hosts_load_assert, 'instance_ram_usage'), 1), test_ram_sd) def test_calculate_weighted_sd(self): sd_case = [0.5, 0.75] self.assertEqual(self.strategy.calculate_weighted_sd(sd_case), 1.25) def test_calculate_migration_case(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model instance = model.get_instance_by_uuid("INSTANCE_5") src_node = model.get_node_by_uuid("Node_2") dst_node = model.get_node_by_uuid("Node_1") result = self.strategy.calculate_migration_case( self.hosts_load_assert, instance, src_node, dst_node)[-1][dst_node.uuid] 
result['instance_cpu_usage'] = round(result['instance_cpu_usage'], 3) self.assertEqual(result, {'instance_cpu_usage': 0.095, 'instance_ram_usage': 21.0, 'vcpus': 40}) def test_simulate_migrations(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.strategy.host_choice = 'fullsearch' self.assertEqual( 10, len(self.strategy.simulate_migrations(self.hosts_load_assert))) def test_simulate_migrations_with_all_instances_exclude(self): model = \ self.fake_c_cluster.\ generate_scenario_1_with_all_instances_exclude() self.m_c_model.return_value = model self.strategy.host_choice = 'fullsearch' self.assertEqual( 0, len(self.strategy.simulate_migrations(self.hosts_load_assert))) def test_check_threshold(self): self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() self.strategy.thresholds = {'instance_cpu_usage': 0.001, 'instance_ram_usage': 0.2} self.strategy.simulate_migrations = mock.Mock(return_value=True) self.assertTrue(self.strategy.check_threshold()) def test_execute_one_migration(self): self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() self.strategy.thresholds = {'instance_cpu_usage': 0.001, 'instance_ram_usage': 0.2} self.strategy.simulate_migrations = mock.Mock( return_value=[ {'instance': 'INSTANCE_4', 's_host': 'Node_2', 'host': 'Node_1'}] ) with mock.patch.object(self.strategy, 'migrate') as mock_migration: self.strategy.do_execute() mock_migration.assert_called_once_with( 'INSTANCE_4', 'Node_2', 'Node_1') def test_execute_multiply_migrations(self): self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() self.strategy.thresholds = {'instance_cpu_usage': 0.00001, 'instance_ram_usage': 0.0001} self.strategy.simulate_migrations = mock.Mock( return_value=[ {'instance': 'INSTANCE_4', 's_host': 'Node_2', 'host': 'Node_1'}, {'instance': 'INSTANCE_3', 's_host': 'Node_2', 'host': 'Node_3'}] ) with mock.patch.object(self.strategy, 'migrate') as mock_migrate: 
self.strategy.do_execute() self.assertEqual(mock_migrate.call_count, 2) def test_execute_nothing_to_migrate(self): self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() self.strategy.thresholds = {'instance_cpu_usage': 0.042, 'instance_ram_usage': 0.0001} self.strategy.simulate_migrations = mock.Mock(return_value=False) self.strategy.instance_migrations_count = 0 with mock.patch.object(self.strategy, 'migrate') as mock_migrate: self.strategy.execute() mock_migrate.assert_not_called() def test_parameter_backwards_compat(self): # Set the deprecated node values to a none default value self.strategy.input_parameters.update( {'periods': { "instance": 720, "compute_node": 600, "node": 500 }, 'aggregation_method': { "instance": "mean", "compute_node": "mean", "node": 'min'}}) # Pre execute method handles backwards compatibility of parameters self.strategy.pre_execute() # assert that the compute_node values are updated to the those of node self.assertEqual( 'min', self.strategy.aggregation_method['compute_node']) self.assertEqual( 500, self.strategy.periods['compute_node']) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_zone_migration.py0000664000175000017500000007371113656752270033131 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import collections import mock import cinderclient import novaclient from watcher.common import cinder_helper from watcher.common import clients from watcher.common import nova_helper from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy volume_uuid_mapping = faker_cluster_state.volume_uuid_mapping class TestZoneMigration(TestBaseStrategy): def setUp(self): super(TestZoneMigration, self).setUp() # fake storage cluster self.fake_s_cluster = faker_cluster_state.FakerStorageModelCollector() p_s_model = mock.patch.object( strategies.ZoneMigration, "storage_model", new_callable=mock.PropertyMock) self.m_s_model = p_s_model.start() self.addCleanup(p_s_model.stop) p_migrate_compute_nodes = mock.patch.object( strategies.ZoneMigration, "migrate_compute_nodes", new_callable=mock.PropertyMock) self.m_migrate_compute_nodes = p_migrate_compute_nodes.start() self.addCleanup(p_migrate_compute_nodes.stop) p_migrate_storage_pools = mock.patch.object( strategies.ZoneMigration, "migrate_storage_pools", new_callable=mock.PropertyMock) self.m_migrate_storage_pools = p_migrate_storage_pools.start() self.addCleanup(p_migrate_storage_pools.stop) p_parallel_total = mock.patch.object( strategies.ZoneMigration, "parallel_total", new_callable=mock.PropertyMock) self.m_parallel_total = p_parallel_total.start() self.addCleanup(p_parallel_total.stop) p_parallel_per_node = mock.patch.object( strategies.ZoneMigration, "parallel_per_node", new_callable=mock.PropertyMock) self.m_parallel_per_node = p_parallel_per_node.start() self.addCleanup(p_parallel_per_node.stop) p_parallel_per_pool = mock.patch.object( strategies.ZoneMigration, "parallel_per_pool", new_callable=mock.PropertyMock) self.m_parallel_per_pool = p_parallel_per_pool.start() self.addCleanup(p_parallel_per_pool.stop) p_priority = 
mock.patch.object( strategies.ZoneMigration, "priority", new_callable=mock.PropertyMock ) self.m_priority = p_priority.start() self.addCleanup(p_priority.stop) model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model model = self.fake_s_cluster.generate_scenario_1() self.m_s_model.return_value = model self.m_parallel_total.return_value = 6 self.m_parallel_per_node.return_value = 2 self.m_parallel_per_pool.return_value = 2 self.m_audit_scope.return_value = mock.Mock() self.m_migrate_compute_nodes.return_value = [ {"src_node": "src1", "dst_node": "dst1"}, {"src_node": "src2", "dst_node": "dst2"} ] self.m_migrate_storage_pools.return_value = [ {"src_pool": "src1@back1#pool1", "dst_pool": "dst1@back1#pool1", "src_type": "type1", "dst_type": "type1"}, {"src_pool": "src2@back1#pool1", "dst_pool": "dst2@back2#pool1", "src_type": "type2", "dst_type": "type3"} ] self.strategy = strategies.ZoneMigration( config=mock.Mock()) self.m_osc_cls = mock.Mock() self.m_osc = mock.Mock(spec=clients.OpenStackClients) self.m_osc_cls.return_value = self.m_osc m_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.m_osc_cls) m_openstack_clients.start() self.addCleanup(m_openstack_clients.stop) self.m_n_helper_cls = mock.Mock() self.m_n_helper = mock.Mock(spec=nova_helper.NovaHelper) self.m_n_helper_cls.return_value = self.m_n_helper m_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.m_n_helper_cls) m_nova_helper.start() self.addCleanup(m_nova_helper.stop) self.m_c_helper_cls = mock.Mock() self.m_c_helper = mock.Mock(spec=cinder_helper.CinderHelper) self.m_c_helper_cls.return_value = self.m_c_helper m_cinder_helper = mock.patch.object( cinder_helper, "CinderHelper", self.m_c_helper_cls) m_cinder_helper.start() self.addCleanup(m_cinder_helper.stop) @staticmethod def fake_instance(**kwargs): instance = mock.MagicMock(spec=novaclient.v2.servers.Server) instance.id = kwargs.get('id', utils.generate_uuid()) instance.name = 
kwargs.get('name', 'fake_name') instance.status = kwargs.get('status', 'ACTIVE') instance.tenant_id = kwargs.get('project_id', None) instance.flavor = {'id': kwargs.get('flavor_id', None)} setattr(instance, 'OS-EXT-SRV-ATTR:host', kwargs.get('host')) setattr(instance, 'created_at', kwargs.get('created_at', '1977-01-01T00:00:00')) setattr(instance, 'OS-EXT-STS:vm_state', kwargs.get('state', 'active')) return instance @staticmethod def fake_volume(**kwargs): volume = mock.MagicMock(spec=cinderclient.v2.volumes.Volume) volume.id = kwargs.get('id', utils.generate_uuid()) volume.name = kwargs.get('name', 'fake_name') volume.status = kwargs.get('status', 'available') tenant_id = kwargs.get('project_id', None) setattr(volume, 'os-vol-tenant-attr:tenant_id', tenant_id) setattr(volume, 'os-vol-host-attr:host', kwargs.get('host')) setattr(volume, 'size', kwargs.get('size', '1')) setattr(volume, 'created_at', kwargs.get('created_at', '1977-01-01T00:00:00')) volume.volume_type = kwargs.get('volume_type', 'type1') return volume @staticmethod def fake_flavor(**kwargs): flavor = mock.MagicMock() flavor.id = kwargs.get('id', None) flavor.ram = kwargs.get('mem_size', '1') flavor.vcpus = kwargs.get('vcpu_num', '1') flavor.disk = kwargs.get('disk_size', '1') return flavor def test_get_src_node_list(self): instances = self.strategy.get_src_node_list() self.assertEqual(sorted(instances), sorted(["src1", "src2"])) def test_get_instances(self): instance_on_src1 = self.fake_instance( host="src1", id="INSTANCE_1", name="INSTANCE_1") instance_on_src2 = self.fake_instance( host="src2", id="INSTANCE_2", name="INSTANCE_2") instance_on_src3 = self.fake_instance( host="src3", id="INSTANCE_3", name="INSTANCE_3") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, instance_on_src2, instance_on_src3, ] instances = self.strategy.get_instances() # src1,src2 is in instances # src3 is not in instances self.assertIn(instance_on_src1, instances) self.assertIn(instance_on_src2, instances) 
self.assertNotIn(instance_on_src3, instances) def test_get_volumes(self): volume_on_src1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1") volume_on_src2 = self.fake_volume(host="src2@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2") volume_on_src3 = self.fake_volume(host="src3@back2#pool1", id=volume_uuid_mapping["volume_3"], name="volume_3") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1, volume_on_src2, volume_on_src3, ] volumes = self.strategy.get_volumes() # src1,src2 is in instances # src3 is not in instances self.assertIn(volume_on_src1, volumes) self.assertIn(volume_on_src2, volumes) self.assertNotIn(volume_on_src3, volumes) # execute # def test_execute_live_migrate_instance(self): instance_on_src1 = self.fake_instance( host="src1", id="INSTANCE_1", name="INSTANCE_1") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, ] self.m_c_helper.get_volume_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("live", 0)) global_efficacy_value = solution.global_efficacy[0].get('value', 0) self.assertEqual(100, global_efficacy_value) def test_execute_cold_migrate_instance(self): instance_on_src1 = self.fake_instance( host="src1", id="INSTANCE_1", name="INSTANCE_1") setattr(instance_on_src1, "status", "SHUTOFF") setattr(instance_on_src1, "OS-EXT-STS:vm_state", "stopped") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, ] self.m_c_helper.get_volume_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("cold", 0)) global_efficacy_value = solution.global_efficacy[1].get('value', 0) self.assertEqual(100, global_efficacy_value) def 
test_execute_migrate_volume(self): volume_on_src1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1, ] self.m_n_helper.get_instance_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("migrate", 0)) global_efficacy_value = solution.global_efficacy[2].get('value', 0) self.assertEqual(100, global_efficacy_value) def test_execute_retype_volume(self): volume_on_src2 = self.fake_volume(host="src2@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2") self.m_c_helper.get_volume_list.return_value = [ volume_on_src2, ] self.m_n_helper.get_instance_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("retype", 0)) global_efficacy_value = solution.global_efficacy[2].get('value', 0) self.assertEqual(100, global_efficacy_value) def test_execute_swap_volume(self): volume_on_src1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1") volume_on_src1.status = "in-use" self.m_c_helper.get_volume_list.return_value = [ volume_on_src1, ] self.m_n_helper.get_instance_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("swap", 0)) global_efficacy_value = solution.global_efficacy[3].get('value', 0) self.assertEqual(100, global_efficacy_value) def test_execute_live_migrate_instance_parallel(self): instance_on_src1_1 = self.fake_instance( host="src1", id="INSTANCE_1", name="INSTANCE_1") instance_on_src1_2 = self.fake_instance( 
host="src2", id="INSTANCE_2", name="INSTANCE_2") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1_1, instance_on_src1_2, ] self.m_c_helper.get_volume_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(2, migration_types.get("live", 0)) global_efficacy_value = solution.global_efficacy[0].get('value', 0) self.assertEqual(100, global_efficacy_value) def test_execute_parallel_per_node(self): self.m_parallel_per_node.return_value = 1 instance_on_src1_1 = self.fake_instance( host="src1", id="INSTANCE_1", name="INSTANCE_1") instance_on_src1_2 = self.fake_instance( host="src1", id="INSTANCE_2", name="INSTANCE_2") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1_1, instance_on_src1_2, ] self.m_c_helper.get_volume_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("live", 0)) global_efficacy_value = solution.global_efficacy[0].get('value', 0) self.assertEqual(50.0, global_efficacy_value) def test_execute_migrate_volume_parallel(self): volume_on_src1_1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1") volume_on_src1_2 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1_1, volume_on_src1_2, ] self.m_n_helper.get_instance_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(2, migration_types.get("migrate", 0)) global_efficacy_value = solution.global_efficacy[2].get('value', 0) self.assertEqual(100, global_efficacy_value) def 
test_execute_parallel_per_pool(self): self.m_parallel_per_pool.return_value = 1 volume_on_src1_1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1") volume_on_src1_2 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1_1, volume_on_src1_2, ] self.m_n_helper.get_instance_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("migrate", 0)) global_efficacy_value = solution.global_efficacy[2].get('value', 0) self.assertEqual(50.0, global_efficacy_value) def test_execute_parallel_total(self): self.m_parallel_total.return_value = 1 self.m_parallel_per_pool.return_value = 1 volume_on_src1_1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1") volume_on_src1_2 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2") volume_on_src2_1 = self.fake_volume(host="src2@back1#pool1", id=volume_uuid_mapping["volume_3"], name="volume_3") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1_1, volume_on_src1_2, volume_on_src2_1, ] self.m_n_helper.get_instance_list.return_value = [] solution = self.strategy.execute() migration_types = collections.Counter( [action.get('input_parameters')['migration_type'] for action in solution.actions]) self.assertEqual(1, migration_types.get("migrate", 0)) # priority filter # def test_get_priority_filter_list(self): self.m_priority.return_value = { "project": ["pj1"], "compute_node": ["compute1", "compute2"], "compute": ["cpu_num"], "storage_pool": ["pool1", "pool2"], "storage": ["size"] } filters = self.strategy.get_priority_filter_list() self.assertIn(strategies.zone_migration.ComputeHostSortFilter, map(lambda l: l.__class__, filters)) 
self.assertIn(strategies.zone_migration.StorageHostSortFilter, map(lambda l: l.__class__, filters)) self.assertIn(strategies.zone_migration.ProjectSortFilter, map(lambda l: l.__class__, filters)) # ComputeHostSortFilter # def test_filtered_targets_compute_nodes(self): instance_on_src1 = self.fake_instance( host="src1", id="INSTANCE_1", name="INSTANCE_1") instance_on_src2 = self.fake_instance( host="src2", id="INSTANCE_2", name="INSTANCE_2") instance_on_src3 = self.fake_instance( host="src3", id="INSTANCE_3", name="INSTANCE_3") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, instance_on_src2, instance_on_src3, ] self.m_c_helper.get_volume_list.return_value = [] self.m_priority.return_value = { "compute_node": ["src1", "src2"], } targets = self.strategy.filtered_targets() self.assertEqual(targets.get('instance'), [instance_on_src1, instance_on_src2]) # StorageHostSortFilter # def test_filtered_targets_storage_pools(self): volume_on_src1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1") volume_on_src2 = self.fake_volume(host="src2@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2") volume_on_src3 = self.fake_volume(host="src3@back2#pool1", id=volume_uuid_mapping["volume_3"], name="volume_3") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1, volume_on_src2, volume_on_src3, ] self.m_n_helper.get_instance_list.return_value = [] self.m_priority.return_value = { "storage_pool": ["src1@back1#pool1", "src2@back1#pool1"], } targets = self.strategy.filtered_targets() self.assertEqual(targets.get("volume"), [volume_on_src1, volume_on_src2]) # ProjectSortFilter # def test_filtered_targets_project(self): instance_on_src1 = self.fake_instance( host="src1", id="INSTANCE_1", name='INSTANCE_1', project_id="pj2") instance_on_src2 = self.fake_instance( host="src2", id="INSTANCE_2", name='INSTANCE_2', project_id="pj1") instance_on_src3 = self.fake_instance( host="src3", id="INSTANCE_3", 
name='INSTANCE_3', project_id="pj3") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, instance_on_src2, instance_on_src3, ] self.m_c_helper.get_volume_list.return_value = [] volume_on_src1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1", project_id="pj2") volume_on_src2 = self.fake_volume(host="src2@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2", project_id="pj1") volume_on_src3 = self.fake_volume(host="src3@back2#pool1", id=volume_uuid_mapping["volume_3"], name="volume_3", project_id="pj3") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1, volume_on_src2, volume_on_src3, ] self.m_priority.return_value = { "project": ["pj1"], } targets = self.strategy.filtered_targets() self.assertEqual(targets.get('instance'), [instance_on_src2, instance_on_src1]) self.assertEqual(targets.get('volume'), [volume_on_src2, volume_on_src1]) self.assertEqual(targets, {"instance": [instance_on_src2, instance_on_src1], "volume": [volume_on_src2, volume_on_src1]}) # ComputeSpecSortFilter # def test_filtered_targets_instance_mem_size(self): flavor_64 = self.fake_flavor(id="1", mem_size="64") flavor_128 = self.fake_flavor(id="2", mem_size="128") flavor_512 = self.fake_flavor(id="3", mem_size="512") self.m_n_helper.get_flavor_list.return_value = [ flavor_64, flavor_128, flavor_512, ] instance_on_src1 = self.fake_instance(host="src1", name="INSTANCE_1", id="INSTANCE_1", flavor_id="1") instance_on_src2 = self.fake_instance(host="src2", name="INSTANCE_2", id="INSTANCE_2", flavor_id="2") instance_on_src3 = self.fake_instance(host="src3", name="INSTANCE_3", id="INSTANCE_3", flavor_id="3") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, instance_on_src2, instance_on_src3, ] self.m_c_helper.get_volume_list.return_value = [] self.m_priority.return_value = { "compute": ["mem_size"], } targets = self.strategy.filtered_targets() self.assertEqual(targets.get('instance'), 
[instance_on_src2, instance_on_src1]) def test_filtered_targets_instance_vcpu_num(self): flavor_1 = self.fake_flavor(id="1", vcpu_num="1") flavor_2 = self.fake_flavor(id="2", vcpu_num="2") flavor_3 = self.fake_flavor(id="3", vcpu_num="3") self.m_n_helper.get_flavor_list.return_value = [ flavor_1, flavor_2, flavor_3, ] instance_on_src1 = self.fake_instance(host="src1", name="INSTANCE_1", id="INSTANCE_1", flavor_id="1") instance_on_src2 = self.fake_instance(host="src2", name="INSTANCE_2", id="INSTANCE_2", flavor_id="2") instance_on_src3 = self.fake_instance(host="src3", name="INSTANCE_3", id="INSTANCE_3", flavor_id="3") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, instance_on_src2, instance_on_src3, ] self.m_c_helper.get_volume_list.return_value = [] self.m_priority.return_value = { "compute": ["vcpu_num"], } targets = self.strategy.filtered_targets() self.assertEqual(targets.get('instance'), [instance_on_src2, instance_on_src1]) def test_filtered_targets_instance_disk_size(self): flavor_1 = self.fake_flavor(id="1", disk_size="1") flavor_2 = self.fake_flavor(id="2", disk_size="2") flavor_3 = self.fake_flavor(id="3", disk_size="3") self.m_n_helper.get_flavor_list.return_value = [ flavor_1, flavor_2, flavor_3, ] instance_on_src1 = self.fake_instance(host="src1", name="INSTANCE_1", id="INSTANCE_1", flavor_id="1") instance_on_src2 = self.fake_instance(host="src2", name="INSTANCE_2", id="INSTANCE_2", flavor_id="2") instance_on_src3 = self.fake_instance(host="src3", name="INSTANCE_3", id="INSTANCE_3", flavor_id="3") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, instance_on_src2, instance_on_src3, ] self.m_c_helper.get_volume_list.return_value = [] self.m_priority.return_value = { "compute": ["disk_size"], } targets = self.strategy.filtered_targets() self.assertEqual(targets.get('instance'), [instance_on_src2, instance_on_src1]) def test_filtered_targets_instance_created_at(self): instance_on_src1 = self.fake_instance( 
host="src1", id="INSTANCE_1", name="INSTANCE_1", created_at="2017-10-30T00:00:00") instance_on_src2 = self.fake_instance( host="src2", id="INSTANCE_2", name="INSTANCE_2", created_at="1977-03-29T03:03:03") instance_on_src3 = self.fake_instance( host="src3", id="INSTANCE_3", name="INSTANCE_3", created_at="1977-03-29T03:03:03") self.m_n_helper.get_instance_list.return_value = [ instance_on_src1, instance_on_src2, instance_on_src3, ] self.m_c_helper.get_volume_list.return_value = [] self.m_priority.return_value = { "compute": ["created_at"], } targets = self.strategy.filtered_targets() self.assertEqual(targets.get('instance'), [instance_on_src2, instance_on_src1]) # StorageSpecSortFilter # def test_filtered_targets_storage_size(self): volume_on_src1 = self.fake_volume( host="src1@back1#pool1", size="1", id=volume_uuid_mapping["volume_1"], name="volume_1") volume_on_src2 = self.fake_volume( host="src2@back1#pool1", size="2", id=volume_uuid_mapping["volume_2"], name="volume_2") volume_on_src3 = self.fake_volume( host="src3@back2#pool1", size="3", id=volume_uuid_mapping["volume_3"], name="volume_3") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1, volume_on_src2, volume_on_src3, ] self.m_n_helper.get_instance_list.return_value = [] self.m_priority.return_value = { "storage": ["size"] } targets = self.strategy.filtered_targets() self.assertEqual(targets.get("volume"), [volume_on_src2, volume_on_src1]) def test_filtered_targets_storage_created_at(self): volume_on_src1 = self.fake_volume(host="src1@back1#pool1", id=volume_uuid_mapping["volume_1"], name="volume_1", created_at="2017-10-30T00:00:00") volume_on_src2 = self.fake_volume(host="src2@back1#pool1", id=volume_uuid_mapping["volume_2"], name="volume_2", created_at="1977-03-29T03:03:03") volume_on_src3 = self.fake_volume(host="src3@back2#pool1", id=volume_uuid_mapping["volume_3"], name="volume_3", created_at="1977-03-29T03:03:03") self.m_c_helper.get_volume_list.return_value = [ volume_on_src1, 
volume_on_src2, volume_on_src3, ] self.m_n_helper.get_instance_list.return_value = [] self.m_priority.return_value = { "storage": ["created_at"] } targets = self.strategy.filtered_targets() self.assertEqual(targets.get("volume"), [volume_on_src2, volume_on_src1]) ././@LongLink0000000000000000000000000000015300000000000011214 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_node_resource_consolidation.pypython-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_node_resource_consolidat0000664000175000017500000003702413656752270034526 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy import strategies from watcher import objects from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy from watcher.tests.objects import utils as obj_utils class TestNodeResourceConsolidation(TestBaseStrategy): def setUp(self): super(TestNodeResourceConsolidation, self).setUp() self.strategy = strategies.NodeResourceConsolidation( config=mock.Mock()) self.model = self.fake_c_cluster.generate_scenario_10() self.m_c_model.return_value = self.model self.strategy.input_parameters = {'host_choice': 'auto'} def test_pre_execute(self): planner = 'node_resource_consolidation' self.assertEqual('auto', self.strategy.host_choice) self.assertNotEqual(planner, self.strategy.planner) self.strategy.input_parameters.update( {'host_choice': 'specify'}) self.strategy.pre_execute() self.assertEqual(planner, self.strategy.planner) self.assertEqual('specify', self.strategy.host_choice) def test_check_resources(self): instance = [self.model.get_instance_by_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff")] dest = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") # test destination is null result = self.strategy.check_resources(instance, []) self.assertFalse(result) result = self.strategy.check_resources(instance, dest) self.assertTrue(result) self.assertEqual([], instance) def test_select_destination(self): instance0 = self.model.get_instance_by_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") source = self.model.get_node_by_instance_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") expected = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") # test destination is null result = self.strategy.select_destination(instance0, source, []) self.assertIsNone(result) nodes = list(self.model.get_all_compute_nodes().values()) nodes.remove(source) result = self.strategy.select_destination(instance0, 
source, nodes) self.assertEqual(expected, result) def test_add_migrate_actions_with_null(self): self.strategy.add_migrate_actions([], []) self.assertEqual([], self.strategy.solution.actions) self.strategy.add_migrate_actions(None, None) self.assertEqual([], self.strategy.solution.actions) def test_add_migrate_actions_with_auto(self): self.strategy.host_choice = 'auto' source = self.model.get_node_by_instance_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") nodes = list(self.model.get_all_compute_nodes().values()) nodes.remove(source) self.strategy.add_migrate_actions([source], nodes) expected = [{'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1', 'resource_name': 'INSTANCE_1', 'source_node': 'hostname_0'}}, {'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8ff', 'resource_name': 'INSTANCE_0', 'source_node': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_migrate_actions_with_specify(self): self.strategy.host_choice = 'specify' source = self.model.get_node_by_instance_uuid( "6ae05517-a512-462d-9d83-90c313b5a8ff") nodes = list(self.model.get_all_compute_nodes().values()) nodes.remove(source) self.strategy.add_migrate_actions([source], nodes) expected = [{'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_1', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1', 'resource_name': 'INSTANCE_1', 'source_node': 'hostname_0'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_2', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8ff', 'resource_name': 'INSTANCE_0', 'source_node': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_migrate_actions_with_no_action(self): self.strategy.host_choice = 'specify' source = self.model.get_node_by_uuid( 
"89dce55c-8e74-4402-b23f-32aaf216c971") dest = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") self.strategy.add_migrate_actions([source], [dest]) self.assertEqual([], self.strategy.solution.actions) def test_add_change_node_state_actions_with_exeception(self): self.assertRaises(exception.IllegalArgumentException, self.strategy.add_change_node_state_actions, [], 'down') def test_add_change_node_state_actions(self): node1 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") node2 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c97f") # disable two nodes status = element.ServiceState.DISABLED.value result = self.strategy.add_change_node_state_actions( [node1, node2], status) self.assertEqual([node1, node2], result) expected = [{ 'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource ' 'consolidation strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c972', 'resource_name': 'hostname_2', 'state': 'disabled'}}, { 'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c97f', 'resource_name': 'hostname_0', 'state': 'disabled'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_change_node_state_actions_one_disabled(self): node1 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c972") node2 = self.model.get_node_by_uuid( "89dce55c-8e74-4402-b23f-32aaf216c97f") # disable two nodes status = element.ServiceState.DISABLED.value # one enable, one disable node1.status = element.ServiceState.DISABLED.value result = self.strategy.add_change_node_state_actions( [node1, node2], status) self.assertEqual([node2], result) expected = [{ 'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': 
'89dce55c-8e74-4402-b23f-32aaf216c97f', 'resource_name': 'hostname_0', 'state': 'disabled'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_get_nodes_migrate_failed_return_null(self): self.strategy.audit = None result = self.strategy.get_nodes_migrate_failed() self.assertEqual([], result) self.strategy.audit = mock.Mock( audit_type=objects.audit.AuditType.ONESHOT.value) result = self.strategy.get_nodes_migrate_failed() self.assertEqual([], result) @mock.patch.object(objects.action.Action, 'list') def test_get_nodes_migrate_failed(self, mock_list): self.strategy.audit = mock.Mock( audit_type=objects.audit.AuditType.CONTINUOUS.value) fake_action = obj_utils.get_test_action( self.context, state=objects.action.State.FAILED, action_type='migrate', input_parameters={ 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f1'}) mock_list.return_value = [fake_action] result = self.strategy.get_nodes_migrate_failed() expected = self.model.get_node_by_uuid( '89dce55c-8e74-4402-b23f-32aaf216c97f') self.assertEqual([expected], result) def test_group_nodes_with_ONESHOT(self): self.strategy.audit = mock.Mock( audit_type=objects.audit.AuditType.ONESHOT.value) nodes = list(self.model.get_all_compute_nodes().values()) result = self.strategy.group_nodes(nodes) node0 = self.model.get_node_by_name('hostname_0') node1 = self.model.get_node_by_name('hostname_1') node2 = self.model.get_node_by_name('hostname_2') node3 = self.model.get_node_by_name('hostname_3') node4 = self.model.get_node_by_name('hostname_4') node5 = self.model.get_node_by_name('hostname_5') node6 = self.model.get_node_by_name('hostname_6') node7 = self.model.get_node_by_name('hostname_7') source_nodes = [node3, node4, node7] dest_nodes = [node2, node0, node1] self.assertIn(node5, result[0]) self.assertIn(node6, result[0]) self.assertEqual(source_nodes, result[1]) self.assertEqual(dest_nodes, result[2]) @mock.patch.object(objects.action.Action, 'list') def test_group_nodes_with_CONTINUOUS(self, 
mock_list): self.strategy.audit = mock.Mock( audit_type=objects.audit.AuditType.CONTINUOUS.value) fake_action = obj_utils.get_test_action( self.context, state=objects.action.State.FAILED, action_type='migrate', input_parameters={ 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6'}) mock_list.return_value = [fake_action] nodes = list(self.model.get_all_compute_nodes().values()) result = self.strategy.group_nodes(nodes) node0 = self.model.get_node_by_name('hostname_0') node1 = self.model.get_node_by_name('hostname_1') node2 = self.model.get_node_by_name('hostname_2') node3 = self.model.get_node_by_name('hostname_3') node4 = self.model.get_node_by_name('hostname_4') node5 = self.model.get_node_by_name('hostname_5') node6 = self.model.get_node_by_name('hostname_6') node7 = self.model.get_node_by_name('hostname_7') source_nodes = [node4, node7] dest_nodes = [node3, node2, node0, node1] self.assertIn(node5, result[0]) self.assertIn(node6, result[0]) self.assertEqual(source_nodes, result[1]) self.assertEqual(dest_nodes, result[2]) @mock.patch.object(objects.action.Action, 'list') def test_execute_with_auto(self, mock_list): fake_action = obj_utils.get_test_action( self.context, state=objects.action.State.FAILED, action_type='migrate', input_parameters={ 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6'}) mock_list.return_value = [fake_action] mock_audit = mock.Mock( audit_type=objects.audit.AuditType.CONTINUOUS.value) self.strategy.host_choice = 'auto' self.strategy.do_execute(mock_audit) expected = [ {'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c975', 'resource_name': 'hostname_5', 'state': 'disabled'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'disabled_reason': 'Watcher node resource consolidation ' 'strategy', 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c974', 'resource_name': 'hostname_4', 
'state': 'disabled'}}, {'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f7', 'resource_name': 'INSTANCE_7', 'source_node': 'hostname_4'}}, {'action_type': 'migrate', 'input_parameters': { 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f8', 'resource_name': 'INSTANCE_8', 'source_node': 'hostname_7'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c975', 'resource_name': 'hostname_5', 'state': 'enabled'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': '89dce55c-8e74-4402-b23f-32aaf216c974', 'resource_name': 'hostname_4', 'state': 'enabled'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_execute_with_specify(self): mock_audit = mock.Mock( audit_type=objects.audit.AuditType.ONESHOT.value) self.strategy.host_choice = 'specify' self.strategy.do_execute(mock_audit) expected = [ {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_2', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f6', 'resource_name': 'INSTANCE_6', 'source_node': 'hostname_3'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_0', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f7', 'resource_name': 'INSTANCE_7', 'source_node': 'hostname_4'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': 'hostname_1', 'migration_type': 'live', 'resource_id': '6ae05517-a512-462d-9d83-90c313b5a8f8', 'resource_name': 'INSTANCE_8', 'source_node': 'hostname_7'}}] self.assertEqual(expected, self.strategy.solution.actions) ././@LongLink0000000000000000000000000000015000000000000011211 Lustar 
00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_storage_capacity_balance.pypython-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_storage_capacity_balance0000664000175000017500000002227513656752270034443 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Authors: Canwei Li # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock from watcher.common import cinder_helper from watcher.common import clients from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestStorageCapacityBalance(TestBaseStrategy): def setUp(self): super(TestStorageCapacityBalance, self).setUp() def test_fake_pool(name, free, total, allocated): fake_pool = mock.MagicMock() fake_pool.name = name fake_pool.pool_name = name.split('#')[1] fake_pool.volume_backend_name = name.split('#')[1] fake_pool.free_capacity_gb = free fake_pool.total_capacity_gb = total fake_pool.allocated_capacity_gb = allocated fake_pool.max_over_subscription_ratio = 1.0 return fake_pool self.fake_pool1 = test_fake_pool('host1@IPSAN-1#pool1', '60', '100', '90') self.fake_pool2 = test_fake_pool('host1@IPSAN-1#pool2', '20', '100', '80') self.fake_pool3 = test_fake_pool('host1@IPSAN-1#local_vstorage', '20', '100', '80') self.fake_pools = 
[self.fake_pool1, self.fake_pool2, self.fake_pool3] def test_fake_vol(id, name, size, status, bootable, migration_status=None, volume_type=None): fake_vol = mock.MagicMock() fake_vol.id = id fake_vol.name = name fake_vol.size = size fake_vol.status = status fake_vol.bootable = bootable fake_vol.migration_status = migration_status fake_vol.volume_type = volume_type setattr(fake_vol, 'os-vol-host-attr:host', 'host1@IPSAN-1#pool2') return fake_vol self.fake_vol1 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd861', 'test_volume1', 4, 'available', 'true', 'success', volume_type='type2') self.fake_vol2 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd862', 'test_volume2', 10, 'in-use', 'false') self.fake_vol3 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd863', 'test_volume3', 4, 'in-use', 'true', volume_type='type2') self.fake_vol4 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd864', 'test_volume4', 10, 'error', 'true') self.fake_vol5 = test_fake_vol('922d4762-0bc5-4b30-9cb9-48ab644dd865', 'test_volume5', 15, 'in-use', 'true') self.fake_volumes = [self.fake_vol1, self.fake_vol2, self.fake_vol3, self.fake_vol4, self.fake_vol5] def test_fake_snap(vol_id): fake_snap = mock.MagicMock() fake_snap.volume_id = vol_id return fake_snap self.fake_snap = [test_fake_snap( '922d4762-0bc5-4b30-9cb9-48ab644dd865')] def test_fake_volume_type(type_name, extra_specs): fake_type = mock.MagicMock() fake_type.name = type_name fake_type.extra_specs = extra_specs return fake_type self.fake_types = [test_fake_volume_type( 'type1', {'volume_backend_name': 'pool1'}), test_fake_volume_type( 'type2', {'volume_backend_name': 'pool2'}) ] self.fake_c_cluster = faker_cluster_state.FakerStorageModelCollector() osc = clients.OpenStackClients() p_cinder = mock.patch.object(osc, 'cinder') p_cinder.start() self.addCleanup(p_cinder.stop) self.m_cinder = cinder_helper.CinderHelper(osc=osc) self.m_cinder.get_storage_pool_list = mock.Mock( return_value=self.fake_pools) self.m_cinder.get_volume_list = 
mock.Mock( return_value=self.fake_volumes) self.m_cinder.get_volume_snapshots_list = mock.Mock( return_value=self.fake_snap) self.m_cinder.get_volume_type_list = mock.Mock( return_value=self.fake_types) model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.strategy = strategies.StorageCapacityBalance( config=mock.Mock(), osc=osc) self.strategy._cinder = self.m_cinder self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update( {'volume_threshold': 80.0}) self.strategy.volume_threshold = 80.0 def test_get_pools(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) self.assertEqual(len(pools), 2) def test_get_volumes(self): volumes = self.strategy.get_volumes(self.m_cinder) self.assertEqual(len(volumes), 3) def test_group_pools(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) over_pools, under_pools = self.strategy.group_pools(pools, 0.50) self.assertEqual(len(under_pools), 1) self.assertEqual(len(over_pools), 1) over_pools, under_pools = self.strategy.group_pools(pools, 0.85) self.assertEqual(len(under_pools), 2) self.assertEqual(len(over_pools), 0) over_pools, under_pools = self.strategy.group_pools(pools, 0.30) self.assertEqual(len(under_pools), 0) self.assertEqual(len(over_pools), 2) def test_get_volume_type_by_name(self): vol_type = self.strategy.get_volume_type_by_name( self.m_cinder, 'pool1') self.assertEqual(len(vol_type), 1) vol_type = self.strategy.get_volume_type_by_name( self.m_cinder, 'ks3200') self.assertEqual(len(vol_type), 0) def test_check_pool_type(self): pool_type = self.strategy.check_pool_type( self.fake_vol3, self.fake_pool1) self.assertIsNotNone(pool_type) pool_type = self.strategy.check_pool_type( self.fake_vol3, self.fake_pool2) self.assertIsNone(pool_type) def test_migrate_fit(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) 
self.strategy.source_pools, self.strategy.dest_pools = ( self.strategy.group_pools(pools, 0.60)) target_pool = self.strategy.migrate_fit(self.fake_vol2, 0.60) self.assertIsNotNone(target_pool) target_pool = self.strategy.migrate_fit(self.fake_vol3, 0.50) self.assertIsNone(target_pool) target_pool = self.strategy.migrate_fit(self.fake_vol5, 0.60) self.assertIsNone(target_pool) def test_retype_fit(self): self.strategy.config.ex_pools = "local_vstorage" pools = self.strategy.get_pools(self.m_cinder) self.strategy.source_pools, self.strategy.dest_pools = ( self.strategy.group_pools(pools, 0.50)) target_pool = self.strategy.retype_fit(self.fake_vol1, 0.50) self.assertIsNotNone(target_pool) target_pool = self.strategy.retype_fit(self.fake_vol2, 0.50) self.assertIsNone(target_pool) target_pool = self.strategy.retype_fit(self.fake_vol3, 0.50) self.assertIsNotNone(target_pool) target_pool = self.strategy.retype_fit(self.fake_vol5, 0.60) self.assertIsNone(target_pool) def test_execute(self): self.strategy.input_parameters.update( {'volume_threshold': 45.0}) self.strategy.config.ex_pools = "local_vstorage" solution = self.strategy.execute() self.assertEqual(len(solution.actions), 1) setattr(self.fake_pool1, 'free_capacity_gb', '60') self.strategy.input_parameters.update( {'volume_threshold': 50.0}) solution = self.strategy.execute() self.assertEqual(len(solution.actions), 2) setattr(self.fake_pool1, 'free_capacity_gb', '60') self.strategy.input_parameters.update( {'volume_threshold': 60.0}) solution = self.strategy.execute() self.assertEqual(len(solution.actions), 3) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_base.py0000664000175000017500000001204513656752270031010 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.common import exception from watcher.decision_engine.datasources import manager from watcher.decision_engine.model import model_root from watcher.decision_engine.strategy import strategies from watcher.tests import base from watcher.tests.decision_engine.model import faker_cluster_state class TestBaseStrategy(base.TestCase): def setUp(self): super(TestBaseStrategy, self).setUp() # fake cluster self.fake_c_cluster = faker_cluster_state.FakerModelCollector() p_c_model = mock.patch.object( strategies.BaseStrategy, "compute_model", new_callable=mock.PropertyMock) self.m_c_model = p_c_model.start() self.addCleanup(p_c_model.stop) p_audit_scope = mock.patch.object( strategies.BaseStrategy, "audit_scope", new_callable=mock.PropertyMock) self.m_audit_scope = p_audit_scope.start() self.addCleanup(p_audit_scope.stop) self.m_audit_scope.return_value = mock.Mock() self.m_c_model.return_value = model_root.ModelRoot() self.strategy = strategies.DummyStrategy(config=mock.Mock()) class TestBaseStrategyDatasource(TestBaseStrategy): def setUp(self): super(TestBaseStrategyDatasource, self).setUp() self.strategy = strategies.DummyStrategy( config=mock.Mock(datasources=None)) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_global_preference(self, m_conf, m_manager): """Test if the global preference is used""" m_conf.watcher_datasources.datasources = \ ['gnocchi', 'monasca', 'ceilometer'] # Make sure we access the property and not the underlying 
function. m_manager.return_value.get_backend.return_value = \ mock.NonCallableMock() # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=m_conf.watcher_datasources, osc=None) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_global_preference_reverse(self, m_conf, m_manager): """Test if the global preference is used with another order""" m_conf.watcher_datasources.datasources = \ ['ceilometer', 'monasca', 'gnocchi'] # Make sure we access the property and not the underlying function. m_manager.return_value.get_backend.return_value = \ mock.NonCallableMock() # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=m_conf.watcher_datasources, osc=None) @mock.patch.object(strategies.BaseStrategy, 'osc', None) @mock.patch.object(manager, 'DataSourceManager') @mock.patch.object(strategies.base, 'CONF') def test_strategy_preference_override(self, m_conf, m_manager): """Test if the global preference can be overridden""" datasources = mock.Mock(datasources=['ceilometer']) self.strategy = strategies.DummyStrategy( config=datasources) m_conf.watcher_datasources.datasources = \ ['ceilometer', 'monasca', 'gnocchi'] # Access the property so that the configuration is read in order to # get the correct datasource self.strategy.datasource_backend m_manager.assert_called_once_with( config=datasources, osc=None) class TestBaseStrategyException(TestBaseStrategy): def setUp(self): super(TestBaseStrategyException, self).setUp() def test_exception_model(self): self.m_c_model.return_value = None self.assertRaises( exception.ClusterStateNotDefined, self.strategy.execute) def test_exception_stale_cdm(self): 
self.fake_c_cluster.set_cluster_data_model_as_stale() self.m_c_model.return_value = self.fake_c_cluster.cluster_data_model self.assertRaises( # TODO(Dantali0n) This should return ClusterStale, # improve set_cluster_data_model_as_stale(). exception.ClusterStateNotDefined, self.strategy.execute) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_uniform_airflow.py0000664000175000017500000001644013656752270033303 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import collections import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import ceilometer_metrics from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestUniformAirflow(TestBaseStrategy): scenarios = [ ("Ceilometer", {"datasource": "ceilometer", "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestUniformAirflow, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_datasource = mock.patch.object( strategies.UniformAirflow, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( statistic_aggregation=self.fake_metrics.mock_get_statistics, NAME=self.fake_metrics.NAME) self.strategy = strategies.UniformAirflow( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'threshold_airflow': 400.0, 'threshold_inlet_t': 28.0, 'threshold_power': 350.0, 'period': 300}) self.strategy.threshold_airflow = 400 self.strategy.threshold_inlet_t = 28 self.strategy.threshold_power = 350 self._period = 300 self.strategy.pre_execute() def test_calc_used_resource(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model node = model.get_node_by_uuid('Node_0') cores_used, mem_used, disk_used = ( self.strategy.calculate_used_resource(node)) self.assertEqual((cores_used, mem_used, disk_used), (25, 4, 40)) def test_group_hosts_by_airflow(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model 
self.strategy.threshold_airflow = 300 n1, n2 = self.strategy.group_hosts_by_airflow() # print n1, n2, avg, w_map self.assertEqual(n1[0]['node'].uuid, 'Node_0') self.assertEqual(n2[0]['node'].uuid, 'Node_1') def test_choose_instance_to_migrate(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 22 n1, n2 = self.strategy.group_hosts_by_airflow() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) self.assertEqual(instance_to_mig[0].uuid, 'Node_0') self.assertEqual(len(instance_to_mig[1]), 1) self.assertIn(instance_to_mig[1][0].uuid, {'cae81432-1631-4d4e-b29c-6f3acdcde906', '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) def test_choose_instance_to_migrate_all(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 25 n1, n2 = self.strategy.group_hosts_by_airflow() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) self.assertEqual(instance_to_mig[0].uuid, 'Node_0') self.assertEqual(len(instance_to_mig[1]), 2) self.assertEqual({'cae81432-1631-4d4e-b29c-6f3acdcde906', '73b09e16-35b7-4922-804e-e8f5d9b740fc'}, {inst.uuid for inst in instance_to_mig[1]}) def test_choose_instance_notfound(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 22 n1, n2 = self.strategy.group_hosts_by_airflow() instances = model.get_all_instances() [model.remove_instance(inst) for inst in instances.values()] instance_to_mig = self.strategy.choose_instance_to_migrate(n1) self.assertIsNone(instance_to_mig) def test_filter_destination_hosts(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 
22 n1, n2 = self.strategy.group_hosts_by_airflow() instance_to_mig = self.strategy.choose_instance_to_migrate(n1) dest_hosts = self.strategy.filter_destination_hosts( n2, instance_to_mig[1]) self.assertEqual(len(dest_hosts), 1) self.assertEqual(dest_hosts[0]['node'].uuid, 'Node_1') self.assertIn(instance_to_mig[1][0].uuid, {'cae81432-1631-4d4e-b29c-6f3acdcde906', '73b09e16-35b7-4922-804e-e8f5d9b740fc'}) def test_execute_no_workload(self): self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 25 self.strategy.threshold_power = 300 model = self.fake_c_cluster.\ generate_scenario_4_with_1_node_no_instance() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual([], solution.actions) def test_execute(self): self.strategy.threshold_airflow = 300 self.strategy.threshold_inlet_t = 25 self.strategy.threshold_power = 300 model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) num_migrations = actions_counter.get("migrate", 0) self.assertEqual(num_migrations, 2) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_7_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_host_maintenance.py0000775000175000017500000002403613656752270033423 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 chinac.com # # Authors: suzhengwei # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestHostMaintenance(TestBaseStrategy): def setUp(self): super(TestHostMaintenance, self).setUp() self.strategy = strategies.HostMaintenance(config=mock.Mock()) def test_get_instance_state_str(self): mock_instance = mock.MagicMock(state="active") self.assertEqual("active", self.strategy.get_instance_state_str(mock_instance)) mock_instance.state = element.InstanceState("active") self.assertEqual("active", self.strategy.get_instance_state_str(mock_instance)) mock_instance.state = None self.assertRaises( exception.WatcherException, self.strategy.get_instance_state_str, mock_instance) def test_get_node_status_str(self): mock_node = mock.MagicMock(status="enabled") self.assertEqual("enabled", self.strategy.get_node_status_str(mock_node)) mock_node.status = element.ServiceState("enabled") self.assertEqual("enabled", self.strategy.get_node_status_str(mock_node)) mock_node.status = None self.assertRaises( exception.WatcherException, self.strategy.get_node_status_str, mock_node) def test_get_node_capacity(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid("Node_0") node_capacity = dict(cpu=40, ram=132, disk=250) self.assertEqual(node_capacity, self.strategy.get_node_capacity(node_0)) def test_host_fits(self): model = 
self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid("Node_0") node_1 = model.get_node_by_uuid("Node_1") self.assertTrue(self.strategy.host_fits(node_0, node_1)) def test_add_action_enable_compute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') self.strategy.add_action_enable_compute_node(node_0) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'enabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_action_maintain_compute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') self.strategy.add_action_maintain_compute_node(node_0) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'disabled', 'disabled_reason': 'watcher_maintaining', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_instance_migration(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') node_1 = model.get_node_by_uuid('Node_1') instance_0 = model.get_instance_by_uuid("INSTANCE_0") self.strategy.instance_migration(instance_0, node_0, node_1) self.assertEqual(1, len(self.strategy.solution.actions)) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': node_1.uuid, 'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_0.uuid, 'resource_name': instance_0.name }}] self.assertEqual(expected, self.strategy.solution.actions) def test_instance_migration_without_dest_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') instance_0 = 
model.get_instance_by_uuid("INSTANCE_0") self.strategy.instance_migration(instance_0, node_0) self.assertEqual(1, len(self.strategy.solution.actions)) expected = [{'action_type': 'migrate', 'input_parameters': {'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_0.uuid, 'resource_name': instance_0.name }}] self.assertEqual(expected, self.strategy.solution.actions) def test_host_migration(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') node_1 = model.get_node_by_uuid('Node_1') instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_1 = model.get_instance_by_uuid("INSTANCE_1") self.strategy.host_migration(node_0, node_1) self.assertEqual(2, len(self.strategy.solution.actions)) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': node_1.uuid, 'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_0.uuid, 'resource_name': instance_0.name }}, {'action_type': 'migrate', 'input_parameters': {'destination_node': node_1.uuid, 'source_node': node_0.uuid, 'migration_type': 'live', 'resource_id': instance_1.uuid, 'resource_name': instance_1.name }}] self.assertIn(expected[0], self.strategy.solution.actions) self.assertIn(expected[1], self.strategy.solution.actions) def test_safe_maintain(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') node_1 = model.get_node_by_uuid('Node_1') self.assertFalse(self.strategy.safe_maintain(node_0)) self.assertFalse(self.strategy.safe_maintain(node_1)) model = self.fake_c_cluster.\ generate_scenario_1_with_all_nodes_disable() self.m_c_model.return_value = model node_0 = model.get_node_by_uuid('Node_0') self.assertTrue(self.strategy.safe_maintain(node_0)) def test_try_maintain(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model node_1 = 
model.get_node_by_uuid('Node_1') self.strategy.try_maintain(node_1) self.assertEqual(2, len(self.strategy.solution.actions)) def test_exception_compute_node_not_found(self): self.m_c_model.return_value = self.fake_c_cluster.build_scenario_1() self.assertRaises(exception.ComputeNodeNotFound, self.strategy.execute) def test_strategy(self): model = self.fake_c_cluster. \ generate_scenario_9_with_3_active_plus_1_disabled_nodes() self.m_c_model.return_value = model node_2 = model.get_node_by_uuid('Node_2') node_3 = model.get_node_by_uuid('Node_3') instance_4 = model.get_instance_by_uuid("INSTANCE_4") result = self.strategy.pre_execute() self.assertIsNone(result) self.strategy.input_parameters = {"maintenance_node": 'hostname_2', "backup_node": 'hostname_3'} self.strategy.do_execute() expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': 'Node_3', 'resource_name': 'hostname_3', 'state': 'enabled'}}, {'action_type': 'change_nova_service_state', 'input_parameters': { 'resource_id': 'Node_2', 'resource_name': 'hostname_2', 'state': 'disabled', 'disabled_reason': 'watcher_maintaining'}}, {'action_type': 'migrate', 'input_parameters': { 'destination_node': node_3.uuid, 'source_node': node_2.uuid, 'migration_type': 'live', 'resource_id': instance_4.uuid, 'resource_name': instance_4.name}}] self.assertEqual(expected, self.strategy.solution.actions) result = self.strategy.post_execute() self.assertIsNone(result) ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidation.pypython-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_vm_workload_consolidatio0000664000175000017500000004107413656752270034546 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Authors: Vojtech CIMA # Bruno GRAZIOLI # Sean MURPHY # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock from watcher.decision_engine.model import element from watcher.decision_engine.solution.base import BaseSolution from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import faker_cluster_and_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestVMWorkloadConsolidation(TestBaseStrategy): scenarios = [ ("Ceilometer", {"datasource": "ceilometer", "fake_datasource_cls": faker_cluster_and_metrics.FakeCeilometerMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": faker_cluster_and_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestVMWorkloadConsolidation, self).setUp() # fake cluster self.fake_c_cluster = faker_cluster_and_metrics.FakerModelCollector() p_datasource = mock.patch.object( strategies.VMWorkloadConsolidation, 'datasource_backend', new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) # fake metrics self.fake_metrics = self.fake_datasource_cls( self.m_c_model.return_value) self.m_datasource.return_value = mock.Mock( get_instance_cpu_usage=( self.fake_metrics.get_instance_cpu_util), get_instance_ram_usage=( self.fake_metrics.get_instance_ram_util), get_instance_root_disk_size=( self.fake_metrics.get_instance_disk_root_size), ) self.strategy = strategies.VMWorkloadConsolidation( config=mock.Mock(datasources=self.datasource)) def test_get_instance_utilization(self): model = self.fake_c_cluster.generate_scenario_1() 
self.m_c_model.return_value = model self.fake_metrics.model = model instance_0 = model.get_instance_by_uuid("INSTANCE_0") instance_util = dict(cpu=1.0, ram=1, disk=10) self.assertEqual( instance_util, self.strategy.get_instance_utilization(instance_0)) def test_get_node_utilization(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node_0 = model.get_node_by_uuid("Node_0") node_util = dict(cpu=1.0, ram=1, disk=10) self.assertEqual( node_util, self.strategy.get_node_utilization(node_0)) def test_get_node_capacity(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node_0 = model.get_node_by_uuid("Node_0") node_util = dict(cpu=40, ram=64, disk=250) self.assertEqual(node_util, self.strategy.get_node_capacity(node_0)) def test_get_relative_node_utilization(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model node = model.get_node_by_uuid('Node_0') rhu = self.strategy.get_relative_node_utilization(node) expected_rhu = {'disk': 0.04, 'ram': 0.015625, 'cpu': 0.025} self.assertEqual(expected_rhu, rhu) def test_get_relative_cluster_utilization(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model cru = self.strategy.get_relative_cluster_utilization() expected_cru = {'cpu': 0.05, 'disk': 0.05, 'ram': 0.0234375} self.assertEqual(expected_cru, cru) def test_add_migration_with_active_state(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) self.strategy.add_migration(instance, n1, n2) self.assertEqual(1, len(self.strategy.solution.actions)) expected = 
{'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': instance.uuid, 'resource_name': instance.name}} self.assertEqual(expected, self.strategy.solution.actions[0]) def test_add_migration_with_paused_state(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) setattr(instance, 'state', element.InstanceState.ERROR.value) self.strategy.add_migration(instance, n1, n2) self.assertEqual(0, len(self.strategy.solution.actions)) setattr(instance, 'state', element.InstanceState.PAUSED.value) self.strategy.add_migration(instance, n1, n2) self.assertEqual(1, len(self.strategy.solution.actions)) expected = {'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': instance.uuid, 'resource_name': instance.name}} self.assertEqual(expected, self.strategy.solution.actions[0]) def test_is_overloaded(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} res = self.strategy.is_overloaded(n1, cc) self.assertFalse(res) cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} res = self.strategy.is_overloaded(n1, cc) self.assertFalse(res) cc = {'cpu': 0.024, 'ram': 1.0, 'disk': 1.0} res = self.strategy.is_overloaded(n1, cc) self.assertTrue(res) def test_instance_fits(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n = model.get_node_by_uuid('Node_1') instance0 = model.get_instance_by_uuid('INSTANCE_0') cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} res = 
self.strategy.instance_fits(instance0, n, cc) self.assertTrue(res) cc = {'cpu': 0.025, 'ram': 1.0, 'disk': 1.0} res = self.strategy.instance_fits(instance0, n, cc) self.assertFalse(res) def test_add_action_enable_compute_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n = model.get_node_by_uuid('Node_0') self.strategy.add_action_enable_compute_node(n) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': {'state': 'enabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_add_action_disable_node(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n = model.get_node_by_uuid('Node_0') self.strategy.add_action_disable_node(n) expected = [{'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}}] self.assertEqual(expected, self.strategy.solution.actions) def test_disable_unused_nodes(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) self.strategy.disable_unused_nodes() self.assertEqual(0, len(self.strategy.solution.actions)) # Migrate VM to free the node self.strategy.add_migration(instance, n1, n2) self.strategy.disable_unused_nodes() expected = {'action_type': 'change_nova_service_state', 'input_parameters': { 'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': 'Node_0', 'resource_name': 'hostname_0'}} self.assertEqual(2, len(self.strategy.solution.actions)) self.assertEqual(expected, self.strategy.solution.actions[1]) def 
test_offload_phase(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} self.strategy.offload_phase(cc) expected = [] self.assertEqual(expected, self.strategy.solution.actions) def test_consolidation_phase(self): model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') instance_uuid = 'INSTANCE_0' instance = model.get_instance_by_uuid(instance_uuid) cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} self.strategy.consolidation_phase(cc) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': instance.uuid, 'resource_name': instance.name}}] self.assertEqual(expected, self.strategy.solution.actions) def test_strategy(self): model = self.fake_c_cluster.generate_scenario_2() self.m_c_model.return_value = model self.fake_metrics.model = model result = self.strategy.pre_execute() self.assertIsNone(result) n1 = model.get_node_by_uuid('Node_0') self.strategy.get_relative_cluster_utilization = mock.MagicMock() self.strategy.do_execute() n2_name = self.strategy.solution.actions[0][ 'input_parameters']['destination_node'] n2 = model.get_node_by_name(n2_name) n3_uuid = self.strategy.solution.actions[2][ 'input_parameters']['resource_id'] n3 = model.get_node_by_uuid(n3_uuid) n4_uuid = self.strategy.solution.actions[3][ 'input_parameters']['resource_id'] n4 = model.get_node_by_uuid(n4_uuid) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_3', 'resource_name': ''}}, {'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'source_node': n1.hostname, 'migration_type': 'live', 'resource_id': 
'INSTANCE_1', 'resource_name': ''}}, {'action_type': 'change_nova_service_state', 'input_parameters': {'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': n3.uuid, 'resource_name': n3.hostname}}, {'action_type': 'change_nova_service_state', 'input_parameters': {'state': 'disabled', 'disabled_reason': 'watcher_disabled', 'resource_id': n4.uuid, 'resource_name': n4.hostname}}] self.assertEqual(expected, self.strategy.solution.actions) compute_nodes_count = len(self.strategy.get_available_compute_nodes()) number_of_released_nodes = self.strategy.number_of_released_nodes number_of_migrations = self.strategy.number_of_migrations with mock.patch.object( BaseSolution, 'set_efficacy_indicators' ) as mock_set_efficacy_indicators: result = self.strategy.post_execute() mock_set_efficacy_indicators.assert_called_once_with( compute_nodes_count=compute_nodes_count, released_compute_nodes_count=number_of_released_nodes, instance_migrations_count=number_of_migrations ) def test_strategy2(self): model = self.fake_c_cluster.generate_scenario_3() self.m_c_model.return_value = model self.fake_metrics.model = model n1 = model.get_node_by_uuid('Node_0') n2 = model.get_node_by_uuid('Node_1') cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} self.strategy.offload_phase(cc) expected = [{'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_6', 'resource_name': '', 'source_node': n1.hostname}}, {'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_7', 'resource_name': '', 'source_node': n1.hostname}}, {'action_type': 'migrate', 'input_parameters': {'destination_node': n2.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_8', 'resource_name': '', 'source_node': n1.hostname}}] self.assertEqual(expected, self.strategy.solution.actions) self.strategy.consolidation_phase(cc) expected.append({'action_type': 'migrate', 
'input_parameters': {'destination_node': n1.hostname, 'migration_type': 'live', 'resource_id': 'INSTANCE_7', 'resource_name': '', 'source_node': n2.hostname}}) self.assertEqual(expected, self.strategy.solution.actions) self.strategy.optimize_solution() del expected[3] del expected[1] self.assertEqual(expected, self.strategy.solution.actions) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_workload_balance.py0000664000175000017500000001417113656752270033367 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import collections import mock from watcher.applier.loading import default from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.model import ceilometer_metrics from watcher.tests.decision_engine.model import gnocchi_metrics from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestWorkloadBalance(TestBaseStrategy): scenarios = [ ("Ceilometer", {"datasource": "ceilometer", "fake_datasource_cls": ceilometer_metrics.FakeCeilometerMetrics}), ("Gnocchi", {"datasource": "gnocchi", "fake_datasource_cls": gnocchi_metrics.FakeGnocchiMetrics}), ] def setUp(self): super(TestWorkloadBalance, self).setUp() # fake metrics self.fake_metrics = self.fake_datasource_cls() p_datasource = mock.patch.object( strategies.WorkloadBalance, "datasource_backend", new_callable=mock.PropertyMock) self.m_datasource = p_datasource.start() self.addCleanup(p_datasource.stop) self.m_datasource.return_value = mock.Mock( statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) self.strategy = strategies.WorkloadBalance( config=mock.Mock(datasource=self.datasource)) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update({'metrics': 'instance_cpu_usage', 'threshold': 25.0, 'period': 300, 'granularity': 300}) self.strategy.threshold = 25.0 self.strategy._period = 300 self.strategy._meter = 'instance_cpu_usage' self.strategy._granularity = 300 def test_group_hosts_by_cpu_util(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model self.strategy.threshold = 30 n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() self.assertEqual(n1[0]['compute_node'].uuid, 'Node_0') self.assertEqual(n2[0]['compute_node'].uuid, 'Node_1') self.assertEqual(avg, 8.0) def test_group_hosts_by_ram_util(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model 
self.strategy._meter = 'instance_ram_usage' self.strategy.threshold = 30 n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() self.assertEqual(n1[0]['compute_node'].uuid, 'Node_0') self.assertEqual(n2[0]['compute_node'].uuid, 'Node_1') self.assertEqual(avg, 33.0) def test_choose_instance_to_migrate(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() instance_to_mig = self.strategy.choose_instance_to_migrate( n1, avg, w_map) self.assertEqual(instance_to_mig[0].uuid, 'Node_0') self.assertEqual(instance_to_mig[1].uuid, "73b09e16-35b7-4922-804e-e8f5d9b740fc") def test_choose_instance_notfound(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() instances = model.get_all_instances() [model.remove_instance(inst) for inst in instances.values()] instance_to_mig = self.strategy.choose_instance_to_migrate( n1, avg, w_map) self.assertIsNone(instance_to_mig) def test_filter_destination_hosts(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model self.strategy.datasource = mock.MagicMock( statistic_aggregation=self.fake_metrics.mock_get_statistics_wb) n1, n2, avg, w_map = self.strategy.group_hosts_by_cpu_or_ram_util() instance_to_mig = self.strategy.choose_instance_to_migrate( n1, avg, w_map) dest_hosts = self.strategy.filter_destination_hosts( n2, instance_to_mig[1], avg, w_map) self.assertEqual(len(dest_hosts), 1) self.assertEqual(dest_hosts[0]['compute_node'].uuid, 'Node_1') def test_execute_no_workload(self): model = self.fake_c_cluster.\ generate_scenario_4_with_1_node_no_instance() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual([], solution.actions) def test_execute(self): model = 
self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() actions_counter = collections.Counter( [action.get('action_type') for action in solution.actions]) num_migrations = actions_counter.get("migrate", 0) self.assertEqual(num_migrations, 1) def test_check_parameters(self): model = self.fake_c_cluster.generate_scenario_6_with_2_nodes() self.m_c_model.return_value = model solution = self.strategy.execute() loader = default.DefaultActionLoader() for action in solution.actions: loaded_action = loader.load(action['action_type']) loaded_action.input_parameters = action['input_parameters'] loaded_action.validate_parameters() python-watcher-4.0.0/watcher/tests/decision_engine/strategy/strategies/test_saving_energy.py0000664000175000017500000002057713656752270032747 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from watcher.common import clients from watcher.common import utils from watcher.decision_engine.strategy import strategies from watcher.tests.decision_engine.strategy.strategies.test_base \ import TestBaseStrategy class TestSavingEnergy(TestBaseStrategy): def setUp(self): super(TestSavingEnergy, self).setUp() mock_node1_dict = { 'uuid': '922d4762-0bc5-4b30-9cb9-48ab644dd861'} mock_node2_dict = { 'uuid': '922d4762-0bc5-4b30-9cb9-48ab644dd862'} mock_node1 = mock.Mock(**mock_node1_dict) mock_node2 = mock.Mock(**mock_node2_dict) self.fake_nodes = [mock_node1, mock_node2] p_ironic = mock.patch.object( clients.OpenStackClients, 'ironic') self.m_ironic = p_ironic.start() self.addCleanup(p_ironic.stop) p_nova = mock.patch.object( clients.OpenStackClients, 'nova') self.m_nova = p_nova.start() self.addCleanup(p_nova.stop) self.m_ironic.node.list.return_value = self.fake_nodes self.m_c_model.return_value = self.fake_c_cluster.generate_scenario_1() self.strategy = strategies.SavingEnergy( config=mock.Mock()) self.strategy.input_parameters = utils.Struct() self.strategy.input_parameters.update( {'free_used_percent': 10.0, 'min_free_hosts_num': 1}) self.strategy.free_used_percent = 10.0 self.strategy.min_free_hosts_num = 1 self.strategy._ironic_client = self.m_ironic self.strategy._nova_client = self.m_nova def test_get_hosts_pool_with_vms_node_pool(self): mock_node1_dict = { 'extra': {'compute_node_id': 1}, 'power_state': 'power on'} mock_node2_dict = { 'extra': {'compute_node_id': 2}, 'power_state': 'power off'} mock_node1 = mock.Mock(**mock_node1_dict) mock_node2 = mock.Mock(**mock_node2_dict) self.m_ironic.node.get.side_effect = [mock_node1, mock_node2] mock_hyper1 = mock.Mock() mock_hyper2 = mock.Mock() mock_hyper1.to_dict.return_value = { 'running_vms': 2, 'service': {'host': 'hostname_0'}, 'state': 'up'} mock_hyper2.to_dict.return_value = { 'running_vms': 2, 'service': {'host': 'hostname_1'}, 'state': 'up'} self.m_nova.hypervisors.get.side_effect = 
[mock_hyper1, mock_hyper2] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 2) self.assertEqual(len(self.strategy.free_poweron_node_pool), 0) self.assertEqual(len(self.strategy.free_poweroff_node_pool), 0) def test_get_hosts_pool_free_poweron_node_pool(self): mock_node1_dict = { 'extra': {'compute_node_id': 1}, 'power_state': 'power on'} mock_node2_dict = { 'extra': {'compute_node_id': 2}, 'power_state': 'power on'} mock_node1 = mock.Mock(**mock_node1_dict) mock_node2 = mock.Mock(**mock_node2_dict) self.m_ironic.node.get.side_effect = [mock_node1, mock_node2] mock_hyper1 = mock.Mock() mock_hyper2 = mock.Mock() mock_hyper1.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'} mock_hyper2.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'} self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 0) self.assertEqual(len(self.strategy.free_poweron_node_pool), 2) self.assertEqual(len(self.strategy.free_poweroff_node_pool), 0) def test_get_hosts_pool_free_poweroff_node_pool(self): mock_node1_dict = { 'extra': {'compute_node_id': 1}, 'power_state': 'power off'} mock_node2_dict = { 'extra': {'compute_node_id': 2}, 'power_state': 'power off'} mock_node1 = mock.Mock(**mock_node1_dict) mock_node2 = mock.Mock(**mock_node2_dict) self.m_ironic.node.get.side_effect = [mock_node1, mock_node2] mock_hyper1 = mock.Mock() mock_hyper2 = mock.Mock() mock_hyper1.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'} mock_hyper2.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'} self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 0) self.assertEqual(len(self.strategy.free_poweron_node_pool), 0) 
self.assertEqual(len(self.strategy.free_poweroff_node_pool), 2) def test_get_hosts_pool_with_node_out_model(self): mock_node1_dict = { 'extra': {'compute_node_id': 1}, 'power_state': 'power off'} mock_node2_dict = { 'extra': {'compute_node_id': 2}, 'power_state': 'power off'} mock_node1 = mock.Mock(**mock_node1_dict) mock_node2 = mock.Mock(**mock_node2_dict) self.m_ironic.node.get.side_effect = [mock_node1, mock_node2] mock_hyper1 = mock.Mock() mock_hyper2 = mock.Mock() mock_hyper1.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'} mock_hyper2.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_10'}, 'state': 'up'} self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2] self.strategy.get_hosts_pool() self.assertEqual(len(self.strategy.with_vms_node_pool), 0) self.assertEqual(len(self.strategy.free_poweron_node_pool), 0) self.assertEqual(len(self.strategy.free_poweroff_node_pool), 1) def test_save_energy_poweron(self): self.strategy.free_poweroff_node_pool = [ mock.Mock(uuid='922d4762-0bc5-4b30-9cb9-48ab644dd861'), mock.Mock(uuid='922d4762-0bc5-4b30-9cb9-48ab644dd862') ] self.strategy.save_energy() self.assertEqual(len(self.strategy.solution.actions), 1) action = self.strategy.solution.actions[0] self.assertEqual(action.get('input_parameters').get('state'), 'on') def test_save_energy_poweroff(self): self.strategy.free_poweron_node_pool = [ mock.Mock(uuid='922d4762-0bc5-4b30-9cb9-48ab644dd861'), mock.Mock(uuid='922d4762-0bc5-4b30-9cb9-48ab644dd862') ] self.strategy.save_energy() self.assertEqual(len(self.strategy.solution.actions), 1) action = self.strategy.solution.actions[0] self.assertEqual(action.get('input_parameters').get('state'), 'off') def test_execute(self): mock_node1_dict = { 'extra': {'compute_node_id': 1}, 'power_state': 'power on'} mock_node2_dict = { 'extra': {'compute_node_id': 2}, 'power_state': 'power on'} mock_node1 = mock.Mock(**mock_node1_dict) mock_node2 = 
mock.Mock(**mock_node2_dict) self.m_ironic.node.get.side_effect = [mock_node1, mock_node2] mock_hyper1 = mock.Mock() mock_hyper2 = mock.Mock() mock_hyper1.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_0'}, 'state': 'up'} mock_hyper2.to_dict.return_value = { 'running_vms': 0, 'service': {'host': 'hostname_1'}, 'state': 'up'} self.m_nova.hypervisors.get.side_effect = [mock_hyper1, mock_hyper2] model = self.fake_c_cluster.generate_scenario_1() self.m_c_model.return_value = model solution = self.strategy.execute() self.assertEqual(len(solution.actions), 1) python-watcher-4.0.0/watcher/tests/decision_engine/strategy/selector/0000775000175000017500000000000013656752352026132 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/selector/__init__.py0000664000175000017500000000000013656752270030230 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/strategy/selector/test_strategy_selector.py0000664000175000017500000000500413656752270033303 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from watcher.common import exception from watcher.decision_engine.loading import default as default_loader from watcher.decision_engine.strategy.selection import ( default as default_selector) from watcher.decision_engine.strategy import strategies from watcher.tests import base class TestStrategySelector(base.TestCase): @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') def test_select_with_strategy_name(self, m_load): expected_goal = 'dummy' expected_strategy = "dummy" strategy_selector = default_selector.DefaultStrategySelector( expected_goal, expected_strategy, osc=None) strategy_selector.select() m_load.assert_called_once_with(expected_strategy, osc=None) @mock.patch.object(default_loader.DefaultStrategyLoader, 'load') @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') def test_select_with_goal_name_only(self, m_list_available, m_load): m_list_available.return_value = {"dummy": strategies.DummyStrategy} expected_goal = 'dummy' expected_strategy = "dummy" strategy_selector = default_selector.DefaultStrategySelector( expected_goal, osc=None) strategy_selector.select() m_load.assert_called_once_with(expected_strategy, osc=None) def test_select_non_existing_strategy(self): strategy_selector = default_selector.DefaultStrategySelector( "dummy", "NOT_FOUND") self.assertRaises(exception.LoadingError, strategy_selector.select) @mock.patch.object(default_loader.DefaultStrategyLoader, 'list_available') def test_select_no_available_strategy_for_goal(self, m_list_available): m_list_available.return_value = {} strategy_selector = default_selector.DefaultStrategySelector("dummy") self.assertRaises(exception.NoAvailableStrategyForGoal, strategy_selector.select) python-watcher-4.0.0/watcher/tests/decision_engine/event_consumer/0000775000175000017500000000000013656752352025504 5ustar 
zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/event_consumer/__init__.py0000664000175000017500000000000013656752270027602 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/test_sync.py0000664000175000017500000007354113656752270025046 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_serialization import jsonutils from watcher.common import context from watcher.common import utils from watcher.decision_engine.loading import default from watcher.decision_engine import sync from watcher import objects from watcher.tests.db import base from watcher.tests.decision_engine import fake_goals from watcher.tests.decision_engine import fake_strategies class TestSyncer(base.DbTestCase): def setUp(self): super(TestSyncer, self).setUp() self.ctx = context.make_context() # This mock simulates the strategies discovery done in discover() self.m_available_strategies = mock.Mock(return_value={ fake_strategies.FakeDummy1Strategy1.get_name(): fake_strategies.FakeDummy1Strategy1, fake_strategies.FakeDummy1Strategy2.get_name(): fake_strategies.FakeDummy1Strategy2, fake_strategies.FakeDummy2Strategy3.get_name(): fake_strategies.FakeDummy2Strategy3, fake_strategies.FakeDummy2Strategy4.get_name(): fake_strategies.FakeDummy2Strategy4, }) self.m_available_goals = mock.Mock(return_value={ fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1, 
fake_goals.FakeDummy2.get_name(): fake_goals.FakeDummy2, }) self.goal1_spec = fake_goals.FakeDummy1( config=mock.Mock()).get_efficacy_specification() self.goal2_spec = fake_goals.FakeDummy2( config=mock.Mock()).get_efficacy_specification() p_goals_load = mock.patch.object( default.DefaultGoalLoader, 'load', side_effect=lambda goal: self.m_available_goals()[goal]()) p_goals = mock.patch.object( default.DefaultGoalLoader, 'list_available', self.m_available_goals) p_strategies = mock.patch.object( default.DefaultStrategyLoader, 'list_available', self.m_available_strategies) p_goals.start() p_goals_load.start() p_strategies.start() self.syncer = sync.Syncer() self.addCleanup(p_goals.stop) self.addCleanup(p_goals_load.stop) self.addCleanup(p_strategies.stop) @staticmethod def _find_created_modified_unmodified_ids(befores, afters): created = { a_item.id: a_item for a_item in afters if a_item.uuid not in (b_item.uuid for b_item in befores) } modified = { a_item.id: a_item for a_item in afters if a_item.as_dict() not in ( b_items.as_dict() for b_items in befores) } unmodified = { a_item.id: a_item for a_item in afters if a_item.as_dict() in ( b_items.as_dict() for b_items in befores) } return created, modified, unmodified @mock.patch.object(objects.Strategy, "soft_delete") @mock.patch.object(objects.Strategy, "save") @mock.patch.object(objects.Strategy, "create") @mock.patch.object(objects.Strategy, "list") @mock.patch.object(objects.Goal, "get_by_name") @mock.patch.object(objects.Goal, "soft_delete") @mock.patch.object(objects.Goal, "save") @mock.patch.object(objects.Goal, "create") @mock.patch.object(objects.Goal, "list") def test_sync_empty_db( self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): m_g_get_by_name.side_effect = [ objects.Goal(self.ctx, id=i) for i in range(1, 10)] m_g_list.return_value = [] m_s_list.return_value = [] self.syncer.sync() self.assertEqual(2, m_g_create.call_count) 
self.assertEqual(0, m_g_save.call_count) self.assertEqual(0, m_g_soft_delete.call_count) self.assertEqual(4, m_s_create.call_count) self.assertEqual(0, m_s_save.call_count) self.assertEqual(0, m_s_soft_delete.call_count) @mock.patch.object(objects.Strategy, "soft_delete") @mock.patch.object(objects.Strategy, "save") @mock.patch.object(objects.Strategy, "create") @mock.patch.object(objects.Strategy, "list") @mock.patch.object(objects.Goal, "get_by_name") @mock.patch.object(objects.Goal, "soft_delete") @mock.patch.object(objects.Goal, "save") @mock.patch.object(objects.Goal, "create") @mock.patch.object(objects.Goal, "list") def test_sync_with_existing_goal( self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): m_g_get_by_name.side_effect = [ objects.Goal(self.ctx, id=i) for i in range(1, 10)] m_g_list.return_value = [ objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(), name="dummy_1", display_name="Dummy 1", efficacy_specification=( self.goal1_spec.serialize_indicators_specs())) ] m_s_list.return_value = [] self.syncer.sync() self.assertEqual(1, m_g_create.call_count) self.assertEqual(0, m_g_save.call_count) self.assertEqual(0, m_g_soft_delete.call_count) self.assertEqual(4, m_s_create.call_count) self.assertEqual(0, m_s_save.call_count) self.assertEqual(0, m_s_soft_delete.call_count) @mock.patch.object(objects.Strategy, "soft_delete") @mock.patch.object(objects.Strategy, "save") @mock.patch.object(objects.Strategy, "create") @mock.patch.object(objects.Strategy, "list") @mock.patch.object(objects.Goal, "get_by_name") @mock.patch.object(objects.Goal, "soft_delete") @mock.patch.object(objects.Goal, "save") @mock.patch.object(objects.Goal, "create") @mock.patch.object(objects.Goal, "list") def test_sync_with_existing_strategy( self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): m_g_get_by_name.side_effect = [ 
objects.Goal(self.ctx, id=i) for i in range(1, 10)] m_g_list.return_value = [ objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(), name="dummy_1", display_name="Dummy 1", efficacy_specification=( self.goal1_spec.serialize_indicators_specs())) ] m_s_list.return_value = [ objects.Strategy(self.ctx, id=1, name="strategy_1", goal_id=1, display_name="Strategy 1", parameters_spec='{}') ] self.syncer.sync() self.assertEqual(1, m_g_create.call_count) self.assertEqual(0, m_g_save.call_count) self.assertEqual(0, m_g_soft_delete.call_count) self.assertEqual(3, m_s_create.call_count) self.assertEqual(0, m_s_save.call_count) self.assertEqual(0, m_s_soft_delete.call_count) @mock.patch.object(objects.Strategy, "soft_delete") @mock.patch.object(objects.Strategy, "save") @mock.patch.object(objects.Strategy, "create") @mock.patch.object(objects.Strategy, "list") @mock.patch.object(objects.Goal, "get_by_name") @mock.patch.object(objects.Goal, "soft_delete") @mock.patch.object(objects.Goal, "save") @mock.patch.object(objects.Goal, "create") @mock.patch.object(objects.Goal, "list") def test_sync_with_modified_goal( self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): m_g_get_by_name.side_effect = [ objects.Goal(self.ctx, id=i) for i in range(1, 10)] m_g_list.return_value = [objects.Goal( self.ctx, id=1, uuid=utils.generate_uuid(), name="dummy_2", display_name="original", efficacy_specification=self.goal2_spec.serialize_indicators_specs() )] m_s_list.return_value = [] self.syncer.sync() self.assertEqual(2, m_g_create.call_count) self.assertEqual(0, m_g_save.call_count) self.assertEqual(1, m_g_soft_delete.call_count) self.assertEqual(4, m_s_create.call_count) self.assertEqual(0, m_s_save.call_count) self.assertEqual(0, m_s_soft_delete.call_count) @mock.patch.object(objects.Strategy, "soft_delete") @mock.patch.object(objects.Strategy, "save") @mock.patch.object(objects.Strategy, "create") 
@mock.patch.object(objects.Strategy, "list") @mock.patch.object(objects.Goal, "get_by_name") @mock.patch.object(objects.Goal, "soft_delete") @mock.patch.object(objects.Goal, "save") @mock.patch.object(objects.Goal, "create") @mock.patch.object(objects.Goal, "list") def test_sync_with_modified_strategy( self, m_g_list, m_g_create, m_g_save, m_g_soft_delete, m_g_get_by_name, m_s_list, m_s_create, m_s_save, m_s_soft_delete): m_g_get_by_name.side_effect = [ objects.Goal(self.ctx, id=i) for i in range(1, 10)] m_g_list.return_value = [ objects.Goal(self.ctx, id=1, uuid=utils.generate_uuid(), name="dummy_1", display_name="Dummy 1", efficacy_specification=( self.goal1_spec.serialize_indicators_specs())) ] m_s_list.return_value = [ objects.Strategy(self.ctx, id=1, name="strategy_1", goal_id=1, display_name="original", parameters_spec='{}') ] self.syncer.sync() self.assertEqual(1, m_g_create.call_count) self.assertEqual(0, m_g_save.call_count) self.assertEqual(0, m_g_soft_delete.call_count) self.assertEqual(4, m_s_create.call_count) self.assertEqual(0, m_s_save.call_count) self.assertEqual(1, m_s_soft_delete.call_count) def test_end2end_sync_goals_with_modified_goal_and_strategy(self): # ### Setup ### # # Here, we simulate goals and strategies already discovered in the past # that were saved in DB # Should stay unmodified after sync() goal1 = objects.Goal( self.ctx, id=1, uuid=utils.generate_uuid(), name="dummy_1", display_name="Dummy 1", efficacy_specification=( self.goal1_spec.serialize_indicators_specs())) # Should be modified by the sync() goal2 = objects.Goal( self.ctx, id=2, uuid=utils.generate_uuid(), name="dummy_2", display_name="Original", efficacy_specification=self.goal2_spec.serialize_indicators_specs() ) goal1.create() goal2.create() # Should stay unmodified after sync() strategy1 = objects.Strategy( self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(), display_name="Strategy 1", goal_id=goal1.id) # Should be modified after sync() because its related 
goal has been # modified strategy2 = objects.Strategy( self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(), display_name="Strategy 2", goal_id=goal2.id) # Should be modified after sync() because its strategy name has been # modified strategy3 = objects.Strategy( self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(), display_name="Original", goal_id=goal1.id) # Should be modified after sync() because both its related goal # and its strategy name have been modified strategy4 = objects.Strategy( self.ctx, id=4, name="strategy_4", uuid=utils.generate_uuid(), display_name="Original", goal_id=goal2.id) strategy1.create() strategy2.create() strategy3.create() strategy4.create() # Here we simulate audit_templates that were already created in the # past and hence saved within the Watcher DB # Should stay unmodified after sync() audit_template1 = objects.AuditTemplate( self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(), goal_id=goal1.id, strategy_id=strategy1.id) # Should be modified by the sync() because its associated goal # has been modified (compared to the defined fake goals) audit_template2 = objects.AuditTemplate( self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(), goal_id=goal2.id, strategy_id=strategy2.id) # Should be modified by the sync() because its associated strategy # has been modified (compared to the defined fake strategies) audit_template3 = objects.AuditTemplate( self.ctx, id=3, name="Synced AT3", uuid=utils.generate_uuid(), goal_id=goal1.id, strategy_id=strategy3.id) # Modified because of both because its associated goal and associated # strategy should be modified audit_template4 = objects.AuditTemplate( self.ctx, id=4, name="Synced AT4", uuid=utils.generate_uuid(), goal_id=goal2.id, strategy_id=strategy4.id) audit_template1.create() audit_template2.create() audit_template3.create() audit_template4.create() # Should stay unmodified after sync() audit1 = objects.Audit( self.ctx, id=1, uuid=utils.generate_uuid(), 
name='audit_1', audit_type=objects.audit.AuditType.ONESHOT.value, state=objects.audit.State.PENDING, goal_id=goal1.id, strategy_id=strategy1.id, auto_trigger=False) # Should be modified by the sync() because its associated goal # has been modified (compared to the defined fake goals) audit2 = objects.Audit( self.ctx, id=2, uuid=utils.generate_uuid(), name='audit_2', audit_type=objects.audit.AuditType.ONESHOT.value, state=objects.audit.State.PENDING, goal_id=goal2.id, strategy_id=strategy2.id, auto_trigger=False) # Should be modified by the sync() because its associated strategy # has been modified (compared to the defined fake strategies) audit3 = objects.Audit( self.ctx, id=3, uuid=utils.generate_uuid(), name='audit_3', audit_type=objects.audit.AuditType.ONESHOT.value, state=objects.audit.State.PENDING, goal_id=goal1.id, strategy_id=strategy3.id, auto_trigger=False) # Modified because of both because its associated goal and associated # strategy should be modified (compared to the defined fake # goals/strategies) audit4 = objects.Audit( self.ctx, id=4, uuid=utils.generate_uuid(), name='audit_4', audit_type=objects.audit.AuditType.ONESHOT.value, state=objects.audit.State.PENDING, goal_id=goal2.id, strategy_id=strategy4.id, auto_trigger=False) audit1.create() audit2.create() audit3.create() audit4.create() # Should stay unmodified after sync() action_plan1 = objects.ActionPlan( self.ctx, id=1, uuid=utils.generate_uuid(), audit_id=audit1.id, strategy_id=strategy1.id, state='DOESNOTMATTER', global_efficacy=[]) # Stale after syncing because the goal of the audit has been modified # (compared to the defined fake goals) action_plan2 = objects.ActionPlan( self.ctx, id=2, uuid=utils.generate_uuid(), audit_id=audit2.id, strategy_id=strategy2.id, state='DOESNOTMATTER', global_efficacy=[]) # Stale after syncing because the strategy has been modified # (compared to the defined fake strategies) action_plan3 = objects.ActionPlan( self.ctx, id=3, uuid=utils.generate_uuid(), 
audit_id=audit3.id, strategy_id=strategy3.id, state='DOESNOTMATTER', global_efficacy=[]) # Stale after syncing because both the strategy and the related audit # have been modified (compared to the defined fake goals/strategies) action_plan4 = objects.ActionPlan( self.ctx, id=4, uuid=utils.generate_uuid(), audit_id=audit4.id, strategy_id=strategy4.id, state='DOESNOTMATTER', global_efficacy=[]) action_plan1.create() action_plan2.create() action_plan3.create() action_plan4.create() before_goals = objects.Goal.list(self.ctx) before_strategies = objects.Strategy.list(self.ctx) before_audit_templates = objects.AuditTemplate.list(self.ctx) before_audits = objects.Audit.list(self.ctx) before_action_plans = objects.ActionPlan.list(self.ctx) # ### Action under test ### # try: self.syncer.sync() except Exception as exc: self.fail(exc) # ### Assertions ### # after_goals = objects.Goal.list(self.ctx) after_strategies = objects.Strategy.list(self.ctx) after_audit_templates = objects.AuditTemplate.list(self.ctx) after_audits = objects.Audit.list(self.ctx) after_action_plans = objects.ActionPlan.list(self.ctx) self.assertEqual(2, len(before_goals)) self.assertEqual(4, len(before_strategies)) self.assertEqual(4, len(before_audit_templates)) self.assertEqual(4, len(before_audits)) self.assertEqual(4, len(before_action_plans)) self.assertEqual(2, len(after_goals)) self.assertEqual(4, len(after_strategies)) self.assertEqual(4, len(after_audit_templates)) self.assertEqual(4, len(after_audits)) self.assertEqual(4, len(after_action_plans)) self.assertEqual( {"dummy_1", "dummy_2"}, set([g.name for g in after_goals])) self.assertEqual( {"strategy_1", "strategy_2", "strategy_3", "strategy_4"}, set([s.name for s in after_strategies])) created_goals, modified_goals, unmodified_goals = ( self._find_created_modified_unmodified_ids( before_goals, after_goals)) created_strategies, modified_strategies, unmodified_strategies = ( self._find_created_modified_unmodified_ids( before_strategies, 
after_strategies)) (created_audit_templates, modified_audit_templates, unmodified_audit_templates) = ( self._find_created_modified_unmodified_ids( before_audit_templates, after_audit_templates)) created_audits, modified_audits, unmodified_audits = ( self._find_created_modified_unmodified_ids( before_audits, after_audits)) (created_action_plans, modified_action_plans, unmodified_action_plans) = ( self._find_created_modified_unmodified_ids( before_action_plans, after_action_plans)) dummy_1_spec = jsonutils.loads( self.goal1_spec.serialize_indicators_specs()) dummy_2_spec = jsonutils.loads( self.goal2_spec.serialize_indicators_specs()) self.assertEqual( [dummy_1_spec, dummy_2_spec], [g.efficacy_specification for g in after_goals]) self.assertEqual(1, len(created_goals)) self.assertEqual(3, len(created_strategies)) self.assertEqual(0, len(created_audits)) self.assertEqual(0, len(created_action_plans)) self.assertEqual(2, strategy2.goal_id) self.assertNotEqual( set([strategy2.id, strategy3.id, strategy4.id]), set(modified_strategies)) self.assertEqual(set([strategy1.id]), set(unmodified_strategies)) self.assertEqual( set([audit_template2.id, audit_template3.id, audit_template4.id]), set(modified_audit_templates)) self.assertEqual(set([audit_template1.id]), set(unmodified_audit_templates)) self.assertEqual( set([audit2.id, audit3.id, audit4.id]), set(modified_audits)) self.assertEqual(set([audit1.id]), set(unmodified_audits)) self.assertEqual( set([action_plan2.id, action_plan3.id, action_plan4.id]), set(modified_action_plans)) self.assertTrue( all(ap.state == objects.action_plan.State.CANCELLED for ap in modified_action_plans.values())) self.assertEqual(set([action_plan1.id]), set(unmodified_action_plans)) def test_end2end_sync_goals_with_removed_goal_and_strategy(self): # ### Setup ### # # We simulate the fact that we removed 2 strategies self.m_available_strategies.return_value = { fake_strategies.FakeDummy1Strategy1.get_name(): fake_strategies.FakeDummy1Strategy1 } # 
We simulate the fact that we removed the dummy_2 goal self.m_available_goals.return_value = { fake_goals.FakeDummy1.get_name(): fake_goals.FakeDummy1, } # Should stay unmodified after sync() goal1 = objects.Goal( self.ctx, id=1, uuid=utils.generate_uuid(), name="dummy_1", display_name="Dummy 1", efficacy_specification=self.goal1_spec.serialize_indicators_specs() ) # To be removed by the sync() goal2 = objects.Goal( self.ctx, id=2, uuid=utils.generate_uuid(), name="dummy_2", display_name="Dummy 2", efficacy_specification=self.goal2_spec.serialize_indicators_specs() ) goal1.create() goal2.create() # Should stay unmodified after sync() strategy1 = objects.Strategy( self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(), display_name="Strategy 1", goal_id=goal1.id) # To be removed by the sync() because strategy entry point does not # exist anymore strategy2 = objects.Strategy( self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(), display_name="Strategy 2", goal_id=goal1.id) # To be removed by the sync() because the goal has been soft deleted # and because the strategy entry point does not exist anymore strategy3 = objects.Strategy( self.ctx, id=3, name="strategy_3", uuid=utils.generate_uuid(), display_name="Original", goal_id=goal2.id) strategy1.create() strategy2.create() strategy3.create() # Here we simulate audit_templates that were already created in the # past and hence saved within the Watcher DB # The strategy of this audit template will be dereferenced # as it does not exist anymore audit_template1 = objects.AuditTemplate( self.ctx, id=1, name="Synced AT1", uuid=utils.generate_uuid(), goal_id=goal1.id, strategy_id=strategy1.id) # Stale after syncing because the goal has been soft deleted audit_template2 = objects.AuditTemplate( self.ctx, id=2, name="Synced AT2", uuid=utils.generate_uuid(), goal_id=goal2.id, strategy_id=strategy2.id) audit_template1.create() audit_template2.create() # Should stay unmodified after sync() audit1 = objects.Audit( 
self.ctx, id=1, uuid=utils.generate_uuid(), name='audit_1', audit_type=objects.audit.AuditType.ONESHOT.value, state=objects.audit.State.PENDING, goal_id=goal1.id, strategy_id=strategy1.id, auto_trigger=False) # Stale after syncing because the goal has been soft deleted audit2 = objects.Audit( self.ctx, id=2, uuid=utils.generate_uuid(), name='audit_2', audit_type=objects.audit.AuditType.ONESHOT.value, state=objects.audit.State.PENDING, goal_id=goal2.id, strategy_id=strategy2.id, auto_trigger=False) audit1.create() audit2.create() # Stale after syncing because its related strategy has been be # soft deleted action_plan1 = objects.ActionPlan( self.ctx, id=1, uuid=utils.generate_uuid(), audit_id=audit1.id, strategy_id=strategy1.id, state='DOESNOTMATTER', global_efficacy=[]) # Stale after syncing because its related goal has been soft deleted action_plan2 = objects.ActionPlan( self.ctx, id=2, uuid=utils.generate_uuid(), audit_id=audit2.id, strategy_id=strategy2.id, state='DOESNOTMATTER', global_efficacy=[]) action_plan1.create() action_plan2.create() before_goals = objects.Goal.list(self.ctx) before_strategies = objects.Strategy.list(self.ctx) before_audit_templates = objects.AuditTemplate.list(self.ctx) before_audits = objects.Audit.list(self.ctx) before_action_plans = objects.ActionPlan.list(self.ctx) # ### Action under test ### # try: self.syncer.sync() except Exception as exc: self.fail(exc) # ### Assertions ### # after_goals = objects.Goal.list(self.ctx) after_strategies = objects.Strategy.list(self.ctx) after_audit_templates = objects.AuditTemplate.list(self.ctx) after_audits = objects.Audit.list(self.ctx) after_action_plans = objects.ActionPlan.list(self.ctx) self.assertEqual(2, len(before_goals)) self.assertEqual(3, len(before_strategies)) self.assertEqual(2, len(before_audit_templates)) self.assertEqual(2, len(before_audits)) self.assertEqual(2, len(before_action_plans)) self.assertEqual(1, len(after_goals)) self.assertEqual(1, len(after_strategies)) 
self.assertEqual(2, len(after_audit_templates)) self.assertEqual(2, len(after_audits)) self.assertEqual(2, len(after_action_plans)) self.assertEqual( {"dummy_1"}, set([g.name for g in after_goals])) self.assertEqual( {"strategy_1"}, set([s.name for s in after_strategies])) created_goals, modified_goals, unmodified_goals = ( self._find_created_modified_unmodified_ids( before_goals, after_goals)) created_strategies, modified_strategies, unmodified_strategies = ( self._find_created_modified_unmodified_ids( before_strategies, after_strategies)) (created_audit_templates, modified_audit_templates, unmodified_audit_templates) = ( self._find_created_modified_unmodified_ids( before_audit_templates, after_audit_templates)) created_audits, modified_audits, unmodified_audits = ( self._find_created_modified_unmodified_ids( before_audits, after_audits)) (created_action_plans, modified_action_plans, unmodified_action_plans) = ( self._find_created_modified_unmodified_ids( before_action_plans, after_action_plans)) self.assertEqual(0, len(created_goals)) self.assertEqual(0, len(created_strategies)) self.assertEqual(0, len(created_audits)) self.assertEqual(0, len(created_action_plans)) self.assertEqual(set([audit_template2.id]), set(modified_audit_templates)) self.assertEqual(set([audit_template1.id]), set(unmodified_audit_templates)) self.assertEqual(set([audit2.id]), set(modified_audits)) self.assertEqual(set([audit1.id]), set(unmodified_audits)) self.assertEqual(set([action_plan2.id]), set(modified_action_plans)) self.assertTrue( all(ap.state == objects.action_plan.State.CANCELLED for ap in modified_action_plans.values())) self.assertEqual(set([action_plan1.id]), set(unmodified_action_plans)) def test_sync_strategies_with_removed_goal(self): # ### Setup ### # goal1 = objects.Goal( self.ctx, id=1, uuid=utils.generate_uuid(), name="dummy_1", display_name="Dummy 1", efficacy_specification=self.goal1_spec.serialize_indicators_specs() ) goal2 = objects.Goal( self.ctx, id=2, 
uuid=utils.generate_uuid(), name="dummy_2", display_name="Dummy 2", efficacy_specification=self.goal2_spec.serialize_indicators_specs() ) goal1.create() goal2.create() strategy1 = objects.Strategy( self.ctx, id=1, name="strategy_1", uuid=utils.generate_uuid(), display_name="Strategy 1", goal_id=goal1.id) strategy2 = objects.Strategy( self.ctx, id=2, name="strategy_2", uuid=utils.generate_uuid(), display_name="Strategy 2", goal_id=goal2.id) strategy1.create() strategy2.create() # to be removed by some reasons goal2.soft_delete() before_goals = objects.Goal.list(self.ctx) before_strategies = objects.Strategy.list(self.ctx) # ### Action under test ### # try: self.syncer.sync() except Exception as exc: self.fail(exc) # ### Assertions ### # after_goals = objects.Goal.list(self.ctx) after_strategies = objects.Strategy.list(self.ctx) self.assertEqual(1, len(before_goals)) self.assertEqual(2, len(before_strategies)) self.assertEqual(2, len(after_goals)) self.assertEqual(4, len(after_strategies)) self.assertEqual( {"dummy_1", "dummy_2"}, set([g.name for g in after_goals])) self.assertEqual( {"strategy_1", "strategy_2", "strategy_3", "strategy_4"}, set([s.name for s in after_strategies])) python-watcher-4.0.0/watcher/tests/decision_engine/fake_strategies.py0000664000175000017500000000371413656752270026166 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from watcher.decision_engine.strategy.strategies import base as base_strategy class FakeStrategy(base_strategy.BaseStrategy): NAME = NotImplemented DISPLAY_NAME = NotImplemented GOAL_NAME = NotImplemented @classmethod def get_name(cls): return cls.NAME @classmethod def get_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_translatable_display_name(cls): return cls.DISPLAY_NAME @classmethod def get_goal_name(cls): return cls.GOAL_NAME @classmethod def get_config_opts(cls): return [] def pre_execute(self): pass def do_execute(self): pass def post_execute(self): pass class FakeDummy1Strategy1(FakeStrategy): GOAL_NAME = "dummy_1" NAME = "strategy_1" DISPLAY_NAME = "Strategy 1" @classmethod def get_config_opts(cls): return [ cfg.StrOpt('test_opt', help="Option used for testing."), ] class FakeDummy1Strategy2(FakeStrategy): GOAL_NAME = "dummy_1" NAME = "strategy_2" DISPLAY_NAME = "Strategy 2" class FakeDummy2Strategy3(FakeStrategy): GOAL_NAME = "dummy_2" NAME = "strategy_3" DISPLAY_NAME = "Strategy 3" class FakeDummy2Strategy4(FakeStrategy): GOAL_NAME = "dummy_2" NAME = "strategy_4" DISPLAY_NAME = "Strategy 4" python-watcher-4.0.0/watcher/tests/decision_engine/cluster/0000775000175000017500000000000013656752352024131 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/cluster/__init__.py0000664000175000017500000000000013656752270026227 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/cluster/test_cinder_cdmc.py0000664000175000017500000001222213656752270027772 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.model.collector import cinder from watcher.tests import base from watcher.tests import conf_fixture class TestCinderClusterDataModelCollector(base.TestCase): def setUp(self): super(TestCinderClusterDataModelCollector, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_cdmc_execute(self, m_cinder_helper_cls): m_cinder_helper = mock.Mock(name="cinder_helper") m_cinder_helper_cls.return_value = m_cinder_helper fake_storage_node = mock.Mock( host='host@backend', zone='zone', status='enabled', state='up', volume_type=['fake_type'] ) fake_storage_pool = mock.Mock( total_volumes=1, total_capacity_gb=30, free_capacity_gb=20, provisioned_capacity_gb=10, allocated_capacity_gb=10, virtual_free=20 ) setattr(fake_storage_pool, 'name', 'host@backend#pool') fake_volume = mock.Mock( id=1, size=1, status='in-use', attachments=[{"server_id": "server_id", "attachment_id": "attachment_id"}], multiattach='false', snapshot_id='', metadata='{"key": "value"}', bootable='false' ) setattr(fake_volume, 'name', 'name') setattr(fake_volume, 'os-vol-tenant-attr:tenant_id', '0c003652-0cb1-4210-9005-fd5b92b1faa2') setattr(fake_volume, 'os-vol-host-attr:host', 'host@backend#pool') # storage node list m_cinder_helper.get_storage_node_list.return_value = [ fake_storage_node] m_cinder_helper.get_volume_type_by_backendname.return_value = [ 
'fake_type'] # storage pool list m_cinder_helper.get_storage_pool_list.return_value = [ fake_storage_pool] # volume list m_cinder_helper.get_volume_list.return_value = [fake_volume] m_config = mock.Mock() m_osc = mock.Mock() cinder_cdmc = cinder.CinderClusterDataModelCollector( config=m_config, osc=m_osc) cinder_cdmc.get_audit_scope_handler([]) model = cinder_cdmc.execute() storage_nodes = model.get_all_storage_nodes() storage_node = list(storage_nodes.values())[0] storage_pools = model.get_node_pools(storage_node) storage_pool = storage_pools[0] volumes = model.get_pool_volumes(storage_pool) volume = volumes[0] self.assertEqual(1, len(storage_nodes)) self.assertEqual(1, len(storage_pools)) self.assertEqual(1, len(volumes)) self.assertEqual(storage_node.host, 'host@backend') self.assertEqual(storage_pool.name, 'host@backend#pool') self.assertEqual(volume.uuid, '1') @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) @mock.patch.object(cinder_helper, 'CinderHelper') def test_cinder_cdmc_total_capacity_gb_not_integer( self, m_cinder_helper_cls): m_cinder_helper = mock.Mock(name="cinder_helper") m_cinder_helper_cls.return_value = m_cinder_helper fake_storage_node = mock.Mock( host='host@backend', zone='zone', status='enabled', state='up', volume_type=['fake_type'] ) fake_storage_pool = mock.Mock( total_volumes=1, total_capacity_gb="unknown", free_capacity_gb=20, provisioned_capacity_gb=10, allocated_capacity_gb=10, virtual_free=20 ) setattr(fake_storage_pool, 'name', 'host@backend#pool') # storage node list m_cinder_helper.get_storage_node_list.return_value = [ fake_storage_node] m_cinder_helper.get_volume_type_by_backendname.return_value = [ 'fake_type'] # storage pool list m_cinder_helper.get_storage_pool_list.return_value = [ fake_storage_pool] # volume list m_cinder_helper.get_volume_list.return_value = [] m_config = mock.Mock() m_osc = mock.Mock() cinder_cdmc = cinder.CinderClusterDataModelCollector( config=m_config, osc=m_osc) 
cinder_cdmc.get_audit_scope_handler([]) self.assertRaises(exception.InvalidPoolAttributeValue, cinder_cdmc.execute) python-watcher-4.0.0/watcher/tests/decision_engine/cluster/test_cluster_data_model_collector.py0000664000175000017500000000337713656752270033453 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import model_root from watcher.tests import base as test_base class DummyClusterDataModelCollector(base.BaseClusterDataModelCollector): @property def notification_endpoints(self): return [] def get_audit_scope_handler(self, audit_scope): return None def execute(self): model = model_root.ModelRoot() # Do something here... 
return model class TestClusterDataModelCollector(test_base.TestCase): def test_is_singleton(self): m_config = mock.Mock() inst1 = DummyClusterDataModelCollector(config=m_config) inst2 = DummyClusterDataModelCollector(config=m_config) self.assertIs(inst1, inst2) def test_in_memory_model_is_copied(self): m_config = mock.Mock() collector = DummyClusterDataModelCollector(config=m_config) collector.synchronize() self.assertIs( collector._cluster_data_model, collector.cluster_data_model) self.assertIsNot( collector.cluster_data_model, collector.get_latest_cluster_data_model()) python-watcher-4.0.0/watcher/tests/decision_engine/cluster/test_nova_cdmc.py0000664000175000017500000004615713656752270027507 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock import os_resource_classes as orc from watcher.common import nova_helper from watcher.common import placement_helper from watcher.decision_engine.model.collector import nova from watcher.tests import base from watcher.tests import conf_fixture class TestNovaClusterDataModelCollector(base.TestCase): def setUp(self): super(TestNovaClusterDataModelCollector, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) @mock.patch('keystoneclient.v3.client.Client', mock.Mock()) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_nova_cdmc_execute(self, m_nova_helper_cls, m_placement_helper_cls): m_placement_helper = mock.Mock(name="placement_helper") m_placement_helper.get_inventories.return_value = { orc.VCPU: { "allocation_ratio": 16.0, "total": 8, "reserved": 0, "step_size": 1, "min_unit": 1, "max_unit": 8}, orc.MEMORY_MB: { "allocation_ratio": 1.5, "total": 16039, "reserved": 512, "step_size": 1, "min_unit": 1, "max_unit": 16039}, orc.DISK_GB: { "allocation_ratio": 1.0, "total": 142, "reserved": 0, "step_size": 1, "min_unit": 1, "max_unit": 142} } m_placement_helper.get_usages_for_resource_provider.return_value = { orc.DISK_GB: 10, orc.MEMORY_MB: 100, orc.VCPU: 0 } m_placement_helper_cls.return_value = m_placement_helper m_nova_helper = mock.Mock(name="nova_helper") m_nova_helper_cls.return_value = m_nova_helper m_nova_helper.get_service.return_value = mock.Mock( id=1355, host='test_hostname', binary='nova-compute', status='enabled', state='up', disabled_reason='', ) minimal_node = dict( id='160a0e7b-8b0b-4854-8257-9c71dff4efcc', hypervisor_hostname='test_hostname', state='TEST_STATE', status='TEST_STATUS', ) minimal_node_with_servers = dict( servers=[ {'name': 'fake_instance', 'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'} ], **minimal_node ) fake_compute_node = mock.Mock( service={'id': 123, 'host': 'test_hostname', 'disabled_reason': ''}, memory_mb=333, memory_mb_used=100, 
free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=None, # Don't let the mock return a value for servers. **minimal_node ) fake_detailed_node = mock.Mock( service={'id': 123, 'host': 'test_hostname', 'disabled_reason': ''}, memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, **minimal_node_with_servers) fake_instance = mock.Mock( id='ef500f7e-dac8-470f-960c-169486fce71b', name='fake_instance', flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, metadata={'hi': 'hello'}, tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b', ) setattr(fake_instance, 'OS-EXT-STS:vm_state', 'VM_STATE') setattr(fake_instance, 'name', 'fake_instance') # Returns the hypervisors with details (service) but no servers. m_nova_helper.get_compute_node_list.return_value = [fake_compute_node] # Returns the hypervisor with servers and details (service). m_nova_helper.get_compute_node_by_name.return_value = [ fake_detailed_node] # Returns the hypervisor with details (service) but no servers. 
m_nova_helper.get_instance_list.return_value = [fake_instance] m_config = mock.Mock() m_osc = mock.Mock() nova_cdmc = nova.NovaClusterDataModelCollector( config=m_config, osc=m_osc) nova_cdmc.get_audit_scope_handler([]) model = nova_cdmc.execute() compute_nodes = model.get_all_compute_nodes() instances = model.get_all_instances() self.assertEqual(1, len(compute_nodes)) self.assertEqual(1, len(instances)) node = list(compute_nodes.values())[0] instance = list(instances.values())[0] self.assertEqual(node.uuid, '160a0e7b-8b0b-4854-8257-9c71dff4efcc') self.assertEqual(instance.uuid, 'ef500f7e-dac8-470f-960c-169486fce71b') memory_total = (node.memory-node.memory_mb_reserved)*node.memory_ratio self.assertEqual(node.memory_mb_capacity, memory_total) disk_total = (node.disk-node.disk_gb_reserved)*node.disk_ratio self.assertEqual(node.disk_gb_capacity, disk_total) vcpus_total = (node.vcpus-node.vcpu_reserved)*node.vcpu_ratio self.assertEqual(node.vcpu_capacity, vcpus_total) m_nova_helper.get_compute_node_by_name.assert_called_once_with( minimal_node['hypervisor_hostname'], servers=True, detailed=True) m_nova_helper.get_instance_list.assert_called_once_with( filters={'host': fake_compute_node.service['host']}, limit=1) class TestNovaModelBuilder(base.TestCase): @mock.patch.object(nova_helper, 'NovaHelper', mock.MagicMock()) def test_add_instance_node(self): model_builder = nova.NovaModelBuilder(osc=mock.MagicMock()) model_builder.model = mock.MagicMock() mock_node = mock.MagicMock() mock_host = mock_node.service["host"] inst1 = mock.MagicMock( id='ef500f7e-dac8-470f-960c-169486fce711', tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b') setattr(inst1, 'OS-EXT-STS:vm_state', 'deleted') setattr(inst1, 'name', 'instance1') inst2 = mock.MagicMock( id='ef500f7e-dac8-470f-960c-169486fce722', tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b') setattr(inst2, 'OS-EXT-STS:vm_state', 'active') setattr(inst2, 'name', 'instance2') mock_instances = [inst1, inst2] 
model_builder.nova_helper.get_instance_list.return_value = ( mock_instances) model_builder.add_instance_node(mock_node, mock_instances) # verify that when len(instances) <= 1000, limit == len(instance). model_builder.nova_helper.get_instance_list.assert_called_once_with( filters={'host': mock_host}, limit=2) fake_instance = model_builder._build_instance_node(inst2) model_builder.model.add_instance.assert_called_once_with( fake_instance) # verify that when len(instances) > 1000, limit == -1. mock_instance = mock.Mock() mock_instances = [mock_instance] * 1001 model_builder.add_instance_node(mock_node, mock_instances) model_builder.nova_helper.get_instance_list.assert_called_with( filters={'host': mock_host}, limit=-1) def test_check_model(self): """Initialize collector ModelBuilder and test check model""" m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) self.assertTrue(t_nova_cluster._check_model_scope(m_scope)) def test_check_model_update_false(self): """Initialize check model with multiple identical scopes The seconds check_model should return false as the models are the same """ m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) self.assertTrue(t_nova_cluster._check_model_scope(m_scope)) self.assertFalse(t_nova_cluster._check_model_scope(m_scope)) def test_check_model_update_true(self): """Initialize check model with multiple different scopes Since the models differ both should return True for the update flag """ m_scope_one = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] m_scope_two = [{"compute": [ {"host_aggregates": [{"id": 2}]}, {"availability_zones": [{"name": "av_b"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) self.assertTrue(t_nova_cluster._check_model_scope(m_scope_one)) 
self.assertTrue(t_nova_cluster._check_model_scope(m_scope_two)) def test_merge_compute_scope(self): """""" m_scope_one = [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ] m_scope_two = [ {"host_aggregates": [{"id": 4}]}, {"availability_zones": [{"name": "av_b"}]} ] reference = {'availability_zones': [{'name': 'av_a'}, {'name': 'av_b'}], 'host_aggregates': [{'id': 5}, {'id': 4}]} t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster._merge_compute_scope(m_scope_one) t_nova_cluster._merge_compute_scope(m_scope_two) self.assertEqual(reference, t_nova_cluster.model_scope) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_aggregates(self, m_nova): """""" m_nova.return_value.get_aggregate_list.return_value = \ [mock.Mock(id=1, name='example'), mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])] m_nova.return_value.get_compute_node_by_name.return_value = False m_scope = [{'id': 5}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) result = set() t_nova_cluster._collect_aggregates(m_scope, result) self.assertEqual(set(['hostone', 'hosttwo']), result) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_aggregates_none(self, m_nova): """Test collect_aggregates with host_aggregates None""" result = set() t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster._collect_aggregates(None, result) self.assertEqual(set(), result) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_zones(self, m_nova): """""" m_nova.return_value.get_service_list.return_value = \ [mock.Mock(zone='av_b'), mock.Mock(zone='av_a', host='hostone')] m_nova.return_value.get_compute_node_by_name.return_value = False m_scope = [{'name': 'av_a'}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) result = set() t_nova_cluster._collect_zones(m_scope, result) self.assertEqual(set(['hostone']), result) @mock.patch.object(nova_helper, 'NovaHelper') def test_collect_zones_none(self, m_nova): """Test collect_zones 
with availability_zones None""" result = set() t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster._collect_zones(None, result) self.assertEqual(set(), result) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_add_physical_layer(self, m_nova, m_placement): """Ensure all three steps of the physical layer are fully executed First the return value for get_aggregate_list and get_service_list are mocked. These return 3 hosts of which hostone is returned by both the aggregate and service call. This will help verify the elimination of duplicates. The scope is setup so that only hostone and hosttwo should remain. There will be 2 simulated compute nodes and 2 associated instances. These will be returned by their matching calls in nova helper. The calls to get_compute_node_by_name and get_instance_list are asserted as to verify the correct operation of add_physical_layer. """ mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = dict() mock_placement.get_usages_for_resource_provider.return_value = None m_placement.return_value = mock_placement m_nova.return_value.get_aggregate_list.return_value = \ [mock.Mock(id=1, name='example'), mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])] m_nova.return_value.get_service_list.return_value = \ [mock.Mock(zone='av_b', host='hostthree'), mock.Mock(zone='av_a', host='hostone')] compute_node_one = mock.Mock( id='796fee99-65dd-4262-aa-fd2a1143faa6', hypervisor_hostname='hostone', hypervisor_type='QEMU', state='TEST_STATE', status='TEST_STATUS', memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=[ {'name': 'fake_instance', 'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'} ], service={'id': 123, 'host': 'hostone', 'disabled_reason': ''}, ) compute_node_two = mock.Mock( id='756fef99-65dd-4262-aa-fd2a1143faa6', hypervisor_hostname='hosttwo', 
hypervisor_type='QEMU', state='TEST_STATE', status='TEST_STATUS', memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=[ {'name': 'fake_instance2', 'uuid': 'ef500f7e-dac8-47f0-960c-169486fce71b'} ], service={'id': 123, 'host': 'hosttwo', 'disabled_reason': ''}, ) m_nova.return_value.get_compute_node_by_name.side_effect = [ [compute_node_one], [compute_node_two] ] fake_instance_one = mock.Mock( id='796fee99-65dd-4262-aa-fd2a1143faa6', name='fake_instance', flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, metadata={'hi': 'hello'}, tenant_id='ff560f7e-dbc8-771f-960c-164482fce21b', ) fake_instance_two = mock.Mock( id='ef500f7e-dac8-47f0-960c-169486fce71b', name='fake_instance2', flavor={'ram': 333, 'disk': 222, 'vcpus': 4, 'id': 1}, metadata={'hi': 'hello'}, tenant_id='756fef99-65dd-4262-aa-fd2a1143faa6', ) m_nova.return_value.get_instance_list.side_effect = [ [fake_instance_one], [fake_instance_two] ] m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) t_nova_cluster.execute(m_scope) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hostone', servers=True, detailed=True) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hosttwo', servers=True, detailed=True) self.assertEqual( m_nova.return_value.get_compute_node_by_name.call_count, 2) m_nova.return_value.get_instance_list.assert_any_call( filters={'host': 'hostone'}, limit=1) m_nova.return_value.get_instance_list.assert_any_call( filters={'host': 'hosttwo'}, limit=1) self.assertEqual( m_nova.return_value.get_instance_list.call_count, 2) @mock.patch.object(placement_helper, 'PlacementHelper') @mock.patch.object(nova_helper, 'NovaHelper') def test_add_physical_layer_with_baremetal_node(self, m_nova, m_placement_helper): """""" mock_placement = mock.Mock(name="placement_helper") mock_placement.get_inventories.return_value = 
dict() mock_placement.get_usages_for_resource_provider.return_value = None m_placement_helper.return_value = mock_placement m_nova.return_value.get_aggregate_list.return_value = \ [mock.Mock(id=1, name='example'), mock.Mock(id=5, name='example', hosts=['hostone', 'hosttwo'])] m_nova.return_value.get_service_list.return_value = \ [mock.Mock(zone='av_b', host='hostthree'), mock.Mock(zone='av_a', host='hostone')] compute_node = mock.Mock( id='796fee99-65dd-4262-aa-fd2a1143faa6', hypervisor_hostname='hostone', hypervisor_type='QEMU', state='TEST_STATE', status='TEST_STATUS', memory_mb=333, memory_mb_used=100, free_disk_gb=222, local_gb=111, local_gb_used=10, vcpus=4, vcpus_used=0, servers=[ {'name': 'fake_instance', 'uuid': 'ef500f7e-dac8-470f-960c-169486fce71b'} ], service={'id': 123, 'host': 'hostone', 'disabled_reason': ''}, ) baremetal_node = mock.Mock( id='5f2d1b3d-4099-4623-b9-05148aefd6cb', hypervisor_hostname='hosttwo', hypervisor_type='ironic', state='TEST_STATE', status='TEST_STATUS', ) m_nova.return_value.get_compute_node_by_name.side_effect = [ [compute_node], [baremetal_node]] m_scope = [{"compute": [ {"host_aggregates": [{"id": 5}]}, {"availability_zones": [{"name": "av_a"}]} ]}] t_nova_cluster = nova.NovaModelBuilder(mock.Mock()) model = t_nova_cluster.execute(m_scope) compute_nodes = model.get_all_compute_nodes() self.assertEqual(1, len(compute_nodes)) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hostone', servers=True, detailed=True) m_nova.return_value.get_compute_node_by_name.assert_any_call( 'hosttwo', servers=True, detailed=True) self.assertEqual( m_nova.return_value.get_compute_node_by_name.call_count, 2) python-watcher-4.0.0/watcher/tests/decision_engine/datasources/0000775000175000017500000000000013656752352024765 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/datasources/__init__.py0000664000175000017500000000000013656752270027063 0ustar 
zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/datasources/test_gnocchi_helper.py0000664000175000017500000001622413656752270031353 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import gnocchi as gnocchi_helper from watcher.tests import base CONF = cfg.CONF @mock.patch.object(clients.OpenStackClients, 'gnocchi') class TestGnocchiHelper(base.BaseTestCase): def setUp(self): super(TestGnocchiHelper, self).setUp() self.osc_mock = mock.Mock() self.helper = gnocchi_helper.GnocchiHelper(osc=self.osc_mock) stat_agg_patcher = mock.patch.object( self.helper, 'statistic_aggregation', spec=gnocchi_helper.GnocchiHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) def test_gnocchi_statistic_aggregation(self, mock_gnocchi): gnocchi = mock.MagicMock() expected_result = 5.5 expected_measures = [["2017-02-02T09:00:00.000000", 360, 5.5]] gnocchi.metric.get_measures.return_value = expected_measures mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() result = helper.statistic_aggregation( resource=mock.Mock(id='16a86790-327a-45f9-bc82-45839f062fdc'), resource_type='instance', meter_name='instance_cpu_usage', period=300, granularity=360, 
aggregate='mean', ) self.assertEqual(expected_result, result) def test_statistic_aggregation_metric_unavailable(self, mock_gnocchi): helper = gnocchi_helper.GnocchiHelper() # invalidate instance_cpu_usage in metric map original_metric_value = helper.METRIC_MAP.get('instance_cpu_usage') helper.METRIC_MAP.update( instance_cpu_usage=None ) self.assertRaises( exception.MetricNotAvailable, helper.statistic_aggregation, resource=mock.Mock(id='16a86790-327a-45f9-bc82-45839f062fdc'), resource_type='instance', meter_name='instance_cpu_usage', period=300, granularity=360, aggregate='mean', ) # restore the metric map as it is a static attribute that does not get # restored between unit tests! helper.METRIC_MAP.update( instance_cpu_usage=original_metric_value ) def test_get_host_cpu_usage(self, mock_gnocchi): self.helper.get_host_cpu_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_cpu_usage', 600, 'mean', 300) def test_get_host_ram_usage(self, mock_gnocchi): self.helper.get_host_ram_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_ram_usage', 600, 'mean', 300) def test_get_host_outlet_temperature(self, mock_gnocchi): self.helper.get_host_outlet_temp('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_outlet_temp', 600, 'mean', 300) def test_get_host_inlet_temperature(self, mock_gnocchi): self.helper.get_host_inlet_temp('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_inlet_temp', 600, 'mean', 300) def test_get_host_airflow(self, mock_gnocchi): self.helper.get_host_airflow('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_airflow', 600, 'mean', 300) def test_get_host_power(self, mock_gnocchi): self.helper.get_host_power('compute1', 600, 'mean', 300) 
self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_power', 600, 'mean', 300) def test_get_instance_cpu_usage(self, mock_gnocchi): self.helper.get_instance_cpu_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_cpu_usage', 600, 'mean', 300) def test_get_instance_memory_usage(self, mock_gnocchi): self.helper.get_instance_ram_usage('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_ram_usage', 600, 'mean', 300) def test_get_instance_ram_allocated(self, mock_gnocchi): self.helper.get_instance_ram_allocated('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_ram_allocated', 600, 'mean', 300) def test_get_instance_root_disk_allocated(self, mock_gnocchi): self.helper.get_instance_root_disk_size('compute1', 600, 'mean', 300) self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_root_disk_size', 600, 'mean', 300) def test_gnocchi_check_availability(self, mock_gnocchi): gnocchi = mock.MagicMock() gnocchi.status.get.return_value = True mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() result = helper.check_availability() self.assertEqual('available', result) def test_gnocchi_check_availability_with_failure(self, mock_gnocchi): cfg.CONF.set_override("query_max_retries", 1, group='watcher_datasources') gnocchi = mock.MagicMock() gnocchi.status.get.side_effect = Exception() mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() self.assertEqual('not available', helper.check_availability()) def test_gnocchi_list_metrics(self, mock_gnocchi): gnocchi = mock.MagicMock() metrics = [{"name": "metric1"}, {"name": "metric2"}] expected_metrics = set(["metric1", "metric2"]) gnocchi.metric.list.return_value = metrics mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() result = 
helper.list_metrics() self.assertEqual(expected_metrics, result) def test_gnocchi_list_metrics_with_failure(self, mock_gnocchi): cfg.CONF.set_override("query_max_retries", 1, group='watcher_datasources') gnocchi = mock.MagicMock() gnocchi.metric.list.side_effect = Exception() mock_gnocchi.return_value = gnocchi helper = gnocchi_helper.GnocchiHelper() self.assertFalse(helper.list_metrics()) python-watcher-4.0.0/watcher/tests/decision_engine/datasources/test_monasca_helper.py0000664000175000017500000001040113656752270031351 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_config import cfg from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import monasca as monasca_helper from watcher.tests import base CONF = cfg.CONF @mock.patch.object(clients.OpenStackClients, 'monasca') class TestMonascaHelper(base.BaseTestCase): def setUp(self): super(TestMonascaHelper, self).setUp() self.osc_mock = mock.Mock() self.helper = monasca_helper.MonascaHelper(osc=self.osc_mock) stat_agg_patcher = mock.patch.object( self.helper, 'statistic_aggregation', spec=monasca_helper.MonascaHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) def test_monasca_statistic_aggregation(self, mock_monasca): monasca = mock.MagicMock() expected_stat = [{ 'columns': ['timestamp', 'avg'], 'dimensions': { 'hostname': 'rdev-indeedsrv001', 'service': 'monasca'}, 'id': '0', 'name': 'cpu.percent', 'statistics': [ ['2016-07-29T12:45:00Z', 0.0], ['2016-07-29T12:50:00Z', 0.9], ['2016-07-29T12:55:00Z', 0.9]]}] monasca.metrics.list_statistics.return_value = expected_stat mock_monasca.return_value = monasca helper = monasca_helper.MonascaHelper() result = helper.statistic_aggregation( resource=mock.Mock(id='NODE_UUID'), resource_type='compute_node', meter_name='host_cpu_usage', period=7200, granularity=300, aggregate='mean', ) self.assertEqual(0.6, result) def test_statistic_aggregation_metric_unavailable(self, mock_monasca): helper = monasca_helper.MonascaHelper() # invalidate host_cpu_usage in metric map original_metric_value = helper.METRIC_MAP.get('host_cpu_usage') helper.METRIC_MAP.update( host_cpu_usage=None ) self.assertRaises( exception.MetricNotAvailable, helper.statistic_aggregation, resource=mock.Mock(id='NODE_UUID'), resource_type='compute_node', meter_name='host_cpu_usage', period=7200, granularity=300, aggregate='mean', ) # restore the metric map as it is a static attribute that does not get # restored between unit tests! 
helper.METRIC_MAP.update( instance_cpu_usage=original_metric_value ) def test_check_availability(self, mock_monasca): monasca = mock.MagicMock() monasca.metrics.list.return_value = True mock_monasca.return_value = monasca helper = monasca_helper.MonascaHelper() result = helper.check_availability() self.assertEqual('available', result) def test_check_availability_with_failure(self, mock_monasca): monasca = mock.MagicMock() monasca.metrics.list.side_effect = Exception() mock_monasca.return_value = monasca helper = monasca_helper.MonascaHelper() self.assertEqual('not available', helper.check_availability()) def test_get_host_cpu_usage(self, mock_monasca): self.mock_aggregation.return_value = 0.6 node = mock.Mock(id='compute1') cpu_usage = self.helper.get_host_cpu_usage(node, 600, 'mean') self.assertEqual(0.6, cpu_usage) def test_get_instance_cpu_usage(self, mock_monasca): self.mock_aggregation.return_value = 0.6 node = mock.Mock(id='vm1') cpu_usage = self.helper.get_instance_cpu_usage(node, 600, 'mean') self.assertEqual(0.6, cpu_usage) python-watcher-4.0.0/watcher/tests/decision_engine/datasources/grafana_translators/0000775000175000017500000000000013656752352031020 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/datasources/grafana_translators/__init__.py0000664000175000017500000000000013656752270033116 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/datasources/grafana_translators/test_base.py0000664000175000017500000000714013656752270033344 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.decision_engine.datasources.grafana_translator import \ base as base_translator from watcher.tests import base CONF = cfg.CONF LOG = log.getLogger(__name__) class TestGrafanaTranslatorBase(base.BaseTestCase): """Base class for all GrafanaTranslator test classes Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. """ def setUp(self): super(TestGrafanaTranslatorBase, self).setUp() """Basic valid reference data""" self.reference_data = { 'metric': 'host_cpu_usage', 'db': 'production', 'attribute': 'hostname', 'query': 'SHOW all_base FROM belong_to_us', 'resource': mock.Mock(hostname='hyperion'), 'resource_type': 'compute_node', 'period': '120', 'aggregate': 'mean', 'granularity': None } class TestBaseGrafanaTranslator(TestGrafanaTranslatorBase): """Test the GrafanaTranslator base class Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. 
""" def setUp(self): super(TestBaseGrafanaTranslator, self).setUp() def test_validate_data(self): """Initialize InfluxDBGrafanaTranslator and check data validation""" t_base_translator = base_translator.BaseGrafanaTranslator( data=self.reference_data) self.assertIsInstance(t_base_translator, base_translator.BaseGrafanaTranslator) def test_validate_data_error(self): """Initialize InfluxDBGrafanaTranslator and check data validation""" self.assertRaises(exception.InvalidParameter, base_translator.BaseGrafanaTranslator, data=[]) def test_extract_attribute(self): """Test that an attribute can be extracted from an object""" m_object = mock.Mock(hostname='test') t_base_translator = base_translator.BaseGrafanaTranslator( data=self.reference_data) self.assertEqual('test', t_base_translator._extract_attribute( m_object, 'hostname')) def test_extract_attribute_error(self): """Test error on attempt to extract none existing attribute""" m_object = mock.Mock(hostname='test') m_object.test = mock.PropertyMock(side_effect=AttributeError) t_base_translator = base_translator.BaseGrafanaTranslator( data=self.reference_data) self.assertRaises(AttributeError, t_base_translator._extract_attribute( m_object, 'test')) python-watcher-4.0.0/watcher/tests/decision_engine/datasources/grafana_translators/test_influxdb.py0000664000175000017500000001325213656752270034246 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import copy import mock from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.decision_engine.datasources.grafana_translator import influxdb from watcher.tests.decision_engine.datasources.grafana_translators import \ test_base CONF = cfg.CONF LOG = log.getLogger(__name__) class TestInfluxDBGrafanaTranslator(test_base.TestGrafanaTranslatorBase): """Test the InfluxDB gragana database translator Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. """ def setUp(self): super(TestInfluxDBGrafanaTranslator, self).setUp() self.p_conf = mock.patch.object( influxdb, 'CONF', new_callable=mock.PropertyMock) self.m_conf = self.p_conf.start() self.addCleanup(self.p_conf.stop) self.m_conf.grafana_translators.retention_periods = { 'one_day': 86400, 'one_week': 604800 } def test_retention_period_one_day(self): """Validate lowest retention period""" data = copy.copy(self.reference_data) data['query'] = "{4}" t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) params = t_influx.build_params() self.assertEqual(params['q'], 'one_day') def test_retention_period_one_week(self): """Validate incrementing retention periods""" data = copy.copy(self.reference_data) data['query'] = "{4}" data['period'] = 90000 t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) params = t_influx.build_params() self.assertEqual(params['q'], 'one_week') @mock.patch.object(influxdb, 'LOG') def test_retention_period_warning(self, m_log): """Validate retention period warning""" data = copy.copy(self.reference_data) data['query'] = "{4}" data['period'] = 650000 t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) params = t_influx.build_params() self.assertEqual(params['q'], 'one_week') 
m_log.warning.assert_called_once_with( "Longest retention period is to short for desired period") def test_build_params_granularity(self): """Validate build params granularity""" data = copy.copy(self.reference_data) data['granularity'] = None data['query'] = "{3}" t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) raw_results = { 'db': 'production', 'epoch': 'ms', 'q': '1' } # InfluxDB build_params should replace granularity None optional with 1 result = t_influx.build_params() self.assertEqual(raw_results, result) def test_build_params_order(self): """Validate order of build params""" data = copy.copy(self.reference_data) data['aggregate'] = 'count' # prevent having to deepcopy by keeping this value the same # this will access the value 'hyperion' from the mocked resource object data['attribute'] = 'hostname' data['period'] = 3 # because the period is only 3 the retention_period will be one_day data['granularity'] = 4 data['query'] = "{0}{1}{2}{3}{4}" t_influx = influxdb.InfluxDBGrafanaTranslator( data=data) raw_results = "counthyperion34one_day" result = t_influx.build_params() self.assertEqual(raw_results, result['q']) def test_extract_results(self): """Validate proper result extraction""" t_influx = influxdb.InfluxDBGrafanaTranslator( data=self.reference_data) raw_results = "{ \"results\": [{ \"series\": [{ " \ "\"columns\": [\"time\",\"mean\"]," \ "\"values\": [[1552500855000, " \ "67.3550078657577]]}]}]}" # Structure of InfluxDB time series data # { "results": [{ # "statement_id": 0, # "series": [{ # "name": "cpu_percent", # "columns": [ # "time", # "mean" # ], # "values": [[ # 1552500855000, # 67.3550078657577 # ]] # }] # }]} self.assertEqual(t_influx.extract_result(raw_results), 67.3550078657577) def test_extract_results_error(self): """Validate error on missing results""" t_influx = influxdb.InfluxDBGrafanaTranslator( data=self.reference_data) raw_results = "{}" self.assertRaises(exception.NoSuchMetricForHost, t_influx.extract_result, raw_results) 
python-watcher-4.0.0/watcher/tests/decision_engine/datasources/test_manager.py0000664000175000017500000001521713656752270030015 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from mock import MagicMock from watcher.common import exception from watcher.decision_engine.datasources import gnocchi from watcher.decision_engine.datasources import grafana from watcher.decision_engine.datasources import manager as ds_manager from watcher.decision_engine.datasources import monasca from watcher.tests import base class TestDataSourceManager(base.BaseTestCase): def _dsm_config(self, **kwargs): dss = ['gnocchi', 'ceilometer', 'monasca'] opts = dict(datasources=dss, metric_map_path=None) opts.update(kwargs) return MagicMock(**opts) def _dsm(self, **kwargs): opts = dict(config=self._dsm_config(), osc=mock.MagicMock()) opts.update(kwargs) return ds_manager.DataSourceManager(**opts) def test_metric_file_path_not_exists(self): manager = self._dsm() expected = ds_manager.DataSourceManager.metric_map actual = manager.metric_map self.assertEqual(expected, actual) self.assertEqual({}, manager.load_metric_map('/nope/nope/nope.yaml')) def test_metric_file_metric_override(self): path = 'watcher.decision_engine.datasources.manager.' 
\ 'DataSourceManager.load_metric_map' retval = { monasca.MonascaHelper.NAME: {"host_airflow": "host_fnspid"} } with mock.patch(path, return_value=retval): dsmcfg = self._dsm_config(datasources=['monasca']) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_airflow']) self.assertEqual("host_fnspid", backend.METRIC_MAP['host_airflow']) @mock.patch.object(grafana, 'CONF') def test_metric_file_metric_override_grafana(self, m_config): """Grafana requires a different structure in the metric map""" m_config.grafana_client.token = \ "eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==" m_config.grafana_client.base_url = "https://grafana.proxy/api/" path = 'watcher.decision_engine.datasources.manager.' \ 'DataSourceManager.load_metric_map' metric_map = { 'db': 'production_cloud', 'project': '7485', 'attribute': 'hostname', 'translator': 'influxdb', 'query': 'SHOW SERIES' } retval = { grafana.GrafanaHelper.NAME: {"host_airflow": metric_map} } with mock.patch(path, return_value=retval): dsmcfg = self._dsm_config(datasources=['grafana']) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_airflow']) self.assertEqual(metric_map, backend.METRIC_MAP['host_airflow']) def test_metric_file_invalid_ds(self): with mock.patch('yaml.safe_load') as mo: mo.return_value = {"newds": {"metric_one": "i_am_metric_one"}} mgr = self._dsm() self.assertNotIn('newds', mgr.metric_map.keys()) def test_get_backend(self): manager = self._dsm() backend = manager.get_backend(['host_cpu_usage', 'instance_cpu_usage']) self.assertEqual(backend, manager.gnocchi) def test_get_backend_order(self): dss = ['monasca', 'ceilometer', 'gnocchi'] dsmcfg = self._dsm_config(datasources=dss) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_cpu_usage', 'instance_cpu_usage']) self.assertEqual(backend, manager.monasca) def test_get_backend_wrong_metric(self): manager = self._dsm() self.assertRaises(exception.MetricNotAvailable, manager.get_backend, 
['host_cpu', 'instance_cpu_usage']) @mock.patch.object(gnocchi, 'GnocchiHelper') def test_get_backend_error_datasource(self, m_gnocchi): m_gnocchi.side_effect = exception.DataSourceNotAvailable manager = self._dsm() backend = manager.get_backend(['host_cpu_usage', 'instance_cpu_usage']) self.assertEqual(backend, manager.ceilometer) @mock.patch.object(grafana.GrafanaHelper, 'METRIC_MAP', {'host_cpu_usage': 'test'}) def test_get_backend_grafana(self): dss = ['grafana', 'ceilometer', 'gnocchi'] dsmcfg = self._dsm_config(datasources=dss) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_cpu_usage']) self.assertEqual(backend, manager.grafana) @mock.patch.object(grafana, 'CONF') def test_dynamic_metric_map_grafana(self, m_config): m_config.grafana_client.token = \ "eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==" m_config.grafana_client.base_url = "https://grafana.proxy/api/" m_config.grafana_client.project_id_map = {'host_cpu_usage': 7221} m_config.grafana_client.attribute_map = {'host_cpu_usage': 'hostname'} m_config.grafana_client.database_map = {'host_cpu_usage': 'mock_db'} m_config.grafana_client.translator_map = {'host_cpu_usage': 'influxdb'} m_config.grafana_client.query_map = { 'host_cpu_usage': 'SHOW SERIES' } dss = ['grafana', 'ceilometer', 'gnocchi'] dsmcfg = self._dsm_config(datasources=dss) manager = self._dsm(config=dsmcfg) backend = manager.get_backend(['host_cpu_usage']) self.assertEqual(backend, manager.grafana) def test_get_backend_no_datasources(self): dsmcfg = self._dsm_config(datasources=[]) manager = self._dsm(config=dsmcfg) self.assertRaises(exception.NoDatasourceAvailable, manager.get_backend, ['host_cpu_usage', 'instance_cpu_usage']) dsmcfg = self._dsm_config(datasources=None) manager = self._dsm(config=dsmcfg) self.assertRaises(exception.NoDatasourceAvailable, manager.get_backend, ['host_cpu_usage', 'instance_cpu_usage']) def test_get_backend_no_metrics(self): manager = self._dsm() 
self.assertRaises(exception.InvalidParameter, manager.get_backend, []) self.assertRaises(exception.InvalidParameter, manager.get_backend, None) python-watcher-4.0.0/watcher/tests/decision_engine/datasources/test_ceilometer_helper.py0000664000175000017500000001631313656752270032070 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import unicode_literals import mock from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import ceilometer as ceilometer_helper from watcher.tests import base @mock.patch.object(clients.OpenStackClients, 'ceilometer') class TestCeilometerHelper(base.BaseTestCase): def setUp(self): super(TestCeilometerHelper, self).setUp() self.osc_mock = mock.Mock() self.helper = ceilometer_helper.CeilometerHelper(osc=self.osc_mock) stat_agg_patcher = mock.patch.object( self.helper, 'statistic_aggregation', spec=ceilometer_helper.CeilometerHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) def test_build_query(self, mock_ceilometer): mock_ceilometer.return_value = mock.MagicMock() cm = ceilometer_helper.CeilometerHelper() expected = [{'field': 'user_id', 'op': 'eq', 'value': u'user_id'}, {'field': 'project_id', 'op': 'eq', 'value': u'tenant_id'}, {'field': 'resource_id', 'op': 'eq', 'value': u'resource_id'}] 
query = cm.build_query(user_id="user_id", tenant_id="tenant_id", resource_id="resource_id", user_ids=["user_ids"], tenant_ids=["tenant_ids"], resource_ids=["resource_ids"]) self.assertEqual(expected, query) def test_statistic_aggregation(self, mock_ceilometer): ceilometer = mock.MagicMock() statistic = mock.MagicMock() expected_result = 100 statistic[-1]._info = {'aggregate': {'avg': expected_result}} ceilometer.statistics.list.return_value = statistic mock_ceilometer.return_value = ceilometer cm = ceilometer_helper.CeilometerHelper() val = cm.statistic_aggregation( resource=mock.Mock(id="INSTANCE_ID"), resource_type='instance', meter_name="instance_cpu_usage", period="7300", granularity=None ) self.assertEqual(expected_result, val) def test_statistic_aggregation_metric_unavailable(self, mock_ceilometer): helper = ceilometer_helper.CeilometerHelper() # invalidate instance_cpu_usage in metric map original_metric_value = helper.METRIC_MAP.get('instance_cpu_usage') helper.METRIC_MAP.update( instance_cpu_usage=None ) self.assertRaises( exception.MetricNotAvailable, helper.statistic_aggregation, resource=mock.Mock(id="INSTANCE_ID"), resource_type='instance', meter_name="instance_cpu_usage", period="7300", granularity=None ) # restore the metric map as it is a static attribute that does not get # restored between unit tests! 
helper.METRIC_MAP.update( instance_cpu_usage=original_metric_value ) def test_get_host_cpu_usage(self, mock_ceilometer): self.helper.get_host_cpu_usage('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_cpu_usage', 600, 'mean', None) def test_get_host_ram_usage(self, mock_ceilometer): self.helper.get_host_ram_usage('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_ram_usage', 600, 'mean', None) def test_get_host_outlet_temp(self, mock_ceilometer): self.helper.get_host_outlet_temp('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_outlet_temp', 600, 'mean', None) def test_get_host_inlet_temp(self, mock_ceilometer): self.helper.get_host_inlet_temp('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_inlet_temp', 600, 'mean', None) def test_get_host_airflow(self, mock_ceilometer): self.helper.get_host_airflow('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_airflow', 600, 'mean', None) def test_get_host_power(self, mock_ceilometer): self.helper.get_host_power('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'compute_node', 'host_power', 600, 'mean', None) def test_get_instance_cpu_usage(self, mock_ceilometer): self.helper.get_instance_cpu_usage('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_cpu_usage', 600, 'mean', None) def test_get_instance_ram_usage(self, mock_ceilometer): self.helper.get_instance_ram_usage('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_ram_usage', 600, 'mean', None) def test_get_instance_ram_allocated(self, mock_ceilometer): self.helper.get_instance_ram_allocated('compute1', 600, 'mean') 
self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_ram_allocated', 600, 'mean', None) def test_get_instance_l3_cache_usage(self, mock_ceilometer): self.helper.get_instance_l3_cache_usage('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_l3_cache_usage', 600, 'mean', None) def test_get_instance_root_disk_size(self, mock_ceilometer): self.helper.get_instance_root_disk_size('compute1', 600, 'mean') self.mock_aggregation.assert_called_once_with( 'compute1', 'instance', 'instance_root_disk_size', 600, 'mean', None) def test_check_availability(self, mock_ceilometer): ceilometer = mock.MagicMock() ceilometer.resources.list.return_value = True mock_ceilometer.return_value = ceilometer helper = ceilometer_helper.CeilometerHelper() result = helper.check_availability() self.assertEqual('available', result) def test_check_availability_with_failure(self, mock_ceilometer): ceilometer = mock.MagicMock() ceilometer.resources.list.side_effect = Exception() mock_ceilometer.return_value = ceilometer helper = ceilometer_helper.CeilometerHelper() self.assertEqual('not available', helper.check_availability()) python-watcher-4.0.0/watcher/tests/decision_engine/datasources/test_base.py0000664000175000017500000000451013656752270027307 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from watcher.decision_engine.datasources import base as datasource from watcher.tests import base CONF = cfg.CONF class TestBaseDatasourceHelper(base.BaseTestCase): def test_query_retry(self): exc = Exception() method = mock.Mock() # first call will fail but second will succeed method.side_effect = [exc, True] # Max 2 attempts CONF.set_override("query_max_retries", 2, group='watcher_datasources') # Reduce sleep time to 0 CONF.set_override("query_timeout", 0, group='watcher_datasources') helper = datasource.DataSourceBase() helper.query_retry_reset = mock.Mock() self.assertTrue(helper.query_retry(f=method)) helper.query_retry_reset.assert_called_once_with(exc) def test_query_retry_exception(self): exc = Exception() method = mock.Mock() # only third call will succeed method.side_effect = [exc, exc, True] # Max 2 attempts CONF.set_override("query_max_retries", 2, group='watcher_datasources') # Reduce sleep time to 0 CONF.set_override("query_timeout", 0, group='watcher_datasources') helper = datasource.DataSourceBase() helper.query_retry_reset = mock.Mock() # Maximum number of retries exceeded query_retry should return None self.assertIsNone(helper.query_retry(f=method)) # query_retry_reset should be called twice helper.query_retry_reset.assert_has_calls( [mock.call(exc), mock.call(exc)]) python-watcher-4.0.0/watcher/tests/decision_engine/datasources/test_grafana_helper.py0000664000175000017500000002702713656752270031343 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from oslo_log import log from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import grafana from watcher.tests import base import requests CONF = cfg.CONF LOG = log.getLogger(__name__) @mock.patch.object(clients.OpenStackClients, 'nova', mock.Mock()) class TestGrafana(base.BaseTestCase): """Test the GrafanaHelper datasource Objects under test are preceded with t_ and mocked objects are preceded with m_ , additionally, patched objects are preceded with p_ no object under test should be created in setUp this can influence the results. """ def setUp(self): super(TestGrafana, self).setUp() self.p_conf = mock.patch.object( grafana, 'CONF', new_callable=mock.PropertyMock) self.m_conf = self.p_conf.start() self.addCleanup(self.p_conf.stop) self.m_conf.grafana_client.token = \ "eyJrIjoiT0tTcG1pUlY2RnVKZTFVaDFsNFZXdE9ZWmNrMkZYbk==" self.m_conf.grafana_client.base_url = "https://grafana.proxy/api/" self.m_conf.grafana_client.project_id_map = {'host_cpu_usage': 7221} self.m_conf.grafana_client.database_map = \ {'host_cpu_usage': 'mock_db'} self.m_conf.grafana_client.attribute_map = \ {'host_cpu_usage': 'hostname'} self.m_conf.grafana_client.translator_map = \ {'host_cpu_usage': 'influxdb'} self.m_conf.grafana_client.query_map = \ {'host_cpu_usage': 'SELECT 100-{0}("{0}_value") FROM {3}.' 
'cpu_percent WHERE ("host" =~ /^{1}$/ AND ' '"type_instance" =~/^idle$/ AND time > ' '(now()-{2}m)'} self.m_grafana = grafana.GrafanaHelper(osc=mock.Mock()) stat_agg_patcher = mock.patch.object( self.m_grafana, 'statistic_aggregation', spec=grafana.GrafanaHelper.statistic_aggregation) self.mock_aggregation = stat_agg_patcher.start() self.addCleanup(stat_agg_patcher.stop) self.m_compute_node = mock.Mock( id='16a86790-327a-45f9-bc82-45839f062fdc', hostname='example.hostname.ch' ) self.m_instance = mock.Mock( id='73b1ff78-aca7-404f-ac43-3ed16c1fa555', human_id='example.hostname' ) def test_configured(self): """Initialize GrafanaHelper and check if configured is true""" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertTrue(t_grafana.configured) def test_configured_error(self): """Butcher the required configuration and test if configured is false """ self.m_conf.grafana_client.base_url = "" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertFalse(t_grafana.configured) def test_configured_raise_error(self): """Test raising error when using improperly configured GrafanHelper Assure that the _get_metric method raises errors if the metric is missing from the map """ # Clear the METRIC_MAP of Grafana since it is a static variable that # other tests might have set before this test runs. grafana.GrafanaHelper.METRIC_MAP = {} self.m_conf.grafana_client.base_url = "" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertFalse(t_grafana.configured) self.assertEqual({}, t_grafana.METRIC_MAP) self.assertRaises( exception.MetricNotAvailable, t_grafana.get_host_cpu_usage, self.m_compute_node ) @mock.patch.object(requests, 'get') def test_request_raise_error(self, m_request): """Test raising error when status code of request indicates problem Assure that the _request method raises errors if the response indicates problems. 
""" m_request.return_value = mock.Mock(status_code=404) t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertIsNone(t_grafana.get_host_cpu_usage(self.m_compute_node)) def test_no_metric_raise_error(self): """Test raising error when specified meter does not exist""" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertRaises(exception.MetricNotAvailable, t_grafana.statistic_aggregation, self.m_compute_node, 'none existing meter', 60) @mock.patch.object(grafana.GrafanaHelper, '_request') def test_get_metric_raise_error(self, m_request): """Test raising error when endpoint unable to deliver data for metric """ m_request.return_value.content = "{}" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertRaises(exception.NoSuchMetricForHost, t_grafana.get_host_cpu_usage, self.m_compute_node, 60) def test_metric_builder(self): """Creates valid and invalid sets of configuration for metrics Ensures that a valid metric entry can be configured even if multiple invalid configurations exist for other metrics. 
""" self.m_conf.grafana_client.project_id_map = { 'host_cpu_usage': 7221, 'host_ram_usage': 7221, 'instance_ram_allocated': 7221, } self.m_conf.grafana_client.database_map = { 'host_cpu_usage': 'mock_db', 'instance_cpu_usage': 'mock_db', 'instance_ram_allocated': 'mock_db', } self.m_conf.grafana_client.attribute_map = { 'host_cpu_usage': 'hostname', 'host_power': 'hostname', 'instance_ram_allocated': 'human_id', } self.m_conf.grafana_client.translator_map = { 'host_cpu_usage': 'influxdb', 'host_inlet_temp': 'influxdb', # validate that invalid entries don't get added 'instance_ram_usage': 'dummy', 'instance_ram_allocated': 'influxdb', } self.m_conf.grafana_client.query_map = { 'host_cpu_usage': 'SHOW SERIES', 'instance_ram_usage': 'SHOW SERIES', 'instance_ram_allocated': 'SHOW SERIES', } expected_result = { 'host_cpu_usage': { 'db': 'mock_db', 'project': 7221, 'attribute': 'hostname', 'translator': 'influxdb', 'query': 'SHOW SERIES'}, 'instance_ram_allocated': { 'db': 'mock_db', 'project': 7221, 'attribute': 'human_id', 'translator': 'influxdb', 'query': 'SHOW SERIES'}, } t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) self.assertEqual(t_grafana.METRIC_MAP, expected_result) @mock.patch.object(grafana.GrafanaHelper, '_request') def test_statistic_aggregation(self, m_request): m_request.return_value.content = "{ \"results\": [{ \"series\": [{ " \ "\"columns\": [\"time\",\"mean\"]," \ "\"values\": [[1552500855000, " \ "67.3550078657577]]}]}]}" t_grafana = grafana.GrafanaHelper(osc=mock.Mock()) result = t_grafana.statistic_aggregation( self.m_compute_node, 'compute_node', 'host_cpu_usage', 60) self.assertEqual(result, 67.3550078657577) def test_get_host_cpu_usage(self): self.m_grafana.get_host_cpu_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_cpu_usage', 60, 'min', 15) def test_get_host_ram_usage(self): self.m_grafana.get_host_ram_usage(self.m_compute_node, 60, 'min', 15) 
self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_ram_usage', 60, 'min', 15) def test_get_host_outlet_temperature(self): self.m_grafana.get_host_outlet_temp(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_outlet_temp', 60, 'min', 15) def test_get_host_inlet_temperature(self): self.m_grafana.get_host_inlet_temp(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_inlet_temp', 60, 'min', 15) def test_get_host_airflow(self): self.m_grafana.get_host_airflow(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_airflow', 60, 'min', 15) def test_get_host_power(self): self.m_grafana.get_host_power(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'compute_node', 'host_power', 60, 'min', 15) def test_get_instance_cpu_usage(self): self.m_grafana.get_instance_cpu_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_cpu_usage', 60, 'min', 15) def test_get_instance_ram_usage(self): self.m_grafana.get_instance_ram_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_ram_usage', 60, 'min', 15) def test_get_instance_ram_allocated(self): self.m_grafana.get_instance_ram_allocated(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_ram_allocated', 60, 'min', 15) def test_get_instance_l3_cache_usage(self): self.m_grafana.get_instance_l3_cache_usage(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_l3_cache_usage', 60, 'min', 15) def 
test_get_instance_root_disk_allocated(self): self.m_grafana.get_instance_root_disk_size(self.m_compute_node, 60, 'min', 15) self.mock_aggregation.assert_called_once_with( self.m_compute_node, 'instance', 'instance_root_disk_size', 60, 'min', 15) python-watcher-4.0.0/watcher/tests/decision_engine/audit/0000775000175000017500000000000013656752352023556 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/audit/test_audit_handlers.py0000664000175000017500000005712013656752270030161 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import mock from oslo_config import cfg from oslo_utils import uuidutils from apscheduler import job from watcher.applier import rpcapi from watcher.common import exception from watcher.common import scheduling from watcher.db.sqlalchemy import api as sq_api from watcher.decision_engine.audit import continuous from watcher.decision_engine.audit import oneshot from watcher.decision_engine.model.collector import manager from watcher.decision_engine.strategy.strategies import base as base_strategy from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.decision_engine.model import faker_cluster_state as faker from watcher.tests.objects import utils as obj_utils class TestOneShotAuditHandler(base.DbTestCase): def setUp(self): super(TestOneShotAuditHandler, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) self.strategy = obj_utils.create_test_strategy( self.context, name=dummy_strategy.DummyStrategy.get_name(), goal_id=self.goal.id) audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=self.strategy.id) self.audit = obj_utils.create_test_audit( self.context, uuid=uuidutils.generate_uuid(), goal_id=self.goal.id, strategy_id=self.strategy.id, audit_template_id=audit_template.id, goal=self.goal) @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_trigger_audit_without_errors(self, m_collector): m_collector.return_value = faker.FakerModelCollector() audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, 
self.context) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) @mock.patch.object(base_strategy.BaseStrategy, "do_execute") @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") def test_trigger_audit_with_error(self, m_collector, m_do_execute): m_collector.return_value = faker.FakerModelCollector() m_do_execute.side_effect = Exception audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, priority=objects.fields.NotificationPriority.ERROR, phase=objects.fields.NotificationPhase.ERROR)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_trigger_audit_state_succeeded(self, m_collector): m_collector.return_value = faker.FakerModelCollector() audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) audit = objects.audit.Audit.get_by_uuid(self.context, self.audit.uuid) self.assertEqual(objects.audit.State.SUCCEEDED, 
audit.state) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) @mock.patch.object(manager.CollectorManager, "get_cluster_model_collector") @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_trigger_audit_send_notification(self, m_collector): m_collector.return_value = faker.FakerModelCollector() audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) expected_calls = [ mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audit, action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) class TestAutoTriggerActionPlan(base.DbTestCase): def setUp(self): super(TestAutoTriggerActionPlan, self).setUp() self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) self.strategy = obj_utils.create_test_strategy( self.context, 
name=dummy_strategy.DummyStrategy.get_name(), goal_id=self.goal.id) audit_template = obj_utils.create_test_audit_template( self.context) self.audit = obj_utils.create_test_audit( self.context, id=0, uuid=uuidutils.generate_uuid(), audit_template_id=audit_template.id, goal_id=self.goal.id, audit_type=objects.audit.AuditType.CONTINUOUS.value, goal=self.goal, auto_trigger=True) self.ongoing_action_plan = obj_utils.create_test_action_plan( self.context, uuid=uuidutils.generate_uuid(), audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy, ) self.recommended_action_plan = obj_utils.create_test_action_plan( self.context, uuid=uuidutils.generate_uuid(), state=objects.action_plan.State.ONGOING, audit_id=self.audit.id, strategy_id=self.strategy.id, audit=self.audit, strategy=self.strategy, ) @mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute') @mock.patch.object(objects.action_plan.ActionPlan, 'list') def test_trigger_audit_with_actionplan_ongoing(self, mock_list, mock_do_execute): mock_list.return_value = [self.ongoing_action_plan] audit_handler = oneshot.OneShotAuditHandler() audit_handler.execute(self.audit, self.context) self.assertFalse(mock_do_execute.called) @mock.patch.object(rpcapi.ApplierAPI, 'launch_action_plan') @mock.patch.object(objects.action_plan.ActionPlan, 'list') @mock.patch.object(objects.audit.Audit, 'get_by_id') def test_trigger_action_plan_without_ongoing(self, mock_get_by_id, mock_list, mock_applier): mock_get_by_id.return_value = self.audit mock_list.return_value = [] auto_trigger_handler = oneshot.OneShotAuditHandler() with mock.patch.object(auto_trigger_handler, 'do_schedule') as m_schedule: m_schedule().uuid = self.recommended_action_plan.uuid auto_trigger_handler.post_execute(self.audit, mock.MagicMock(), self.context) mock_applier.assert_called_once_with(self.context, self.recommended_action_plan.uuid) @mock.patch.object(oneshot.OneShotAuditHandler, 'do_execute') def 
test_trigger_audit_with_force(self, mock_do_execute): audit_handler = oneshot.OneShotAuditHandler() self.audit.force = True audit_handler.execute(self.audit, self.context) self.assertTrue(mock_do_execute.called) class TestContinuousAuditHandler(base.DbTestCase): def setUp(self): super(TestContinuousAuditHandler, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) audit_template = obj_utils.create_test_audit_template( self.context) self.audits = [ obj_utils.create_test_audit( self.context, id=id_, name='My Audit {0}'.format(id_), uuid=uuidutils.generate_uuid(), audit_template_id=audit_template.id, goal_id=self.goal.id, audit_type=objects.audit.AuditType.CONTINUOUS.value, goal=self.goal, hostname='hostname1') for id_ in range(2, 4)] cfg.CONF.set_override("host", "hostname1") @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_interval( self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].next_run_time = (datetime.datetime.now() - datetime.timedelta(seconds=1800)) mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() audit_handler.launch_audits_periodically() m_service.assert_called() m_engine.assert_called() m_add_job.assert_called() mock_jobs.assert_called() self.assertIsNotNone(self.audits[0].next_run_time) self.assertIsNone(self.audits[1].next_run_time) 
@mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_cron( self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].interval = "*/5 * * * *" mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() audit_handler.launch_audits_periodically() m_service.assert_called() m_engine.assert_called() m_add_job.assert_called() mock_jobs.assert_called() self.assertIsNotNone(self.audits[0].next_run_time) self.assertIsNone(self.audits[1].next_run_time) @mock.patch.object(continuous.ContinuousAuditHandler, '_next_cron_time') @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_invalid_cron( self, mock_list, mock_jobs, m_add_job, m_engine, m_service, mock_cron): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].interval = "*/5* * * *" mock_cron.side_effect = exception.CronFormatIsInvalid mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() self.assertRaises(exception.CronFormatIsInvalid, audit_handler.launch_audits_periodically) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') 
@mock.patch.object(objects.audit.Audit, 'list') def test_launch_multiply_audits_periodically(self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits mock_jobs.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() m_service.return_value = mock.MagicMock() calls = [mock.call(audit_handler.execute_audit, 'interval', args=[mock.ANY, mock.ANY], seconds=3600, name='execute_audit', next_run_time=mock.ANY) for _ in self.audits] audit_handler.launch_audits_periodically() m_add_job.assert_has_calls(calls) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_period_audit_not_called_when_deleted(self, mock_list, mock_jobs, m_add_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits mock_jobs.return_value = mock.MagicMock() m_service.return_value = mock.MagicMock() m_engine.return_value = mock.MagicMock() ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit', func=audit_handler.execute_audit, args=(self.audits[0], mock.MagicMock()), kwargs={}), job.Job(mock.MagicMock(), name='execute_audit', func=audit_handler.execute_audit, args=(self.audits[1], mock.MagicMock()), kwargs={}) ] mock_jobs.return_value = ap_jobs audit_handler.launch_audits_periodically() audit_handler.update_audit_state(self.audits[1], objects.audit.State.CANCELLED) audit_handler.update_audit_state(self.audits[0], objects.audit.State.SUSPENDED) is_inactive = audit_handler._is_audit_inactive(self.audits[1]) self.assertTrue(is_inactive) is_inactive = audit_handler._is_audit_inactive(self.audits[0]) self.assertTrue(is_inactive) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api, 
'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.AuditStateTransitionManager, 'is_inactive') @mock.patch.object(continuous.ContinuousAuditHandler, 'execute') def test_execute_audit_with_interval_no_job( self, m_execute, m_is_inactive, m_get_jobs, m_get_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() self.audits[0].next_run_time = (datetime.datetime.now() - datetime.timedelta(seconds=1800)) m_is_inactive.return_value = True m_get_jobs.return_value = [] audit_handler.execute_audit(self.audits[0], self.context) self.assertIsNotNone(self.audits[0].next_run_time) @mock.patch.object(objects.service.Service, 'list') @mock.patch.object(sq_api, 'get_engine') @mock.patch.object(scheduling.BackgroundSchedulerService, 'remove_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'add_job') @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') @mock.patch.object(objects.audit.Audit, 'list') def test_launch_audits_periodically_with_diff_interval( self, mock_list, mock_jobs, m_add_job, m_remove_job, m_engine, m_service): audit_handler = continuous.ContinuousAuditHandler() mock_list.return_value = self.audits self.audits[0].next_run_time = (datetime.datetime.now() - datetime.timedelta(seconds=1800)) m_job1 = mock.MagicMock() m_job1.name = 'execute_audit' m_audit = mock.MagicMock() m_audit.uuid = self.audits[0].uuid m_audit.interval = 60 m_job1.args = [m_audit] mock_jobs.return_value = [m_job1] m_engine.return_value = mock.MagicMock() m_add_job.return_value = mock.MagicMock() audit_handler.launch_audits_periodically() m_service.assert_called() m_engine.assert_called() m_add_job.assert_called() mock_jobs.assert_called() self.assertIsNotNone(self.audits[0].next_run_time) self.assertIsNone(self.audits[1].next_run_time) audit_handler.launch_audits_periodically() m_remove_job.assert_called() @mock.patch.object(continuous.ContinuousAuditHandler, 'get_planner', 
mock.Mock()) @mock.patch.object(base_strategy.BaseStrategy, "compute_model", mock.Mock(stale=False)) def test_execute_audit(self): audit_handler = continuous.ContinuousAuditHandler() audit_handler.execute_audit(self.audits[0], self.context) expected_calls = [ mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.STRATEGY, phase=objects.fields.NotificationPhase.END), mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.audits[0], action=objects.fields.NotificationAction.PLANNER, phase=objects.fields.NotificationPhase.END)] self.assertEqual( expected_calls, self.m_audit_notifications.send_action_notification.call_args_list) @mock.patch.object(scheduling.BackgroundSchedulerService, 'get_jobs') def test_is_audit_inactive(self, mock_jobs): audit_handler = continuous.ContinuousAuditHandler() mock_jobs.return_value = mock.MagicMock() audit_handler._audit_scheduler = mock.MagicMock() ap_jobs = [job.Job(mock.MagicMock(), name='execute_audit', func=audit_handler.execute_audit, args=(self.audits[0], mock.MagicMock()), kwargs={}), ] audit_handler.update_audit_state(self.audits[1], objects.audit.State.CANCELLED) mock_jobs.return_value = ap_jobs is_inactive = audit_handler._is_audit_inactive(self.audits[1]) self.assertTrue(is_inactive) is_inactive = audit_handler._is_audit_inactive(self.audits[0]) self.assertFalse(is_inactive) def test_check_audit_expired(self): current = datetime.datetime.utcnow() # start_time and end_time are None audit_handler = continuous.ContinuousAuditHandler() result = audit_handler.check_audit_expired(self.audits[0]) self.assertFalse(result) self.assertIsNone(self.audits[0].start_time) self.assertIsNone(self.audits[0].end_time) # current time < start_time and end_time is None 
self.audits[0].start_time = current+datetime.timedelta(days=1) result = audit_handler.check_audit_expired(self.audits[0]) self.assertTrue(result) self.assertIsNone(self.audits[0].end_time) # current time is between start_time and end_time self.audits[0].start_time = current-datetime.timedelta(days=1) self.audits[0].end_time = current+datetime.timedelta(days=1) result = audit_handler.check_audit_expired(self.audits[0]) self.assertFalse(result) # current time > end_time self.audits[0].end_time = current-datetime.timedelta(days=1) result = audit_handler.check_audit_expired(self.audits[0]) self.assertTrue(result) self.assertEqual(objects.audit.State.SUCCEEDED, self.audits[0].state) python-watcher-4.0.0/watcher/tests/decision_engine/audit/__init__.py0000664000175000017500000000000013656752270025654 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/test_scheduling.py0000664000175000017500000001236213656752270026211 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from apscheduler.schedulers import background from apscheduler.triggers import interval as interval_trigger import eventlet import mock from oslo_config import cfg from oslo_utils import uuidutils from watcher.decision_engine.loading import default as default_loading from watcher.decision_engine import scheduling from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher import notifications from watcher import objects from watcher.tests import base from watcher.tests.db import base as db_base from watcher.tests.decision_engine.model import faker_cluster_state from watcher.tests.objects import utils as obj_utils class TestCancelOngoingAudits(db_base.DbTestCase): def setUp(self): super(TestCancelOngoingAudits, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) self.strategy = obj_utils.create_test_strategy( self.context, name=dummy_strategy.DummyStrategy.get_name(), goal_id=self.goal.id) audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=self.strategy.id) self.audit = obj_utils.create_test_audit( self.context, id=999, name='My Audit 999', uuid=uuidutils.generate_uuid(), audit_template_id=audit_template.id, goal_id=self.goal.id, audit_type=objects.audit.AuditType.ONESHOT.value, goal=self.goal, hostname='hostname1', state=objects.audit.State.ONGOING) cfg.CONF.set_override("host", "hostname1") @mock.patch.object(objects.audit.Audit, 'save') @mock.patch.object(objects.audit.Audit, 'list') def test_cancel_ongoing_audits(self, m_list, m_save): m_list.return_value = [self.audit] scheduler = scheduling.DecisionEngineSchedulingService() scheduler.cancel_ongoing_audits() m_list.assert_called() m_save.assert_called() self.assertEqual(self.audit.state, 
objects.audit.State.CANCELLED) @mock.patch.object(objects.audit.Audit, 'save') @mock.patch.object(objects.audit.Audit, 'list') class TestDecisionEngineSchedulingService(base.TestCase): @mock.patch.object( default_loading.ClusterDataModelCollectorLoader, 'load') @mock.patch.object( default_loading.ClusterDataModelCollectorLoader, 'list_available') @mock.patch.object(background.BackgroundScheduler, 'start') def test_start_de_scheduling_service(self, m_start, m_list_available, m_load, m_list, m_save): m_list_available.return_value = { 'fake': faker_cluster_state.FakerModelCollector} fake_collector = faker_cluster_state.FakerModelCollector( config=mock.Mock(period=777)) m_load.return_value = fake_collector scheduler = scheduling.DecisionEngineSchedulingService() scheduler.start() m_start.assert_called_once_with(scheduler) jobs = scheduler.get_jobs() self.assertEqual(2, len(jobs)) job = jobs[0] self.assertTrue(bool(fake_collector.cluster_data_model)) self.assertIsInstance(job.trigger, interval_trigger.IntervalTrigger) @mock.patch.object( default_loading.ClusterDataModelCollectorLoader, 'load') @mock.patch.object( default_loading.ClusterDataModelCollectorLoader, 'list_available') @mock.patch.object(background.BackgroundScheduler, 'start') def test_execute_sync_job_fails(self, m_start, m_list_available, m_load, m_list, m_save): fake_config = mock.Mock(period=.01) fake_collector = faker_cluster_state.FakerModelCollector( config=fake_config) fake_collector.synchronize = mock.Mock( side_effect=lambda: eventlet.sleep(.5)) m_list_available.return_value = { 'fake': faker_cluster_state.FakerModelCollector} m_load.return_value = fake_collector scheduler = scheduling.DecisionEngineSchedulingService() scheduler.start() m_start.assert_called_once_with(scheduler) jobs = scheduler.get_jobs() self.assertEqual(2, len(jobs)) job = jobs[0] job.func() self.assertFalse(bool(fake_collector.cluster_data_model)) self.assertIsInstance(job.trigger, interval_trigger.IntervalTrigger) 
python-watcher-4.0.0/watcher/tests/decision_engine/test_gmr.py0000664000175000017500000000243113656752270024645 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.decision_engine import gmr from watcher.decision_engine.model.collector import manager from watcher.tests import base class TestGmrPlugin(base.TestCase): @mock.patch.object(manager.CollectorManager, "get_collectors") def test_show_models(self, m_get_collectors): m_to_string = mock.Mock(return_value="") m_get_collectors.return_value = { "test_model": mock.Mock( cluster_data_model=mock.Mock(to_string=m_to_string))} output = gmr.show_models() self.assertEqual(1, m_to_string.call_count) self.assertIn("", output) python-watcher-4.0.0/watcher/tests/decision_engine/loading/0000775000175000017500000000000013656752352024065 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/loading/__init__.py0000664000175000017500000000000013656752270026163 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/decision_engine/loading/test_goal_loader.py0000664000175000017500000000524213656752270027750 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from stevedore import extension from watcher.common import exception from watcher.decision_engine.goal import goals from watcher.decision_engine.loading import default as default_loading from watcher.tests import base class TestDefaultGoalLoader(base.TestCase): def setUp(self): super(TestDefaultGoalLoader, self).setUp() self.goal_loader = default_loading.DefaultGoalLoader() def test_load_goal_with_empty_model(self): self.assertRaises( exception.LoadingError, self.goal_loader.load, None) def test_goal_loader(self): dummy_goal_name = "dummy" # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=dummy_goal_name, entry_point="%s:%s" % ( goals.Dummy.__module__, goals.Dummy.__name__), plugin=goals.Dummy, obj=None, )], namespace="watcher_goals", ) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: m_ext_manager.return_value = fake_extmanager_call loaded_goal = self.goal_loader.load("dummy") self.assertEqual("dummy", loaded_goal.name) self.assertEqual("Dummy goal", loaded_goal.display_name) def test_load_dummy_goal(self): goal_loader = default_loading.DefaultGoalLoader() loaded_goal = goal_loader.load("dummy") self.assertIsInstance(loaded_goal, goals.Dummy) class TestLoadGoalsWithDefaultGoalLoader(base.TestCase): goal_loader = default_loading.DefaultGoalLoader() # test matrix (1 test execution per goal entry point) scenarios = [ (goal_name, {"goal_name": goal_name, "goal_cls": goal_cls}) for goal_name, goal_cls in 
goal_loader.list_available().items()] def test_load_goals(self): goal = self.goal_loader.load(self.goal_name) self.assertIsNotNone(goal) self.assertEqual(self.goal_name, goal.name) python-watcher-4.0.0/watcher/tests/decision_engine/loading/test_default_strategy_loader.py0000664000175000017500000000557013656752270032400 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from stevedore import extension from watcher.common import exception from watcher.decision_engine.loading import default as default_loading from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher.tests import base class TestDefaultStrategyLoader(base.TestCase): def setUp(self): super(TestDefaultStrategyLoader, self).setUp() self.strategy_loader = default_loading.DefaultStrategyLoader() def test_load_strategy_with_empty_model(self): self.assertRaises( exception.LoadingError, self.strategy_loader.load, None) def test_strategy_loader(self): dummy_strategy_name = "dummy" # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=dummy_strategy_name, entry_point="%s:%s" % ( dummy_strategy.DummyStrategy.__module__, dummy_strategy.DummyStrategy.__name__), plugin=dummy_strategy.DummyStrategy, obj=None, )], namespace="watcher_strategies", ) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: 
m_ext_manager.return_value = fake_extmanager_call loaded_strategy = self.strategy_loader.load( "dummy") self.assertEqual("dummy", loaded_strategy.name) self.assertEqual("Dummy strategy", loaded_strategy.display_name) def test_load_dummy_strategy(self): strategy_loader = default_loading.DefaultStrategyLoader() loaded_strategy = strategy_loader.load("dummy") self.assertIsInstance(loaded_strategy, dummy_strategy.DummyStrategy) class TestLoadStrategiesWithDefaultStrategyLoader(base.TestCase): strategy_loader = default_loading.DefaultStrategyLoader() scenarios = [ (strategy_name, {"strategy_name": strategy_name, "strategy_cls": strategy_cls}) for strategy_name, strategy_cls in strategy_loader.list_available().items()] def test_load_strategies(self): strategy = self.strategy_loader.load(self.strategy_name) self.assertIsNotNone(strategy) self.assertEqual(self.strategy_name, strategy.name) python-watcher-4.0.0/watcher/tests/decision_engine/loading/test_default_planner_loader.py0000664000175000017500000000220413656752270032164 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.loading import default from watcher.decision_engine.planner import base as planner from watcher.tests import base class TestDefaultPlannerLoader(base.TestCase): def setUp(self): super(TestDefaultPlannerLoader, self).setUp() self.loader = default.DefaultPlannerLoader() def test_endpoints(self): for endpoint in self.loader.list_available(): loaded = self.loader.load(endpoint) self.assertIsNotNone(loaded) self.assertIsInstance(loaded, planner.BasePlanner) python-watcher-4.0.0/watcher/tests/decision_engine/loading/test_collector_loader.py0000664000175000017500000000612313656752270031013 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from stevedore import driver as drivermanager from stevedore import extension as stevedore_extension from watcher.common import clients from watcher.common import exception from watcher.decision_engine.loading import default as default_loading from watcher.tests import base from watcher.tests import conf_fixture from watcher.tests.decision_engine.model import faker_cluster_state class TestClusterDataModelCollectorLoader(base.TestCase): def setUp(self): super(TestClusterDataModelCollectorLoader, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) self.collector_loader = ( default_loading.ClusterDataModelCollectorLoader()) def test_load_collector_with_empty_model(self): self.assertRaises( exception.LoadingError, self.collector_loader.load, None) def test_collector_loader(self): fake_driver = "fake" # Set up the fake Stevedore extensions fake_driver_call = drivermanager.DriverManager.make_test_instance( extension=stevedore_extension.Extension( name=fake_driver, entry_point="%s:%s" % ( faker_cluster_state.FakerModelCollector.__module__, faker_cluster_state.FakerModelCollector.__name__), plugin=faker_cluster_state.FakerModelCollector, obj=None, ), namespace="watcher_cluster_data_model_collectors", ) with mock.patch.object(drivermanager, "DriverManager") as m_driver_manager: m_driver_manager.return_value = fake_driver_call loaded_collector = self.collector_loader.load("fake") self.assertIsInstance( loaded_collector, faker_cluster_state.FakerModelCollector) class TestLoadClusterDataModelCollectors(base.TestCase): collector_loader = default_loading.ClusterDataModelCollectorLoader() scenarios = [ (collector_name, {"collector_name": collector_name, "collector_cls": collector_cls}) for collector_name, collector_cls in collector_loader.list_available().items()] def setUp(self): super(TestLoadClusterDataModelCollectors, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) @mock.patch.object(clients, 'OpenStackClients', mock.Mock()) def 
test_load_cluster_data_model_collectors(self): collector = self.collector_loader.load(self.collector_name) self.assertIsNotNone(collector) python-watcher-4.0.0/watcher/tests/applier/0000775000175000017500000000000013656752352020762 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/workflow_engine/0000775000175000017500000000000013656752352024161 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/workflow_engine/test_taskflow_action_container.py0000664000175000017500000001050313656752270033021 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import eventlet import mock from watcher.applier.workflow_engine import default as tflow from watcher.common import clients from watcher.common import nova_helper from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class TestTaskFlowActionContainer(base.DbTestCase): def setUp(self): super(TestTaskFlowActionContainer, self).setUp() self.engine = tflow.DefaultWorkFlowEngine( config=mock.Mock(), context=self.context, applier_manager=mock.MagicMock()) obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit( self.context, strategy_id=self.strategy.id) def test_execute(self): action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.ONGOING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.ONGOING, action_type='nop', input_parameters={'message': 'hello World'}) action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) action_container.execute() obj_action = objects.Action.get_by_uuid( self.engine.context, action.uuid) self.assertEqual(obj_action.state, objects.action.State.SUCCEEDED) @mock.patch.object(clients.OpenStackClients, 'nova', mock.Mock()) def test_execute_with_failed(self): nova_util = nova_helper.NovaHelper() instance = "31b9dd5c-b1fd-4f61-9b68-a47096326dac" nova_util.nova.servers.get.return_value = instance action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.ONGOING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.ONGOING, action_type='migrate', input_parameters={"resource_id": instance, "migration_type": "live", "destination_node": "host2", "source_node": "host1"}) 
action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) result = action_container.execute() self.assertFalse(result) obj_action = objects.Action.get_by_uuid( self.engine.context, action.uuid) self.assertEqual(obj_action.state, objects.action.State.FAILED) @mock.patch('eventlet.spawn') def test_execute_with_cancel_action_plan(self, mock_eventlet_spawn): action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id, state=objects.action_plan.State.CANCELLING) action = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, state=objects.action.State.ONGOING, action_type='nop', input_parameters={'message': 'hello World'}) action_container = tflow.TaskFlowActionContainer( db_action=action, engine=self.engine) def empty_test(): pass et = eventlet.spawn(empty_test) mock_eventlet_spawn.return_value = et action_container.execute() et.kill.assert_called_with() python-watcher-4.0.0/watcher/tests/applier/workflow_engine/__init__.py0000664000175000017500000000000013656752270026257 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/workflow_engine/test_default_workflow_engine.py0000664000175000017500000004027613656752270032505 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc import mock import six from watcher.applier.actions import base as abase from watcher.applier.actions import factory from watcher.applier.workflow_engine import default as tflow from watcher.common import exception from watcher.common import utils from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class ExpectedException(Exception): pass @six.add_metaclass(abc.ABCMeta) class FakeAction(abase.BaseAction): def schema(self): pass def post_condition(self): pass def pre_condition(self): pass def revert(self): pass def execute(self): return False def get_description(self): return "fake action, just for test" class TestDefaultWorkFlowEngine(base.DbTestCase): def setUp(self): super(TestDefaultWorkFlowEngine, self).setUp() self.engine = tflow.DefaultWorkFlowEngine( config=mock.Mock(), context=self.context, applier_manager=mock.MagicMock()) self.engine.config.max_workers = 2 @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch('taskflow.engines.load') @mock.patch('taskflow.patterns.graph_flow.Flow.link') def test_execute(self, graph_flow, engines, m_actionplan, m_strategy): actions = mock.MagicMock() try: self.engine.execute(actions) self.assertTrue(engines.called) except Exception as exc: self.fail(exc) def create_action(self, action_type, parameters, parents=None, uuid=None, state=None): action = { 'uuid': uuid or utils.generate_uuid(), 'action_plan_id': 0, 'action_type': action_type, 'input_parameters': parameters, 'state': objects.action.State.PENDING, 'parents': parents or [], } new_action = objects.Action(self.context, **action) with mock.patch.object(notifications.action, 'send_create'): new_action.create() return new_action def check_action_state(self, action, expected_state): to_check = objects.Action.get_by_uuid(self.context, action.uuid) self.assertEqual(expected_state, to_check.state) def 
check_actions_state(self, actions, expected_state): for a in actions: self.check_action_state(a, expected_state) @mock.patch('taskflow.engines.load') @mock.patch('taskflow.patterns.graph_flow.Flow.link') def test_execute_with_no_actions(self, graph_flow, engines): actions = [] try: self.engine.execute(actions) self.assertFalse(graph_flow.called) self.assertTrue(engines.called) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_one_action(self, mock_send_update, mock_execution_notification, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [self.create_action("nop", {'message': 'test'})] try: self.engine.execute(actions) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_nop_sleep(self, mock_send_update, mock_execution_notification, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] first_nop = self.create_action("nop", {'message': 'test'}) second_nop = self.create_action("nop", {'message': 'second test'}) sleep = self.create_action("sleep", {'duration': 0.0}, parents=[first_nop.uuid, second_nop.uuid]) actions.extend([first_nop, second_nop, sleep]) try: self.engine.execute(actions) self.check_actions_state(actions, 
objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_parents(self, mock_send_update, mock_execution_notification, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] first_nop = self.create_action( "nop", {'message': 'test'}, uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19') second_nop = self.create_action( "nop", {'message': 'second test'}, uuid='0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23') first_sleep = self.create_action( "sleep", {'duration': 0.0}, parents=[first_nop.uuid, second_nop.uuid], uuid='be436531-0da3-4dad-a9c0-ea1d2aff6496') second_sleep = self.create_action( "sleep", {'duration': 0.0}, parents=[first_sleep.uuid], uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c') actions.extend([first_nop, second_nop, first_sleep, second_sleep]) expected_nodes = [ {'uuid': 'bc7eee5c-4fbe-4def-9744-b539be55aa19', 'input_parameters': {u'message': u'test'}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], 'action_type': u'nop', 'id': 1}, {'uuid': '0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', 'input_parameters': {u'message': u'second test'}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': [], 'action_type': u'nop', 'id': 2}, {'uuid': 'be436531-0da3-4dad-a9c0-ea1d2aff6496', 'input_parameters': {u'duration': 0.0}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': [u'bc7eee5c-4fbe-4def-9744-b539be55aa19', u'0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23'], 'action_type': u'sleep', 'id': 3}, {'uuid': '9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', 'input_parameters': {u'duration': 0.0}, 'action_plan_id': 0, 'state': u'PENDING', 'parents': 
[u'be436531-0da3-4dad-a9c0-ea1d2aff6496'], 'action_type': u'sleep', 'id': 4}] expected_edges = [ ('action_type:nop uuid:0565bd5c-aa00-46e5-8d81-2cb5cc1ffa23', 'action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), ('action_type:nop uuid:bc7eee5c-4fbe-4def-9744-b539be55aa19', 'action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496'), ('action_type:sleep uuid:be436531-0da3-4dad-a9c0-ea1d2aff6496', 'action_type:sleep uuid:9eb51e14-936d-4d12-a500-6ba0f5e0bb1c')] try: flow = self.engine.execute(actions) actual_nodes = sorted([x[0]._db_action.as_dict() for x in flow.iter_nodes()], key=lambda x: x['id']) for expected, actual in zip(expected_nodes, actual_nodes): for key in expected.keys(): self.assertIn(expected[key], actual.values()) actual_edges = [(u.name, v.name) for (u, v, _) in flow.iter_links()] for edge in expected_edges: self.assertIn(edge, actual_edges) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_two_actions(self, m_send_update, m_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] second = self.create_action("sleep", {'duration': 0.0}) first = self.create_action("nop", {'message': 'test'}) actions.append(first) actions.append(second) try: self.engine.execute(actions) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') 
@mock.patch.object(notifications.action, 'send_update') def test_execute_with_three_actions(self, m_send_update, m_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] third = self.create_action("nop", {'message': 'next'}) second = self.create_action("sleep", {'duration': 0.0}) first = self.create_action("nop", {'message': 'hello'}) self.check_action_state(first, objects.action.State.PENDING) self.check_action_state(second, objects.action.State.PENDING) self.check_action_state(third, objects.action.State.PENDING) actions.append(first) actions.append(second) actions.append(third) try: self.engine.execute(actions) self.check_actions_state(actions, objects.action.State.SUCCEEDED) except Exception as exc: self.fail(exc) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') def test_execute_with_exception(self, m_send_update, m_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [] third = self.create_action("no_exist", {'message': 'next'}) second = self.create_action("sleep", {'duration': 0.0}) first = self.create_action("nop", {'message': 'hello'}) self.check_action_state(first, objects.action.State.PENDING) self.check_action_state(second, objects.action.State.PENDING) self.check_action_state(third, objects.action.State.PENDING) actions.append(first) actions.append(second) actions.append(third) self.engine.execute(actions) self.check_action_state(first, objects.action.State.SUCCEEDED) self.check_action_state(second, objects.action.State.SUCCEEDED) 
self.check_action_state(third, objects.action.State.FAILED) @mock.patch.object(objects.Strategy, "get_by_id") @mock.patch.object(objects.ActionPlan, "get_by_id") @mock.patch.object(notifications.action, 'send_execution_notification') @mock.patch.object(notifications.action, 'send_update') @mock.patch.object(factory.ActionFactory, "make_action") def test_execute_with_action_failed(self, m_make_action, m_send_update, m_send_execution, m_get_actionplan, m_get_strategy): m_get_actionplan.return_value = obj_utils.get_test_action_plan( self.context, id=0) m_get_strategy.return_value = obj_utils.get_test_strategy( self.context, id=1) actions = [self.create_action("fake_action", {})] m_make_action.return_value = FakeAction(mock.Mock()) self.engine.execute(actions) self.check_action_state(actions[0], objects.action.State.FAILED) @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_execute_with_action_plan_cancel(self, m_get_actionplan): obj_utils.create_test_goal(self.context) strategy = obj_utils.create_test_strategy(self.context) audit = obj_utils.create_test_audit( self.context, strategy_id=strategy.id) action_plan = obj_utils.create_test_action_plan( self.context, audit_id=audit.id, strategy_id=strategy.id, state=objects.action_plan.State.CANCELLING) action1 = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, action_type='nop', state=objects.action.State.SUCCEEDED, input_parameters={'message': 'hello World'}) action2 = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, action_type='nop', state=objects.action.State.ONGOING, uuid='9eb51e14-936d-4d12-a500-6ba0f5e0bb1c', input_parameters={'message': 'hello World'}) action3 = obj_utils.create_test_action( self.context, action_plan_id=action_plan.id, action_type='nop', state=objects.action.State.PENDING, uuid='bc7eee5c-4fbe-4def-9744-b539be55aa19', input_parameters={'message': 'hello World'}) m_get_actionplan.return_value = action_plan actions = [] 
actions.append(action1) actions.append(action2) actions.append(action3) self.assertRaises(exception.ActionPlanCancelled, self.engine.execute, actions) try: self.check_action_state(action1, objects.action.State.SUCCEEDED) self.check_action_state(action2, objects.action.State.CANCELLED) self.check_action_state(action3, objects.action.State.CANCELLED) except Exception as exc: self.fail(exc) def test_decider(self): # execution_rule is ALWAYS self.engine.execution_rule = 'ALWAYS' history = {'action1': True} self.assertTrue(self.engine.decider(history)) history = {'action1': False} self.assertTrue(self.engine.decider(history)) # execution_rule is ANY self.engine.execution_rule = 'ANY' history = {'action1': True} self.assertFalse(self.engine.decider(history)) history = {'action1': False} self.assertTrue(self.engine.decider(history)) python-watcher-4.0.0/watcher/tests/applier/workflow_engine/loading/0000775000175000017500000000000013656752352025576 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/workflow_engine/loading/__init__.py0000664000175000017500000000000013656752270027674 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/workflow_engine/loading/test_default_engine_loader.py0000664000175000017500000000225713656752270033513 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals from watcher.applier.loading import default from watcher.applier.workflow_engine import base as wbase from watcher.tests import base class TestDefaultActionLoader(base.TestCase): def setUp(self): super(TestDefaultActionLoader, self).setUp() self.loader = default.DefaultWorkFlowEngineLoader() def test_endpoints(self): for endpoint in self.loader.list_available(): loaded = self.loader.load(endpoint) self.assertIsNotNone(loaded) self.assertIsInstance(loaded, wbase.BaseWorkFlowEngine) python-watcher-4.0.0/watcher/tests/applier/__init__.py0000664000175000017500000000000013656752270023060 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/actions/0000775000175000017500000000000013656752352022422 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/actions/__init__.py0000664000175000017500000000000013656752270024520 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/actions/test_resize.py0000664000175000017500000000665013656752270025342 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals import jsonschema import mock from watcher.applier.actions import base as baction from watcher.applier.actions import resize from watcher.common import clients from watcher.common import nova_helper from watcher.tests import base class TestResize(base.TestCase): INSTANCE_UUID = "94ae2f92-b7fd-4da7-9e97-f13504ae98c4" def setUp(self): super(TestResize, self).setUp() self.r_osc_cls = mock.Mock() self.r_helper_cls = mock.Mock() self.r_helper = mock.Mock(spec=nova_helper.NovaHelper) self.r_helper_cls.return_value = self.r_helper self.r_osc = mock.Mock(spec=clients.OpenStackClients) self.r_osc_cls.return_value = self.r_osc r_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.r_osc_cls) r_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.r_helper_cls) r_openstack_clients.start() r_nova_helper.start() self.addCleanup(r_openstack_clients.stop) self.addCleanup(r_nova_helper.stop) self.input_parameters = { "flavor": "x1", baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, } self.action = resize.Resize(mock.Mock()) self.action.input_parameters = self.input_parameters def test_parameters(self): params = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.FLAVOR: 'x1'} self.action.input_parameters = params self.assertTrue(self.action.validate_parameters()) def test_parameters_exception_empty_fields(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.FLAVOR: None} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_flavor(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.FLAVOR: None} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_resource_id(self): parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", self.action.FLAVOR: 
'x1'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_execute_resize(self): self.r_helper.find_instance.return_value = self.INSTANCE_UUID self.action.execute() self.r_helper.resize_instance.assert_called_once_with( instance_id=self.INSTANCE_UUID, flavor='x1') python-watcher-4.0.0/watcher/tests/applier/actions/test_change_nova_service_state.py0000664000175000017500000001207413656752270031226 0ustar zuulzuul00000000000000# Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals import jsonschema import mock from watcher.applier.actions import base as baction from watcher.applier.actions import change_nova_service_state from watcher.common import clients from watcher.common import nova_helper from watcher.decision_engine.model import element from watcher.tests import base class TestChangeNovaServiceState(base.TestCase): def setUp(self): super(TestChangeNovaServiceState, self).setUp() self.m_osc_cls = mock.Mock() self.m_helper_cls = mock.Mock() self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) self.m_helper_cls.return_value = self.m_helper self.m_osc = mock.Mock(spec=clients.OpenStackClients) self.m_osc_cls.return_value = self.m_osc m_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.m_osc_cls) m_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.m_helper_cls) m_openstack_clients.start() m_nova_helper.start() self.addCleanup(m_openstack_clients.stop) self.addCleanup(m_nova_helper.stop) self.input_parameters = { "resource_name": "compute-1", "state": element.ServiceState.ENABLED.value, } self.action = change_nova_service_state.ChangeNovaServiceState( mock.Mock()) self.action.input_parameters = self.input_parameters def test_parameters_down(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: "compute-1", self.action.STATE: element.ServiceState.DISABLED.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_up(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: "compute-1", self.action.STATE: element.ServiceState.ENABLED.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_exception_wrong_state(self): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: "compute-1", self.action.STATE: 'error'} self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_resource_id_empty(self): self.action.input_parameters = { 
self.action.STATE: element.ServiceState.ENABLED.value, } self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_applies_add_extra(self): self.action.input_parameters = {"extra": "failed"} self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_change_service_state_pre_condition(self): try: self.action.pre_condition() except Exception as exc: self.fail(exc) def test_change_service_state_post_condition(self): try: self.action.post_condition() except Exception as exc: self.fail(exc) def test_execute_change_service_state_with_enable_target(self): self.action.execute() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.enable_service_nova_compute.assert_called_once_with( "compute-1") def test_execute_change_service_state_with_disable_target(self): self.action.input_parameters["state"] = ( element.ServiceState.DISABLED.value) self.action.input_parameters["disabled_reason"] = ( "watcher_disabled") self.action.execute() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.disable_service_nova_compute.assert_called_once_with( "compute-1", "watcher_disabled") def test_revert_change_service_state_with_enable_target(self): self.action.input_parameters["disabled_reason"] = ( "watcher_disabled") self.action.revert() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.disable_service_nova_compute.assert_called_once_with( "compute-1", "watcher_disabled") def test_revert_change_service_state_with_disable_target(self): self.action.input_parameters["state"] = ( element.ServiceState.DISABLED.value) self.action.revert() self.m_helper_cls.assert_called_once_with(osc=self.m_osc) self.m_helper.enable_service_nova_compute.assert_called_once_with( "compute-1") python-watcher-4.0.0/watcher/tests/applier/actions/test_change_node_power_state.py0000664000175000017500000001422513656752270030704 0ustar zuulzuul00000000000000# Copyright (c) 2017 ZTE # # Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import unicode_literals import jsonschema import mock from watcher.applier.actions import base as baction from watcher.applier.actions import change_node_power_state from watcher.common import clients from watcher.tests import base COMPUTE_NODE = "compute-1" @mock.patch.object(clients.OpenStackClients, 'nova') @mock.patch.object(clients.OpenStackClients, 'ironic') class TestChangeNodePowerState(base.TestCase): def setUp(self): super(TestChangeNodePowerState, self).setUp() self.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, "state": change_node_power_state.NodeState.POWERON.value, } self.action = change_node_power_state.ChangeNodePowerState( mock.Mock()) self.action.input_parameters = self.input_parameters def test_parameters_down(self, mock_ironic, mock_nova): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, self.action.STATE: change_node_power_state.NodeState.POWEROFF.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_up(self, mock_ironic, mock_nova): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, self.action.STATE: change_node_power_state.NodeState.POWERON.value} self.assertTrue(self.action.validate_parameters()) def test_parameters_exception_wrong_state(self, mock_ironic, mock_nova): self.action.input_parameters = { baction.BaseAction.RESOURCE_ID: COMPUTE_NODE, self.action.STATE: 'error'} 
self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_resource_id_empty(self, mock_ironic, mock_nova): self.action.input_parameters = { self.action.STATE: change_node_power_state.NodeState.POWERON.value, } self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_applies_add_extra(self, mock_ironic, mock_nova): self.action.input_parameters = {"extra": "failed"} self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_change_service_state_pre_condition(self, mock_ironic, mock_nova): try: self.action.pre_condition() except Exception as exc: self.fail(exc) def test_change_node_state_post_condition(self, mock_ironic, mock_nova): try: self.action.post_condition() except Exception as exc: self.fail(exc) def test_execute_node_service_state_with_poweron_target( self, mock_ironic, mock_nova): mock_irclient = mock_ironic.return_value self.action.input_parameters["state"] = ( change_node_power_state.NodeState.POWERON.value) mock_irclient.node.get.side_effect = [ mock.MagicMock(power_state='power off'), mock.MagicMock(power_state='power on')] result = self.action.execute() self.assertTrue(result) mock_irclient.node.set_power_state.assert_called_once_with( COMPUTE_NODE, change_node_power_state.NodeState.POWERON.value) def test_execute_change_node_state_with_poweroff_target( self, mock_ironic, mock_nova): mock_irclient = mock_ironic.return_value mock_nvclient = mock_nova.return_value mock_get = mock.MagicMock() mock_get.to_dict.return_value = {'running_vms': 0} mock_nvclient.hypervisors.get.return_value = mock_get self.action.input_parameters["state"] = ( change_node_power_state.NodeState.POWEROFF.value) mock_irclient.node.get.side_effect = [ mock.MagicMock(power_state='power on'), mock.MagicMock(power_state='power on'), mock.MagicMock(power_state='power off')] result = self.action.execute() self.assertTrue(result) 
mock_irclient.node.set_power_state.assert_called_once_with( COMPUTE_NODE, change_node_power_state.NodeState.POWEROFF.value) def test_revert_change_node_state_with_poweron_target( self, mock_ironic, mock_nova): mock_irclient = mock_ironic.return_value mock_nvclient = mock_nova.return_value mock_get = mock.MagicMock() mock_get.to_dict.return_value = {'running_vms': 0} mock_nvclient.hypervisors.get.return_value = mock_get self.action.input_parameters["state"] = ( change_node_power_state.NodeState.POWERON.value) mock_irclient.node.get.side_effect = [ mock.MagicMock(power_state='power on'), mock.MagicMock(power_state='power on'), mock.MagicMock(power_state='power off')] self.action.revert() mock_irclient.node.set_power_state.assert_called_once_with( COMPUTE_NODE, change_node_power_state.NodeState.POWEROFF.value) def test_revert_change_node_state_with_poweroff_target( self, mock_ironic, mock_nova): mock_irclient = mock_ironic.return_value self.action.input_parameters["state"] = ( change_node_power_state.NodeState.POWEROFF.value) mock_irclient.node.get.side_effect = [ mock.MagicMock(power_state='power off'), mock.MagicMock(power_state='power on')] self.action.revert() mock_irclient.node.set_power_state.assert_called_once_with( COMPUTE_NODE, change_node_power_state.NodeState.POWERON.value) python-watcher-4.0.0/watcher/tests/applier/actions/test_sleep.py0000664000175000017500000000312713656752270025145 0ustar zuulzuul00000000000000# Copyright (c) 2016 b<>com # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # import jsonschema import mock from watcher.applier.actions import sleep from watcher.tests import base class TestSleep(base.TestCase): def setUp(self): super(TestSleep, self).setUp() self.s = sleep.Sleep(mock.Mock()) def test_parameters_duration(self): self.s.input_parameters = {self.s.DURATION: 1.0} self.assertTrue(self.s.validate_parameters()) def test_parameters_duration_empty(self): self.s.input_parameters = {self.s.DURATION: None} self.assertRaises(jsonschema.ValidationError, self.s.validate_parameters) def test_parameters_wrong_parameter(self): self.s.input_parameters = {self.s.DURATION: "ef"} self.assertRaises(jsonschema.ValidationError, self.s.validate_parameters) def test_parameters_add_field(self): self.s.input_parameters = {self.s.DURATION: 1.0, "not_required": "nop"} self.assertRaises(jsonschema.ValidationError, self.s.validate_parameters) python-watcher-4.0.0/watcher/tests/applier/actions/test_volume_migration.py0000664000175000017500000002144713656752270027422 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals import jsonschema import mock from watcher.applier.actions import base as baction from watcher.applier.actions import volume_migration from watcher.common import cinder_helper from watcher.common import clients from watcher.common import keystone_helper from watcher.common import nova_helper from watcher.common import utils as w_utils from watcher.tests import base class TestMigration(base.TestCase): VOLUME_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba" INSTANCE_UUID = "45a37aec-85ab-4dda-a303-7d9f62c2f5bb" def setUp(self): super(TestMigration, self).setUp() self.m_osc_cls = mock.Mock() self.m_osc = mock.Mock(spec=clients.OpenStackClients) self.m_osc_cls.return_value = self.m_osc self.m_n_helper_cls = mock.Mock() self.m_n_helper = mock.Mock(spec=nova_helper.NovaHelper) self.m_n_helper_cls.return_value = self.m_n_helper self.m_c_helper_cls = mock.Mock() self.m_c_helper = mock.Mock(spec=cinder_helper.CinderHelper) self.m_c_helper_cls.return_value = self.m_c_helper self.m_k_helper_cls = mock.Mock() self.m_k_helper = mock.Mock(spec=keystone_helper.KeystoneHelper) self.m_k_helper_cls.return_value = self.m_k_helper m_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.m_osc_cls) m_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.m_n_helper_cls) m_cinder_helper = mock.patch.object( cinder_helper, "CinderHelper", self.m_c_helper_cls) m_keystone_helper = mock.patch.object( keystone_helper, "KeystoneHelper", self.m_k_helper_cls) m_openstack_clients.start() m_nova_helper.start() m_cinder_helper.start() m_keystone_helper.start() self.addCleanup(m_keystone_helper.stop) self.addCleanup(m_cinder_helper.stop) self.addCleanup(m_nova_helper.stop) self.addCleanup(m_openstack_clients.stop) self.action = volume_migration.VolumeMigrate(mock.Mock()) self.input_parameters_swap = { "migration_type": "swap", "destination_node": "storage1-poolname", "destination_type": "storage1-typename", 
baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, } self.action_swap = volume_migration.VolumeMigrate(mock.Mock()) self.action_swap.input_parameters = self.input_parameters_swap self.input_parameters_migrate = { "migration_type": "migrate", "destination_node": "storage1-poolname", "destination_type": "", baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, } self.action_migrate = volume_migration.VolumeMigrate(mock.Mock()) self.action_migrate.input_parameters = self.input_parameters_migrate self.input_parameters_retype = { "migration_type": "retype", "destination_node": "", "destination_type": "storage1-typename", baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, } self.action_retype = volume_migration.VolumeMigrate(mock.Mock()) self.action_retype.input_parameters = self.input_parameters_retype @staticmethod def fake_volume(**kwargs): volume = mock.MagicMock() volume.id = kwargs.get('id', TestMigration.VOLUME_UUID) volume.size = kwargs.get('size', '1') volume.status = kwargs.get('status', 'available') volume.snapshot_id = kwargs.get('snapshot_id', None) volume.availability_zone = kwargs.get('availability_zone', 'nova') return volume @staticmethod def fake_instance(**kwargs): instance = mock.MagicMock() instance.id = kwargs.get('id', TestMigration.INSTANCE_UUID) instance.status = kwargs.get('status', 'ACTIVE') return instance def test_parameters_swap(self): params = {baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, self.action.MIGRATION_TYPE: 'swap', self.action.DESTINATION_NODE: None, self.action.DESTINATION_TYPE: 'type-1'} self.action_swap.input_parameters = params self.assertTrue(self.action_swap.validate_parameters) def test_parameters_migrate(self): params = {baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, self.action.MIGRATION_TYPE: 'migrate', self.action.DESTINATION_NODE: 'node-1', self.action.DESTINATION_TYPE: None} self.action_migrate.input_parameters = params self.assertTrue(self.action_migrate.validate_parameters) def test_parameters_retype(self): params 
= {baction.BaseAction.RESOURCE_ID: self.VOLUME_UUID, self.action.MIGRATION_TYPE: 'retype', self.action.DESTINATION_NODE: None, self.action.DESTINATION_TYPE: 'type-1'} self.action_retype.input_parameters = params self.assertTrue(self.action_retype.validate_parameters) def test_parameters_exception_resource_id(self): params = {baction.BaseAction.RESOURCE_ID: "EFEF", self.action.MIGRATION_TYPE: 'swap', self.action.DESTINATION_NODE: None, self.action.DESTINATION_TYPE: 'type-1'} self.action_swap.input_parameters = params self.assertRaises(jsonschema.ValidationError, self.action_swap.validate_parameters) def test_migrate_success(self): volume = self.fake_volume() self.m_c_helper.get_volume.return_value = volume result = self.action_migrate.execute() self.assertTrue(result) self.m_c_helper.migrate.assert_called_once_with( volume, "storage1-poolname" ) def test_retype_success(self): volume = self.fake_volume() self.m_c_helper.get_volume.return_value = volume result = self.action_retype.execute() self.assertTrue(result) self.m_c_helper.retype.assert_called_once_with( volume, "storage1-typename", ) def test_swap_success(self): volume = self.fake_volume( status='in-use', attachments=[{'server_id': 'server_id'}]) self.m_n_helper.find_instance.return_value = self.fake_instance() new_volume = self.fake_volume(id=w_utils.generate_uuid()) user = mock.Mock() session = mock.MagicMock() self.m_k_helper.create_user.return_value = user self.m_k_helper.create_session.return_value = session self.m_c_helper.get_volume.return_value = volume self.m_c_helper.create_volume.return_value = new_volume result = self.action_swap.execute() self.assertTrue(result) self.m_n_helper.swap_volume.assert_called_once_with( volume, new_volume ) self.m_k_helper.delete_user.assert_called_once_with(user) def test_swap_fail(self): # _can_swap fail instance = self.fake_instance(status='STOPPED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap.execute() self.assertFalse(result) def 
test_can_swap_success(self): volume = self.fake_volume( status='in-use', attachments=[{'server_id': 'server_id'}]) instance = self.fake_instance() self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertTrue(result) instance = self.fake_instance(status='PAUSED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertTrue(result) instance = self.fake_instance(status='RESIZED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertTrue(result) def test_can_swap_fail(self): volume = self.fake_volume( status='in-use', attachments=[{'server_id': 'server_id'}]) instance = self.fake_instance(status='STOPPED') self.m_n_helper.find_instance.return_value = instance result = self.action_swap._can_swap(volume) self.assertFalse(result) python-watcher-4.0.0/watcher/tests/applier/actions/test_migration.py0000664000175000017500000001731013656752270026025 0ustar zuulzuul00000000000000# Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals import jsonschema import mock from watcher.applier.actions import base as baction from watcher.applier.actions import migration from watcher.common import clients from watcher.common import exception from watcher.common import nova_helper from watcher.tests import base class TestMigration(base.TestCase): INSTANCE_UUID = "45a37aeb-95ab-4ddb-a305-7d9f62c2f5ba" def setUp(self): super(TestMigration, self).setUp() self.m_osc_cls = mock.Mock() self.m_helper_cls = mock.Mock() self.m_helper = mock.Mock(spec=nova_helper.NovaHelper) self.m_helper_cls.return_value = self.m_helper self.m_osc = mock.Mock(spec=clients.OpenStackClients) self.m_osc_cls.return_value = self.m_osc m_openstack_clients = mock.patch.object( clients, "OpenStackClients", self.m_osc_cls) m_nova_helper = mock.patch.object( nova_helper, "NovaHelper", self.m_helper_cls) m_openstack_clients.start() m_nova_helper.start() self.addCleanup(m_openstack_clients.stop) self.addCleanup(m_nova_helper.stop) self.input_parameters = { "migration_type": "live", "source_node": "compute1-hostname", "destination_node": "compute2-hostname", baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, } self.action = migration.Migrate(mock.Mock()) self.action.input_parameters = self.input_parameters self.input_parameters_cold = { "migration_type": "cold", "source_node": "compute1-hostname", "destination_node": "compute2-hostname", baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, } self.action_cold = migration.Migrate(mock.Mock()) self.action_cold.input_parameters = self.input_parameters_cold def test_parameters(self): params = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, self.action.MIGRATION_TYPE: 'live', self.action.DESTINATION_NODE: 'compute-2', self.action.SOURCE_NODE: 'compute-3'} self.action.input_parameters = params self.assertTrue(self.action.validate_parameters()) def test_parameters_cold(self): params = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, 
self.action.MIGRATION_TYPE: 'cold', self.action.DESTINATION_NODE: 'compute-2', self.action.SOURCE_NODE: 'compute-3'} self.action_cold.input_parameters = params self.assertTrue(self.action_cold.validate_parameters()) def test_parameters_exception_empty_fields(self): parameters = {baction.BaseAction.RESOURCE_ID: None, 'migration_type': None, 'source_node': None, 'destination_node': None} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_migration_type(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, 'migration_type': 'unknown', 'source_node': 'compute-2', 'destination_node': 'compute-3'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_exception_source_node(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, 'migration_type': 'live', 'source_node': None, 'destination_node': 'compute-3'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_parameters_destination_node_none(self): parameters = {baction.BaseAction.RESOURCE_ID: self.INSTANCE_UUID, 'migration_type': 'live', 'source_node': 'compute-1', 'destination_node': None} self.action.input_parameters = parameters self.assertTrue(self.action.validate_parameters) def test_parameters_exception_resource_id(self): parameters = {baction.BaseAction.RESOURCE_ID: "EFEF", 'migration_type': 'live', 'source_node': 'compute-2', 'destination_node': 'compute-3'} self.action.input_parameters = parameters self.assertRaises(jsonschema.ValidationError, self.action.validate_parameters) def test_migration_pre_condition(self): try: self.action.pre_condition() except Exception as exc: self.fail(exc) def test_migration_post_condition(self): try: self.action.post_condition() except Exception as exc: self.fail(exc) def 
test_execute_live_migration_invalid_instance(self): self.m_helper.find_instance.return_value = None exc = self.assertRaises( exception.InstanceNotFound, self.action.execute) self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) def test_execute_cold_migration_invalid_instance(self): self.m_helper.find_instance.return_value = None exc = self.assertRaises( exception.InstanceNotFound, self.action_cold.execute) self.m_helper.find_instance.assert_called_once_with(self.INSTANCE_UUID) self.assertEqual(self.INSTANCE_UUID, exc.kwargs["name"]) def test_execute_live_migration(self): self.m_helper.find_instance.return_value = self.INSTANCE_UUID try: self.action.execute() except Exception as exc: self.fail(exc) self.m_helper.live_migrate_instance.assert_called_once_with( instance_id=self.INSTANCE_UUID, dest_hostname="compute2-hostname") def test_execute_cold_migration(self): self.m_helper.find_instance.return_value = self.INSTANCE_UUID try: self.action_cold.execute() except Exception as exc: self.fail(exc) self.m_helper.watcher_non_live_migrate_instance.\ assert_called_once_with( instance_id=self.INSTANCE_UUID, dest_hostname="compute2-hostname" ) def test_abort_live_migrate(self): migration = mock.MagicMock() migration.id = "2" migrations = [migration] self.m_helper.get_running_migration.return_value = migrations self.m_helper.find_instance.return_value = self.INSTANCE_UUID try: self.action.abort() except Exception as exc: self.fail(exc) self.m_helper.abort_live_migrate.assert_called_once_with( instance_id=self.INSTANCE_UUID, source="compute1-hostname", destination="compute2-hostname") python-watcher-4.0.0/watcher/tests/applier/actions/loading/0000775000175000017500000000000013656752352024037 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/actions/loading/__init__.py0000664000175000017500000000000013656752270026135 0ustar 
zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/actions/loading/test_default_actions_loader.py0000664000175000017500000000217513656752270032146 0ustar zuulzuul00000000000000# Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import unicode_literals from watcher.applier.actions import base as abase from watcher.applier.loading import default from watcher.tests import base class TestDefaultActionLoader(base.TestCase): def setUp(self): super(TestDefaultActionLoader, self).setUp() self.loader = default.DefaultActionLoader() def test_endpoints(self): for endpoint in self.loader.list_available(): loaded = self.loader.load(endpoint) self.assertIsNotNone(loaded) self.assertIsInstance(loaded, abase.BaseAction) python-watcher-4.0.0/watcher/tests/applier/action_plan/0000775000175000017500000000000013656752352023251 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/action_plan/__init__.py0000664000175000017500000000000013656752270025347 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/action_plan/test_default_action_handler.py0000775000175000017500000001254213656752270031346 0ustar zuulzuul00000000000000# Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from watcher.applier.action_plan import default from watcher.applier import default as ap_applier from watcher.common import exception from watcher import notifications from watcher import objects from watcher.objects import action_plan as ap_objects from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class TestDefaultActionPlanHandler(base.DbTestCase): class FakeApplierException(Exception): pass def setUp(self): super(TestDefaultActionPlanHandler, self).setUp() p_action_plan_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_plan_notifications = p_action_plan_notifications.start() self.addCleanup(p_action_plan_notifications.stop) obj_utils.create_test_goal(self.context) self.strategy = obj_utils.create_test_strategy(self.context) self.audit = obj_utils.create_test_audit( self.context, strategy_id=self.strategy.id) self.action_plan = obj_utils.create_test_action_plan( self.context, audit_id=self.audit.id, strategy_id=self.strategy.id) self.action = obj_utils.create_test_action( self.context, action_plan_id=self.action_plan.id, action_type='nop', input_parameters={'message': 'hello World'}) @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_launch_action_plan(self, m_get_action_plan): m_get_action_plan.return_value = self.action_plan command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() expected_calls = [ mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, 
phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, phase=objects.fields.NotificationPhase.END)] self.assertEqual(ap_objects.State.SUCCEEDED, self.action_plan.state) self.assertEqual( expected_calls, self.m_action_plan_notifications .send_action_notification .call_args_list) @mock.patch.object(ap_applier.DefaultApplier, "execute") @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_launch_action_plan_with_error(self, m_get_action_plan, m_execute): m_get_action_plan.return_value = self.action_plan m_execute.side_effect = self.FakeApplierException command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() expected_calls = [ mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, phase=objects.fields.NotificationPhase.START), mock.call(self.context, self.action_plan, action=objects.fields.NotificationAction.EXECUTION, priority=objects.fields.NotificationPriority.ERROR, phase=objects.fields.NotificationPhase.ERROR)] self.assertEqual(ap_objects.State.FAILED, self.action_plan.state) self.assertEqual( expected_calls, self.m_action_plan_notifications .send_action_notification .call_args_list) @mock.patch.object(objects.ActionPlan, "get_by_uuid") def test_cancel_action_plan(self, m_get_action_plan): m_get_action_plan.return_value = self.action_plan self.action_plan.state = ap_objects.State.CANCELLED self.action_plan.save() command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() action = self.action.get_by_uuid(self.context, self.action.uuid) self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) self.assertEqual(objects.action.State.CANCELLED, action.state) @mock.patch.object(ap_applier.DefaultApplier, "execute") @mock.patch.object(objects.ActionPlan, "get_by_uuid") def 
test_cancel_action_plan_with_exception(self, m_get_action_plan, m_execute): m_get_action_plan.return_value = self.action_plan m_execute.side_effect = exception.ActionPlanCancelled( self.action_plan.uuid) command = default.DefaultActionPlanHandler( self.context, mock.MagicMock(), self.action_plan.uuid) command.execute() self.assertEqual(ap_objects.State.CANCELLED, self.action_plan.state) python-watcher-4.0.0/watcher/tests/applier/messaging/0000775000175000017500000000000013656752352022737 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/messaging/__init__.py0000664000175000017500000000000013656752270025035 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/applier/messaging/test_trigger_action_plan_endpoint.py0000664000175000017500000000247113656752270032265 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock from watcher.applier.messaging import trigger from watcher.common import utils from watcher.tests import base class TestTriggerActionPlan(base.TestCase): def __init__(self, *args, **kwds): super(TestTriggerActionPlan, self).__init__(*args, **kwds) self.applier = mock.MagicMock() self.endpoint = trigger.TriggerActionPlan(self.applier) def test_launch_action_plan(self): action_plan_uuid = utils.generate_uuid() expected_uuid = self.endpoint.launch_action_plan(self.context, action_plan_uuid) self.assertEqual(expected_uuid, action_plan_uuid) python-watcher-4.0.0/watcher/tests/applier/test_rpcapi.py0000664000175000017500000000363313656752270023655 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import mock import oslo_messaging as om from watcher.applier import rpcapi from watcher.common import exception from watcher.common import utils from watcher.tests import base class TestApplierAPI(base.TestCase): api = rpcapi.ApplierAPI() def test_get_api_version(self): with mock.patch.object(om.RPCClient, 'call') as mock_call: expected_context = self.context self.api.check_api_version(expected_context) mock_call.assert_called_once_with( expected_context, 'check_api_version', api_version=rpcapi.ApplierAPI().API_VERSION) def test_execute_audit_without_error(self): with mock.patch.object(om.RPCClient, 'cast') as mock_cast: action_plan_uuid = utils.generate_uuid() self.api.launch_action_plan(self.context, action_plan_uuid) mock_cast.assert_called_once_with( self.context, 'launch_action_plan', action_plan_uuid=action_plan_uuid) def test_execute_action_plan_throw_exception(self): action_plan_uuid = "uuid" self.assertRaises(exception.InvalidUuidOrName, self.api.launch_action_plan, action_plan_uuid) python-watcher-4.0.0/watcher/tests/applier/test_sync.py0000664000175000017500000000647313656752270023360 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 SBCloud # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import mock from oslo_config import cfg from oslo_utils import uuidutils from watcher.applier import sync from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher.tests.db import base as db_base from watcher import notifications from watcher import objects from watcher.tests.objects import utils as obj_utils class TestCancelOngoingActionPlans(db_base.DbTestCase): def setUp(self): super(TestCancelOngoingActionPlans, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.goal = obj_utils.create_test_goal( self.context, id=1, name=dummy_strategy.DummyStrategy.get_name()) self.strategy = obj_utils.create_test_strategy( self.context, name=dummy_strategy.DummyStrategy.get_name(), goal_id=self.goal.id) audit_template = obj_utils.create_test_audit_template( self.context, strategy_id=self.strategy.id) self.audit = obj_utils.create_test_audit( self.context, id=999, name='My Audit 999', uuid=uuidutils.generate_uuid(), audit_template_id=audit_template.id, goal_id=self.goal.id, audit_type=objects.audit.AuditType.ONESHOT.value, goal=self.goal, hostname='hostname1', state=objects.audit.State.ONGOING) self.actionplan = obj_utils.create_test_action_plan( self.context, state=objects.action_plan.State.ONGOING, audit_id=999, hostname='hostname1') self.action = obj_utils.create_test_action( self.context, action_plan_id=1, state=objects.action.State.PENDING) cfg.CONF.set_override("host", "hostname1") @mock.patch.object(objects.action.Action, 'save') @mock.patch.object(objects.action_plan.ActionPlan, 'save') @mock.patch.object(objects.action.Action, 'list') @mock.patch.object(objects.action_plan.ActionPlan, 'list') def test_cancel_ongoing_actionplans(self, m_plan_list, m_action_list, m_plan_save, m_action_save): m_plan_list.return_value = [self.actionplan] m_action_list.return_value = [self.action] syncer = 
sync.Syncer() syncer._cancel_ongoing_actionplans(self.context) m_plan_list.assert_called() m_action_list.assert_called() m_plan_save.assert_called() m_action_save.assert_called() self.assertEqual(self.action.state, objects.audit.State.CANCELLED) python-watcher-4.0.0/watcher/tests/applier/test_applier_manager.py0000664000175000017500000000275113656752270025525 0ustar zuulzuul00000000000000# Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import mock import oslo_messaging as om from watcher.applier import manager as applier_manager from watcher.common import service from watcher.tests import base class TestApplierManager(base.TestCase): def setUp(self): super(TestApplierManager, self).setUp() p_heartbeat = mock.patch.object( service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) self.applier = service.Service(applier_manager.ApplierManager) @mock.patch.object(om.rpc.server.RPCServer, "stop") @mock.patch.object(om.rpc.server.RPCServer, "start") def test_start(self, m_messaging_start, m_messaging_stop): self.applier.start() self.applier.stop() self.assertEqual(1, m_messaging_start.call_count) self.assertEqual(1, m_messaging_stop.call_count) python-watcher-4.0.0/watcher/tests/policy_fixture.py0000664000175000017500000000310713656752270022745 0ustar zuulzuul00000000000000# Copyright 2012 Hewlett-Packard Development Company, L.P. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import fixtures from oslo_config import cfg from oslo_policy import _parser from oslo_policy import opts as policy_opts from watcher.common import policy as watcher_policy from watcher.tests import fake_policy CONF = cfg.CONF class PolicyFixture(fixtures.Fixture): def _setUp(self): self.policy_dir = self.useFixture(fixtures.TempDir()) self.policy_file_name = os.path.join(self.policy_dir.path, 'policy.json') with open(self.policy_file_name, 'w') as policy_file: policy_file.write(fake_policy.policy_data) policy_opts.set_defaults(CONF) CONF.set_override('policy_file', self.policy_file_name, 'oslo_policy') watcher_policy._ENFORCER = None self.addCleanup(watcher_policy.init().clear) def set_rules(self, rules): policy = watcher_policy._ENFORCER policy.set_rules({k: _parser.parse_rule(v) for k, v in rules.items()}) python-watcher-4.0.0/watcher/tests/db/0000775000175000017500000000000013656752352017713 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/db/utils.py0000664000175000017500000003233413656752270021431 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Watcher test utilities.""" from oslo_utils import timeutils from watcher.db import api as db_api from watcher.db.sqlalchemy import models from watcher import objects def id_generator(): id_ = 1 while True: yield id_ id_ += 1 def _load_relationships(model, db_data): rel_data = {} relationships = db_api.get_instance()._get_relationships(model) for name, relationship in relationships.items(): related_model = relationship.argument if not db_data.get(name): rel_data[name] = None else: rel_data[name] = related_model(**db_data.get(name)) return rel_data def get_test_audit_template(**kwargs): audit_template_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'e74c40e0-d825-11e2-a28f-0800200c9a66'), 'goal_id': kwargs.get('goal_id', 1), 'strategy_id': kwargs.get('strategy_id', None), 'name': kwargs.get('name', 'My Audit Template'), 'description': kwargs.get('description', 'Desc. Of My Audit Template'), 'scope': kwargs.get('scope', []), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. audit_template_data.update( _load_relationships(models.AuditTemplate, kwargs)) return audit_template_data def create_test_audit_template(**kwargs): """Create test audit template entry in DB and return AuditTemplate DB object. Function to be used to create test AuditTemplate objects in the database. 
:param kwargs: kwargsargs with overriding values for audit template's attributes. :returns: Test AuditTemplate DB object. """ audit_template = get_test_audit_template(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del audit_template['id'] dbapi = db_api.get_instance() return dbapi.create_audit_template(audit_template) def get_test_audit(**kwargs): audit_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), 'name': kwargs.get('name', 'My Audit'), 'audit_type': kwargs.get('audit_type', 'ONESHOT'), 'state': kwargs.get('state', objects.audit.State.PENDING), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'parameters': kwargs.get('parameters', {}), 'interval': kwargs.get('interval', '3600'), 'goal_id': kwargs.get('goal_id', 1), 'strategy_id': kwargs.get('strategy_id', None), 'scope': kwargs.get('scope', []), 'auto_trigger': kwargs.get('auto_trigger', False), 'next_run_time': kwargs.get('next_run_time'), 'hostname': kwargs.get('hostname', 'host_1'), 'start_time': kwargs.get('start_time'), 'end_time': kwargs.get('end_time'), 'force': kwargs.get('force', False) } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. audit_data.update(_load_relationships(models.Audit, kwargs)) return audit_data def create_test_audit(**kwargs): """Create test audit entry in DB and return Audit DB object. Function to be used to create test Audit objects in the database. :param kwargs: kwargsargs with overriding values for audit's attributes. :returns: Test Audit DB object. 
""" audit = get_test_audit(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del audit['id'] dbapi = db_api.get_instance() return dbapi.create_audit(audit) def get_test_action(**kwargs): action_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '10a47dd1-4874-4298-91cf-eff046dbdb8d'), 'action_plan_id': kwargs.get('action_plan_id', 1), 'action_type': kwargs.get('action_type', 'nop'), 'input_parameters': kwargs.get('input_parameters', {'key1': 'val1', 'key2': 'val2', 'resource_id': '10a47dd1-4874-4298-91cf-eff046dbdb8d'}), 'state': kwargs.get('state', objects.action_plan.State.PENDING), 'parents': kwargs.get('parents', []), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. action_data.update(_load_relationships(models.Action, kwargs)) return action_data def create_test_action(**kwargs): """Create test action entry in DB and return Action DB object. Function to be used to create test Action objects in the database. :param kwargs: kwargsargs with overriding values for action's attributes. :returns: Test Action DB object. 
""" action = get_test_action(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del action['id'] dbapi = db_api.get_instance() return dbapi.create_action(action) def get_test_action_plan(**kwargs): action_plan_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '76be87bd-3422-43f9-93a0-e85a577e3061'), 'state': kwargs.get('state', objects.action_plan.State.ONGOING), 'audit_id': kwargs.get('audit_id', 1), 'strategy_id': kwargs.get('strategy_id', 1), 'global_efficacy': kwargs.get('global_efficacy', []), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'hostname': kwargs.get('hostname', 'host_1'), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. action_plan_data.update(_load_relationships(models.ActionPlan, kwargs)) return action_plan_data def create_test_action_plan(**kwargs): """Create test action plan entry in DB and return Action Plan DB object. Function to be used to create test Action objects in the database. :param kwargs: kwargsargs with overriding values for action's attributes. :returns: Test Action DB object. """ action = get_test_action_plan(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del action['id'] dbapi = db_api.get_instance() return dbapi.create_action_plan(action) def get_test_goal(**kwargs): return { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'f7ad87ae-4298-91cf-93a0-f35a852e3652'), 'name': kwargs.get('name', 'TEST'), 'display_name': kwargs.get('display_name', 'test goal'), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'efficacy_specification': kwargs.get('efficacy_specification', []), } def create_test_goal(**kwargs): """Create test goal entry in DB and return Goal DB object. 
Function to be used to create test Goal objects in the database. :param kwargs: kwargs which override default goal values of its attributes. :returns: Test Goal DB object. """ goal = get_test_goal(**kwargs) dbapi = db_api.get_instance() return dbapi.create_goal(goal) def get_test_scoring_engine(**kwargs): return { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'e8370ede-4f39-11e6-9ffa-08002722cb21'), 'name': kwargs.get('name', 'test-se-01'), 'description': kwargs.get('description', 'test scoring engine 01'), 'metainfo': kwargs.get('metainfo', 'test_attr=test_val'), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_scoring_engine(**kwargs): """Create test scoring engine in DB and return ScoringEngine DB object. Function to be used to create test ScoringEngine objects in the database. :param kwargs: kwargs with overriding values for SE'sattributes. :returns: Test ScoringEngine DB object. """ scoring_engine = get_test_scoring_engine(**kwargs) dbapi = db_api.get_instance() return dbapi.create_scoring_engine(scoring_engine) def get_test_strategy(**kwargs): strategy_data = { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', 'cb3d0b58-4415-4d90-b75b-1e96878730e3'), 'name': kwargs.get('name', 'TEST'), 'display_name': kwargs.get('display_name', 'test strategy'), 'goal_id': kwargs.get('goal_id', 1), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), 'parameters_spec': kwargs.get('parameters_spec', {}), } # ObjectField doesn't allow None nor dict, so if we want to simulate a # non-eager object loading, the field should not be referenced at all. 
strategy_data.update(_load_relationships(models.Strategy, kwargs)) return strategy_data def get_test_service(**kwargs): return { 'id': kwargs.get('id', 1), 'name': kwargs.get('name', 'watcher-service'), 'host': kwargs.get('host', 'controller'), 'last_seen_up': kwargs.get( 'last_seen_up', timeutils.parse_isotime('2016-09-22T08:32:06').replace(tzinfo=None) ), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_service(**kwargs): """Create test service entry in DB and return Service DB object. Function to be used to create test Service objects in the database. :param kwargs: kwargs with overriding values for service's attributes. :returns: Test Service DB object. """ service = get_test_service(**kwargs) dbapi = db_api.get_instance() return dbapi.create_service(service) def create_test_strategy(**kwargs): """Create test strategy entry in DB and return Strategy DB object. Function to be used to create test Strategy objects in the database. :param kwargs: kwargs with overriding values for strategy's attributes. :returns: Test Strategy DB object. """ strategy = get_test_strategy(**kwargs) dbapi = db_api.get_instance() return dbapi.create_strategy(strategy) def get_test_efficacy_indicator(**kwargs): return { 'id': kwargs.get('id', 1), 'uuid': kwargs.get('uuid', '202cfcf9-811c-411a-8a35-d8351f64eb24'), 'name': kwargs.get('name', 'test_indicator'), 'description': kwargs.get('description', 'Test indicator'), 'unit': kwargs.get('unit', '%'), 'value': kwargs.get('value', 0), 'action_plan_id': kwargs.get('action_plan_id', 1), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_efficacy_indicator(**kwargs): """Create and return a test efficacy indicator entry in DB. Function to be used to create test EfficacyIndicator objects in the DB. 
:param kwargs: kwargs for overriding the values of the attributes :returns: Test EfficacyIndicator DB object. """ efficacy_indicator = get_test_efficacy_indicator(**kwargs) # Let DB generate ID if it isn't specified explicitly if 'id' not in kwargs: del efficacy_indicator['id'] dbapi = db_api.get_instance() return dbapi.create_efficacy_indicator(efficacy_indicator) def get_test_action_desc(**kwargs): return { 'id': kwargs.get('id', 1), 'action_type': kwargs.get('action_type', 'nop'), 'description': kwargs.get('description', 'Logging a NOP message'), 'created_at': kwargs.get('created_at'), 'updated_at': kwargs.get('updated_at'), 'deleted_at': kwargs.get('deleted_at'), } def create_test_action_desc(**kwargs): """Create test action description entry in DB and return ActionDescription. Function to be used to create test ActionDescription objects in the DB. :param kwargs: kwargs with overriding values for service's attributes. :returns: Test ActionDescription DB object. """ action_desc = get_test_action_desc(**kwargs) dbapi = db_api.get_instance() return dbapi.create_action_description(action_desc) python-watcher-4.0.0/watcher/tests/db/test_goal.py0000664000175000017500000002750713656752270022260 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating Goal via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbGoalFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbGoalFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): with freezegun.freeze_time(self.FAKE_TODAY): self.goal1 = utils.create_test_goal( id=1, uuid=w_utils.generate_uuid(), name="GOAL_1", display_name="Goal 1") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.goal2 = utils.create_test_goal( id=2, uuid=w_utils.generate_uuid(), name="GOAL_2", display_name="Goal 2") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.goal3 = utils.create_test_goal( id=3, uuid=w_utils.generate_uuid(), name="GOAL_3", display_name="Goal 3") def _soft_delete_goals(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_goal(self.goal1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_goal(self.goal2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_goal(self.goal3.id) def _update_goals(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_goal( self.goal1.uuid, values={"display_name": "goal1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_goal( self.goal2.uuid, values={"display_name": "goal2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_goal( self.goal3.uuid, values={"display_name": "goal3"}) def test_get_goal_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_goal(self.goal1.id) res = self.dbapi.get_goal_list( self.context, filters={'deleted': True}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def 
test_get_goal_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_goal(self.goal1.id) res = self.dbapi.get_goal_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_deleted_at_eq(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_deleted_at_lt(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_deleted_at_lte(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_deleted_at_gt(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_deleted_at_gte(self): self._soft_delete_goals() res = self.dbapi.get_goal_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal1.uuid, self.goal2.uuid]), set([r.uuid for r in res])) # created_at # def test_get_goal_list_filter_created_at_eq(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_created_at_lt(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def 
test_get_goal_list_filter_created_at_lte(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_created_at_gt(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_created_at_gte(self): res = self.dbapi.get_goal_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal1.uuid, self.goal2.uuid]), set([r.uuid for r in res])) # updated_at # def test_get_goal_list_filter_updated_at_eq(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_updated_at_lt(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_updated_at_lte(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal2.uuid, self.goal3.uuid]), set([r.uuid for r in res])) def test_get_goal_list_filter_updated_at_gt(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.goal1.uuid], [r.uuid for r in res]) def test_get_goal_list_filter_updated_at_gte(self): self._update_goals() res = self.dbapi.get_goal_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.goal1.uuid, self.goal2.uuid]), set([r.uuid for r in res])) class DbGoalTestCase(base.DbTestCase): def test_get_goal_list(self): uuids = [] for i in range(1, 4): goal = 
utils.create_test_goal( id=i, uuid=w_utils.generate_uuid(), name="GOAL_%s" % i, display_name='My Goal %s' % i) uuids.append(str(goal['uuid'])) goals = self.dbapi.get_goal_list(self.context) goal_uuids = [g.uuid for g in goals] self.assertEqual(sorted(uuids), sorted(goal_uuids)) def test_get_goal_list_with_filters(self): goal1 = utils.create_test_goal( id=1, uuid=w_utils.generate_uuid(), name="GOAL_1", display_name='Goal 1', ) goal2 = utils.create_test_goal( id=2, uuid=w_utils.generate_uuid(), name="GOAL_2", display_name='Goal 2', ) goal3 = utils.create_test_goal( id=3, uuid=w_utils.generate_uuid(), name="GOAL_3", display_name='Goal 3', ) self.dbapi.soft_delete_goal(goal3['uuid']) res = self.dbapi.get_goal_list( self.context, filters={'display_name': 'Goal 1'}) self.assertEqual([goal1['uuid']], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'display_name': 'Goal 3'}) self.assertEqual([], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'name': 'GOAL_1'}) self.assertEqual([goal1['uuid']], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'display_name': 'Goal 2'}) self.assertEqual([goal2['uuid']], [r.uuid for r in res]) res = self.dbapi.get_goal_list( self.context, filters={'uuid': goal3['uuid']}) self.assertEqual([], [r.uuid for r in res]) def test_get_goal_by_uuid(self): efficacy_spec = [{"unit": "%", "name": "dummy", "schema": "Range(min=0, max=100, min_included=True, " "max_included=True, msg=None)", "description": "Dummy indicator"}] created_goal = utils.create_test_goal( efficacy_specification=efficacy_spec) goal = self.dbapi.get_goal_by_uuid(self.context, created_goal['uuid']) self.assertEqual(goal.uuid, created_goal['uuid']) def test_get_goal_that_does_not_exist(self): random_uuid = w_utils.generate_uuid() self.assertRaises(exception.GoalNotFound, self.dbapi.get_goal_by_uuid, self.context, random_uuid) def test_update_goal(self): goal = utils.create_test_goal() res = 
self.dbapi.update_goal(goal['uuid'], {'display_name': 'updated-model'}) self.assertEqual('updated-model', res.display_name) def test_update_goal_id(self): goal = utils.create_test_goal() self.assertRaises(exception.Invalid, self.dbapi.update_goal, goal['uuid'], {'uuid': 'NEW_GOAL'}) def test_update_goal_that_does_not_exist(self): random_uuid = w_utils.generate_uuid() self.assertRaises(exception.GoalNotFound, self.dbapi.update_goal, random_uuid, {'display_name': ''}) def test_destroy_goal(self): goal = utils.create_test_goal() self.dbapi.destroy_goal(goal['uuid']) self.assertRaises(exception.GoalNotFound, self.dbapi.get_goal_by_uuid, self.context, goal['uuid']) def test_destroy_goal_that_does_not_exist(self): random_uuid = w_utils.generate_uuid() self.assertRaises(exception.GoalNotFound, self.dbapi.destroy_goal, random_uuid) def test_create_goal_already_exists(self): goal_uuid = w_utils.generate_uuid() utils.create_test_goal(uuid=goal_uuid) self.assertRaises(exception.GoalAlreadyExists, utils.create_test_goal, uuid=goal_uuid) python-watcher-4.0.0/watcher/tests/db/__init__.py0000664000175000017500000000000013656752270022011 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/db/test_audit_template.py0000664000175000017500000004021513656752270024326 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating AuditTemplate via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbAuditTemplateFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbAuditTemplateFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): def gen_name(): return "Audit Template %s" % w_utils.generate_uuid() self.audit_template1_name = gen_name() self.audit_template2_name = gen_name() self.audit_template3_name = gen_name() with freezegun.freeze_time(self.FAKE_TODAY): self.audit_template1 = utils.create_test_audit_template( name=self.audit_template1_name, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.audit_template2 = utils.create_test_audit_template( name=self.audit_template2_name, id=2, uuid=None) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.audit_template3 = utils.create_test_audit_template( name=self.audit_template3_name, id=3, uuid=None) def _soft_delete_audit_templates(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_audit_template(self.audit_template2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_audit_template(self.audit_template3.uuid) def _update_audit_templates(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_audit_template( self.audit_template1.uuid, values={"name": "audit_template1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_audit_template( self.audit_template2.uuid, values={"name": "audit_template2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_audit_template( 
self.audit_template3.uuid, values={"name": "audit_template3"}) def test_get_audit_template_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) res = self.dbapi.get_audit_template_list( self.context, filters={'deleted': True}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit_template(self.audit_template1.uuid) res = self.dbapi.get_audit_template_list( self.context, filters={'deleted': False}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_eq(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_lt(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_lte(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_gt(self): self._soft_delete_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_deleted_at_gte(self): self._soft_delete_audit_templates() res = 
self.dbapi.get_audit_template_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template1['id'], self.audit_template2['id']], [r.id for r in res]) # created_at # def test_get_audit_template_list_filter_created_at_eq(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_lt(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_lte(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_gt(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_created_at_gte(self): res = self.dbapi.get_audit_template_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template1['id'], self.audit_template2['id']], [r.id for r in res]) # updated_at # def test_get_audit_template_list_filter_updated_at_eq(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_lt(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit_template2['id'], 
self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_lte(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template2['id'], self.audit_template3['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_gt(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit_template1['id']], [r.id for r in res]) def test_get_audit_template_list_filter_updated_at_gte(self): self._update_audit_templates() res = self.dbapi.get_audit_template_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit_template1['id'], self.audit_template2['id']], [r.id for r in res]) class DbAuditTemplateTestCase(base.DbTestCase): def test_get_audit_template_list(self): uuids = [] for i in range(1, 4): audit_template = utils.create_test_audit_template( id=i, uuid=w_utils.generate_uuid(), name='My Audit Template {0}'.format(i)) uuids.append(str(audit_template['uuid'])) audit_templates = self.dbapi.get_audit_template_list(self.context) audit_template_uuids = [at.uuid for at in audit_templates] self.assertEqual(sorted(uuids), sorted(audit_template_uuids)) for audit_template in audit_templates: self.assertIsNone(audit_template.goal) self.assertIsNone(audit_template.strategy) def test_get_audit_template_list_eager(self): _goal = utils.get_test_goal() goal = self.dbapi.create_goal(_goal) _strategy = utils.get_test_strategy() strategy = self.dbapi.create_strategy(_strategy) uuids = [] for i in range(1, 4): audit_template = utils.create_test_audit_template( id=i, uuid=w_utils.generate_uuid(), name='My Audit Template {0}'.format(i), goal_id=goal.id, strategy_id=strategy.id) uuids.append(str(audit_template['uuid'])) audit_templates = 
self.dbapi.get_audit_template_list( self.context, eager=True) audit_template_map = {a.uuid: a for a in audit_templates} self.assertEqual(sorted(uuids), sorted(audit_template_map.keys())) eager_audit_template = audit_template_map[audit_template.uuid] self.assertEqual(goal.as_dict(), eager_audit_template.goal.as_dict()) self.assertEqual( strategy.as_dict(), eager_audit_template.strategy.as_dict()) def test_get_audit_template_list_with_filters(self): goal = utils.create_test_goal(name='DUMMY') audit_template1 = utils.create_test_audit_template( id=1, uuid=w_utils.generate_uuid(), name='My Audit Template 1', description='Description of my audit template 1', goal_id=goal['id']) audit_template2 = utils.create_test_audit_template( id=2, uuid=w_utils.generate_uuid(), name='My Audit Template 2', description='Description of my audit template 2', goal_id=goal['id']) audit_template3 = utils.create_test_audit_template( id=3, uuid=w_utils.generate_uuid(), name='My Audit Template 3', description='Description of my audit template 3', goal_id=goal['id']) self.dbapi.soft_delete_audit_template(audit_template3['uuid']) res = self.dbapi.get_audit_template_list( self.context, filters={'name': 'My Audit Template 1'}) self.assertEqual([audit_template1['id']], [r.id for r in res]) res = self.dbapi.get_audit_template_list( self.context, filters={'name': 'Does not exist'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_audit_template_list( self.context, filters={'goal_name': 'DUMMY'}) self.assertEqual( sorted([audit_template1['id'], audit_template2['id']]), sorted([r.id for r in res])) temp_context = self.context temp_context.show_deleted = True res = self.dbapi.get_audit_template_list( temp_context, filters={'goal_name': 'DUMMY'}) self.assertEqual( sorted([audit_template1['id'], audit_template2['id'], audit_template3['id']]), sorted([r.id for r in res])) res = self.dbapi.get_audit_template_list( self.context, filters={'name': 'My Audit Template 2'}) 
self.assertEqual([audit_template2['id']], [r.id for r in res]) def test_get_audit_template_list_with_filter_by_uuid(self): audit_template = utils.create_test_audit_template() res = self.dbapi.get_audit_template_list( self.context, filters={'uuid': audit_template["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(audit_template['uuid'], res[0].uuid) def test_get_audit_template_by_id(self): audit_template = utils.create_test_audit_template() audit_template = self.dbapi.get_audit_template_by_id( self.context, audit_template['id']) self.assertEqual(audit_template['uuid'], audit_template.uuid) def test_get_audit_template_by_uuid(self): audit_template = utils.create_test_audit_template() audit_template = self.dbapi.get_audit_template_by_uuid( self.context, audit_template['uuid']) self.assertEqual(audit_template['id'], audit_template.id) def test_get_audit_template_that_does_not_exist(self): self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.get_audit_template_by_id, self.context, 1234) def test_update_audit_template(self): audit_template = utils.create_test_audit_template() res = self.dbapi.update_audit_template(audit_template['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_audit_template_that_does_not_exist(self): self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.update_audit_template, 1234, {'name': ''}) def test_update_audit_template_uuid(self): audit_template = utils.create_test_audit_template() self.assertRaises(exception.Invalid, self.dbapi.update_audit_template, audit_template['id'], {'uuid': 'hello'}) def test_destroy_audit_template(self): audit_template = utils.create_test_audit_template() self.dbapi.destroy_audit_template(audit_template['id']) self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.get_audit_template_by_id, self.context, audit_template['id']) def test_destroy_audit_template_by_uuid(self): uuid = w_utils.generate_uuid() utils.create_test_audit_template(uuid=uuid) 
self.assertIsNotNone(self.dbapi.get_audit_template_by_uuid( self.context, uuid)) self.dbapi.destroy_audit_template(uuid) self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.get_audit_template_by_uuid, self.context, uuid) def test_destroy_audit_template_that_does_not_exist(self): self.assertRaises(exception.AuditTemplateNotFound, self.dbapi.destroy_audit_template, 1234) def test_create_audit_template_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_audit_template(id=1, uuid=uuid) self.assertRaises(exception.AuditTemplateAlreadyExists, utils.create_test_audit_template, id=2, uuid=uuid) def test_audit_template_create_same_name(self): audit_template1 = utils.create_test_audit_template( uuid=w_utils.generate_uuid(), name='audit_template_name') self.assertEqual(audit_template1['uuid'], audit_template1.uuid) self.assertRaises( exception.AuditTemplateAlreadyExists, utils.create_test_audit_template, uuid=w_utils.generate_uuid(), name='audit_template_name') python-watcher-4.0.0/watcher/tests/db/test_strategy.py0000664000175000017500000003352013656752270023170 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for manipulating Strategy via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbStrategyFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbStrategyFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): strategy1_name = "STRATEGY_ID_1" strategy2_name = "STRATEGY_ID_2" strategy3_name = "STRATEGY_ID_3" self.goal1 = utils.create_test_goal( id=1, uuid=w_utils.generate_uuid(), name="GOAL_ID", display_name="Goal") self.goal2 = utils.create_test_goal( id=2, uuid=w_utils.generate_uuid(), name="DUMMY", display_name="Dummy") with freezegun.freeze_time(self.FAKE_TODAY): self.strategy1 = utils.create_test_strategy( id=1, uuid=w_utils.generate_uuid(), name=strategy1_name, display_name="Strategy 1", goal_id=self.goal1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.strategy2 = utils.create_test_strategy( id=2, uuid=w_utils.generate_uuid(), name=strategy2_name, display_name="Strategy 2", goal_id=self.goal1.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.strategy3 = utils.create_test_strategy( id=3, uuid=w_utils.generate_uuid(), name=strategy3_name, display_name="Strategy 3", goal_id=self.goal2.id) def _soft_delete_strategys(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_strategy(self.strategy1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_strategy(self.strategy2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_strategy(self.strategy3.id) def _update_strategies(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_strategy( self.strategy1.id, values={"display_name": "strategy1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): 
self.dbapi.update_strategy( self.strategy2.id, values={"display_name": "strategy2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_strategy( self.strategy3.id, values={"display_name": "strategy3"}) def test_get_strategy_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_strategy(self.strategy1.id) res = self.dbapi.get_strategy_list( self.context, filters={'deleted': True}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_strategy(self.strategy1.id) res = self.dbapi.get_strategy_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_deleted_at_eq(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_deleted_at_lt(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_deleted_at_lte(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_deleted_at_gt(self): self._soft_delete_strategys() res = self.dbapi.get_strategy_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_deleted_at_gte(self): self._soft_delete_strategys() res = 
self.dbapi.get_strategy_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy1['uuid'], self.strategy2['uuid']]), set([r.uuid for r in res])) # created_at # def test_get_strategy_list_filter_created_at_eq(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_created_at_lt(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_created_at_lte(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_created_at_gt(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_created_at_gte(self): res = self.dbapi.get_strategy_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy1['uuid'], self.strategy2['uuid']]), set([r.uuid for r in res])) # updated_at # def test_get_strategy_list_filter_updated_at_eq(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_updated_at_lt(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_updated_at_lte(self): 
self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy2['uuid'], self.strategy3['uuid']]), set([r.uuid for r in res])) def test_get_strategy_list_filter_updated_at_gt(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.strategy1['uuid']], [r.uuid for r in res]) def test_get_strategy_list_filter_updated_at_gte(self): self._update_strategies() res = self.dbapi.get_strategy_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.strategy1['uuid'], self.strategy2['uuid']]), set([r.uuid for r in res])) class DbStrategyTestCase(base.DbTestCase): def test_get_strategy_list(self): uuids = [] for i in range(1, 4): strategy = utils.create_test_strategy( id=i, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_%s" % i, display_name='My Strategy {0}'.format(i)) uuids.append(str(strategy['uuid'])) strategies = self.dbapi.get_strategy_list(self.context) strategy_uuids = [s.uuid for s in strategies] self.assertEqual(sorted(uuids), sorted(strategy_uuids)) for strategy in strategies: self.assertIsNone(strategy.goal) def test_get_strategy_list_eager(self): _goal = utils.get_test_goal() goal = self.dbapi.create_goal(_goal) uuids = [] for i in range(1, 4): strategy = utils.create_test_strategy( id=i, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_%s" % i, display_name='My Strategy {0}'.format(i), goal_id=goal.id) uuids.append(str(strategy['uuid'])) strategys = self.dbapi.get_strategy_list(self.context, eager=True) strategy_map = {a.uuid: a for a in strategys} self.assertEqual(sorted(uuids), sorted(strategy_map.keys())) eager_strategy = strategy_map[strategy.uuid] self.assertEqual(goal.as_dict(), eager_strategy.goal.as_dict()) def test_get_strategy_list_with_filters(self): # NOTE(erakli): we don't create goal in database but links to # 
goal_id = 1. There is no error in dbapi.create_strategy() method. # Is it right behaviour? strategy1 = utils.create_test_strategy( id=1, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_1", display_name='Strategy 1', ) strategy2 = utils.create_test_strategy( id=2, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_2", display_name='Strategy 2', ) strategy3 = utils.create_test_strategy( id=3, uuid=w_utils.generate_uuid(), name="STRATEGY_ID_3", display_name='Strategy 3', ) self.dbapi.soft_delete_strategy(strategy3['uuid']) res = self.dbapi.get_strategy_list( self.context, filters={'display_name': 'Strategy 1'}) self.assertEqual([strategy1['uuid']], [r.uuid for r in res]) res = self.dbapi.get_strategy_list( self.context, filters={'display_name': 'Strategy 3'}) self.assertEqual([], [r.uuid for r in res]) res = self.dbapi.get_strategy_list( self.context, filters={'goal_id': 1}) self.assertEqual([strategy1['uuid'], strategy2['uuid']], [r.uuid for r in res]) res = self.dbapi.get_strategy_list( self.context, filters={'display_name': 'Strategy 2'}) self.assertEqual([strategy2['uuid']], [r.uuid for r in res]) def test_get_strategy_by_uuid(self): created_strategy = utils.create_test_strategy() strategy = self.dbapi.get_strategy_by_uuid( self.context, created_strategy['uuid']) self.assertEqual(strategy.uuid, created_strategy['uuid']) def test_get_strategy_by_name(self): created_strategy = utils.create_test_strategy() strategy = self.dbapi.get_strategy_by_name( self.context, created_strategy['name']) self.assertEqual(strategy.name, created_strategy['name']) def test_get_strategy_that_does_not_exist(self): self.assertRaises(exception.StrategyNotFound, self.dbapi.get_strategy_by_id, self.context, 404) def test_update_strategy(self): strategy = utils.create_test_strategy() res = self.dbapi.update_strategy( strategy['uuid'], {'display_name': 'updated-model'}) self.assertEqual('updated-model', res.display_name) def test_update_goal_id(self): strategy = utils.create_test_strategy() 
self.assertRaises(exception.Invalid, self.dbapi.update_strategy, strategy['uuid'], {'uuid': 'new_strategy_id'}) def test_update_strategy_that_does_not_exist(self): self.assertRaises(exception.StrategyNotFound, self.dbapi.update_strategy, 404, {'display_name': ''}) def test_destroy_strategy(self): strategy = utils.create_test_strategy() self.dbapi.destroy_strategy(strategy['uuid']) self.assertRaises(exception.StrategyNotFound, self.dbapi.get_strategy_by_id, self.context, strategy['uuid']) def test_destroy_strategy_that_does_not_exist(self): self.assertRaises(exception.StrategyNotFound, self.dbapi.destroy_strategy, 404) def test_create_strategy_already_exists(self): strategy_id = "STRATEGY_ID" utils.create_test_strategy(name=strategy_id) self.assertRaises(exception.StrategyAlreadyExists, utils.create_test_strategy, name=strategy_id) python-watcher-4.0.0/watcher/tests/db/test_scoring_engine.py0000664000175000017500000003313413656752270024320 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for manipulating ScoringEngine via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.tests.db import base from watcher.tests.db import utils class TestDbScoringEngineFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbScoringEngineFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): with freezegun.freeze_time(self.FAKE_TODAY): self.scoring_engine1 = utils.create_test_scoring_engine( id=1, uuid='e8370ede-4f39-11e6-9ffa-08002722cb22', name="se-1", description="Scoring Engine 1", metainfo="a1=b1") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.scoring_engine2 = utils.create_test_scoring_engine( id=2, uuid='e8370ede-4f39-11e6-9ffa-08002722cb23', name="se-2", description="Scoring Engine 2", metainfo="a2=b2") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.scoring_engine3 = utils.create_test_scoring_engine( id=3, uuid='e8370ede-4f39-11e6-9ffa-08002722cb24', name="se-3", description="Scoring Engine 3", metainfo="a3=b3") def _soft_delete_scoring_engines(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_scoring_engine(self.scoring_engine2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_scoring_engine(self.scoring_engine3.id) def _update_scoring_engines(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_scoring_engine( self.scoring_engine1.id, values={"description": "scoring_engine1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_scoring_engine( self.scoring_engine2.id, values={"description": "scoring_engine2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_scoring_engine( 
self.scoring_engine3.id, values={"description": "scoring_engine3"}) def test_get_scoring_engine_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted': True}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_scoring_engine(self.scoring_engine1.id) res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_deleted_at_eq(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_deleted_at_lt(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_deleted_at_lte(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_deleted_at_gt(self): self._soft_delete_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_deleted_at_gte(self): self._soft_delete_scoring_engines() res 
= self.dbapi.get_scoring_engine_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine1['id'], self.scoring_engine2['id']]), set([r.id for r in res])) # created_at # def test_get_scoring_engine_list_filter_created_at_eq(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_created_at_lt(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_created_at_lte(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_created_at_gt(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_created_at_gte(self): res = self.dbapi.get_scoring_engine_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine1['id'], self.scoring_engine2['id']]), set([r.id for r in res])) # updated_at # def test_get_scoring_engine_list_filter_updated_at_eq(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_updated_at_lt(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( 
set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_updated_at_lte(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine2['id'], self.scoring_engine3['id']]), set([r.id for r in res])) def test_get_scoring_engine_list_filter_updated_at_gt(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.scoring_engine1['id']], [r.id for r in res]) def test_get_scoring_engine_list_filter_updated_at_gte(self): self._update_scoring_engines() res = self.dbapi.get_scoring_engine_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.scoring_engine1['id'], self.scoring_engine2['id']]), set([r.id for r in res])) class DbScoringEngineTestCase(base.DbTestCase): def test_get_scoring_engine_list(self): names = [] for i in range(1, 4): scoring_engine = utils.create_test_scoring_engine( id=i, uuid=w_utils.generate_uuid(), name="SE_ID_%s" % i, description='My ScoringEngine {0}'.format(i), metainfo='a{0}=b{0}'.format(i)) names.append(str(scoring_engine['name'])) scoring_engines = self.dbapi.get_scoring_engine_list(self.context) scoring_engines_names = [se.name for se in scoring_engines] self.assertEqual(sorted(names), sorted(scoring_engines_names)) def test_get_scoring_engine_list_with_filters(self): scoring_engine1 = utils.create_test_scoring_engine( id=1, uuid=w_utils.generate_uuid(), name="SE_ID_1", description='ScoringEngine 1', metainfo="a1=b1", ) scoring_engine2 = utils.create_test_scoring_engine( id=2, uuid=w_utils.generate_uuid(), name="SE_ID_2", description='ScoringEngine 2', metainfo="a2=b2", ) scoring_engine3 = utils.create_test_scoring_engine( id=3, uuid=w_utils.generate_uuid(), name="SE_ID_3", description='ScoringEngine 3', 
metainfo="a3=b3", ) self.dbapi.soft_delete_scoring_engine(scoring_engine3['uuid']) res = self.dbapi.get_scoring_engine_list( self.context, filters={'description': 'ScoringEngine 1'}) self.assertEqual([scoring_engine1['name']], [r.name for r in res]) res = self.dbapi.get_scoring_engine_list( self.context, filters={'description': 'ScoringEngine 3'}) self.assertEqual([], [r.name for r in res]) res = self.dbapi.get_scoring_engine_list( self.context, filters={'description': 'ScoringEngine 2'}) self.assertEqual([scoring_engine2['name']], [r.name for r in res]) def test_get_scoring_engine_by_id(self): created_scoring_engine = utils.create_test_scoring_engine() scoring_engine = self.dbapi.get_scoring_engine_by_id( self.context, created_scoring_engine['id']) self.assertEqual(scoring_engine.id, created_scoring_engine['id']) def test_get_scoring_engine_by_uuid(self): created_scoring_engine = utils.create_test_scoring_engine() scoring_engine = self.dbapi.get_scoring_engine_by_uuid( self.context, created_scoring_engine['uuid']) self.assertEqual(scoring_engine.uuid, created_scoring_engine['uuid']) def test_get_scoring_engine_by_name(self): created_scoring_engine = utils.create_test_scoring_engine() scoring_engine = self.dbapi.get_scoring_engine_by_name( self.context, created_scoring_engine['name']) self.assertEqual(scoring_engine.name, created_scoring_engine['name']) def test_get_scoring_engine_that_does_not_exist(self): self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.get_scoring_engine_by_id, self.context, 404) def test_update_scoring_engine(self): scoring_engine = utils.create_test_scoring_engine() res = self.dbapi.update_scoring_engine( scoring_engine['id'], {'description': 'updated-model'}) self.assertEqual('updated-model', res.description) def test_update_scoring_engine_id(self): scoring_engine = utils.create_test_scoring_engine() self.assertRaises(exception.Invalid, self.dbapi.update_scoring_engine, scoring_engine['id'], {'uuid': w_utils.generate_uuid()}) def 
test_update_scoring_engine_that_does_not_exist(self): self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.update_scoring_engine, 404, {'description': ''}) def test_destroy_scoring_engine(self): scoring_engine = utils.create_test_scoring_engine() self.dbapi.destroy_scoring_engine(scoring_engine['id']) self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.get_scoring_engine_by_id, self.context, scoring_engine['id']) def test_destroy_scoring_engine_that_does_not_exist(self): self.assertRaises(exception.ScoringEngineNotFound, self.dbapi.destroy_scoring_engine, 404) def test_create_scoring_engine_already_exists(self): scoring_engine_id = "SE_ID" utils.create_test_scoring_engine(name=scoring_engine_id) self.assertRaises(exception.ScoringEngineAlreadyExists, utils.create_test_scoring_engine, name=scoring_engine_id) python-watcher-4.0.0/watcher/tests/db/test_action_description.py0000664000175000017500000002601213656752270025204 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Tests for manipulating ActionDescription via the DB API""" import freezegun from watcher.common import exception from watcher.tests.db import base from watcher.tests.db import utils class TestDbActionDescriptionFilters(base.DbTestCase): FAKE_OLDER_DATE = '2015-01-01T09:52:05.219414' FAKE_OLD_DATE = '2016-01-01T09:52:05.219414' FAKE_TODAY = '2017-02-24T09:52:05.219414' def setUp(self): super(TestDbActionDescriptionFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): action_desc1_type = "nop" action_desc2_type = "sleep" action_desc3_type = "resize" with freezegun.freeze_time(self.FAKE_TODAY): self.action_desc1 = utils.create_test_action_desc( id=1, action_type=action_desc1_type, description="description") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.action_desc2 = utils.create_test_action_desc( id=2, action_type=action_desc2_type, description="description") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.action_desc3 = utils.create_test_action_desc( id=3, action_type=action_desc3_type, description="description") def _soft_delete_action_descs(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_description(self.action_desc1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_action_description(self.action_desc2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_action_description(self.action_desc3.id) def _update_action_descs(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_action_description( self.action_desc1.id, values={"description": "nop description"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_action_description( self.action_desc2.id, values={"description": "sleep description"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_action_description( self.action_desc3.id, values={"description": "resize description"}) def 
test_get_action_desc_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_description(self.action_desc1.id) res = self.dbapi.get_action_description_list( self.context, filters={'deleted': True}) self.assertEqual([self.action_desc1['action_type']], [r.action_type for r in res]) def test_get_action_desc_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_description(self.action_desc1.id) res = self.dbapi.get_action_description_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.action_desc2['action_type'], self.action_desc3['action_type']]), set([r.action_type for r in res])) def test_get_action_desc_list_filter_deleted_at_eq(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_deleted_at_lt(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_deleted_at_lte(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_deleted_at_gt(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_deleted_at_gte(self): self._soft_delete_action_descs() res = self.dbapi.get_action_description_list( self.context, 
filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc1['id'], self.action_desc2['id']]), set([r.id for r in res])) # created_at # def test_get_action_desc_list_filter_created_at_eq(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_created_at_lt(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_created_at_lte(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_created_at_gt(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_created_at_gte(self): res = self.dbapi.get_action_description_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc1['id'], self.action_desc2['id']]), set([r.id for r in res])) # updated_at # def test_get_action_desc_list_filter_updated_at_eq(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_updated_at_lt(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def 
test_get_action_desc_list_filter_updated_at_lte(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc2['id'], self.action_desc3['id']]), set([r.id for r in res])) def test_get_action_desc_list_filter_updated_at_gt(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_desc1['id']], [r.id for r in res]) def test_get_action_desc_list_filter_updated_at_gte(self): self._update_action_descs() res = self.dbapi.get_action_description_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.action_desc1['id'], self.action_desc2['id']]), set([r.id for r in res])) class DbActionDescriptionTestCase(base.DbTestCase): def test_get_action_desc_list(self): ids = [] for i in range(1, 4): action_desc = utils.create_test_action_desc( id=i, action_type="action_%s" % i, description="description_{0}".format(i)) ids.append(action_desc['id']) action_descs = self.dbapi.get_action_description_list(self.context) action_desc_ids = [s.id for s in action_descs] self.assertEqual(sorted(ids), sorted(action_desc_ids)) def test_get_action_desc_list_with_filters(self): action_desc1 = utils.create_test_action_desc( id=1, action_type="action_1", description="description_1", ) action_desc2 = utils.create_test_action_desc( id=2, action_type="action_2", description="description_2", ) res = self.dbapi.get_action_description_list( self.context, filters={'action_type': 'action_1'}) self.assertEqual([action_desc1['id']], [r.id for r in res]) res = self.dbapi.get_action_description_list( self.context, filters={'action_type': 'action_3'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_action_description_list( self.context, filters={'action_type': 'action_2'}) self.assertEqual([action_desc2['id']], [r.id for r 
in res]) def test_get_action_desc_by_type(self): created_action_desc = utils.create_test_action_desc() action_desc = self.dbapi.get_action_description_by_type( self.context, created_action_desc['action_type']) self.assertEqual(action_desc.action_type, created_action_desc['action_type']) def test_get_action_desc_that_does_not_exist(self): self.assertRaises(exception.ActionDescriptionNotFound, self.dbapi.get_action_description_by_id, self.context, 404) def test_update_action_desc(self): action_desc = utils.create_test_action_desc() res = self.dbapi.update_action_description( action_desc['id'], {'description': 'description_test'}) self.assertEqual('description_test', res.description) python-watcher-4.0.0/watcher/tests/db/test_action.py0000664000175000017500000003562713656752270022615 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating Action via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbActionFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbActionFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) self.audit = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None) self.action_plan = utils.create_test_action_plan( audit_id=self.audit.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.action1 = utils.create_test_action( action_plan_id=self.action_plan.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.action2 = utils.create_test_action( action_plan_id=self.action_plan.id, id=2, uuid=None) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.action3 = utils.create_test_action( action_plan_id=self.action_plan.id, id=3, uuid=None) def _soft_delete_actions(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action(self.action1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_action(self.action2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_action(self.action3.uuid) def _update_actions(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_action( self.action1.uuid, values={"state": objects.action_plan.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_action( self.action2.uuid, values={"state": objects.action_plan.State.SUCCEEDED}) with 
freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_action( self.action3.uuid, values={"state": objects.action_plan.State.SUCCEEDED}) def test_get_action_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action(self.action1.uuid) res = self.dbapi.get_action_list( self.context, filters={'deleted': True}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action(self.action1.uuid) res = self.dbapi.get_action_list( self.context, filters={'deleted': False}) self.assertEqual([self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_eq(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_lt(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_lte(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_gt(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_deleted_at_gte(self): self._soft_delete_actions() res = self.dbapi.get_action_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action1['id'], self.action2['id']], [r.id for r in res]) # created_at # def 
test_get_action_filter_created_at_eq(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_created_at_lt(self): with freezegun.freeze_time(self.FAKE_TODAY): res = self.dbapi.get_action_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_created_at_lte(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_created_at_gt(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_created_at_gte(self): res = self.dbapi.get_action_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action1['id'], self.action2['id']], [r.id for r in res]) # updated_at # def test_get_action_filter_updated_at_eq(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_updated_at_lt(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_updated_at_lte(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action2['id'], self.action3['id']], [r.id for r in res]) def test_get_action_filter_updated_at_gt(self): self._update_actions() res = self.dbapi.get_action_list( self.context, 
filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action1['id']], [r.id for r in res]) def test_get_action_filter_updated_at_gte(self): self._update_actions() res = self.dbapi.get_action_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action1['id'], self.action2['id']], [r.id for r in res]) class DbActionTestCase(base.DbTestCase): def test_get_action_list(self): uuids = [] for _ in range(1, 4): action = utils.create_test_action(uuid=w_utils.generate_uuid()) uuids.append(str(action['uuid'])) actions = self.dbapi.get_action_list(self.context) action_uuids = [a.uuid for a in actions] self.assertEqual(3, len(action_uuids)) self.assertEqual(sorted(uuids), sorted(action_uuids)) for action in actions: self.assertIsNone(action.action_plan) def test_get_action_list_eager(self): _action_plan = utils.get_test_action_plan() action_plan = self.dbapi.create_action_plan(_action_plan) uuids = [] for i in range(1, 4): action = utils.create_test_action( id=i, uuid=w_utils.generate_uuid(), action_plan_id=action_plan.id) uuids.append(str(action['uuid'])) actions = self.dbapi.get_action_list(self.context, eager=True) action_map = {a.uuid: a for a in actions} self.assertEqual(sorted(uuids), sorted(action_map.keys())) eager_action = action_map[action.uuid] self.assertEqual( action_plan.as_dict(), eager_action.action_plan.as_dict()) def test_get_action_list_with_filters(self): audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) action_plan = utils.create_test_action_plan( id=1, uuid=w_utils.generate_uuid(), audit_id=audit.id, parents=None, state=objects.action_plan.State.RECOMMENDED) action1 = utils.create_test_action( id=1, action_plan_id=action_plan['id'], description='description action 1', uuid=w_utils.generate_uuid(), parents=None, state=objects.action_plan.State.PENDING) action2 = utils.create_test_action( id=2, action_plan_id=2, description='description action 2', uuid=w_utils.generate_uuid(), 
parents=[action1['uuid']], state=objects.action_plan.State.PENDING) action3 = utils.create_test_action( id=3, action_plan_id=action_plan['id'], description='description action 3', uuid=w_utils.generate_uuid(), parents=[action2['uuid']], state=objects.action_plan.State.ONGOING) action4 = utils.create_test_action( id=4, action_plan_id=action_plan['id'], description='description action 4', uuid=w_utils.generate_uuid(), parents=None, state=objects.action_plan.State.ONGOING) self.dbapi.soft_delete_action(action4['uuid']) res = self.dbapi.get_action_list( self.context, filters={'state': objects.action_plan.State.ONGOING}) self.assertEqual([action3['id']], [r.id for r in res]) res = self.dbapi.get_action_list(self.context, filters={'state': 'bad-state'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_action_list( self.context, filters={'action_plan_id': 2}) self.assertEqual([action2['id']], [r.id for r in res]) res = self.dbapi.get_action_list( self.context, filters={'action_plan_uuid': action_plan['uuid']}) self.assertEqual( sorted([action1['id'], action3['id']]), sorted([r.id for r in res])) temp_context = self.context temp_context.show_deleted = True res = self.dbapi.get_action_list( temp_context, filters={'action_plan_uuid': action_plan['uuid']}) self.assertEqual( sorted([action1['id'], action3['id'], action4['id']]), sorted([r.id for r in res])) res = self.dbapi.get_action_list( self.context, filters={'audit_uuid': audit.uuid}) for action in res: self.assertEqual(action_plan['id'], action.action_plan_id) def test_get_action_list_with_filter_by_uuid(self): action = utils.create_test_action() res = self.dbapi.get_action_list( self.context, filters={'uuid': action["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(action['uuid'], res[0].uuid) def test_get_action_by_id(self): action = utils.create_test_action() action = self.dbapi.get_action_by_id(self.context, action['id']) self.assertEqual(action['uuid'], action.uuid) def 
test_get_action_by_uuid(self): action = utils.create_test_action() action = self.dbapi.get_action_by_uuid(self.context, action['uuid']) self.assertEqual(action['id'], action.id) def test_get_action_that_does_not_exist(self): self.assertRaises(exception.ActionNotFound, self.dbapi.get_action_by_id, self.context, 1234) def test_update_action(self): action = utils.create_test_action() res = self.dbapi.update_action( action['id'], {'state': objects.action_plan.State.CANCELLED}) self.assertEqual(objects.action_plan.State.CANCELLED, res.state) def test_update_action_that_does_not_exist(self): self.assertRaises(exception.ActionNotFound, self.dbapi.update_action, 1234, {'state': ''}) def test_update_action_uuid(self): action = utils.create_test_action() self.assertRaises(exception.Invalid, self.dbapi.update_action, action['id'], {'uuid': 'hello'}) def test_destroy_action(self): action = utils.create_test_action() self.dbapi.destroy_action(action['id']) self.assertRaises(exception.ActionNotFound, self.dbapi.get_action_by_id, self.context, action['id']) def test_destroy_action_by_uuid(self): uuid = w_utils.generate_uuid() utils.create_test_action(uuid=uuid) self.assertIsNotNone(self.dbapi.get_action_by_uuid(self.context, uuid)) self.dbapi.destroy_action(uuid) self.assertRaises(exception.ActionNotFound, self.dbapi.get_action_by_uuid, self.context, uuid) def test_destroy_action_that_does_not_exist(self): self.assertRaises(exception.ActionNotFound, self.dbapi.destroy_action, 1234) def test_create_action_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_action(id=1, uuid=uuid) self.assertRaises(exception.ActionAlreadyExists, utils.create_test_action, id=2, uuid=uuid) python-watcher-4.0.0/watcher/tests/db/test_audit.py0000664000175000017500000004055513656752270022442 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating Audit via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbAuditFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbAuditFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" def gen_name(): return "Audit %s" % w_utils.generate_uuid() self.audit1_name = gen_name() self.audit2_name = gen_name() self.audit3_name = gen_name() self.audit4_name = gen_name() self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.audit1 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None, name=self.audit1_name) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.audit2 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=2, uuid=None, name=self.audit2_name, state=objects.audit.State.FAILED) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.audit3 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=3, uuid=None, name=self.audit3_name, 
state=objects.audit.State.CANCELLED) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.audit4 = utils.create_test_audit( audit_template_id=self.audit_template.id, id=4, uuid=None, name=self.audit4_name, state=objects.audit.State.SUSPENDED) def _soft_delete_audits(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit(self.audit1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_audit(self.audit2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_audit(self.audit3.uuid) def _update_audits(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_audit( self.audit1.uuid, values={"state": objects.audit.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_audit( self.audit2.uuid, values={"state": objects.audit.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_audit( self.audit3.uuid, values={"state": objects.audit.State.SUCCEEDED}) def test_get_audit_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit(self.audit1.uuid) res = self.dbapi.get_audit_list( self.context, filters={'deleted': True}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_audit(self.audit1.uuid) res = self.dbapi.get_audit_list( self.context, filters={'deleted': False}) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_eq(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_lt(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) 
self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_lte(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_gt(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_deleted_at_gte(self): self._soft_delete_audits() res = self.dbapi.get_audit_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit1['id'], self.audit2['id']], [r.id for r in res]) # created_at # def test_get_audit_list_filter_created_at_eq(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_lt(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_lte(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_gt(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_created_at_gte(self): res = self.dbapi.get_audit_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit1['id'], self.audit2['id']], [r.id for r in res]) # updated_at # def 
test_get_audit_list_filter_updated_at_eq(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_lt(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_lte(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit2['id'], self.audit3['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_gt(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.audit1['id']], [r.id for r in res]) def test_get_audit_list_filter_updated_at_gte(self): self._update_audits() res = self.dbapi.get_audit_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.audit1['id'], self.audit2['id']], [r.id for r in res]) def test_get_audit_list_filter_state_in(self): res = self.dbapi.get_audit_list( self.context, filters={ 'state__in': objects.audit.AuditStateTransitionManager.INACTIVE_STATES }) self.assertEqual( [self.audit2['id'], self.audit3['id'], self.audit4['id']], [r.id for r in res]) def test_get_audit_list_filter_state_notin(self): res = self.dbapi.get_audit_list( self.context, filters={ 'state__notin': objects.audit.AuditStateTransitionManager.INACTIVE_STATES }) self.assertEqual( [self.audit1['id']], [r.id for r in res]) class DbAuditTestCase(base.DbTestCase): def test_get_audit_list(self): uuids = [] for id_ in range(1, 4): audit = utils.create_test_audit(uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(id_)) uuids.append(str(audit['uuid'])) audits = self.dbapi.get_audit_list(self.context) 
audit_uuids = [a.uuid for a in audits] self.assertEqual(sorted(uuids), sorted(audit_uuids)) for audit in audits: self.assertIsNone(audit.goal) self.assertIsNone(audit.strategy) def test_get_audit_list_eager(self): _goal = utils.get_test_goal() goal = self.dbapi.create_goal(_goal) _strategy = utils.get_test_strategy() strategy = self.dbapi.create_strategy(_strategy) uuids = [] for i in range(1, 4): audit = utils.create_test_audit( id=i, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(i), goal_id=goal.id, strategy_id=strategy.id) uuids.append(str(audit['uuid'])) audits = self.dbapi.get_audit_list(self.context, eager=True) audit_map = {a.uuid: a for a in audits} self.assertEqual(sorted(uuids), sorted(audit_map.keys())) eager_audit = audit_map[audit.uuid] self.assertEqual(goal.as_dict(), eager_audit.goal.as_dict()) self.assertEqual(strategy.as_dict(), eager_audit.strategy.as_dict()) def test_get_audit_list_with_filters(self): goal = utils.create_test_goal(name='DUMMY') audit1 = utils.create_test_audit( id=1, audit_type=objects.audit.AuditType.ONESHOT.value, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(1), state=objects.audit.State.ONGOING, goal_id=goal['id']) audit2 = utils.create_test_audit( id=2, audit_type=objects.audit.AuditType.CONTINUOUS.value, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(2), state=objects.audit.State.PENDING, goal_id=goal['id']) audit3 = utils.create_test_audit( id=3, audit_type=objects.audit.AuditType.CONTINUOUS.value, uuid=w_utils.generate_uuid(), name='My Audit {0}'.format(3), state=objects.audit.State.ONGOING, goal_id=goal['id']) self.dbapi.soft_delete_audit(audit3['uuid']) res = self.dbapi.get_audit_list( self.context, filters={'audit_type': objects.audit.AuditType.ONESHOT.value}) self.assertEqual([audit1['id']], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, filters={'audit_type': 'bad-type'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, 
filters={'state': objects.audit.State.ONGOING}) self.assertEqual([audit1['id']], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, filters={'state': objects.audit.State.PENDING}) self.assertEqual([audit2['id']], [r.id for r in res]) res = self.dbapi.get_audit_list( self.context, filters={'goal_name': 'DUMMY'}) self.assertEqual(sorted([audit1['id'], audit2['id']]), sorted([r.id for r in res])) temp_context = self.context temp_context.show_deleted = True res = self.dbapi.get_audit_list( temp_context, filters={'goal_name': 'DUMMY'}) self.assertEqual(sorted([audit1['id'], audit2['id'], audit3['id']]), sorted([r.id for r in res])) def test_get_audit_list_with_filter_by_uuid(self): audit = utils.create_test_audit() res = self.dbapi.get_audit_list( self.context, filters={'uuid': audit["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(audit['uuid'], res[0].uuid) def test_get_audit_by_id(self): audit = utils.create_test_audit() audit = self.dbapi.get_audit_by_id(self.context, audit['id']) self.assertEqual(audit['uuid'], audit.uuid) def test_get_audit_by_uuid(self): audit = utils.create_test_audit() audit = self.dbapi.get_audit_by_uuid(self.context, audit['uuid']) self.assertEqual(audit['id'], audit.id) def test_get_audit_that_does_not_exist(self): self.assertRaises(exception.AuditNotFound, self.dbapi.get_audit_by_id, self.context, 1234) def test_update_audit(self): audit = utils.create_test_audit() res = self.dbapi.update_audit(audit['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_audit_that_does_not_exist(self): self.assertRaises(exception.AuditNotFound, self.dbapi.update_audit, 1234, {'name': ''}) def test_update_audit_uuid(self): audit = utils.create_test_audit() self.assertRaises(exception.Invalid, self.dbapi.update_audit, audit['id'], {'uuid': 'hello'}) def test_destroy_audit(self): audit = utils.create_test_audit() self.dbapi.destroy_audit(audit['id']) self.assertRaises(exception.AuditNotFound, 
self.dbapi.get_audit_by_id, self.context, audit['id']) def test_destroy_audit_by_uuid(self): audit = utils.create_test_audit() self.assertIsNotNone(self.dbapi.get_audit_by_uuid(self.context, audit['uuid'])) self.dbapi.destroy_audit(audit['uuid']) self.assertRaises(exception.AuditNotFound, self.dbapi.get_audit_by_uuid, self.context, audit['uuid']) def test_destroy_audit_that_does_not_exist(self): self.assertRaises(exception.AuditNotFound, self.dbapi.destroy_audit, 1234) def test_destroy_audit_that_referenced_by_action_plans(self): audit = utils.create_test_audit() action_plan = utils.create_test_action_plan(audit_id=audit['id']) self.assertEqual(audit['id'], action_plan.audit_id) self.assertRaises(exception.AuditReferenced, self.dbapi.destroy_audit, audit['id']) def test_create_audit_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_audit(id=1, uuid=uuid) self.assertRaises(exception.AuditAlreadyExists, utils.create_test_audit, id=2, uuid=uuid) def test_create_same_name_audit(self): audit = utils.create_test_audit( uuid=w_utils.generate_uuid(), name='my_audit') self.assertEqual(audit['uuid'], audit.uuid) self.assertRaises( exception.AuditAlreadyExists, utils.create_test_audit, uuid=w_utils.generate_uuid(), name='my_audit') python-watcher-4.0.0/watcher/tests/db/test_action_plan.py0000664000175000017500000003640013656752270023615 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. """Tests for manipulating ActionPlan via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher.objects import action_plan as ap_objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbActionPlanFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbActionPlanFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) self.audit = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.action_plan1 = utils.create_test_action_plan( audit_id=self.audit.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.action_plan2 = utils.create_test_action_plan( audit_id=self.audit.id, id=2, uuid=None) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.action_plan3 = utils.create_test_action_plan( audit_id=self.audit.id, id=3, uuid=None) def _soft_delete_action_plans(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_action_plan(self.action_plan2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_action_plan(self.action_plan3.uuid) def _update_action_plans(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_action_plan( self.action_plan1.uuid, values={"state": ap_objects.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_action_plan( self.action_plan2.uuid, 
values={"state": ap_objects.State.SUCCEEDED}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_action_plan( self.action_plan3.uuid, values={"state": ap_objects.State.SUCCEEDED}) def test_get_action_plan_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) res = self.dbapi.get_action_plan_list( self.context, filters={'deleted': True}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_action_plan(self.action_plan1.uuid) res = self.dbapi.get_action_plan_list( self.context, filters={'deleted': False}) self.assertEqual([self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_eq(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_lt(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_lte(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_gt(self): self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_deleted_at_gte(self): 
self._soft_delete_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan1['id'], self.action_plan2['id']], [r.id for r in res]) # created_at # def test_get_action_plan_list_filter_created_at_eq(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_lt(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_lte(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_gt(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_created_at_gte(self): res = self.dbapi.get_action_plan_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan1['id'], self.action_plan2['id']], [r.id for r in res]) # updated_at # def test_get_action_plan_list_filter_updated_at_eq(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_updated_at_lt(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def 
test_get_action_plan_list_filter_updated_at_lte(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan2['id'], self.action_plan3['id']], [r.id for r in res]) def test_get_action_plan_list_filter_updated_at_gt(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_filter_updated_at_gte(self): self._update_action_plans() res = self.dbapi.get_action_plan_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.action_plan1['id'], self.action_plan2['id']], [r.id for r in res]) class DbActionPlanTestCase(base.DbTestCase): def test_get_action_plan_list(self): uuids = [] for _ in range(1, 4): action_plan = utils.create_test_action_plan( uuid=w_utils.generate_uuid()) uuids.append(str(action_plan['uuid'])) action_plans = self.dbapi.get_action_plan_list(self.context) action_plan_uuids = [ap.uuid for ap in action_plans] self.assertEqual(sorted(uuids), sorted(action_plan_uuids)) for action_plan in action_plans: self.assertIsNone(action_plan.audit) self.assertIsNone(action_plan.strategy) def test_get_action_plan_list_eager(self): _strategy = utils.get_test_strategy() strategy = self.dbapi.create_strategy(_strategy) _audit = utils.get_test_audit() audit = self.dbapi.create_audit(_audit) uuids = [] for _ in range(1, 4): action_plan = utils.create_test_action_plan( uuid=w_utils.generate_uuid()) uuids.append(str(action_plan['uuid'])) action_plans = self.dbapi.get_action_plan_list( self.context, eager=True) action_plan_map = {a.uuid: a for a in action_plans} self.assertEqual(sorted(uuids), sorted(action_plan_map.keys())) eager_action_plan = action_plan_map[action_plan.uuid] self.assertEqual( strategy.as_dict(), eager_action_plan.strategy.as_dict()) 
self.assertEqual(audit.as_dict(), eager_action_plan.audit.as_dict()) def test_get_action_plan_list_with_filters(self): audit = utils.create_test_audit( id=2, audit_type='ONESHOT', uuid=w_utils.generate_uuid(), state=ap_objects.State.ONGOING) action_plan1 = utils.create_test_action_plan( id=1, uuid=w_utils.generate_uuid(), audit_id=audit['id'], state=ap_objects.State.RECOMMENDED) action_plan2 = utils.create_test_action_plan( id=2, uuid=w_utils.generate_uuid(), audit_id=audit['id'], state=ap_objects.State.ONGOING) action_plan3 = utils.create_test_action_plan( id=3, uuid=w_utils.generate_uuid(), audit_id=audit['id'], state=ap_objects.State.RECOMMENDED) # check on bug 1761956 self.dbapi.soft_delete_action_plan(action_plan3['uuid']) res = self.dbapi.get_action_plan_list( self.context, filters={'state': ap_objects.State.RECOMMENDED}) self.assertEqual([action_plan1['id']], [r.id for r in res]) res = self.dbapi.get_action_plan_list( self.context, filters={'state': ap_objects.State.ONGOING}) self.assertEqual([action_plan2['id']], [r.id for r in res]) res = self.dbapi.get_action_plan_list( self.context, filters={'audit_uuid': audit['uuid']}) self.assertEqual( sorted([action_plan1['id'], action_plan2['id']]), sorted([r.id for r in res])) for r in res: self.assertEqual(audit['id'], r.audit_id) self.dbapi.soft_delete_action_plan(action_plan1['uuid']) res = self.dbapi.get_action_plan_list( self.context, filters={'audit_uuid': audit['uuid']}) self.assertEqual([action_plan2['id']], [r.id for r in res]) self.assertNotEqual([action_plan1['id']], [r.id for r in res]) def test_get_action_plan_list_with_filter_by_uuid(self): action_plan = utils.create_test_action_plan() res = self.dbapi.get_action_plan_list( self.context, filters={'uuid': action_plan["uuid"]}) self.assertEqual(len(res), 1) self.assertEqual(action_plan['uuid'], res[0].uuid) def test_get_action_plan_by_id(self): action_plan = utils.create_test_action_plan() action_plan = self.dbapi.get_action_plan_by_id( self.context, 
action_plan['id']) self.assertEqual(action_plan['uuid'], action_plan.uuid) def test_get_action_plan_by_uuid(self): action_plan = utils.create_test_action_plan() action_plan = self.dbapi.get_action_plan_by_uuid( self.context, action_plan['uuid']) self.assertEqual(action_plan['id'], action_plan.id) def test_get_action_plan_that_does_not_exist(self): self.assertRaises(exception.ActionPlanNotFound, self.dbapi.get_action_plan_by_id, self.context, 1234) def test_update_action_plan(self): action_plan = utils.create_test_action_plan() res = self.dbapi.update_action_plan( action_plan['id'], {'name': 'updated-model'}) self.assertEqual('updated-model', res.name) def test_update_action_plan_that_does_not_exist(self): self.assertRaises(exception.ActionPlanNotFound, self.dbapi.update_action_plan, 1234, {'name': ''}) def test_update_action_plan_uuid(self): action_plan = utils.create_test_action_plan() self.assertRaises(exception.Invalid, self.dbapi.update_action_plan, action_plan['id'], {'uuid': 'hello'}) def test_destroy_action_plan(self): action_plan = utils.create_test_action_plan() self.dbapi.destroy_action_plan(action_plan['id']) self.assertRaises(exception.ActionPlanNotFound, self.dbapi.get_action_plan_by_id, self.context, action_plan['id']) def test_destroy_action_plan_by_uuid(self): uuid = w_utils.generate_uuid() utils.create_test_action_plan(uuid=uuid) self.assertIsNotNone(self.dbapi.get_action_plan_by_uuid( self.context, uuid)) self.dbapi.destroy_action_plan(uuid) self.assertRaises(exception.ActionPlanNotFound, self.dbapi.get_action_plan_by_uuid, self.context, uuid) def test_destroy_action_plan_that_does_not_exist(self): self.assertRaises(exception.ActionPlanNotFound, self.dbapi.destroy_action_plan, 1234) def test_destroy_action_plan_that_referenced_by_actions(self): action_plan = utils.create_test_action_plan() action = utils.create_test_action(action_plan_id=action_plan['id']) self.assertEqual(action_plan['id'], action.action_plan_id) 
self.assertRaises(exception.ActionPlanReferenced, self.dbapi.destroy_action_plan, action_plan['id']) def test_create_action_plan_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_action_plan(id=1, uuid=uuid) self.assertRaises(exception.ActionPlanAlreadyExists, utils.create_test_action_plan, id=2, uuid=uuid) python-watcher-4.0.0/watcher/tests/db/test_purge.py0000664000175000017500000005563113656752270022457 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_utils import uuidutils import freezegun import mock from watcher.common import context as watcher_context from watcher.common import utils from watcher.db import purge from watcher.db.sqlalchemy import api as dbapi from watcher.tests.db import base from watcher.tests.objects import utils as obj_utils class TestPurgeCommand(base.DbTestCase): def setUp(self): super(TestPurgeCommand, self).setUp() self.cmd = purge.PurgeCommand() token_info = { 'token': { 'project': { 'id': 'fake_project' }, 'user': { 'id': 'fake_user' } } } self.context = watcher_context.RequestContext( auth_token_info=token_info, project_id='fake_project', user_id='fake_user', show_deleted=True, ) self.fake_today = '2016-02-24T09:52:05.219414+00:00' self.expired_date = '2016-01-24T09:52:05.219414+00:00' self.m_input = mock.Mock() p = mock.patch("watcher.db.purge.input", self.m_input) self.m_input.return_value = 'y' p.start() self.addCleanup(p.stop) self._id_generator = None self._data_setup() def _generate_id(self): if self._id_generator is None: self._id_generator = self._get_id_generator() return next(self._id_generator) def _get_id_generator(self): seed = 1 while True: yield seed seed += 1 def generate_unique_name(self, prefix): return "%s%s" % (prefix, uuidutils.generate_uuid()) def _data_setup(self): # All the 1's are soft_deleted and are expired # All the 2's are soft_deleted but are not expired # All the 3's are *not* soft_deleted # Number of days we want to keep in DB (no purge for them) self.cmd.age_in_days = 10 self.cmd.max_number = None self.cmd.orphans = True goal1_name = "GOAL_1" goal2_name = "GOAL_2" goal3_name = "GOAL_3" strategy1_name = "strategy_1" strategy2_name = "strategy_2" strategy3_name = "strategy_3" self.audit_template1_name = self.generate_unique_name( prefix="Audit Template 1 ") self.audit_template2_name = self.generate_unique_name( prefix="Audit Template 2 ") self.audit_template3_name = self.generate_unique_name( prefix="Audit Template 3 ") self.audit1_name = 
self.generate_unique_name( prefix="Audit 1 ") self.audit2_name = self.generate_unique_name( prefix="Audit 2 ") self.audit3_name = self.generate_unique_name( prefix="Audit 3 ") with freezegun.freeze_time(self.expired_date): self.goal1 = obj_utils.create_test_goal( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=goal1_name, display_name=goal1_name.lower()) self.goal2 = obj_utils.create_test_goal( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=goal2_name, display_name=goal2_name.lower()) self.goal3 = obj_utils.create_test_goal( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=goal3_name, display_name=goal3_name.lower()) self.goal1.soft_delete() with freezegun.freeze_time(self.expired_date): self.strategy1 = obj_utils.create_test_strategy( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=strategy1_name, display_name=strategy1_name.lower(), goal_id=self.goal1.id) self.strategy2 = obj_utils.create_test_strategy( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=strategy2_name, display_name=strategy2_name.lower(), goal_id=self.goal2.id) self.strategy3 = obj_utils.create_test_strategy( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=strategy3_name, display_name=strategy3_name.lower(), goal_id=self.goal3.id) self.strategy1.soft_delete() with freezegun.freeze_time(self.expired_date): self.audit_template1 = obj_utils.create_test_audit_template( self.context, name=self.audit_template1_name, id=self._generate_id(), uuid=utils.generate_uuid(), goal_id=self.goal1.id, strategy_id=self.strategy1.id) self.audit_template2 = obj_utils.create_test_audit_template( self.context, name=self.audit_template2_name, id=self._generate_id(), uuid=utils.generate_uuid(), goal_id=self.goal2.id, strategy_id=self.strategy2.id) self.audit_template3 = obj_utils.create_test_audit_template( self.context, name=self.audit_template3_name, id=self._generate_id(), 
uuid=utils.generate_uuid(), goal_id=self.goal3.id, strategy_id=self.strategy3.id) self.audit_template1.soft_delete() with freezegun.freeze_time(self.expired_date): self.audit1 = obj_utils.create_test_audit( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.audit1_name, goal_id=self.goal1.id, strategy_id=self.strategy1.id) self.audit2 = obj_utils.create_test_audit( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.audit2_name, goal_id=self.goal2.id, strategy_id=self.strategy2.id) self.audit3 = obj_utils.create_test_audit( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.audit3_name, goal_id=self.goal3.id, strategy_id=self.strategy3.id) self.audit1.soft_delete() with freezegun.freeze_time(self.expired_date): self.action_plan1 = obj_utils.create_test_action_plan( self.context, audit_id=self.audit1.id, id=self._generate_id(), uuid=utils.generate_uuid(), strategy_id=self.strategy1.id) self.action_plan2 = obj_utils.create_test_action_plan( self.context, audit_id=self.audit2.id, id=self._generate_id(), strategy_id=self.strategy2.id, uuid=utils.generate_uuid()) self.action_plan3 = obj_utils.create_test_action_plan( self.context, audit_id=self.audit3.id, id=self._generate_id(), uuid=utils.generate_uuid(), strategy_id=self.strategy3.id) self.action1 = obj_utils.create_test_action( self.context, action_plan_id=self.action_plan1.id, id=self._generate_id(), uuid=utils.generate_uuid()) self.action2 = obj_utils.create_test_action( self.context, action_plan_id=self.action_plan2.id, id=self._generate_id(), uuid=utils.generate_uuid()) self.action3 = obj_utils.create_test_action( self.context, action_plan_id=self.action_plan3.id, id=self._generate_id(), uuid=utils.generate_uuid()) self.action_plan1.soft_delete() @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") 
@mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_execute_max_number_exceeded(self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): self.cmd.age_in_days = None self.cmd.max_number = 10 with freezegun.freeze_time(self.fake_today): self.goal2.soft_delete() self.strategy2.soft_delete() self.audit_template2.soft_delete() self.audit2.soft_delete() self.action_plan2.soft_delete() with freezegun.freeze_time(self.fake_today): self.cmd.execute() # The 1's and the 2's are purgeable (due to age of day set to 0), # but max_number = 10, and because of no Db integrity violation, we # should be able to purge only 6 objects. self.assertEqual(m_destroy_goal.call_count, 1) self.assertEqual(m_destroy_strategy.call_count, 1) self.assertEqual(m_destroy_audit_template.call_count, 1) self.assertEqual(m_destroy_audit.call_count, 1) self.assertEqual(m_destroy_action_plan.call_count, 1) self.assertEqual(m_destroy_action.call_count, 1) def test_find_deleted_entries(self): self.cmd.age_in_days = None with freezegun.freeze_time(self.fake_today): objects_map = self.cmd.find_objects_to_delete() self.assertEqual(len(objects_map.goals), 1) self.assertEqual(len(objects_map.strategies), 1) self.assertEqual(len(objects_map.audit_templates), 1) self.assertEqual(len(objects_map.audits), 1) self.assertEqual(len(objects_map.action_plans), 1) self.assertEqual(len(objects_map.actions), 1) def test_find_deleted_and_expired_entries(self): with freezegun.freeze_time(self.fake_today): self.goal2.soft_delete() self.strategy2.soft_delete() self.audit_template2.soft_delete() self.audit2.soft_delete() self.action_plan2.soft_delete() with freezegun.freeze_time(self.fake_today): objects_map = self.cmd.find_objects_to_delete() # The 1's are purgeable (due to age of day set to 10) 
self.assertEqual(len(objects_map.goals), 1) self.assertEqual(len(objects_map.strategies), 1) self.assertEqual(len(objects_map.audit_templates), 1) self.assertEqual(len(objects_map.audits), 1) self.assertEqual(len(objects_map.action_plans), 1) self.assertEqual(len(objects_map.actions), 1) def test_find_deleted_and_nonexpired_related_entries(self): with freezegun.freeze_time(self.fake_today): # orphan audit template audit_template4 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal2.id, name=self.generate_unique_name(prefix="Audit Template 4 "), strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid()) audit4 = obj_utils.create_test_audit( self.context, audit_template_id=audit_template4.id, strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.generate_unique_name(prefix="Audit 4 ")) action_plan4 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit4.id, strategy_id=self.strategy1.id) action4 = obj_utils.create_test_action( self.context, action_plan_id=action_plan4.id, id=self._generate_id(), uuid=utils.generate_uuid()) audit_template5 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal1.id, name=self.generate_unique_name(prefix="Audit Template 5 "), strategy_id=None, id=self._generate_id(), uuid=utils.generate_uuid()) audit5 = obj_utils.create_test_audit( self.context, audit_template_id=audit_template5.id, strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.generate_unique_name(prefix="Audit 5 ")) action_plan5 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit5.id, strategy_id=self.strategy1.id) action5 = obj_utils.create_test_action( self.context, action_plan_id=action_plan5.id, id=self._generate_id(), uuid=utils.generate_uuid()) self.goal2.soft_delete() self.strategy2.soft_delete() 
self.audit_template2.soft_delete() self.audit2.soft_delete() self.action_plan2.soft_delete() # All the 4's should be purged as well because they are orphans # even though they were not deleted # All the 5's should be purged as well even though they are not # expired because their related audit template is itself expired audit_template5.soft_delete() audit5.soft_delete() action_plan5.soft_delete() with freezegun.freeze_time(self.fake_today): objects_map = self.cmd.find_objects_to_delete() self.assertEqual(len(objects_map.goals), 1) self.assertEqual(len(objects_map.strategies), 1) self.assertEqual(len(objects_map.audit_templates), 3) self.assertEqual(len(objects_map.audits), 3) self.assertEqual(len(objects_map.action_plans), 3) self.assertEqual(len(objects_map.actions), 3) self.assertEqual( set([self.action1.id, action4.id, action5.id]), set([entry.id for entry in objects_map.actions])) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command(self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): with freezegun.freeze_time(self.fake_today): self.cmd.execute() m_destroy_audit_template.assert_called_once_with( self.audit_template1.uuid) m_destroy_audit.assert_called_with( self.audit1.uuid) m_destroy_action_plan.assert_called_with( self.action_plan1.uuid) m_destroy_action.assert_called_with( self.action1.uuid) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") 
@mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_nonexpired_related_entries( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): with freezegun.freeze_time(self.fake_today): # orphan audit template audit_template4 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal2.id, name=self.generate_unique_name(prefix="Audit Template 4 "), strategy_id=None, id=self._generate_id(), uuid=utils.generate_uuid()) audit4 = obj_utils.create_test_audit( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_template_id=audit_template4.id, name=self.generate_unique_name(prefix="Audit 4 ")) action_plan4 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit4.id, strategy_id=self.strategy1.id) action4 = obj_utils.create_test_action( self.context, action_plan_id=action_plan4.id, id=self._generate_id(), uuid=utils.generate_uuid()) audit_template5 = obj_utils.create_test_audit_template( self.context, goal_id=self.goal1.id, name=self.generate_unique_name(prefix="Audit Template 5 "), strategy_id=None, id=self._generate_id(), uuid=utils.generate_uuid()) audit5 = obj_utils.create_test_audit( self.context, audit_template_id=audit_template5.id, strategy_id=self.strategy1.id, id=self._generate_id(), uuid=utils.generate_uuid(), name=self.generate_unique_name(prefix="Audit 5 ")) action_plan5 = obj_utils.create_test_action_plan( self.context, id=self._generate_id(), uuid=utils.generate_uuid(), audit_id=audit5.id, strategy_id=self.strategy1.id) action5 = obj_utils.create_test_action( self.context, action_plan_id=action_plan5.id, id=self._generate_id(), uuid=utils.generate_uuid()) self.goal2.soft_delete() self.strategy2.soft_delete() self.audit_template2.soft_delete() self.audit2.soft_delete() self.action_plan2.soft_delete() # All the 4's should be purged as well because they are orphans # even 
though they were not deleted # All the 5's should be purged as well even though they are not # expired because their related audit template is itself expired audit_template5.soft_delete() audit5.soft_delete() action_plan5.soft_delete() with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 1) self.assertEqual(m_destroy_strategy.call_count, 1) self.assertEqual(m_destroy_audit_template.call_count, 3) self.assertEqual(m_destroy_audit.call_count, 3) self.assertEqual(m_destroy_action_plan.call_count, 3) self.assertEqual(m_destroy_action.call_count, 3) m_destroy_audit_template.assert_any_call(self.audit_template1.uuid) m_destroy_audit.assert_any_call(self.audit1.uuid) m_destroy_audit.assert_any_call(audit4.uuid) m_destroy_action_plan.assert_any_call(self.action_plan1.uuid) m_destroy_action_plan.assert_any_call(action_plan4.uuid) m_destroy_action_plan.assert_any_call(action_plan5.uuid) m_destroy_action.assert_any_call(self.action1.uuid) m_destroy_action.assert_any_call(action4.uuid) m_destroy_action.assert_any_call(action5.uuid) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_strategy_uuid( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): self.cmd.exclude_orphans = False self.cmd.uuid = self.strategy1.uuid with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 0) self.assertEqual(m_destroy_strategy.call_count, 1) self.assertEqual(m_destroy_audit_template.call_count, 1) self.assertEqual(m_destroy_audit.call_count, 1) self.assertEqual(m_destroy_action_plan.call_count, 1) 
self.assertEqual(m_destroy_action.call_count, 1) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_audit_template_not_expired( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): self.cmd.exclude_orphans = True self.cmd.uuid = self.audit_template2.uuid with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 0) self.assertEqual(m_destroy_strategy.call_count, 0) self.assertEqual(m_destroy_audit_template.call_count, 0) self.assertEqual(m_destroy_audit.call_count, 0) self.assertEqual(m_destroy_action_plan.call_count, 0) self.assertEqual(m_destroy_action.call_count, 0) @mock.patch.object(dbapi.Connection, "destroy_action") @mock.patch.object(dbapi.Connection, "destroy_action_plan") @mock.patch.object(dbapi.Connection, "destroy_audit") @mock.patch.object(dbapi.Connection, "destroy_audit_template") @mock.patch.object(dbapi.Connection, "destroy_strategy") @mock.patch.object(dbapi.Connection, "destroy_goal") def test_purge_command_with_audit_template_not_soft_deleted( self, m_destroy_goal, m_destroy_strategy, m_destroy_audit_template, m_destroy_audit, m_destroy_action_plan, m_destroy_action): self.cmd.exclude_orphans = False self.cmd.uuid = self.audit_template3.uuid with freezegun.freeze_time(self.fake_today): self.cmd.execute() self.assertEqual(m_destroy_goal.call_count, 0) self.assertEqual(m_destroy_strategy.call_count, 0) self.assertEqual(m_destroy_audit_template.call_count, 0) self.assertEqual(m_destroy_audit.call_count, 0) self.assertEqual(m_destroy_action_plan.call_count, 0) self.assertEqual(m_destroy_action.call_count, 0) 
python-watcher-4.0.0/watcher/tests/db/test_service.py0000664000175000017500000002540513656752270022771 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for manipulating Service via the DB API""" import freezegun from oslo_utils import timeutils from watcher.common import exception from watcher.tests.db import base from watcher.tests.db import utils class TestDbServiceFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbServiceFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): service1_name = "SERVICE_ID_1" service2_name = "SERVICE_ID_2" service3_name = "SERVICE_ID_3" with freezegun.freeze_time(self.FAKE_TODAY): self.service1 = utils.create_test_service( id=1, name=service1_name, host="controller", last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.service2 = utils.create_test_service( id=2, name=service2_name, host="controller", last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.service3 = utils.create_test_service( id=3, name=service3_name, host="controller", last_seen_up=timeutils.parse_isotime("2016-09-22T08:32:05")) def _soft_delete_services(self): with 
freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_service(self.service1.id) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_service(self.service2.id) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_service(self.service3.id) def _update_services(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_service( self.service1.id, values={"host": "controller1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_service( self.service2.id, values={"host": "controller2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_service( self.service3.id, values={"host": "controller3"}) def test_get_service_list_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_service(self.service1.id) res = self.dbapi.get_service_list( self.context, filters={'deleted': True}) self.assertEqual([self.service1['name']], [r.name for r in res]) def test_get_service_list_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_service(self.service1.id) res = self.dbapi.get_service_list( self.context, filters={'deleted': False}) self.assertEqual( set([self.service2['name'], self.service3['name']]), set([r.name for r in res])) def test_get_service_list_filter_deleted_at_eq(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_deleted_at_lt(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_deleted_at_lte(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) 
self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_deleted_at_gt(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_deleted_at_gte(self): self._soft_delete_services() res = self.dbapi.get_service_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service1['id'], self.service2['id']]), set([r.id for r in res])) # created_at # def test_get_service_list_filter_created_at_eq(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_created_at_lt(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_created_at_lte(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_created_at_gt(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_created_at_gte(self): res = self.dbapi.get_service_list( self.context, filters={'created_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service1['id'], self.service2['id']]), set([r.id for r in res])) # updated_at # def test_get_service_list_filter_updated_at_eq(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) 
self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_updated_at_lt(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_updated_at_lte(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service2['id'], self.service3['id']]), set([r.id for r in res])) def test_get_service_list_filter_updated_at_gt(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.service1['id']], [r.id for r in res]) def test_get_service_list_filter_updated_at_gte(self): self._update_services() res = self.dbapi.get_service_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( set([self.service1['id'], self.service2['id']]), set([r.id for r in res])) class DbServiceTestCase(base.DbTestCase): def test_get_service_list(self): ids = [] for i in range(1, 4): service = utils.create_test_service( id=i, name="SERVICE_ID_%s" % i, host="controller_{0}".format(i)) ids.append(service['id']) services = self.dbapi.get_service_list(self.context) service_ids = [s.id for s in services] self.assertEqual(sorted(ids), sorted(service_ids)) def test_get_service_list_with_filters(self): service1 = utils.create_test_service( id=1, name="SERVICE_ID_1", host="controller_1", ) service2 = utils.create_test_service( id=2, name="SERVICE_ID_2", host="controller_2", ) service3 = utils.create_test_service( id=3, name="SERVICE_ID_3", host="controller_3", ) self.dbapi.soft_delete_service(service3['id']) res = self.dbapi.get_service_list( self.context, filters={'host': 'controller_1'}) self.assertEqual([service1['id']], [r.id for r in res]) res = 
self.dbapi.get_service_list( self.context, filters={'host': 'controller_3'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_service_list( self.context, filters={'host': 'controller_2'}) self.assertEqual([service2['id']], [r.id for r in res]) def test_get_service_by_name(self): created_service = utils.create_test_service() service = self.dbapi.get_service_by_name( self.context, created_service['name']) self.assertEqual(service.name, created_service['name']) def test_get_service_that_does_not_exist(self): self.assertRaises(exception.ServiceNotFound, self.dbapi.get_service_by_id, self.context, 404) def test_update_service(self): service = utils.create_test_service() res = self.dbapi.update_service( service['id'], {'host': 'controller_test'}) self.assertEqual('controller_test', res.host) def test_update_service_that_does_not_exist(self): self.assertRaises(exception.ServiceNotFound, self.dbapi.update_service, 405, {'name': ''}) def test_create_service_already_exists(self): service_id = "STRATEGY_ID" utils.create_test_service(name=service_id) self.assertRaises(exception.ServiceAlreadyExists, utils.create_test_service, name=service_id) python-watcher-4.0.0/watcher/tests/db/test_efficacy_indicator.py0000664000175000017500000004201413656752270025131 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Tests for manipulating EfficacyIndicator via the DB API""" import freezegun from watcher.common import exception from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestDbEfficacyIndicatorFilters(base.DbTestCase): FAKE_OLDER_DATE = '2014-01-01T09:52:05.219414' FAKE_OLD_DATE = '2015-01-01T09:52:05.219414' FAKE_TODAY = '2016-02-24T09:52:05.219414' def setUp(self): super(TestDbEfficacyIndicatorFilters, self).setUp() self.context.show_deleted = True self._data_setup() def _data_setup(self): self.audit_template_name = "Audit Template" self.audit_template = utils.create_test_audit_template( name=self.audit_template_name, id=1, uuid=None) self.audit = utils.create_test_audit( audit_template_id=self.audit_template.id, id=1, uuid=None) self.action_plan = utils.create_test_action_plan( audit_id=self.audit.id, id=1, uuid=None) with freezegun.freeze_time(self.FAKE_TODAY): self.efficacy_indicator1 = utils.create_test_efficacy_indicator( action_plan_id=self.action_plan.id, id=1, uuid=None, name="efficacy_indicator1", description="Test Indicator 1") with freezegun.freeze_time(self.FAKE_OLD_DATE): self.efficacy_indicator2 = utils.create_test_efficacy_indicator( action_plan_id=self.action_plan.id, id=2, uuid=None, name="efficacy_indicator2", description="Test Indicator 2") with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.efficacy_indicator3 = utils.create_test_efficacy_indicator( action_plan_id=self.action_plan.id, id=3, uuid=None, name="efficacy_indicator3", description="Test Indicator 3") def _soft_delete_efficacy_indicators(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator1.uuid) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator2.uuid) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.soft_delete_efficacy_indicator( 
self.efficacy_indicator3.uuid) def _update_efficacy_indicators(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.update_efficacy_indicator( self.efficacy_indicator1.uuid, values={"description": "New description 1"}) with freezegun.freeze_time(self.FAKE_OLD_DATE): self.dbapi.update_efficacy_indicator( self.efficacy_indicator2.uuid, values={"description": "New description 2"}) with freezegun.freeze_time(self.FAKE_OLDER_DATE): self.dbapi.update_efficacy_indicator( self.efficacy_indicator3.uuid, values={"description": "New description 3"}) def test_get_efficacy_indicator_filter_deleted_true(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator1.uuid) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted': True}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_false(self): with freezegun.freeze_time(self.FAKE_TODAY): self.dbapi.soft_delete_efficacy_indicator( self.efficacy_indicator1.uuid) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted': False}) self.assertEqual([self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_eq(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__eq': self.FAKE_TODAY}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_lt(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_lte(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( 
self.context, filters={'deleted_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_gt(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_deleted_at_gte(self): self._soft_delete_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'deleted_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], [r.id for r in res]) # created_at # def test_get_efficacy_indicator_filter_created_at_eq(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__eq': self.FAKE_TODAY}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_lt(self): with freezegun.freeze_time(self.FAKE_TODAY): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_lte(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_gt(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_created_at_gte(self): res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'created_at__gte': 
self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], [r.id for r in res]) # updated_at # def test_get_efficacy_indicator_filter_updated_at_eq(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__eq': self.FAKE_TODAY}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_lt(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__lt': self.FAKE_TODAY}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_lte(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__lte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator2['id'], self.efficacy_indicator3['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_gt(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__gt': self.FAKE_OLD_DATE}) self.assertEqual([self.efficacy_indicator1['id']], [r.id for r in res]) def test_get_efficacy_indicator_filter_updated_at_gte(self): self._update_efficacy_indicators() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'updated_at__gte': self.FAKE_OLD_DATE}) self.assertEqual( [self.efficacy_indicator1['id'], self.efficacy_indicator2['id']], [r.id for r in res]) class DbEfficacyIndicatorTestCase(base.DbTestCase): def test_get_efficacy_indicator_list(self): uuids = [] action_plan = utils.create_test_action_plan() for id_ in range(1, 4): efficacy_indicator = utils.create_test_efficacy_indicator( action_plan_id=action_plan.id, id=id_, uuid=None, name="efficacy_indicator", description="Test Indicator ") 
uuids.append(str(efficacy_indicator['uuid'])) efficacy_indicators = self.dbapi.get_efficacy_indicator_list( self.context) efficacy_indicator_uuids = [ei.uuid for ei in efficacy_indicators] self.assertEqual(sorted(uuids), sorted(efficacy_indicator_uuids)) for efficacy_indicator in efficacy_indicators: self.assertIsNone(efficacy_indicator.action_plan) def test_get_efficacy_indicator_list_eager(self): _action_plan = utils.get_test_action_plan() action_plan = self.dbapi.create_action_plan(_action_plan) uuids = [] for i in range(1, 4): efficacy_indicator = utils.create_test_efficacy_indicator( id=i, uuid=w_utils.generate_uuid(), action_plan_id=action_plan.id) uuids.append(str(efficacy_indicator['uuid'])) efficacy_indicators = self.dbapi.get_efficacy_indicator_list( self.context, eager=True) efficacy_indicator_map = {a.uuid: a for a in efficacy_indicators} self.assertEqual(sorted(uuids), sorted(efficacy_indicator_map.keys())) eager_efficacy_indicator = efficacy_indicator_map[ efficacy_indicator.uuid] self.assertEqual( action_plan.as_dict(), eager_efficacy_indicator.action_plan.as_dict()) def test_get_efficacy_indicator_list_with_filters(self): audit = utils.create_test_audit(uuid=w_utils.generate_uuid()) action_plan = utils.create_test_action_plan( id=1, uuid=w_utils.generate_uuid(), audit_id=audit.id, first_efficacy_indicator_id=None, state=objects.action_plan.State.RECOMMENDED) efficacy_indicator1 = utils.create_test_efficacy_indicator( id=1, name='indicator_1', uuid=w_utils.generate_uuid(), action_plan_id=action_plan['id'], description='Description efficacy indicator 1', unit='%') efficacy_indicator2 = utils.create_test_efficacy_indicator( id=2, name='indicator_2', uuid=w_utils.generate_uuid(), action_plan_id=2, description='Description efficacy indicator 2', unit='%') efficacy_indicator3 = utils.create_test_efficacy_indicator( id=3, name='indicator_3', uuid=w_utils.generate_uuid(), action_plan_id=action_plan['id'], description='Description efficacy indicator 3', 
unit='%') efficacy_indicator4 = utils.create_test_efficacy_indicator( id=4, name='indicator_4', uuid=w_utils.generate_uuid(), action_plan_id=action_plan['id'], description='Description efficacy indicator 4', unit='%') self.dbapi.soft_delete_efficacy_indicator(efficacy_indicator4['uuid']) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'name': 'indicator_3'}) self.assertEqual([efficacy_indicator3['id']], [r.id for r in res]) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'unit': 'kWh'}) self.assertEqual([], [r.id for r in res]) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'action_plan_id': 2}) self.assertEqual([efficacy_indicator2['id']], [r.id for r in res]) res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'action_plan_uuid': action_plan['uuid']}) self.assertEqual( sorted([efficacy_indicator1['id'], efficacy_indicator3['id']]), sorted([r.id for r in res])) def test_get_efficacy_indicator_list_with_filter_by_uuid(self): efficacy_indicator = utils.create_test_efficacy_indicator() res = self.dbapi.get_efficacy_indicator_list( self.context, filters={'uuid': efficacy_indicator.uuid}) self.assertEqual(len(res), 1) self.assertEqual(efficacy_indicator.uuid, res[0].uuid) def test_get_efficacy_indicator_by_id(self): efficacy_indicator = utils.create_test_efficacy_indicator() efficacy_indicator = self.dbapi.get_efficacy_indicator_by_id( self.context, efficacy_indicator.id) self.assertEqual(efficacy_indicator.uuid, efficacy_indicator.uuid) def test_get_efficacy_indicator_by_uuid(self): efficacy_indicator = utils.create_test_efficacy_indicator() efficacy_indicator = self.dbapi.get_efficacy_indicator_by_uuid( self.context, efficacy_indicator.uuid) self.assertEqual(efficacy_indicator['id'], efficacy_indicator.id) def test_get_efficacy_indicator_that_does_not_exist(self): self.assertRaises( exception.EfficacyIndicatorNotFound, self.dbapi.get_efficacy_indicator_by_id, self.context, 1234) def 
test_update_efficacy_indicator(self): efficacy_indicator = utils.create_test_efficacy_indicator() res = self.dbapi.update_efficacy_indicator( efficacy_indicator.id, {'state': objects.action_plan.State.CANCELLED}) self.assertEqual('CANCELLED', res.state) def test_update_efficacy_indicator_that_does_not_exist(self): self.assertRaises( exception.EfficacyIndicatorNotFound, self.dbapi.update_efficacy_indicator, 1234, {'state': ''}) def test_update_efficacy_indicator_uuid(self): efficacy_indicator = utils.create_test_efficacy_indicator() self.assertRaises( exception.Invalid, self.dbapi.update_efficacy_indicator, efficacy_indicator.id, {'uuid': 'hello'}) def test_destroy_efficacy_indicator(self): efficacy_indicator = utils.create_test_efficacy_indicator() self.dbapi.destroy_efficacy_indicator(efficacy_indicator['id']) self.assertRaises(exception.EfficacyIndicatorNotFound, self.dbapi.get_efficacy_indicator_by_id, self.context, efficacy_indicator['id']) def test_destroy_efficacy_indicator_by_uuid(self): uuid = w_utils.generate_uuid() utils.create_test_efficacy_indicator(uuid=uuid) self.assertIsNotNone(self.dbapi.get_efficacy_indicator_by_uuid( self.context, uuid)) self.dbapi.destroy_efficacy_indicator(uuid) self.assertRaises( exception.EfficacyIndicatorNotFound, self.dbapi.get_efficacy_indicator_by_uuid, self.context, uuid) def test_destroy_efficacy_indicator_that_does_not_exist(self): self.assertRaises(exception.EfficacyIndicatorNotFound, self.dbapi.destroy_efficacy_indicator, 1234) def test_create_efficacy_indicator_already_exists(self): uuid = w_utils.generate_uuid() utils.create_test_efficacy_indicator(id=1, uuid=uuid) self.assertRaises(exception.EfficacyIndicatorAlreadyExists, utils.create_test_efficacy_indicator, id=2, uuid=uuid) python-watcher-4.0.0/watcher/tests/db/base.py0000664000175000017500000000503413656752270021200 0ustar zuulzuul00000000000000# Copyright (c) 2012 NTT DOCOMO, INC. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Watcher DB test base class.""" import fixtures from oslo_config import cfg from watcher.db import api as dbapi from watcher.db.sqlalchemy import api as sqla_api from watcher.db.sqlalchemy import migration from watcher.db.sqlalchemy import models from watcher.tests import base from watcher.tests.db import utils CONF = cfg.CONF CONF.import_opt('enable_authentication', 'watcher.api.acl') _DB_CACHE = None class Database(fixtures.Fixture): def __init__(self, db_api, db_migrate, sql_connection): self.sql_connection = sql_connection self.engine = db_api.get_engine() self.engine.dispose() conn = self.engine.connect() self.setup_sqlite(db_migrate) self.post_migrations() self._DB = "".join(line for line in conn.connection.iterdump()) self.engine.dispose() def setup_sqlite(self, db_migrate): if db_migrate.version(): return models.Base.metadata.create_all(self.engine) db_migrate.stamp('head') def setUp(self): super(Database, self).setUp() conn = self.engine.connect() conn.connection.executescript(self._DB) self.addCleanup(self.engine.dispose) def post_migrations(self): """Any addition steps that are needed outside of the migrations.""" class DbTestCase(base.TestCase): def get_next_id(self): return next(self._id_gen) def setUp(self): cfg.CONF.set_override("enable_authentication", False) # To use in-memory SQLite DB cfg.CONF.set_override("connection", "sqlite://", group="database") super(DbTestCase, self).setUp() self.dbapi = 
dbapi.get_instance() global _DB_CACHE if not _DB_CACHE: _DB_CACHE = Database(sqla_api, migration, sql_connection=CONF.database.connection) self.useFixture(_DB_CACHE) self._id_gen = utils.id_generator() python-watcher-4.0.0/watcher/tests/objects/0000775000175000017500000000000013656752352020757 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/objects/utils.py0000664000175000017500000001757213656752270022504 0ustar zuulzuul00000000000000# Copyright 2014 Rackspace Hosting # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Watcher object test utilities.""" from watcher import objects from watcher.tests.db import utils as db_utils def _load_related_objects(context, cls, db_data): """Replace the DB data with its object counterpart""" obj_data = db_data.copy() for name, (obj_cls, _) in cls.object_fields.items(): if obj_data.get(name): obj_data[name] = obj_cls(context, **obj_data.get(name).as_dict()) else: del obj_data[name] return obj_data def _load_test_obj(context, cls, obj_data, **kw): # Let DB generate ID if it isn't specified explicitly if 'id' not in kw: del obj_data['id'] obj = cls(context) for key in obj_data: setattr(obj, key, obj_data[key]) return obj def get_test_audit_template(context, **kw): """Return a AuditTemplate object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. 
""" obj_cls = objects.AuditTemplate db_data = db_utils.get_test_audit_template(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_audit_template(context, **kw): """Create and return a test audit_template object. Create a audit template in the DB and return an AuditTemplate object with appropriate attributes. """ audit_template = get_test_audit_template(context, **kw) audit_template.create() return audit_template def get_test_audit(context, **kw): """Return a Audit object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Audit db_data = db_utils.get_test_audit(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_audit(context, **kw): """Create and return a test audit object. Create a audit in the DB and return an Audit object with appropriate attributes. """ audit = get_test_audit(context, **kw) audit.create() return audit def get_test_action_plan(context, **kw): """Return a ActionPlan object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.ActionPlan db_data = db_utils.get_test_action_plan(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_action_plan(context, **kw): """Create and return a test action_plan object. Create a action plan in the DB and return a ActionPlan object with appropriate attributes. """ action_plan = get_test_action_plan(context, **kw) action_plan.create() return action_plan def get_test_action(context, **kw): """Return a Action object with appropriate attributes. 
NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Action db_data = db_utils.get_test_action(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_action(context, **kw): """Create and return a test action object. Create a action in the DB and return a Action object with appropriate attributes. """ action = get_test_action(context, **kw) action.create() return action def get_test_goal(context, **kw): """Return a Goal object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Goal db_data = db_utils.get_test_goal(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_goal(context, **kw): """Create and return a test goal object. Create a goal in the DB and return a Goal object with appropriate attributes. """ goal = get_test_goal(context, **kw) goal.create() return goal def get_test_scoring_engine(context, **kw): """Return a ScoringEngine object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.ScoringEngine db_data = db_utils.get_test_scoring_engine(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_scoring_engine(context, **kw): """Create and return a test scoring engine object. Create a scoring engine in the DB and return a ScoringEngine object with appropriate attributes. """ scoring_engine = get_test_scoring_engine(context, **kw) scoring_engine.create() return scoring_engine def get_test_service(context, **kw): """Return a Service object with appropriate attributes. 
NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Service db_data = db_utils.get_test_service(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_service(context, **kw): """Create and return a test service object. Create a service in the DB and return a Service object with appropriate attributes. """ service = get_test_service(context, **kw) service.create() return service def get_test_strategy(context, **kw): """Return a Strategy object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.Strategy db_data = db_utils.get_test_strategy(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_strategy(context, **kw): """Create and return a test strategy object. Create a strategy in the DB and return a Strategy object with appropriate attributes. """ strategy = get_test_strategy(context, **kw) strategy.create() return strategy def get_test_efficacy_indicator(context, **kw): """Return a EfficacyIndicator object with appropriate attributes. NOTE: The object leaves the attributes marked as changed, such that a create() could be used to commit it to the DB. """ obj_cls = objects.EfficacyIndicator db_data = db_utils.get_test_efficacy_indicator(**kw) obj_data = _load_related_objects(context, obj_cls, db_data) return _load_test_obj(context, obj_cls, obj_data, **kw) def create_test_efficacy_indicator(context, **kw): """Create and return a test efficacy indicator object. Create a efficacy indicator in the DB and return a EfficacyIndicator object with appropriate attributes. 
""" efficacy_indicator = get_test_efficacy_indicator(context, **kw) efficacy_indicator.create() return efficacy_indicator python-watcher-4.0.0/watcher/tests/objects/test_goal.py0000664000175000017500000001402313656752270023311 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestGoalObject(base.DbTestCase): def setUp(self): super(TestGoalObject, self).setUp() self.fake_goal = utils.get_test_goal( created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'get_goal_by_id') def test_get_by_id(self, mock_get_goal): goal_id = self.fake_goal['id'] mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get(self.context, goal_id) mock_get_goal.assert_called_once_with(self.context, goal_id) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_get_by_uuid(self, mock_get_goal): uuid = self.fake_goal['uuid'] mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get(self.context, uuid) mock_get_goal.assert_called_once_with(self.context, uuid) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_by_name') def test_get_by_name(self, mock_get_goal): name = self.fake_goal['name'] 
mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get_by_name(self.context, name) mock_get_goal.assert_called_once_with(self.context, name) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_goal] goals = objects.Goal.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(goals)) self.assertIsInstance(goals[0], objects.Goal) self.assertEqual(self.context, goals[0]._context) @mock.patch.object(db_api.Connection, 'create_goal') def test_create(self, mock_create_goal): mock_create_goal.return_value = self.fake_goal goal = objects.Goal(self.context, **self.fake_goal) goal.create() expected_goal = self.fake_goal.copy() expected_goal['created_at'] = expected_goal['created_at'].replace( tzinfo=iso8601.UTC) mock_create_goal.assert_called_once_with(expected_goal) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'destroy_goal') @mock.patch.object(db_api.Connection, 'get_goal_by_id') def test_destroy(self, mock_get_goal, mock_destroy_goal): goal_id = self.fake_goal['id'] mock_get_goal.return_value = self.fake_goal goal = objects.Goal.get_by_id(self.context, goal_id) goal.destroy() mock_get_goal.assert_called_once_with( self.context, goal_id) mock_destroy_goal.assert_called_once_with(goal_id) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'update_goal') @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_save(self, mock_get_goal, mock_update_goal): mock_get_goal.return_value = self.fake_goal goal_uuid = self.fake_goal['uuid'] fake_saved_goal = self.fake_goal.copy() fake_saved_goal['updated_at'] = datetime.datetime.utcnow() mock_update_goal.return_value = fake_saved_goal goal = objects.Goal.get_by_uuid(self.context, goal_uuid) goal.display_name = 'DUMMY' goal.save() mock_get_goal.assert_called_once_with(self.context, 
goal_uuid) mock_update_goal.assert_called_once_with( goal_uuid, {'display_name': 'DUMMY'}) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_refresh(self, mock_get_goal): fake_goal2 = utils.get_test_goal(name="BALANCE_LOAD") returns = [self.fake_goal, fake_goal2] mock_get_goal.side_effect = returns uuid = self.fake_goal['uuid'] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] goal = objects.Goal.get(self.context, uuid) self.assertEqual("TEST", goal.name) goal.refresh() self.assertEqual("BALANCE_LOAD", goal.name) self.assertEqual(expected, mock_get_goal.call_args_list) self.assertEqual(self.context, goal._context) @mock.patch.object(db_api.Connection, 'soft_delete_goal') @mock.patch.object(db_api.Connection, 'get_goal_by_uuid') def test_soft_delete(self, mock_get_goal, mock_soft_delete_goal): mock_get_goal.return_value = self.fake_goal fake_deleted_goal = self.fake_goal.copy() fake_deleted_goal['deleted_at'] = datetime.datetime.utcnow() mock_soft_delete_goal.return_value = fake_deleted_goal expected_goal = fake_deleted_goal.copy() expected_goal['created_at'] = expected_goal['created_at'].replace( tzinfo=iso8601.UTC) expected_goal['deleted_at'] = expected_goal['deleted_at'].replace( tzinfo=iso8601.UTC) uuid = self.fake_goal['uuid'] goal = objects.Goal.get_by_uuid(self.context, uuid) goal.soft_delete() mock_get_goal.assert_called_once_with(self.context, uuid) mock_soft_delete_goal.assert_called_once_with(uuid) self.assertEqual(self.context, goal._context) self.assertEqual(expected_goal, goal.as_dict()) python-watcher-4.0.0/watcher/tests/objects/__init__.py0000664000175000017500000000000013656752270023055 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/objects/test_audit_template.py0000664000175000017500000002464013656752270025376 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from watcher.common import exception from watcher.common import utils as w_utils from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestAuditTemplateObject(base.DbTestCase): goal_id = 1 goal_data = utils.get_test_goal( id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") scenarios = [ ('non_eager', dict( eager=False, fake_audit_template=utils.get_test_audit_template( created_at=datetime.datetime.utcnow(), goal_id=goal_id))), ('eager_with_non_eager_load', dict( eager=True, fake_audit_template=utils.get_test_audit_template( created_at=datetime.datetime.utcnow(), goal_id=goal_id))), ('eager_with_eager_load', dict( eager=True, fake_audit_template=utils.get_test_audit_template( created_at=datetime.datetime.utcnow(), goal_id=goal_id, goal=goal_data))), ] def setUp(self): super(TestAuditTemplateObject, self).setUp() self.fake_goal = utils.create_test_goal(**self.goal_data) def eager_load_audit_template_assert(self, audit_template, goal): if self.eager: self.assertIsNotNone(audit_template.goal) fields_to_check = set( super(objects.Goal, objects.Goal).fields ).symmetric_difference(objects.Goal.fields) db_data = { k: v for k, v in goal.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in audit_template.goal.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, 
object_data) @mock.patch.object(db_api.Connection, 'get_audit_template_by_id') def test_get_by_id(self, mock_get_audit_template): mock_get_audit_template.return_value = self.fake_audit_template audit_template_id = self.fake_audit_template['id'] audit_template = objects.AuditTemplate.get( self.context, audit_template_id, eager=self.eager) mock_get_audit_template.assert_called_once_with( self.context, audit_template_id, eager=self.eager) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_get_by_uuid(self, mock_get_audit_template): mock_get_audit_template.return_value = self.fake_audit_template uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get( self.context, uuid, eager=self.eager) mock_get_audit_template.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) @mock.patch.object(db_api.Connection, 'get_audit_template_by_name') def test_get_by_name(self, mock_get_audit_template): mock_get_audit_template.return_value = self.fake_audit_template name = self.fake_audit_template['name'] audit_template = objects.AuditTemplate.get_by_name( self.context, name, eager=self.eager) mock_get_audit_template.assert_called_once_with( self.context, name, eager=self.eager) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.AuditTemplate.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_audit_template_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_audit_template] audit_templates = objects.AuditTemplate.list( self.context, 
eager=self.eager) mock_get_list.assert_called_once_with( self.context, eager=self.eager, filters=None, limit=None, marker=None, sort_dir=None, sort_key=None) self.assertEqual(1, len(audit_templates)) self.assertIsInstance(audit_templates[0], objects.AuditTemplate) self.assertEqual(self.context, audit_templates[0]._context) for audit_template in audit_templates: self.eager_load_audit_template_assert( audit_template, self.fake_goal) @mock.patch.object(db_api.Connection, 'update_audit_template') @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_save(self, mock_get_audit_template, mock_update_audit_template): mock_get_audit_template.return_value = self.fake_audit_template fake_saved_audit_template = self.fake_audit_template.copy() fake_saved_audit_template['updated_at'] = datetime.datetime.utcnow() mock_update_audit_template.return_value = fake_saved_audit_template uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get_by_uuid( self.context, uuid, eager=self.eager) audit_template.goal_id = self.fake_goal.id audit_template.save() mock_get_audit_template.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_audit_template.assert_called_once_with( uuid, {'goal_id': self.fake_goal.id}) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_refresh(self, mock_get_audit_template): returns = [dict(self.fake_audit_template, name="first name"), dict(self.fake_audit_template, name="second name")] mock_get_audit_template.side_effect = returns uuid = self.fake_audit_template['uuid'] expected = [mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, eager=self.eager)] audit_template = objects.AuditTemplate.get( self.context, uuid, eager=self.eager) self.assertEqual("first name", audit_template.name) 
audit_template.refresh(eager=self.eager) self.assertEqual("second name", audit_template.name) self.assertEqual(expected, mock_get_audit_template.call_args_list) self.assertEqual(self.context, audit_template._context) self.eager_load_audit_template_assert(audit_template, self.fake_goal) class TestCreateDeleteAuditTemplateObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteAuditTemplateObject, self).setUp() self.fake_audit_template = utils.get_test_audit_template( created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'create_audit_template') def test_create(self, mock_create_audit_template): goal = utils.create_test_goal() self.fake_audit_template['goal_id'] = goal.id mock_create_audit_template.return_value = self.fake_audit_template audit_template = objects.AuditTemplate( self.context, **self.fake_audit_template) audit_template.create() expected_audit_template = self.fake_audit_template.copy() expected_audit_template['created_at'] = expected_audit_template[ 'created_at'].replace(tzinfo=iso8601.UTC) mock_create_audit_template.assert_called_once_with( expected_audit_template) self.assertEqual(self.context, audit_template._context) @mock.patch.object(db_api.Connection, 'soft_delete_audit_template') @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_soft_delete(self, m_get_audit_template, m_soft_delete_audit_template): m_get_audit_template.return_value = self.fake_audit_template fake_deleted_audit_template = self.fake_audit_template.copy() fake_deleted_audit_template['deleted_at'] = datetime.datetime.utcnow() m_soft_delete_audit_template.return_value = fake_deleted_audit_template expected_audit_template = fake_deleted_audit_template.copy() expected_audit_template['created_at'] = expected_audit_template[ 'created_at'].replace(tzinfo=iso8601.UTC) expected_audit_template['deleted_at'] = expected_audit_template[ 'deleted_at'].replace(tzinfo=iso8601.UTC) del expected_audit_template['goal'] del 
expected_audit_template['strategy'] uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) audit_template.soft_delete() m_get_audit_template.assert_called_once_with( self.context, uuid, eager=False) m_soft_delete_audit_template.assert_called_once_with(uuid) self.assertEqual(self.context, audit_template._context) self.assertEqual(expected_audit_template, audit_template.as_dict()) @mock.patch.object(db_api.Connection, 'destroy_audit_template') @mock.patch.object(db_api.Connection, 'get_audit_template_by_uuid') def test_destroy(self, mock_get_audit_template, mock_destroy_audit_template): mock_get_audit_template.return_value = self.fake_audit_template uuid = self.fake_audit_template['uuid'] audit_template = objects.AuditTemplate.get_by_uuid(self.context, uuid) audit_template.destroy() mock_get_audit_template.assert_called_once_with( self.context, uuid, eager=False) mock_destroy_audit_template.assert_called_once_with(uuid) self.assertEqual(self.context, audit_template._context) python-watcher-4.0.0/watcher/tests/objects/test_strategy.py0000664000175000017500000001626613656752270024244 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from watcher.common import exception from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestStrategyObject(base.DbTestCase): goal_id = 2 scenarios = [ ('non_eager', dict( eager=False, fake_strategy=utils.get_test_strategy( goal_id=goal_id))), ('eager_with_non_eager_load', dict( eager=True, fake_strategy=utils.get_test_strategy( goal_id=goal_id))), ('eager_with_eager_load', dict( eager=True, fake_strategy=utils.get_test_strategy( goal_id=goal_id, goal=utils.get_test_goal(id=goal_id)))), ] def setUp(self): super(TestStrategyObject, self).setUp() self.fake_goal = utils.create_test_goal(id=self.goal_id) def eager_load_strategy_assert(self, strategy): if self.eager: self.assertIsNotNone(strategy.goal) fields_to_check = set( super(objects.Goal, objects.Goal).fields ).symmetric_difference(objects.Goal.fields) db_data = { k: v for k, v in self.fake_goal.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in strategy.goal.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_get_by_id(self, mock_get_strategy): strategy_id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get( self.context, strategy_id, eager=self.eager) mock_get_strategy.assert_called_once_with( self.context, strategy_id, eager=self.eager) self.assertEqual(self.context, strategy._context) self.eager_load_strategy_assert(strategy) @mock.patch.object(db_api.Connection, 'get_strategy_by_uuid') def test_get_by_uuid(self, mock_get_strategy): uuid = self.fake_strategy['uuid'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get(self.context, uuid, eager=self.eager) mock_get_strategy.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, strategy._context) 
self.eager_load_strategy_assert(strategy) def test_get_bad_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Strategy.get, self.context, 'not-a-uuid') @mock.patch.object(db_api.Connection, 'get_strategy_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_strategy] strategies = objects.Strategy.list(self.context, eager=self.eager) self.assertEqual(1, mock_get_list.call_count, 1) self.assertEqual(1, len(strategies)) self.assertIsInstance(strategies[0], objects.Strategy) self.assertEqual(self.context, strategies[0]._context) for strategy in strategies: self.eager_load_strategy_assert(strategy) @mock.patch.object(db_api.Connection, 'update_strategy') @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_save(self, mock_get_strategy, mock_update_strategy): _id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get_by_id( self.context, _id, eager=self.eager) strategy.name = 'UPDATED NAME' strategy.save() mock_get_strategy.assert_called_once_with( self.context, _id, eager=self.eager) mock_update_strategy.assert_called_once_with( _id, {'name': 'UPDATED NAME'}) self.assertEqual(self.context, strategy._context) self.eager_load_strategy_assert(strategy) @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_refresh(self, mock_get_strategy): _id = self.fake_strategy['id'] returns = [dict(self.fake_strategy, name="first name"), dict(self.fake_strategy, name="second name")] mock_get_strategy.side_effect = returns expected = [mock.call(self.context, _id, eager=self.eager), mock.call(self.context, _id, eager=self.eager)] strategy = objects.Strategy.get(self.context, _id, eager=self.eager) self.assertEqual("first name", strategy.name) strategy.refresh(eager=self.eager) self.assertEqual("second name", strategy.name) self.assertEqual(expected, mock_get_strategy.call_args_list) self.assertEqual(self.context, strategy._context) 
self.eager_load_strategy_assert(strategy) class TestCreateDeleteStrategyObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteStrategyObject, self).setUp() self.fake_goal = utils.create_test_goal() self.fake_strategy = utils.get_test_strategy(goal_id=self.fake_goal.id) @mock.patch.object(db_api.Connection, 'create_strategy') def test_create(self, mock_create_strategy): mock_create_strategy.return_value = self.fake_strategy strategy = objects.Strategy(self.context, **self.fake_strategy) strategy.create() mock_create_strategy.assert_called_once_with(self.fake_strategy) self.assertEqual(self.context, strategy._context) @mock.patch.object(db_api.Connection, 'soft_delete_strategy') @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_soft_delete(self, mock_get_strategy, mock_soft_delete): _id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get_by_id(self.context, _id) strategy.soft_delete() mock_get_strategy.assert_called_once_with( self.context, _id, eager=False) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, strategy._context) @mock.patch.object(db_api.Connection, 'destroy_strategy') @mock.patch.object(db_api.Connection, 'get_strategy_by_id') def test_destroy(self, mock_get_strategy, mock_destroy_strategy): _id = self.fake_strategy['id'] mock_get_strategy.return_value = self.fake_strategy strategy = objects.Strategy.get_by_id(self.context, _id) strategy.destroy() mock_get_strategy.assert_called_once_with( self.context, _id, eager=False) mock_destroy_strategy.assert_called_once_with(_id) self.assertEqual(self.context, strategy._context) python-watcher-4.0.0/watcher/tests/objects/test_scoring_engine.py0000664000175000017500000001666313656752270025374 0ustar zuulzuul00000000000000# Copyright 2016 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestScoringEngineObject(base.DbTestCase): def setUp(self): super(TestScoringEngineObject, self).setUp() self.fake_scoring_engine = utils.get_test_scoring_engine( created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_get_by_id(self, mock_get_scoring_engine): scoring_engine_id = self.fake_scoring_engine['id'] mock_get_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine.get_by_id( self.context, scoring_engine_id) mock_get_scoring_engine.assert_called_once_with( self.context, scoring_engine_id) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') def test_get_by_uuid(self, mock_get_scoring_engine): se_uuid = self.fake_scoring_engine['uuid'] mock_get_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine.get_by_uuid( self.context, se_uuid) mock_get_scoring_engine.assert_called_once_with( self.context, se_uuid) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') def test_get_by_name(self, mock_get_scoring_engine): scoring_engine_uuid = self.fake_scoring_engine['uuid'] mock_get_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine.get( self.context, 
scoring_engine_uuid) mock_get_scoring_engine.assert_called_once_with( self.context, scoring_engine_uuid) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_scoring_engine] scoring_engines = objects.ScoringEngine.list(self.context) self.assertEqual(1, mock_get_list.call_count, 1) self.assertEqual(1, len(scoring_engines)) self.assertIsInstance(scoring_engines[0], objects.ScoringEngine) self.assertEqual(self.context, scoring_engines[0]._context) @mock.patch.object(db_api.Connection, 'create_scoring_engine') def test_create(self, mock_create_scoring_engine): mock_create_scoring_engine.return_value = self.fake_scoring_engine scoring_engine = objects.ScoringEngine( self.context, **self.fake_scoring_engine) scoring_engine.create() expected_scoring_engine = self.fake_scoring_engine.copy() expected_scoring_engine['created_at'] = expected_scoring_engine[ 'created_at'].replace(tzinfo=iso8601.UTC) mock_create_scoring_engine.assert_called_once_with( expected_scoring_engine) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'destroy_scoring_engine') @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_destroy(self, mock_get_scoring_engine, mock_destroy_scoring_engine): mock_get_scoring_engine.return_value = self.fake_scoring_engine _id = self.fake_scoring_engine['id'] scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) scoring_engine.destroy() mock_get_scoring_engine.assert_called_once_with(self.context, _id) mock_destroy_scoring_engine.assert_called_once_with(_id) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'update_scoring_engine') @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_uuid') def test_save(self, mock_get_scoring_engine, mock_update_scoring_engine): 
mock_get_scoring_engine.return_value = self.fake_scoring_engine fake_saved_scoring_engine = self.fake_scoring_engine.copy() fake_saved_scoring_engine['updated_at'] = datetime.datetime.utcnow() mock_update_scoring_engine.return_value = fake_saved_scoring_engine uuid = self.fake_scoring_engine['uuid'] scoring_engine = objects.ScoringEngine.get_by_uuid(self.context, uuid) scoring_engine.description = 'UPDATED DESCRIPTION' scoring_engine.save() mock_get_scoring_engine.assert_called_once_with(self.context, uuid) mock_update_scoring_engine.assert_called_once_with( uuid, {'description': 'UPDATED DESCRIPTION'}) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_refresh(self, mock_get_scoring_engine): returns = [ dict(self.fake_scoring_engine, description="first description"), dict(self.fake_scoring_engine, description="second description")] mock_get_scoring_engine.side_effect = returns _id = self.fake_scoring_engine['id'] expected = [mock.call(self.context, _id), mock.call(self.context, _id)] scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) self.assertEqual("first description", scoring_engine.description) scoring_engine.refresh() self.assertEqual("second description", scoring_engine.description) self.assertEqual(expected, mock_get_scoring_engine.call_args_list) self.assertEqual(self.context, scoring_engine._context) @mock.patch.object(db_api.Connection, 'soft_delete_scoring_engine') @mock.patch.object(db_api.Connection, 'get_scoring_engine_by_id') def test_soft_delete(self, mock_get_scoring_engine, mock_soft_delete): mock_get_scoring_engine.return_value = self.fake_scoring_engine fake_deleted_scoring_engine = self.fake_scoring_engine.copy() fake_deleted_scoring_engine['deleted_at'] = datetime.datetime.utcnow() mock_soft_delete.return_value = fake_deleted_scoring_engine expected_scoring_engine = fake_deleted_scoring_engine.copy() expected_scoring_engine['created_at'] = 
expected_scoring_engine[ 'created_at'].replace(tzinfo=iso8601.UTC) expected_scoring_engine['deleted_at'] = expected_scoring_engine[ 'deleted_at'].replace(tzinfo=iso8601.UTC) _id = self.fake_scoring_engine['id'] scoring_engine = objects.ScoringEngine.get_by_id(self.context, _id) scoring_engine.soft_delete() mock_get_scoring_engine.assert_called_once_with(self.context, _id) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, scoring_engine._context) self.assertEqual(expected_scoring_engine, scoring_engine.as_dict()) python-watcher-4.0.0/watcher/tests/objects/test_action_description.py0000664000175000017500000001304513656752270026252 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 ZTE # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import iso8601 import mock from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestActionDescriptionObject(base.DbTestCase): def setUp(self): super(TestActionDescriptionObject, self).setUp() self.fake_action_desc = utils.get_test_action_desc( created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_get_by_id(self, mock_get_action_desc): action_desc_id = self.fake_action_desc['id'] mock_get_action_desc.return_value = self.fake_action_desc action_desc = objects.ActionDescription.get( self.context, action_desc_id) mock_get_action_desc.assert_called_once_with( self.context, action_desc_id) self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'get_action_description_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_action_desc] action_desc = objects.ActionDescription.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(action_desc)) self.assertIsInstance(action_desc[0], objects.ActionDescription) self.assertEqual(self.context, action_desc[0]._context) @mock.patch.object(db_api.Connection, 'create_action_description') def test_create(self, mock_create_action_desc): mock_create_action_desc.return_value = self.fake_action_desc action_desc = objects.ActionDescription( self.context, **self.fake_action_desc) action_desc.create() expected_action_desc = self.fake_action_desc.copy() expected_action_desc['created_at'] = expected_action_desc[ 'created_at'].replace(tzinfo=iso8601.UTC) mock_create_action_desc.assert_called_once_with(expected_action_desc) self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'update_action_description') @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_save(self, mock_get_action_desc, 
mock_update_action_desc): mock_get_action_desc.return_value = self.fake_action_desc fake_saved_action_desc = self.fake_action_desc.copy() fake_saved_action_desc['updated_at'] = datetime.datetime.utcnow() mock_update_action_desc.return_value = fake_saved_action_desc _id = self.fake_action_desc['id'] action_desc = objects.ActionDescription.get(self.context, _id) action_desc.description = 'This is a test' action_desc.save() mock_get_action_desc.assert_called_once_with(self.context, _id) mock_update_action_desc.assert_called_once_with( _id, {'description': 'This is a test'}) self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_refresh(self, mock_get_action_desc): returns = [dict(self.fake_action_desc, description="Test message1"), dict(self.fake_action_desc, description="Test message2")] mock_get_action_desc.side_effect = returns _id = self.fake_action_desc['id'] expected = [mock.call(self.context, _id), mock.call(self.context, _id)] action_desc = objects.ActionDescription.get(self.context, _id) self.assertEqual("Test message1", action_desc.description) action_desc.refresh() self.assertEqual("Test message2", action_desc.description) self.assertEqual(expected, mock_get_action_desc.call_args_list) self.assertEqual(self.context, action_desc._context) @mock.patch.object(db_api.Connection, 'soft_delete_action_description') @mock.patch.object(db_api.Connection, 'get_action_description_by_id') def test_soft_delete(self, mock_get_action_desc, mock_soft_delete): mock_get_action_desc.return_value = self.fake_action_desc fake_deleted_action_desc = self.fake_action_desc.copy() fake_deleted_action_desc['deleted_at'] = datetime.datetime.utcnow() mock_soft_delete.return_value = fake_deleted_action_desc expected_action_desc = fake_deleted_action_desc.copy() expected_action_desc['created_at'] = expected_action_desc[ 'created_at'].replace(tzinfo=iso8601.UTC) expected_action_desc['deleted_at'] = 
expected_action_desc[ 'deleted_at'].replace(tzinfo=iso8601.UTC) _id = self.fake_action_desc['id'] action_desc = objects.ActionDescription.get(self.context, _id) action_desc.soft_delete() mock_get_action_desc.assert_called_once_with(self.context, _id) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, action_desc._context) self.assertEqual(expected_action_desc, action_desc.as_dict()) python-watcher-4.0.0/watcher/tests/objects/test_objects.py0000664000175000017500000005130313656752270024022 0ustar zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib import datetime import gettext import iso8601 import mock from oslo_versionedobjects import base as object_base from oslo_versionedobjects import exception as object_exception from oslo_versionedobjects import fixture as object_fixture from watcher.common import context from watcher.objects import base from watcher.objects import fields from watcher.tests import base as test_base gettext.install('watcher') @base.WatcherObjectRegistry.register class MyObj(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): VERSION = '1.5' fields = {'foo': fields.IntegerField(), 'bar': fields.StringField(), 'missing': fields.StringField()} def obj_load_attr(self, attrname): setattr(self, attrname, 'loaded!') @object_base.remotable_classmethod def query(cls, context): obj = cls(context) obj.foo = 1 obj.bar = 'bar' obj.obj_reset_changes() return obj @object_base.remotable def marco(self, context=None): return 'polo' @object_base.remotable def update_test(self, context=None): if context and context.user == 'alternate': self.bar = 'alternate-context' else: self.bar = 'updated' @object_base.remotable def save(self, context=None): self.obj_reset_changes() @object_base.remotable def refresh(self, context=None): self.foo = 321 self.bar = 'refreshed' self.obj_reset_changes() @object_base.remotable def modify_save_modify(self, context=None): self.bar = 'meow' self.save() self.foo = 42 class MyObj2(object): @classmethod def obj_name(cls): return 'MyObj' @object_base.remotable_classmethod def get(cls, *args, **kwargs): pass @base.WatcherObjectRegistry.register_if(False) class WatcherTestSubclassedObject(MyObj): fields = {'new_field': fields.StringField()} class _LocalTest(test_base.TestCase): def setUp(self): super(_LocalTest, self).setUp() # Just in case base.WatcherObject.indirection_api = None @contextlib.contextmanager def things_temporarily_local(): # Temporarily go non-remote so the conductor handles # this request directly _api = 
base.WatcherObject.indirection_api base.WatcherObject.indirection_api = None yield base.WatcherObject.indirection_api = _api class _TestObject(object): def test_hydration_type_error(self): primitive = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 'a'}} self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive) def test_hydration(self): primitive = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 1}} obj = MyObj.obj_from_primitive(primitive) self.assertEqual(1, obj.foo) def test_hydration_bad_ns(self): primitive = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'foo', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 1}} self.assertRaises(object_exception.UnsupportedObjectError, MyObj.obj_from_primitive, primitive) def test_dehydration(self): expected = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.data': {'foo': 1}} obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual(expected, obj.obj_to_primitive()) def test_get_updates(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_object_property(self): obj = MyObj(self.context, foo=1) self.assertEqual(1, obj.foo) def test_object_property_type_error(self): obj = MyObj(self.context) def fail(): obj.foo = 'a' self.assertRaises(ValueError, fail) def test_load(self): obj = MyObj(self.context) self.assertEqual('loaded!', obj.bar) def test_load_in_base(self): @base.WatcherObjectRegistry.register_if(False) class Foo(base.WatcherPersistentObject, base.WatcherObject, 
base.WatcherObjectDictCompat): fields = {'foobar': fields.IntegerField()} obj = Foo(self.context) self.assertRaisesRegex( NotImplementedError, "Cannot load 'foobar' in the base class", getattr, obj, 'foobar') def test_loaded_in_primitive(self): obj = MyObj(self.context) obj.foo = 1 obj.obj_reset_changes() self.assertEqual('loaded!', obj.bar) expected = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.changes': ['bar'], 'watcher_object.data': {'foo': 1, 'bar': 'loaded!'}} self.assertEqual(expected, obj.obj_to_primitive()) def test_changes_in_primitive(self): obj = MyObj(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) primitive = obj.obj_to_primitive() self.assertIn('watcher_object.changes', primitive) obj2 = MyObj.obj_from_primitive(primitive) self.assertEqual(set(['foo']), obj2.obj_what_changed()) obj2.obj_reset_changes() self.assertEqual(set(), obj2.obj_what_changed()) def test_unknown_objtype(self): self.assertRaises(object_exception.UnsupportedObjectError, base.WatcherObject.obj_class_from_name, 'foo', '1.0') def test_with_alternate_context(self): ctxt1 = context.RequestContext('foo', 'foo') ctxt2 = context.RequestContext(user='alternate') obj = MyObj.query(ctxt1) obj.update_test(ctxt2) self.assertEqual('alternate-context', obj.bar) def test_orphaned_object(self): obj = MyObj.query(self.context) obj._context = None self.assertRaises(object_exception.OrphanedObjectError, obj.update_test) def test_changed_1(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.update_test(self.context) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def test_changed_2(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.save() self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(123, obj.foo) def 
test_changed_3(self): obj = MyObj.query(self.context) obj.foo = 123 self.assertEqual(set(['foo']), obj.obj_what_changed()) obj.refresh() self.assertEqual(set([]), obj.obj_what_changed()) self.assertEqual(321, obj.foo) self.assertEqual('refreshed', obj.bar) def test_changed_4(self): obj = MyObj.query(self.context) obj.bar = 'something' self.assertEqual(set(['bar']), obj.obj_what_changed()) obj.modify_save_modify(self.context) self.assertEqual(set(['foo']), obj.obj_what_changed()) self.assertEqual(42, obj.foo) self.assertEqual('meow', obj.bar) def test_static_result(self): obj = MyObj.query(self.context) self.assertEqual('bar', obj.bar) result = obj.marco() self.assertEqual('polo', result) def test_updates(self): obj = MyObj.query(self.context) self.assertEqual(1, obj.foo) obj.update_test() self.assertEqual('updated', obj.bar) def test_base_attributes(self): dt = datetime.datetime(1955, 11, 5, 0, 0, tzinfo=iso8601.UTC) datatime = fields.DateTimeField() obj = MyObj(self.context) obj.created_at = dt obj.updated_at = dt expected = {'watcher_object.name': 'MyObj', 'watcher_object.namespace': 'watcher', 'watcher_object.version': '1.5', 'watcher_object.changes': ['created_at', 'updated_at'], 'watcher_object.data': {'created_at': datatime.stringify(dt), 'updated_at': datatime.stringify(dt), } } actual = obj.obj_to_primitive() # watcher_object.changes is built from a set and order is undefined self.assertEqual(sorted(expected['watcher_object.changes']), sorted(actual['watcher_object.changes'])) del expected[ 'watcher_object.changes'], actual['watcher_object.changes'] self.assertEqual(expected, actual) def test_contains(self): obj = MyObj(self.context) self.assertNotIn('foo', obj) obj.foo = 1 self.assertIn('foo', obj) self.assertNotIn('does_not_exist', obj) def test_obj_attr_is_set(self): obj = MyObj(self.context, foo=1) self.assertTrue(obj.obj_attr_is_set('foo')) self.assertFalse(obj.obj_attr_is_set('bar')) self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang') def 
test_get(self): obj = MyObj(self.context, foo=1) # Foo has value, should not get the default self.assertEqual(obj.get('foo', 2), 1) # Foo has value, should return the value without error self.assertEqual(obj.get('foo'), 1) # Bar is not loaded, so we should get the default self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded') # Bar without a default should lazy-load self.assertEqual(obj.get('bar'), 'loaded!') # Bar now has a default, but loaded value should be returned self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!') # Invalid attribute should raise AttributeError self.assertRaises(AttributeError, obj.get, 'nothing') # ...even with a default self.assertRaises(AttributeError, obj.get, 'nothing', 3) def test_object_inheritance(self): base_fields = ( list(base.WatcherObject.fields) + list(base.WatcherPersistentObject.fields)) myobj_fields = ['foo', 'bar', 'missing'] + base_fields myobj3_fields = ['new_field'] self.assertTrue(issubclass(WatcherTestSubclassedObject, MyObj)) self.assertEqual(len(myobj_fields), len(MyObj.fields)) self.assertEqual(set(myobj_fields), set(MyObj.fields.keys())) self.assertEqual(len(myobj_fields) + len(myobj3_fields), len(WatcherTestSubclassedObject.fields)) self.assertEqual(set(myobj_fields) | set(myobj3_fields), set(WatcherTestSubclassedObject.fields.keys())) def test_get_changes(self): obj = MyObj(self.context) self.assertEqual({}, obj.obj_get_changes()) obj.foo = 123 self.assertEqual({'foo': 123}, obj.obj_get_changes()) obj.bar = 'test' self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes()) obj.obj_reset_changes() self.assertEqual({}, obj.obj_get_changes()) def test_obj_fields(self): @base.WatcherObjectRegistry.register_if(False) class TestObj(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): fields = {'foo': fields.IntegerField()} obj_extra_fields = ['bar'] @property def bar(self): return 'this is bar' obj = TestObj(self.context) self.assertEqual(set(['created_at', 'updated_at', 
'deleted_at', 'foo', 'bar']), set(obj.obj_fields)) def test_refresh_object(self): @base.WatcherObjectRegistry.register_if(False) class TestObj(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): fields = {'foo': fields.IntegerField(), 'bar': fields.StringField()} obj = TestObj(self.context) current_obj = TestObj(self.context) obj.foo = 10 obj.bar = 'obj.bar' current_obj.foo = 2 current_obj.bar = 'current.bar' obj.obj_refresh(current_obj) self.assertEqual(obj.foo, 2) self.assertEqual(obj.bar, 'current.bar') def test_obj_constructor(self): obj = MyObj(self.context, foo=123, bar='abc') self.assertEqual(123, obj.foo) self.assertEqual('abc', obj.bar) self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed()) def test_assign_value_without_DictCompat(self): class TestObj(base.WatcherObject): fields = {'foo': fields.IntegerField(), 'bar': fields.StringField()} obj = TestObj(self.context) obj.foo = 10 err_message = '' try: obj['bar'] = 'value' except TypeError as e: err_message = str(e) finally: self.assertIn("'TestObj' object does not support item assignment", err_message) class TestObject(_LocalTest, _TestObject): pass # The hashes are help developers to check if the change of objects need a # version bump. It is md5 hash of object fields and remotable methods. # The fingerprint values should only be changed if there is a version bump. 
expected_object_fingerprints = { 'Goal': '1.0-93881622db05e7b67a65ca885b4a022e', 'Strategy': '1.1-73f164491bdd4c034f48083a51bdeb7b', 'AuditTemplate': '1.1-b291973ffc5efa2c61b24fe34fdccc0b', 'Audit': '1.7-19bc991c0b048263df021a36c8624f4d', 'ActionPlan': '2.2-3331270cb3666c93408934826d03c08d', 'Action': '2.0-1dd4959a7e7ac30c62ef170fe08dd935', 'EfficacyIndicator': '1.0-655b71234a82bc7478aff964639c4bb0', 'ScoringEngine': '1.0-4abbe833544000728e17bd9e83f97576', 'Service': '1.0-4b35b99ada9677a882c9de2b30212f35', 'MyObj': '1.5-23c516d1e842f365f694e688d34e47c3', 'ActionDescription': '1.0-5761a3d16651046e7a0c357b57a6583e' } def get_watcher_objects(): """Get Watcher versioned objects This returns a dict of versioned objects which are in the Watcher project namespace only. ie excludes objects from os-vif and other 3rd party modules :return: a dict mapping class names to lists of versioned objects """ all_classes = base.WatcherObjectRegistry.obj_classes() watcher_classes = {} for name in all_classes: objclasses = all_classes[name] if (objclasses[0].OBJ_PROJECT_NAMESPACE != base.WatcherObject.OBJ_PROJECT_NAMESPACE): continue watcher_classes[name] = objclasses return watcher_classes class TestObjectVersions(test_base.TestCase): def test_object_version_check(self): classes = base.WatcherObjectRegistry.obj_classes() checker = object_fixture.ObjectVersionChecker(obj_classes=classes) # Compute the difference between actual fingerprints and # expect fingerprints. expect = actual = {} if there is no change. expect, actual = checker.test_hashes(expected_object_fingerprints) self.assertEqual(expect, actual, "Some objects fields or remotable methods have been " "modified. Please make sure the version of those " "objects have been bumped and then update " "expected_object_fingerprints with the new hashes. 
") class TestObjectSerializer(test_base.TestCase): def test_object_serialization(self): ser = base.WatcherObjectSerializer() obj = MyObj(self.context) primitive = ser.serialize_entity(self.context, obj) self.assertIn('watcher_object.name', primitive) obj2 = ser.deserialize_entity(self.context, primitive) self.assertIsInstance(obj2, MyObj) self.assertEqual(self.context, obj2._context) def test_object_serialization_iterables(self): ser = base.WatcherObjectSerializer() obj = MyObj(self.context) for iterable in (list, tuple, set): thing = iterable([obj]) primitive = ser.serialize_entity(self.context, thing) self.assertEqual(1, len(primitive)) for item in primitive: self.assertFalse(isinstance(item, base.WatcherObject)) thing2 = ser.deserialize_entity(self.context, primitive) self.assertEqual(1, len(thing2)) for item in thing2: self.assertIsInstance(item, MyObj) @mock.patch('watcher.objects.base.WatcherObject.indirection_api') def _test_deserialize_entity_newer(self, obj_version, backported_to, mock_indirection_api, my_version='1.6'): ser = base.WatcherObjectSerializer() mock_indirection_api.object_backport_versions.return_value \ = 'backported' @base.WatcherObjectRegistry.register class MyTestObj(MyObj): VERSION = my_version obj = MyTestObj(self.context) obj.VERSION = obj_version primitive = obj.obj_to_primitive() result = ser.deserialize_entity(self.context, primitive) if backported_to is None: self.assertFalse( mock_indirection_api.object_backport_versions.called) else: self.assertEqual('backported', result) versions = object_base.obj_tree_get_versions('MyTestObj') mock_indirection_api.object_backport_versions.assert_called_with( self.context, primitive, versions) def test_deserialize_entity_newer_version_backports(self): "Test object with unsupported (newer) version" self._test_deserialize_entity_newer('1.25', '1.6') def test_deserialize_entity_same_revision_does_not_backport(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6', 
None) def test_deserialize_entity_newer_revision_does_not_backport_zero(self): "Test object with supported revision" self._test_deserialize_entity_newer('1.6.0', None) def test_deserialize_entity_newer_revision_does_not_backport(self): "Test object with supported (newer) revision" self._test_deserialize_entity_newer('1.6.1', None) def test_deserialize_entity_newer_version_passes_revision(self): "Test object with unsupported (newer) version and revision" self._test_deserialize_entity_newer('1.7', '1.6.1', my_version='1.6.1') class TestRegistry(test_base.TestCase): @mock.patch('watcher.objects.base.objects') def test_hook_chooses_newer_properly(self, mock_objects): reg = base.WatcherObjectRegistry() reg.registration_hook(MyObj, 0) class MyNewerObj(object): VERSION = '1.123' @classmethod def obj_name(cls): return 'MyObj' self.assertEqual(MyObj, mock_objects.MyObj) reg.registration_hook(MyNewerObj, 0) self.assertEqual(MyNewerObj, mock_objects.MyObj) @mock.patch('watcher.objects.base.objects') def test_hook_keeps_newer_properly(self, mock_objects): reg = base.WatcherObjectRegistry() reg.registration_hook(MyObj, 0) class MyOlderObj(object): VERSION = '1.1' @classmethod def obj_name(cls): return 'MyObj' self.assertEqual(MyObj, mock_objects.MyObj) reg.registration_hook(MyOlderObj, 0) self.assertEqual(MyObj, mock_objects.MyObj) python-watcher-4.0.0/watcher/tests/objects/test_action.py0000664000175000017500000002376313656752270023657 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from watcher.common import exception from watcher.common import utils as c_utils from watcher.db.sqlalchemy import api as db_api from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestActionObject(base.DbTestCase): action_plan_id = 2 scenarios = [ ('non_eager', dict( eager=False, fake_action=utils.get_test_action( action_plan_id=action_plan_id))), ('eager_with_non_eager_load', dict( eager=True, fake_action=utils.get_test_action( action_plan_id=action_plan_id))), ('eager_with_eager_load', dict( eager=True, fake_action=utils.get_test_action( action_plan_id=action_plan_id, action_plan=utils.get_test_action_plan(id=action_plan_id)))), ] def setUp(self): super(TestActionObject, self).setUp() p_action_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_notifications = p_action_notifications.start() self.addCleanup(p_action_notifications.stop) self.m_send_update = self.m_action_notifications.send_update self.fake_action_plan = utils.create_test_action_plan( id=self.action_plan_id) def eager_action_assert(self, action): if self.eager: self.assertIsNotNone(action.action_plan) fields_to_check = set( super(objects.ActionPlan, objects.ActionPlan).fields ).symmetric_difference(objects.ActionPlan.fields) db_data = { k: v for k, v in self.fake_action_plan.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in action.action_plan.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_action_by_id') def test_get_by_id(self, mock_get_action): mock_get_action.return_value = self.fake_action action_id = self.fake_action['id'] action = objects.Action.get(self.context, action_id, eager=self.eager) 
mock_get_action.assert_called_once_with( self.context, action_id, eager=self.eager) self.assertEqual(self.context, action._context) self.eager_action_assert(action) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_get_by_uuid(self, mock_get_action): mock_get_action.return_value = self.fake_action uuid = self.fake_action['uuid'] action = objects.Action.get(self.context, uuid, eager=self.eager) mock_get_action.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, action._context) self.assertEqual(0, self.m_send_update.call_count) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Action.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_action_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_action] actions = objects.Action.list(self.context, eager=self.eager) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(actions)) self.assertIsInstance(actions[0], objects.Action) self.assertEqual(self.context, actions[0]._context) for action in actions: self.eager_action_assert(action) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(objects.Strategy, 'get') @mock.patch.object(objects.Audit, 'get') @mock.patch.object(db_api.Connection, 'update_action') @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_save(self, mock_get_action, mock_update_action, mock_get_audit, mock_get_strategy): mock_get_action.return_value = self.fake_action fake_saved_action = self.fake_action.copy() mock_get_audit.return_value = mock.PropertyMock( uuid=c_utils.generate_uuid()) mock_get_strategy.return_value = mock.PropertyMock( uuid=c_utils.generate_uuid()) fake_saved_action['updated_at'] = datetime.datetime.utcnow() mock_update_action.return_value = fake_saved_action uuid = self.fake_action['uuid'] action = 
objects.Action.get_by_uuid( self.context, uuid, eager=self.eager) action.state = objects.action.State.SUCCEEDED if not self.eager: self.assertRaises(exception.EagerlyLoadedActionRequired, action.save) else: action.save() expected_update_at = fake_saved_action['updated_at'].replace( tzinfo=iso8601.UTC) mock_get_action.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_action.assert_called_once_with( uuid, {'state': objects.action.State.SUCCEEDED}) self.assertEqual(self.context, action._context) self.assertEqual(expected_update_at, action.updated_at) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_refresh(self, mock_get_action): returns = [dict(self.fake_action, state="first state"), dict(self.fake_action, state="second state")] mock_get_action.side_effect = returns uuid = self.fake_action['uuid'] expected = [mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, eager=self.eager)] action = objects.Action.get(self.context, uuid, eager=self.eager) self.assertEqual("first state", action.state) action.refresh(eager=self.eager) self.assertEqual("second state", action.state) self.assertEqual(expected, mock_get_action.call_args_list) self.assertEqual(self.context, action._context) self.eager_action_assert(action) self.assertEqual(0, self.m_send_update.call_count) class TestCreateDeleteActionObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteActionObject, self).setUp() self.fake_strategy = utils.create_test_strategy(name="DUMMY") self.fake_audit = utils.create_test_audit() self.fake_action_plan = utils.create_test_action_plan() self.fake_action = utils.get_test_action( created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'create_action') def test_create(self, mock_create_action): mock_create_action.return_value = self.fake_action action = objects.Action(self.context, **self.fake_action) action.create() expected_action = 
self.fake_action.copy() expected_action['created_at'] = expected_action['created_at'].replace( tzinfo=iso8601.UTC) mock_create_action.assert_called_once_with(expected_action) self.assertEqual(self.context, action._context) @mock.patch.object(notifications.action, 'send_delete') @mock.patch.object(notifications.action, 'send_update') @mock.patch.object(db_api.Connection, 'update_action') @mock.patch.object(db_api.Connection, 'soft_delete_action') @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_soft_delete(self, mock_get_action, mock_soft_delete_action, mock_update_action, mock_send_update, mock_send_delete): mock_get_action.return_value = self.fake_action fake_deleted_action = self.fake_action.copy() fake_deleted_action['deleted_at'] = datetime.datetime.utcnow() mock_soft_delete_action.return_value = fake_deleted_action mock_update_action.return_value = fake_deleted_action expected_action = fake_deleted_action.copy() expected_action['created_at'] = expected_action['created_at'].replace( tzinfo=iso8601.UTC) expected_action['deleted_at'] = expected_action['deleted_at'].replace( tzinfo=iso8601.UTC) del expected_action['action_plan'] uuid = self.fake_action['uuid'] action = objects.Action.get_by_uuid(self.context, uuid) action.soft_delete() mock_get_action.assert_called_once_with( self.context, uuid, eager=False) mock_soft_delete_action.assert_called_once_with(uuid) mock_update_action.assert_called_once_with( uuid, {'state': objects.action.State.DELETED}) self.assertEqual(self.context, action._context) self.assertEqual(expected_action, action.as_dict()) @mock.patch.object(db_api.Connection, 'destroy_action') @mock.patch.object(db_api.Connection, 'get_action_by_uuid') def test_destroy(self, mock_get_action, mock_destroy_action): mock_get_action.return_value = self.fake_action uuid = self.fake_action['uuid'] action = objects.Action.get_by_uuid(self.context, uuid) action.destroy() mock_get_action.assert_called_once_with( self.context, uuid, 
eager=False) mock_destroy_action.assert_called_once_with(uuid) self.assertEqual(self.context, action._context) python-watcher-4.0.0/watcher/tests/objects/test_audit.py0000664000175000017500000003366213656752270023507 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from watcher.common import exception from watcher.common import rpc from watcher.common import utils as w_utils from watcher.db.sqlalchemy import api as db_api from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils from watcher.tests.objects import utils as objutils class TestAuditObject(base.DbTestCase): goal_id = 2 goal_data = utils.get_test_goal( id=goal_id, uuid=w_utils.generate_uuid(), name="DUMMY") scenarios = [ ('non_eager', dict( eager=False, fake_audit=utils.get_test_audit( created_at=datetime.datetime.utcnow(), goal_id=goal_id))), ('eager_with_non_eager_load', dict( eager=True, fake_audit=utils.get_test_audit( created_at=datetime.datetime.utcnow(), goal_id=goal_id))), ('eager_with_eager_load', dict( eager=True, fake_audit=utils.get_test_audit( created_at=datetime.datetime.utcnow(), goal_id=goal_id, goal=goal_data))), ] def setUp(self): super(TestAuditObject, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = 
p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.m_send_update = self.m_audit_notifications.send_update self.fake_goal = utils.create_test_goal(**self.goal_data) def eager_load_audit_assert(self, audit, goal): if self.eager: self.assertIsNotNone(audit.goal) fields_to_check = set( super(objects.Goal, objects.Goal).fields ).symmetric_difference(objects.Goal.fields) db_data = { k: v for k, v in goal.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in audit.goal.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_audit_by_id') def test_get_by_id(self, mock_get_audit): mock_get_audit.return_value = self.fake_audit audit_id = self.fake_audit['id'] audit = objects.Audit.get(self.context, audit_id, eager=self.eager) mock_get_audit.assert_called_once_with( self.context, audit_id, eager=self.eager) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_get_by_uuid(self, mock_get_audit): mock_get_audit.return_value = self.fake_audit uuid = self.fake_audit['uuid'] audit = objects.Audit.get(self.context, uuid, eager=self.eager) mock_get_audit.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) self.assertEqual(0, self.m_send_update.call_count) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.Audit.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_audit_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_audit] audits = objects.Audit.list(self.context, eager=self.eager) mock_get_list.assert_called_once_with( self.context, eager=self.eager, filters=None, limit=None, 
marker=None, sort_dir=None, sort_key=None) self.assertEqual(1, len(audits)) self.assertIsInstance(audits[0], objects.Audit) self.assertEqual(self.context, audits[0]._context) for audit in audits: self.eager_load_audit_assert(audit, self.fake_goal) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_save(self, mock_get_audit, mock_update_audit): mock_get_audit.return_value = self.fake_audit fake_saved_audit = self.fake_audit.copy() fake_saved_audit['state'] = objects.audit.State.SUCCEEDED fake_saved_audit['updated_at'] = datetime.datetime.utcnow() mock_update_audit.return_value = fake_saved_audit expected_audit = fake_saved_audit.copy() expected_audit['created_at'] = expected_audit['created_at'].replace( tzinfo=iso8601.UTC) expected_audit['updated_at'] = expected_audit['updated_at'].replace( tzinfo=iso8601.UTC) uuid = self.fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=self.eager) audit.state = objects.audit.State.SUCCEEDED audit.save() mock_get_audit.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_audit.assert_called_once_with( uuid, {'state': objects.audit.State.SUCCEEDED}) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) self.m_send_update.assert_called_once_with( self.context, audit, old_state=self.fake_audit['state']) self.assertEqual( {k: v for k, v in expected_audit.items() if k not in audit.object_fields}, {k: v for k, v in audit.as_dict().items() if k not in audit.object_fields}) @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_refresh(self, mock_get_audit): returns = [dict(self.fake_audit, state="first state"), dict(self.fake_audit, state="second state")] mock_get_audit.side_effect = returns uuid = self.fake_audit['uuid'] expected = [ mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, 
eager=self.eager)] audit = objects.Audit.get(self.context, uuid, eager=self.eager) self.assertEqual("first state", audit.state) audit.refresh(eager=self.eager) self.assertEqual("second state", audit.state) self.assertEqual(expected, mock_get_audit.call_args_list) self.assertEqual(self.context, audit._context) self.eager_load_audit_assert(audit, self.fake_goal) class TestCreateDeleteAuditObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteAuditObject, self).setUp() p_audit_notifications = mock.patch.object( notifications, 'audit', autospec=True) self.m_audit_notifications = p_audit_notifications.start() self.addCleanup(p_audit_notifications.stop) self.m_send_update = self.m_audit_notifications.send_update self.goal_id = 1 self.goal = utils.create_test_goal(id=self.goal_id, name="DUMMY") self.fake_audit = utils.get_test_audit( goal_id=self.goal_id, created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'create_audit') def test_create(self, mock_create_audit): mock_create_audit.return_value = self.fake_audit audit = objects.Audit(self.context, **self.fake_audit) audit.create() expected_audit = self.fake_audit.copy() expected_audit['created_at'] = expected_audit['created_at'].replace( tzinfo=iso8601.UTC) mock_create_audit.assert_called_once_with(expected_audit) self.assertEqual(self.context, audit._context) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'soft_delete_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_soft_delete(self, mock_get_audit, mock_soft_delete_audit, mock_update_audit): mock_get_audit.return_value = self.fake_audit fake_deleted_audit = self.fake_audit.copy() fake_deleted_audit['deleted_at'] = datetime.datetime.utcnow() mock_soft_delete_audit.return_value = fake_deleted_audit mock_update_audit.return_value = fake_deleted_audit expected_audit = fake_deleted_audit.copy() expected_audit['created_at'] = expected_audit['created_at'].replace( 
tzinfo=iso8601.UTC) expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( tzinfo=iso8601.UTC) del expected_audit['goal'] del expected_audit['strategy'] uuid = self.fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=False) audit.soft_delete() mock_get_audit.assert_called_once_with(self.context, uuid, eager=False) mock_soft_delete_audit.assert_called_once_with(uuid) mock_update_audit.assert_called_once_with(uuid, {'state': 'DELETED'}) self.assertEqual(self.context, audit._context) self.assertEqual(expected_audit, audit.as_dict()) @mock.patch.object(db_api.Connection, 'destroy_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_destroy(self, mock_get_audit, mock_destroy_audit): mock_get_audit.return_value = self.fake_audit uuid = self.fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid) audit.destroy() mock_get_audit.assert_called_once_with( self.context, uuid, eager=False) mock_destroy_audit.assert_called_once_with(uuid) self.assertEqual(self.context, audit._context) class TestAuditObjectSendNotifications(base.DbTestCase): def setUp(self): super(TestAuditObjectSendNotifications, self).setUp() goal_id = 1 self.fake_goal = utils.create_test_goal(id=goal_id, name="DUMMY") self.fake_strategy = utils.create_test_strategy( id=goal_id, name="DUMMY") self.fake_audit = utils.get_test_audit( goal_id=goal_id, goal=utils.get_test_goal(id=goal_id), strategy_id=self.fake_strategy.id, strategy=self.fake_strategy) p_get_notifier = mock.patch.object(rpc, 'get_notifier') self.m_get_notifier = p_get_notifier.start() self.m_get_notifier.return_value = mock.Mock(name='m_notifier') self.m_notifier = self.m_get_notifier.return_value self.addCleanup(p_get_notifier.stop) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_send_update_notification(self, m_get_audit, m_update_audit): fake_audit = utils.get_test_audit( 
goal=self.fake_goal.as_dict(), strategy_id=self.fake_strategy.id, strategy=self.fake_strategy.as_dict()) m_get_audit.return_value = fake_audit fake_saved_audit = self.fake_audit.copy() fake_saved_audit['state'] = objects.audit.State.SUCCEEDED m_update_audit.return_value = fake_saved_audit uuid = fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=True) audit.state = objects.audit.State.ONGOING audit.save() self.assertEqual(1, self.m_notifier.info.call_count) self.assertEqual('audit.update', self.m_notifier.info.call_args[1]['event_type']) @mock.patch.object(db_api.Connection, 'create_audit') def test_send_create_notification(self, m_create_audit): audit = objutils.get_test_audit( self.context, id=1, goal_id=self.fake_goal.id, strategy_id=self.fake_strategy.id, goal=self.fake_goal.as_dict(), strategy=self.fake_strategy.as_dict()) m_create_audit.return_value = audit audit.create() self.assertEqual(1, self.m_notifier.info.call_count) self.assertEqual('audit.create', self.m_notifier.info.call_args[1]['event_type']) @mock.patch.object(db_api.Connection, 'update_audit') @mock.patch.object(db_api.Connection, 'soft_delete_audit') @mock.patch.object(db_api.Connection, 'get_audit_by_uuid') def test_send_delete_notification( self, m_get_audit, m_soft_delete_audit, m_update_audit): fake_audit = utils.get_test_audit( goal=self.fake_goal.as_dict(), strategy_id=self.fake_strategy.id, strategy=self.fake_strategy.as_dict()) m_get_audit.return_value = fake_audit fake_deleted_audit = self.fake_audit.copy() fake_deleted_audit['deleted_at'] = datetime.datetime.utcnow() expected_audit = fake_deleted_audit.copy() expected_audit['deleted_at'] = expected_audit['deleted_at'].replace( tzinfo=iso8601.UTC) m_soft_delete_audit.return_value = fake_deleted_audit m_update_audit.return_value = fake_deleted_audit uuid = fake_audit['uuid'] audit = objects.Audit.get_by_uuid(self.context, uuid, eager=True) audit.soft_delete() self.assertEqual(2, 
self.m_notifier.info.call_count) self.assertEqual( 'audit.update', self.m_notifier.info.call_args_list[0][1]['event_type']) self.assertEqual( 'audit.delete', self.m_notifier.info.call_args_list[1][1]['event_type']) python-watcher-4.0.0/watcher/tests/objects/test_action_plan.py0000664000175000017500000003456413656752270024672 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import iso8601 import mock from watcher.common import exception from watcher.common import utils as common_utils from watcher import conf from watcher.db.sqlalchemy import api as db_api from watcher import notifications from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils CONF = conf.CONF class TestActionPlanObject(base.DbTestCase): audit_id = 2 strategy_id = 2 scenarios = [ ('non_eager', dict( eager=False, fake_action_plan=utils.get_test_action_plan( created_at=datetime.datetime.utcnow(), audit_id=audit_id, strategy_id=strategy_id))), ('eager_with_non_eager_load', dict( eager=True, fake_action_plan=utils.get_test_action_plan( created_at=datetime.datetime.utcnow(), audit_id=audit_id, strategy_id=strategy_id))), ('eager_with_eager_load', dict( eager=True, fake_action_plan=utils.get_test_action_plan( created_at=datetime.datetime.utcnow(), strategy_id=strategy_id, strategy=utils.get_test_strategy(id=strategy_id), audit_id=audit_id, 
audit=utils.get_test_audit(id=audit_id)))), ] def setUp(self): super(TestActionPlanObject, self).setUp() p_action_plan_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_plan_notifications = p_action_plan_notifications.start() self.addCleanup(p_action_plan_notifications.stop) self.m_send_update = self.m_action_plan_notifications.send_update self.fake_audit = utils.create_test_audit(id=self.audit_id) self.fake_strategy = utils.create_test_strategy( id=self.strategy_id, name="DUMMY") def eager_load_action_plan_assert(self, action_plan): if self.eager: self.assertIsNotNone(action_plan.audit) fields_to_check = set( super(objects.Audit, objects.Audit).fields ).symmetric_difference(objects.Audit.fields) db_data = { k: v for k, v in self.fake_audit.as_dict().items() if k in fields_to_check} object_data = { k: v for k, v in action_plan.audit.as_dict().items() if k in fields_to_check} self.assertEqual(db_data, object_data) @mock.patch.object(db_api.Connection, 'get_action_plan_by_id') def test_get_by_id(self, mock_get_action_plan): mock_get_action_plan.return_value = self.fake_action_plan action_plan_id = self.fake_action_plan['id'] action_plan = objects.ActionPlan.get( self.context, action_plan_id, eager=self.eager) mock_get_action_plan.assert_called_once_with( self.context, action_plan_id, eager=self.eager) self.assertEqual(self.context, action_plan._context) self.eager_load_action_plan_assert(action_plan) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') def test_get_by_uuid(self, mock_get_action_plan): mock_get_action_plan.return_value = self.fake_action_plan uuid = self.fake_action_plan['uuid'] action_plan = objects.ActionPlan.get( self.context, uuid, eager=self.eager) mock_get_action_plan.assert_called_once_with( self.context, uuid, eager=self.eager) self.assertEqual(self.context, action_plan._context) self.eager_load_action_plan_assert(action_plan) 
self.assertEqual(0, self.m_send_update.call_count) def test_get_bad_id_and_uuid(self): self.assertRaises(exception.InvalidIdentity, objects.ActionPlan.get, self.context, 'not-a-uuid', eager=self.eager) @mock.patch.object(db_api.Connection, 'get_action_plan_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_action_plan] action_plans = objects.ActionPlan.list(self.context, eager=self.eager) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(action_plans)) self.assertIsInstance(action_plans[0], objects.ActionPlan) self.assertEqual(self.context, action_plans[0]._context) for action_plan in action_plans: self.eager_load_action_plan_assert(action_plan) self.assertEqual(0, self.m_send_update.call_count) @mock.patch.object(db_api.Connection, 'update_action_plan') @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') def test_save(self, mock_get_action_plan, mock_update_action_plan): mock_get_action_plan.return_value = self.fake_action_plan fake_saved_action_plan = self.fake_action_plan.copy() fake_saved_action_plan['state'] = objects.action_plan.State.SUCCEEDED fake_saved_action_plan['updated_at'] = datetime.datetime.utcnow() mock_update_action_plan.return_value = fake_saved_action_plan expected_action_plan = fake_saved_action_plan.copy() expected_action_plan[ 'created_at'] = expected_action_plan['created_at'].replace( tzinfo=iso8601.UTC) expected_action_plan[ 'updated_at'] = expected_action_plan['updated_at'].replace( tzinfo=iso8601.UTC) uuid = self.fake_action_plan['uuid'] action_plan = objects.ActionPlan.get_by_uuid( self.context, uuid, eager=self.eager) action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() mock_get_action_plan.assert_called_once_with( self.context, uuid, eager=self.eager) mock_update_action_plan.assert_called_once_with( uuid, {'state': objects.action_plan.State.SUCCEEDED}) self.assertEqual(self.context, action_plan._context) 
self.eager_load_action_plan_assert(action_plan) self.m_send_update.assert_called_once_with( self.context, action_plan, old_state=self.fake_action_plan['state']) self.assertEqual( {k: v for k, v in expected_action_plan.items() if k not in action_plan.object_fields}, {k: v for k, v in action_plan.as_dict().items() if k not in action_plan.object_fields}) @mock.patch.object(db_api.Connection, 'get_action_plan_by_uuid') def test_refresh(self, mock_get_action_plan): returns = [dict(self.fake_action_plan, state="first state"), dict(self.fake_action_plan, state="second state")] mock_get_action_plan.side_effect = returns uuid = self.fake_action_plan['uuid'] expected = [mock.call(self.context, uuid, eager=self.eager), mock.call(self.context, uuid, eager=self.eager)] action_plan = objects.ActionPlan.get( self.context, uuid, eager=self.eager) self.assertEqual("first state", action_plan.state) action_plan.refresh(eager=self.eager) self.assertEqual("second state", action_plan.state) self.assertEqual(expected, mock_get_action_plan.call_args_list) self.assertEqual(self.context, action_plan._context) self.eager_load_action_plan_assert(action_plan) class TestCreateDeleteActionPlanObject(base.DbTestCase): def setUp(self): super(TestCreateDeleteActionPlanObject, self).setUp() p_action_plan_notifications = mock.patch.object( notifications, 'action_plan', autospec=True) self.m_action_plan_notifications = p_action_plan_notifications.start() self.addCleanup(p_action_plan_notifications.stop) self.m_send_update = self.m_action_plan_notifications.send_update self.fake_strategy = utils.create_test_strategy(name="DUMMY") self.fake_audit = utils.create_test_audit() self.fake_action_plan = utils.get_test_action_plan( created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'create_action_plan') def test_create(self, mock_create_action_plan): mock_create_action_plan.return_value = self.fake_action_plan action_plan = objects.ActionPlan( self.context, **self.fake_action_plan) 
action_plan.create() expected_action_plan = self.fake_action_plan.copy() expected_action_plan['created_at'] = expected_action_plan[ 'created_at'].replace(tzinfo=iso8601.UTC) mock_create_action_plan.assert_called_once_with(expected_action_plan) self.assertEqual(self.context, action_plan._context) @mock.patch.multiple( db_api.Connection, get_action_plan_by_uuid=mock.DEFAULT, soft_delete_action_plan=mock.DEFAULT, update_action_plan=mock.DEFAULT, get_efficacy_indicator_list=mock.DEFAULT, soft_delete_efficacy_indicator=mock.DEFAULT, ) def test_soft_delete(self, get_action_plan_by_uuid, soft_delete_action_plan, update_action_plan, get_efficacy_indicator_list, soft_delete_efficacy_indicator): efficacy_indicator = utils.get_test_efficacy_indicator( action_plan_id=self.fake_action_plan['id']) uuid = self.fake_action_plan['uuid'] m_get_action_plan = get_action_plan_by_uuid m_soft_delete_action_plan = soft_delete_action_plan m_get_efficacy_indicator_list = get_efficacy_indicator_list m_soft_delete_efficacy_indicator = soft_delete_efficacy_indicator m_update_action_plan = update_action_plan m_get_action_plan.return_value = self.fake_action_plan fake_deleted_action_plan = self.fake_action_plan.copy() fake_deleted_action_plan['deleted_at'] = datetime.datetime.utcnow() m_update_action_plan.return_value = fake_deleted_action_plan m_soft_delete_action_plan.return_value = fake_deleted_action_plan expected_action_plan = fake_deleted_action_plan.copy() expected_action_plan['created_at'] = expected_action_plan[ 'created_at'].replace(tzinfo=iso8601.UTC) expected_action_plan['deleted_at'] = expected_action_plan[ 'deleted_at'].replace(tzinfo=iso8601.UTC) del expected_action_plan['audit'] del expected_action_plan['strategy'] m_get_efficacy_indicator_list.return_value = [efficacy_indicator] action_plan = objects.ActionPlan.get_by_uuid( self.context, uuid, eager=False) action_plan.soft_delete() m_get_action_plan.assert_called_once_with( self.context, uuid, eager=False) 
m_get_efficacy_indicator_list.assert_called_once_with( self.context, filters={"action_plan_uuid": uuid}, limit=None, marker=None, sort_dir=None, sort_key=None) m_soft_delete_action_plan.assert_called_once_with(uuid) m_soft_delete_efficacy_indicator.assert_called_once_with( efficacy_indicator['uuid']) m_update_action_plan.assert_called_once_with( uuid, {'state': objects.action_plan.State.DELETED}) self.assertEqual(self.context, action_plan._context) self.assertEqual(expected_action_plan, action_plan.as_dict()) @mock.patch.multiple( db_api.Connection, get_action_plan_by_uuid=mock.DEFAULT, destroy_action_plan=mock.DEFAULT, get_efficacy_indicator_list=mock.DEFAULT, destroy_efficacy_indicator=mock.DEFAULT, ) def test_destroy(self, get_action_plan_by_uuid, destroy_action_plan, get_efficacy_indicator_list, destroy_efficacy_indicator): m_get_action_plan = get_action_plan_by_uuid m_destroy_action_plan = destroy_action_plan m_get_efficacy_indicator_list = get_efficacy_indicator_list m_destroy_efficacy_indicator = destroy_efficacy_indicator efficacy_indicator = utils.get_test_efficacy_indicator( action_plan_id=self.fake_action_plan['id']) uuid = self.fake_action_plan['uuid'] m_get_action_plan.return_value = self.fake_action_plan m_get_efficacy_indicator_list.return_value = [efficacy_indicator] action_plan = objects.ActionPlan.get_by_uuid(self.context, uuid) action_plan.destroy() m_get_action_plan.assert_called_once_with( self.context, uuid, eager=False) m_get_efficacy_indicator_list.assert_called_once_with( self.context, filters={"action_plan_uuid": uuid}, limit=None, marker=None, sort_dir=None, sort_key=None) m_destroy_action_plan.assert_called_once_with(uuid) m_destroy_efficacy_indicator.assert_called_once_with( efficacy_indicator['uuid']) self.assertEqual(self.context, action_plan._context) @mock.patch.object(notifications.action_plan, 'send_update', mock.Mock()) class TestStateManager(base.DbTestCase): def setUp(self): super(TestStateManager, self).setUp() 
self.state_manager = objects.action_plan.StateManager() def test_check_expired(self): CONF.set_default('action_plan_expiry', 0, group='watcher_decision_engine') strategy_1 = utils.create_test_strategy( uuid=common_utils.generate_uuid()) audit_1 = utils.create_test_audit( uuid=common_utils.generate_uuid()) action_plan_1 = utils.create_test_action_plan( state=objects.action_plan.State.RECOMMENDED, uuid=common_utils.generate_uuid(), audit_id=audit_1.id, strategy_id=strategy_1.id) self.state_manager.check_expired(self.context) action_plan = objects.action_plan.ActionPlan.get_by_uuid( self.context, action_plan_1.uuid) self.assertEqual(objects.action_plan.State.SUPERSEDED, action_plan.state) python-watcher-4.0.0/watcher/tests/objects/test_service.py0000664000175000017500000001173613656752270024037 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import iso8601 import mock from watcher.db.sqlalchemy import api as db_api from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestServiceObject(base.DbTestCase): def setUp(self): super(TestServiceObject, self).setUp() self.fake_service = utils.get_test_service( created_at=datetime.datetime.utcnow()) @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_get_by_id(self, mock_get_service): service_id = self.fake_service['id'] mock_get_service.return_value = self.fake_service service = objects.Service.get(self.context, service_id) mock_get_service.assert_called_once_with(self.context, service_id) self.assertEqual(self.context, service._context) @mock.patch.object(db_api.Connection, 'get_service_list') def test_list(self, mock_get_list): mock_get_list.return_value = [self.fake_service] services = objects.Service.list(self.context) self.assertEqual(1, mock_get_list.call_count, 1) self.assertEqual(1, len(services)) self.assertIsInstance(services[0], objects.Service) self.assertEqual(self.context, services[0]._context) @mock.patch.object(db_api.Connection, 'create_service') def test_create(self, mock_create_service): mock_create_service.return_value = self.fake_service service = objects.Service(self.context, **self.fake_service) service.create() expected_service = self.fake_service.copy() expected_service['created_at'] = expected_service[ 'created_at'].replace(tzinfo=iso8601.UTC) mock_create_service.assert_called_once_with(expected_service) self.assertEqual(self.context, service._context) @mock.patch.object(db_api.Connection, 'update_service') @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_save(self, mock_get_service, mock_update_service): mock_get_service.return_value = self.fake_service fake_saved_service = self.fake_service.copy() fake_saved_service['updated_at'] = datetime.datetime.utcnow() mock_update_service.return_value = fake_saved_service _id = 
self.fake_service['id'] service = objects.Service.get(self.context, _id) service.name = 'UPDATED NAME' service.save() mock_get_service.assert_called_once_with(self.context, _id) mock_update_service.assert_called_once_with( _id, {'name': 'UPDATED NAME'}) self.assertEqual(self.context, service._context) @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_refresh(self, mock_get_service): returns = [dict(self.fake_service, name="first name"), dict(self.fake_service, name="second name")] mock_get_service.side_effect = returns _id = self.fake_service['id'] expected = [mock.call(self.context, _id), mock.call(self.context, _id)] service = objects.Service.get(self.context, _id) self.assertEqual("first name", service.name) service.refresh() self.assertEqual("second name", service.name) self.assertEqual(expected, mock_get_service.call_args_list) self.assertEqual(self.context, service._context) @mock.patch.object(db_api.Connection, 'soft_delete_service') @mock.patch.object(db_api.Connection, 'get_service_by_id') def test_soft_delete(self, mock_get_service, mock_soft_delete): mock_get_service.return_value = self.fake_service fake_deleted_service = self.fake_service.copy() fake_deleted_service['deleted_at'] = datetime.datetime.utcnow() mock_soft_delete.return_value = fake_deleted_service expected_service = fake_deleted_service.copy() expected_service['created_at'] = expected_service[ 'created_at'].replace(tzinfo=iso8601.UTC) expected_service['deleted_at'] = expected_service[ 'deleted_at'].replace(tzinfo=iso8601.UTC) _id = self.fake_service['id'] service = objects.Service.get(self.context, _id) service.soft_delete() mock_get_service.assert_called_once_with(self.context, _id) mock_soft_delete.assert_called_once_with(_id) self.assertEqual(self.context, service._context) self.assertEqual(expected_service, service.as_dict()) python-watcher-4.0.0/watcher/tests/objects/test_efficacy_indicator.py0000664000175000017500000001533213656752270026200 0ustar 
zuulzuul00000000000000# Copyright 2015 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from watcher.common import exception # from watcher.common import utils as w_utils from watcher import objects from watcher.tests.db import base from watcher.tests.db import utils class TestEfficacyIndicatorObject(base.DbTestCase): def setUp(self): super(TestEfficacyIndicatorObject, self).setUp() self.fake_efficacy_indicator = utils.get_test_efficacy_indicator() def test_get_by_id(self): efficacy_indicator_id = self.fake_efficacy_indicator['id'] with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_id', autospec=True) as mock_get_efficacy_indicator: mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) efficacy_indicator = objects.EfficacyIndicator.get( self.context, efficacy_indicator_id) mock_get_efficacy_indicator.assert_called_once_with( self.context, efficacy_indicator_id) self.assertEqual(self.context, efficacy_indicator._context) def test_get_by_uuid(self): uuid = self.fake_efficacy_indicator['uuid'] with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', autospec=True) as mock_get_efficacy_indicator: mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) efficacy_indicator = objects.EfficacyIndicator.get( self.context, uuid) mock_get_efficacy_indicator.assert_called_once_with( self.context, uuid) self.assertEqual(self.context, efficacy_indicator._context) def 
test_get_bad_id_and_uuid(self): self.assertRaises( exception.InvalidIdentity, objects.EfficacyIndicator.get, self.context, 'not-a-uuid') def test_list(self): with mock.patch.object(self.dbapi, 'get_efficacy_indicator_list', autospec=True) as mock_get_list: mock_get_list.return_value = [self.fake_efficacy_indicator] efficacy_indicators = objects.EfficacyIndicator.list(self.context) self.assertEqual(1, mock_get_list.call_count) self.assertEqual(1, len(efficacy_indicators)) self.assertIsInstance( efficacy_indicators[0], objects.EfficacyIndicator) self.assertEqual(self.context, efficacy_indicators[0]._context) def test_create(self): with mock.patch.object( self.dbapi, 'create_efficacy_indicator', autospec=True ) as mock_create_efficacy_indicator: mock_create_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) efficacy_indicator = objects.EfficacyIndicator( self.context, **self.fake_efficacy_indicator) efficacy_indicator.create() mock_create_efficacy_indicator.assert_called_once_with( self.fake_efficacy_indicator) self.assertEqual(self.context, efficacy_indicator._context) def test_destroy(self): uuid = self.fake_efficacy_indicator['uuid'] with mock.patch.object( self.dbapi, 'get_efficacy_indicator_by_uuid', autospec=True ) as mock_get_efficacy_indicator: mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) with mock.patch.object( self.dbapi, 'destroy_efficacy_indicator', autospec=True ) as mock_destroy_efficacy_indicator: efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( self.context, uuid) efficacy_indicator.destroy() mock_get_efficacy_indicator.assert_called_once_with( self.context, uuid) mock_destroy_efficacy_indicator.assert_called_once_with(uuid) self.assertEqual(self.context, efficacy_indicator._context) def test_save(self): uuid = self.fake_efficacy_indicator['uuid'] with mock.patch.object( self.dbapi, 'get_efficacy_indicator_by_uuid', autospec=True ) as mock_get_efficacy_indicator: 
mock_get_efficacy_indicator.return_value = ( self.fake_efficacy_indicator) with mock.patch.object( self.dbapi, 'update_efficacy_indicator', autospec=True ) as mock_update_efficacy_indicator: efficacy_indicator = objects.EfficacyIndicator.get_by_uuid( self.context, uuid) efficacy_indicator.description = 'Indicator Description' efficacy_indicator.save() mock_get_efficacy_indicator.assert_called_once_with( self.context, uuid) mock_update_efficacy_indicator.assert_called_once_with( uuid, {'description': 'Indicator Description'}) self.assertEqual(self.context, efficacy_indicator._context) def test_refresh(self): uuid = self.fake_efficacy_indicator['uuid'] returns = [dict(self.fake_efficacy_indicator, description="first description"), dict(self.fake_efficacy_indicator, description="second description")] expected = [mock.call(self.context, uuid), mock.call(self.context, uuid)] with mock.patch.object(self.dbapi, 'get_efficacy_indicator_by_uuid', side_effect=returns, autospec=True) as mock_get_efficacy_indicator: efficacy_indicator = objects.EfficacyIndicator.get( self.context, uuid) self.assertEqual( "first description", efficacy_indicator.description) efficacy_indicator.refresh() self.assertEqual( "second description", efficacy_indicator.description) self.assertEqual( expected, mock_get_efficacy_indicator.call_args_list) self.assertEqual(self.context, efficacy_indicator._context) python-watcher-4.0.0/watcher/tests/cmd/0000775000175000017500000000000013656752352020071 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/cmd/__init__.py0000664000175000017500000000000013656752270022167 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/cmd/test_decision_engine.py0000664000175000017500000000420413656752270024623 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import unicode_literals import types import mock from oslo_config import cfg from oslo_service import service from watcher.cmd import decisionengine from watcher.common import service as watcher_service from watcher.decision_engine.audit import continuous from watcher.decision_engine import sync from watcher.tests import base class TestDecisionEngine(base.BaseTestCase): def setUp(self): super(TestDecisionEngine, self).setUp() self.conf = cfg.CONF self._parse_cli_opts = self.conf._parse_cli_opts def _fake_parse(self, args=[]): return cfg.ConfigOpts._parse_cli_opts(self, []) _fake_parse_method = types.MethodType(_fake_parse, self.conf) self.conf._parse_cli_opts = _fake_parse_method p_heartbeat = mock.patch.object( watcher_service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) p_continuoushandler = mock.patch.object( continuous.ContinuousAuditHandler, "start") self.m_continuoushandler = p_continuoushandler.start() self.addCleanup(p_continuoushandler.stop) def tearDown(self): super(TestDecisionEngine, self).tearDown() self.conf._parse_cli_opts = self._parse_cli_opts @mock.patch.object(sync.Syncer, "sync", mock.Mock()) @mock.patch.object(service, "launch") def test_run_de_app(self, m_launch): decisionengine.main() self.assertEqual(1, m_launch.call_count) python-watcher-4.0.0/watcher/tests/cmd/test_applier.py0000664000175000017500000000351513656752270023141 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under 
the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import unicode_literals import types import mock from oslo_config import cfg from oslo_service import service from watcher.applier import sync from watcher.common import service as watcher_service from watcher.cmd import applier from watcher.tests import base class TestApplier(base.BaseTestCase): def setUp(self): super(TestApplier, self).setUp() self.conf = cfg.CONF self._parse_cli_opts = self.conf._parse_cli_opts def _fake_parse(self, args=[]): return cfg.ConfigOpts._parse_cli_opts(self, []) _fake_parse_method = types.MethodType(_fake_parse, self.conf) self.conf._parse_cli_opts = _fake_parse_method p_heartbeat = mock.patch.object( watcher_service.ServiceHeartbeat, "send_beat") self.m_heartbeat = p_heartbeat.start() self.addCleanup(p_heartbeat.stop) def tearDown(self): super(TestApplier, self).tearDown() self.conf._parse_cli_opts = self._parse_cli_opts @mock.patch.object(sync.Syncer, "sync", mock.Mock()) @mock.patch.object(service, "launch") def test_run_applier_app(self, m_launch): applier.main() self.assertEqual(1, m_launch.call_count) python-watcher-4.0.0/watcher/tests/cmd/test_api.py0000664000175000017500000000433113656752270022253 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import from __future__ import unicode_literals import types import mock from oslo_config import cfg from oslo_service import wsgi from pecan.testing import load_test_app from watcher.api import config as api_config from watcher.cmd import api from watcher.common import service from watcher.tests import base class TestApi(base.BaseTestCase): def setUp(self): super(TestApi, self).setUp() self.conf = cfg.CONF self._parse_cli_opts = self.conf._parse_cli_opts def _fake_parse(self, args=[]): return cfg.ConfigOpts._parse_cli_opts(self, []) _fake_parse_method = types.MethodType(_fake_parse, self.conf) self.conf._parse_cli_opts = _fake_parse_method def tearDown(self): super(TestApi, self).tearDown() self.conf._parse_cli_opts = self._parse_cli_opts @mock.patch.object(wsgi, "Server", mock.Mock()) @mock.patch("watcher.api.app.pecan.make_app") @mock.patch.object(service, "launch") def test_run_api_app(self, m_launcher, m_make_app): m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) api.main() self.assertEqual(1, m_launcher.call_count) @mock.patch.object(wsgi, "Server", mock.Mock()) @mock.patch("watcher.api.app.pecan.make_app") @mock.patch.object(service, "launch") def test_run_api_app_serve_specific_address(self, m_launcher, m_make_app): cfg.CONF.set_default("host", "localhost", group="api") m_make_app.return_value = load_test_app(config=api_config.PECAN_CONFIG) api.main() self.assertEqual(1, m_launcher.call_count) python-watcher-4.0.0/watcher/tests/cmd/test_db_manage.py0000664000175000017500000001644713656752270023412 
0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import mock from oslo_config import cfg from watcher.cmd import dbmanage from watcher.db import migration from watcher.db import purge from watcher.tests import base class TestDBManageRunApp(base.TestCase): scenarios = ( ("upgrade", {"command": "upgrade", "expected": "upgrade"}), ("downgrade", {"command": "downgrade", "expected": "downgrade"}), ("revision", {"command": "revision", "expected": "revision"}), ("stamp", {"command": "stamp", "expected": "stamp"}), ("version", {"command": "version", "expected": "version"}), ("create_schema", {"command": "create_schema", "expected": "create_schema"}), ("purge", {"command": "purge", "expected": "purge"}), ("no_param", {"command": None, "expected": "upgrade"}), ) @mock.patch.object(dbmanage, "register_sub_command_opts", mock.Mock()) @mock.patch("watcher.cmd.dbmanage.service.prepare_service") @mock.patch("watcher.cmd.dbmanage.sys") def test_run_db_manage_app(self, m_sys, m_prepare_service): # Patch command function m_func = mock.Mock() cfg.CONF.register_opt(cfg.SubCommandOpt("command")) cfg.CONF.command.func = m_func # Only append if the command is not None m_sys.argv = list(filter(None, ["watcher-db-manage", self.command])) dbmanage.main() self.assertEqual(1, m_func.call_count) m_prepare_service.assert_called_once_with( ["watcher-db-manage", self.expected], cfg.CONF) class 
TestDBManageRunCommand(base.TestCase): @mock.patch.object(migration, "upgrade") def test_run_db_upgrade(self, m_upgrade): cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") cfg.CONF.set_default("revision", "dummy", group="command") dbmanage.DBCommand.upgrade() m_upgrade.assert_called_once_with("dummy") @mock.patch.object(migration, "downgrade") def test_run_db_downgrade(self, m_downgrade): cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") cfg.CONF.set_default("revision", "dummy", group="command") dbmanage.DBCommand.downgrade() m_downgrade.assert_called_once_with("dummy") @mock.patch.object(migration, "revision") def test_run_db_revision(self, m_revision): cfg.CONF.register_opt(cfg.StrOpt("message"), group="command") cfg.CONF.register_opt(cfg.StrOpt("autogenerate"), group="command") cfg.CONF.set_default( "message", "dummy_message", group="command" ) cfg.CONF.set_default( "autogenerate", "dummy_autogenerate", group="command" ) dbmanage.DBCommand.revision() m_revision.assert_called_once_with( "dummy_message", "dummy_autogenerate" ) @mock.patch.object(migration, "stamp") def test_run_db_stamp(self, m_stamp): cfg.CONF.register_opt(cfg.StrOpt("revision"), group="command") cfg.CONF.set_default("revision", "dummy", group="command") dbmanage.DBCommand.stamp() @mock.patch.object(migration, "version") def test_run_db_version(self, m_version): dbmanage.DBCommand.version() self.assertEqual(1, m_version.call_count) @mock.patch.object(purge, "PurgeCommand") def test_run_db_purge(self, m_purge_cls): m_purge = mock.Mock() m_purge_cls.return_value = m_purge m_purge_cls.get_goal_uuid.return_value = 'Some UUID' cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") 
cfg.CONF.set_default("age_in_days", None, group="command") cfg.CONF.set_default("max_number", None, group="command") cfg.CONF.set_default("goal", None, group="command") cfg.CONF.set_default("exclude_orphans", True, group="command") cfg.CONF.set_default("dry_run", False, group="command") dbmanage.DBCommand.purge() m_purge_cls.assert_called_once_with( None, None, 'Some UUID', True, False) m_purge.execute.assert_called_once_with() @mock.patch.object(sys, "exit") @mock.patch.object(purge, "PurgeCommand") def test_run_db_purge_negative_max_number(self, m_purge_cls, m_exit): m_purge = mock.Mock() m_purge_cls.return_value = m_purge m_purge_cls.get_goal_uuid.return_value = 'Some UUID' cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") cfg.CONF.set_default("age_in_days", None, group="command") cfg.CONF.set_default("max_number", -1, group="command") cfg.CONF.set_default("goal", None, group="command") cfg.CONF.set_default("exclude_orphans", True, group="command") cfg.CONF.set_default("dry_run", False, group="command") dbmanage.DBCommand.purge() self.assertEqual(0, m_purge_cls.call_count) self.assertEqual(0, m_purge.execute.call_count) self.assertEqual(0, m_purge.do_delete.call_count) self.assertEqual(1, m_exit.call_count) @mock.patch.object(sys, "exit") @mock.patch.object(purge, "PurgeCommand") def test_run_db_purge_dry_run(self, m_purge_cls, m_exit): m_purge = mock.Mock() m_purge_cls.return_value = m_purge m_purge_cls.get_goal_uuid.return_value = 'Some UUID' cfg.CONF.register_opt(cfg.IntOpt("age_in_days"), group="command") cfg.CONF.register_opt(cfg.IntOpt("max_number"), group="command") cfg.CONF.register_opt(cfg.StrOpt("goal"), group="command") cfg.CONF.register_opt(cfg.BoolOpt("exclude_orphans"), 
group="command") cfg.CONF.register_opt(cfg.BoolOpt("dry_run"), group="command") cfg.CONF.set_default("age_in_days", None, group="command") cfg.CONF.set_default("max_number", None, group="command") cfg.CONF.set_default("goal", None, group="command") cfg.CONF.set_default("exclude_orphans", True, group="command") cfg.CONF.set_default("dry_run", True, group="command") dbmanage.DBCommand.purge() m_purge_cls.assert_called_once_with( None, None, 'Some UUID', True, True) self.assertEqual(1, m_purge.execute.call_count) self.assertEqual(0, m_purge.do_delete.call_count) self.assertEqual(0, m_exit.call_count) python-watcher-4.0.0/watcher/tests/cmd/test_status.py0000664000175000017500000000306213656752270023025 0ustar zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_upgradecheck.upgradecheck import Code from watcher.cmd import status from watcher import conf from watcher.tests import base CONF = conf.CONF class TestUpgradeChecks(base.TestCase): def setUp(self): super(TestUpgradeChecks, self).setUp() self.cmd = status.Checks() def test_minimum_nova_api_version_ok(self): # Tests that the default [nova_client]/api_version meets the minimum # required version. result = self.cmd._minimum_nova_api_version() self.assertEqual(Code.SUCCESS, result.code) def test_minimum_nova_api_version_fail(self): # Tests the scenario that [nova_client]/api_version is less than the # minimum required version. 
CONF.set_override('api_version', '2.47', group='nova_client') result = self.cmd._minimum_nova_api_version() self.assertEqual(Code.FAILURE, result.code) self.assertIn('Invalid nova_client.api_version 2.47.', result.details) python-watcher-4.0.0/watcher/tests/base.py0000664000175000017500000001116613656752270020616 0ustar zuulzuul00000000000000# Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import os import mock from oslo_config import cfg from oslo_log import log from oslo_messaging import conffixture from oslotest import base import pecan from pecan import testing import testscenarios from watcher.common import context as watcher_context from watcher.common import service from watcher.objects import base as objects_base from watcher.tests import conf_fixture from watcher.tests import policy_fixture CONF = cfg.CONF try: log.register_options(CONF) except cfg.ArgsAlreadyParsedError: pass CONF.set_override('use_stderr', False) class BaseTestCase(testscenarios.WithScenarios, base.BaseTestCase): """Test base class.""" def setUp(self): super(BaseTestCase, self).setUp() self.addCleanup(cfg.CONF.reset) class TestCase(BaseTestCase): """Test case base class for all unit tests.""" def setUp(self): super(TestCase, self).setUp() self.useFixture(conf_fixture.ConfReloadFixture()) self.policy = self.useFixture(policy_fixture.PolicyFixture()) self.messaging_conf = 
self.useFixture(conffixture.ConfFixture(CONF)) self.messaging_conf.transport_url = 'fake:/' cfg.CONF.set_override("auth_type", "admin_token", group='keystone_authtoken') app_config_path = os.path.join(os.path.dirname(__file__), 'config.py') self.app = testing.load_test_app(app_config_path) self.token_info = { 'token': { 'project': { 'id': 'fake_project' }, 'user': { 'id': 'fake_user' } } } objects_base.WatcherObject.indirection_api = None self.context = watcher_context.RequestContext( auth_token_info=self.token_info, project_id='fake_project', user_id='fake_user') self.policy = self.useFixture(policy_fixture.PolicyFixture()) def make_context(*args, **kwargs): # If context hasn't been constructed with token_info if not kwargs.get('auth_token_info'): kwargs['auth_token_info'] = copy.deepcopy(self.token_info) if not kwargs.get('project_id'): kwargs['project_id'] = 'fake_project' if not kwargs.get('user_id'): kwargs['user_id'] = 'fake_user' context = watcher_context.RequestContext(*args, **kwargs) return watcher_context.RequestContext.from_dict(context.to_dict()) p = mock.patch.object(watcher_context, 'make_context', side_effect=make_context) self.mock_make_context = p.start() self.addCleanup(p.stop) self.useFixture(conf_fixture.ConfFixture(cfg.CONF)) self._reset_singletons() self._base_test_obj_backup = copy.copy( objects_base.WatcherObjectRegistry._registry._obj_classes) self.addCleanup(self._restore_obj_registry) self.addCleanup(self._reset_singletons) def _reset_singletons(self): service.Singleton._instances.clear() def reset_pecan(): pecan.set_config({}, overwrite=True) self.addCleanup(reset_pecan) def _restore_obj_registry(self): objects_base.WatcherObjectRegistry._registry._obj_classes = ( self._base_test_obj_backup) def config(self, **kw): """Override config options for a test.""" group = kw.pop('group', None) for k, v in kw.items(): CONF.set_override(k, v, group) def get_path(self, project_file=None): """Get the absolute path to a file. 
Used for testing the API. :param project_file: File whose path to return. Default: None. :returns: path to the specified file, or path to project root. """ root = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..')) if project_file: return os.path.join(root, project_file) else: return root python-watcher-4.0.0/watcher/tests/test_threading.py0000664000175000017500000001325313656752270022707 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import futurist import mock from watcher.decision_engine import threading from watcher.tests import base class TestDecisionEngineThreadPool(base.TestCase): def setUp(self): super(TestDecisionEngineThreadPool, self).setUp() self.m_function = mock.Mock() self.m_function.return_value = None self.m_do_while_function = mock.Mock() self.m_do_while_function.return_value = None # override the underlying threadpool for testing # this is like a 'fixture' were the original state of the singleton # is restored after these tests finish but the threadpool can still # be used as intended with its methods self.p_threadool = mock.patch.object( threading, 'DecisionEngineThreadPool', new=threading.DecisionEngineThreadPool) self.m_threadpool = self.p_threadool.start() self.addCleanup(self.p_threadool.stop) # bind unbound patched methods for python 2.7 compatibility # class methods can be used unbounded in Python 3.x self.m_threadpool.submit = self.m_threadpool.submit.__get__( self.m_threadpool, threading.DecisionEngineThreadPool) # perform all tests synchronously self.m_threadpool._threadpool = futurist.SynchronousExecutor() def test_singleton(self): """Ensure only one object of DecisionEngineThreadPool can be created""" threadpool1 = threading.DecisionEngineThreadPool() threadpool2 = threading.DecisionEngineThreadPool() self.assertEqual(threadpool1, threadpool2) def test_fixture_not_singleton(self): """Ensure the fixture does create a new instance of the singleton""" threadpool1 = threading.DecisionEngineThreadPool() threadpool2 = self.m_threadpool self.assertNotEqual(threadpool1, threadpool2) def test_do_while(self): """Test the regular operation of the threadpool and do_while_futures With the regular operation of do_while_futures the collection of futures will be shallow copied and left unmodified to the caller. 
""" # create a collection of futures from submitted m_function tasks futures = [self.m_threadpool.submit(self.m_function, 1, 2)] self.m_function.assert_called_once_with(1, 2) # execute m_do_while_function for every future that completes # and block until all futures are completed self.m_threadpool.do_while_futures( futures, self.m_do_while_function, 3, 4) # assert that m_do_while_function was called self.m_do_while_function.assert_called_once_with(futures[0], 3, 4) # assert that the collection of futures is unmodified self.assertEqual(1, len(futures)) def test_do_while_modify(self): """Test the operation of the threadpool and do_while_futures_modify The do_while_future_modify function has slightly better performance because it will not create a copy of the collection and will modify it directly. """ # create a collection of futures from submitted m_function tasks futures = [self.m_threadpool.submit(self.m_function, 1, 2)] self.m_function.assert_called_once_with(1, 2) # hold reference because element is going to be removed from the list future_ref = futures[0] # execute m_do_while_function for every future that completes # and block until all futures are completed self.m_threadpool.do_while_futures_modify( futures, self.m_do_while_function, 3, 4) # assert that m_do_while_function was called self.m_do_while_function.assert_called_once_with(future_ref, 3, 4) # assert that the collection of futures is modified self.assertEqual(0, len(futures)) def test_multiple_tasks(self): """Test that 10 tasks are all executed with the correct arguments""" # create a collection of 10 futures from submitted m_function tasks futures = [self.m_threadpool.submit( self.m_function, i, 2) for i in range(10)] # assert that there are 10 submitted tasks self.assertEqual(10, len(futures)) # execute m_do_while_function for every future that completes # and block until all futures are completed self.m_threadpool.do_while_futures( futures, self.m_do_while_function, 3, 4) # create list of 10 calls 
that should have occurred calls_submit = [] for i in range(10): calls_submit.append(mock.call(i, 2)) # test that the submit function has been called 10 times self.m_function.assert_has_calls( calls_submit, any_order=True) # create list of 10 calls that should have occurred calls_do_while = [] for i in range(10): calls_do_while.append(mock.call(futures[i], 3, 4)) # test that the passed do_while function has been called 10 times self.m_do_while_function.assert_has_calls( calls_do_while, any_order=True) python-watcher-4.0.0/watcher/tests/conf/0000775000175000017500000000000013656752352020253 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/conf/__init__.py0000664000175000017500000000000013656752270022351 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/tests/conf/test_list_opts.py0000775000175000017500000001554513656752270023720 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import mock from oslo_config import cfg from stevedore import extension from watcher.conf import opts from watcher.conf import plugins from watcher.tests import base from watcher.tests.decision_engine import fake_strategies class TestListOpts(base.TestCase): def setUp(self): super(TestListOpts, self).setUp() # These option groups will be registered using strings instead of # OptGroup objects this should be avoided if possible. 
self.none_objects = ['DEFAULT', 'watcher_clients_auth', 'watcher_strategies.strategy_1'] self.base_sections = [ 'DEFAULT', 'api', 'database', 'watcher_decision_engine', 'watcher_applier', 'watcher_datasources', 'watcher_planner', 'nova_client', 'glance_client', 'gnocchi_client', 'grafana_client', 'grafana_translators', 'cinder_client', 'ceilometer_client', 'monasca_client', 'ironic_client', 'keystone_client', 'neutron_client', 'watcher_clients_auth', 'collector', 'placement_client'] self.opt_sections = list(dict(opts.list_opts()).keys()) def _assert_name_or_group(self, actual_sections, expected_sections): for name_or_group, options in actual_sections: section_name = name_or_group if isinstance(name_or_group, cfg.OptGroup): section_name = name_or_group.name elif section_name in self.none_objects: pass else: # All option groups should be added to list_otps with an # OptGroup object for some exceptions this is not possible but # new groups should use OptGroup raise Exception( "Invalid option group: {0} should be of type OptGroup not " "string.".format(section_name)) self.assertIn(section_name, expected_sections) self.assertTrue(len(options)) def test_run_list_opts(self): expected_sections = self.opt_sections result = opts.list_opts() self.assertIsNotNone(result) for section_name, options in result: self.assertIn(section_name, expected_sections) self.assertTrue(len(options)) def test_list_opts_no_opts(self): expected_sections = self.base_sections # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=fake_strategies.FakeDummy1Strategy2.get_name(), entry_point="%s:%s" % ( fake_strategies.FakeDummy1Strategy2.__module__, fake_strategies.FakeDummy1Strategy2.__name__), plugin=fake_strategies.FakeDummy1Strategy2, obj=None, )], namespace="watcher_strategies", ) def m_list_available(namespace): if namespace == "watcher_strategies": return fake_extmanager_call else: return 
extension.ExtensionManager.make_test_instance( extensions=[], namespace=namespace) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: m_ext_manager.side_effect = m_list_available result = opts.list_opts() self._assert_name_or_group(result, expected_sections) self.assertIsNotNone(result) def test_list_opts_with_opts(self): expected_sections = self.base_sections + [ 'watcher_strategies.strategy_1'] # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=fake_strategies.FakeDummy1Strategy1.get_name(), entry_point="%s:%s" % ( fake_strategies.FakeDummy1Strategy1.__module__, fake_strategies.FakeDummy1Strategy1.__name__), plugin=fake_strategies.FakeDummy1Strategy1, obj=None, )], namespace="watcher_strategies", ) def m_list_available(namespace): if namespace == "watcher_strategies": return fake_extmanager_call else: return extension.ExtensionManager.make_test_instance( extensions=[], namespace=namespace) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: m_ext_manager.side_effect = m_list_available result = opts.list_opts() self.assertIsNotNone(result) self._assert_name_or_group(result, expected_sections) result_map = dict(result) strategy_opts = result_map['watcher_strategies.strategy_1'] self.assertEqual(['test_opt'], [opt.name for opt in strategy_opts]) class TestPlugins(base.TestCase): def test_show_plugins(self): # Set up the fake Stevedore extensions fake_extmanager_call = extension.ExtensionManager.make_test_instance( extensions=[extension.Extension( name=fake_strategies.FakeDummy1Strategy1.get_name(), entry_point="%s:%s" % ( fake_strategies.FakeDummy1Strategy1.__module__, fake_strategies.FakeDummy1Strategy1.__name__), plugin=fake_strategies.FakeDummy1Strategy1, obj=None, )], namespace="watcher_strategies", ) def m_list_available(namespace): if namespace == "watcher_strategies": return fake_extmanager_call else: return 
extension.ExtensionManager.make_test_instance( extensions=[], namespace=namespace) with mock.patch.object(extension, "ExtensionManager") as m_ext_manager: with mock.patch.object( plugins, "_show_plugins_ascii_table" ) as m_show: m_ext_manager.side_effect = m_list_available plugins.show_plugins() m_show.assert_called_once_with( [('watcher_strategies.strategy_1', 'strategy_1', 'watcher.tests.decision_engine.' 'fake_strategies.FakeDummy1Strategy1')]) python-watcher-4.0.0/watcher/api/0000775000175000017500000000000013656752352016735 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/api/__init__.py0000664000175000017500000000000013656752270021033 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/api/middleware/0000775000175000017500000000000013656752352021052 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/api/middleware/__init__.py0000664000175000017500000000000013656752270023150 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/api/middleware/auth_token.py0000664000175000017500000000407013656752270023565 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import re from oslo_log import log from keystonemiddleware import auth_token from watcher._i18n import _ from watcher.common import exception from watcher.common import utils LOG = log.getLogger(__name__) class AuthTokenMiddleware(auth_token.AuthProtocol): """A wrapper on Keystone auth_token middleware. 
Does not perform verification of authentication tokens for public routes in the API. """ def __init__(self, app, conf, public_api_routes=()): route_pattern_tpl = r'%s(\.json|\.xml)?$' try: self.public_api_routes = [re.compile(route_pattern_tpl % route_tpl) for route_tpl in public_api_routes] except re.error as e: LOG.exception(e) raise exception.ConfigInvalid( error_msg=_('Cannot compile public API routes')) super(AuthTokenMiddleware, self).__init__(app, conf) def __call__(self, env, start_response): path = utils.safe_rstrip(env.get('PATH_INFO'), '/') # The information whether the API call is being performed against the # public API is required for some other components. Saving it to the # WSGI environment is reasonable thereby. env['is_public_api'] = any(re.match(pattern, path) for pattern in self.public_api_routes) if env['is_public_api']: return self._app(env, start_response) return super(AuthTokenMiddleware, self).__call__(env, start_response) python-watcher-4.0.0/watcher/api/middleware/parsable_error.py0000664000175000017500000000746113656752270024435 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Middleware to replace the plain text message body of an error response with one formatted so the client can parse it. 
Based on pecan.middleware.errordocument """ from xml import etree as et from oslo_log import log from oslo_serialization import jsonutils import six import webob from watcher._i18n import _ LOG = log.getLogger(__name__) class ParsableErrorMiddleware(object): """Replace error body with something the client can parse.""" def __init__(self, app): self.app = app def __call__(self, environ, start_response): # Request for this state, modified by replace_start_response() # and used when an error is being reported. state = {} def replacement_start_response(status, headers, exc_info=None): """Overrides the default response to make errors parsable.""" try: status_code = int(status.split(' ')[0]) state['status_code'] = status_code except (ValueError, TypeError): # pragma: nocover raise Exception(_( 'ErrorDocumentMiddleware received an invalid ' 'status %s') % status) else: if (state['status_code'] // 100) not in (2, 3): # Remove some headers so we can replace them later # when we have the full error message and can # compute the length. headers = [(h, v) for (h, v) in headers if h not in ('Content-Length', 'Content-Type')] # Save the headers in case we need to modify them. 
state['headers'] = headers return start_response(status, headers, exc_info) app_iter = self.app(environ, replacement_start_response) if (state['status_code'] // 100) not in (2, 3): req = webob.Request(environ) if ( req.accept.best_match( ['application/json', 'application/xml']) == 'application/xml' ): try: # simple check xml is valid body = [ et.ElementTree.tostring( et.ElementTree.Element( 'error_message', text='\n'.join(app_iter)))] except et.ElementTree.ParseError as err: LOG.error('Error parsing HTTP response: %s', err) body = ['%s' '' % state['status_code']] state['headers'].append(('Content-Type', 'application/xml')) else: if six.PY3: app_iter = [i.decode('utf-8') for i in app_iter] body = [jsonutils.dumps( {'error_message': '\n'.join(app_iter)})] if six.PY3: body = [item.encode('utf-8') for item in body] state['headers'].append(('Content-Type', 'application/json')) state['headers'].append(('Content-Length', str(len(body[0])))) else: body = app_iter return body python-watcher-4.0.0/watcher/api/config.py0000664000175000017500000000321013656752270020547 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals from oslo_config import cfg from watcher.api import hooks # Server Specific Configurations # See https://pecan.readthedocs.org/en/latest/configuration.html#server-configuration # noqa server = { 'port': '9322', 'host': '127.0.0.1' } # Pecan Application Configurations # See https://pecan.readthedocs.org/en/latest/configuration.html#application-configuration # noqa acl_public_routes = ['/'] if not cfg.CONF.api.get("enable_webhooks_auth"): acl_public_routes.append('/v1/webhooks/.*') app = { 'root': 'watcher.api.controllers.root.RootController', 'modules': ['watcher.api'], 'hooks': [ hooks.ContextHook(), hooks.NoExceptionTracebackHook(), ], 'static_root': '%(confdir)s/public', 'enable_acl': True, 'acl_public_routes': acl_public_routes, } # WSME Configurations # See https://wsme.readthedocs.org/en/latest/integrate.html#configuration wsme = { 'debug': cfg.CONF.get("debug") if "debug" in cfg.CONF else False, } PECAN_CONFIG = { "server": server, "app": app, "wsme": wsme, } python-watcher-4.0.0/watcher/api/wsgi.py0000664000175000017500000000236513656752270020265 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""WSGI script for Watcher API, installed by pbr.""" import sys from oslo_config import cfg import oslo_i18n as i18n from oslo_log import log from watcher.api import app from watcher.common import service CONF = cfg.CONF LOG = log.getLogger(__name__) def initialize_wsgi_app(show_deprecated=False): i18n.install('watcher') service.prepare_service(sys.argv) LOG.debug("Configuration:") CONF.log_opt_values(LOG, log.DEBUG) if show_deprecated: LOG.warning("Using watcher/api/app.wsgi is deprecated and it will " "be removed in U release. Please use automatically " "generated watcher-api-wsgi instead.") return app.VersionSelectorApplication() python-watcher-4.0.0/watcher/api/app.wsgi0000664000175000017500000000161213656752270020407 0ustar zuulzuul00000000000000# -*- mode: python -*- # -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Use this file for deploying the API service under Apache2 mod_wsgi. """ # This script is deprecated and it will be removed in U release. # Please switch to automatically generated watcher-api-wsgi script instead. from watcher.api import wsgi application = wsgi.initialize_wsgi_app(show_deprecated=True) python-watcher-4.0.0/watcher/api/app.py0000664000175000017500000000317413656752270020073 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright © 2012 New Dream Network, LLC (DreamHost) # All Rights Reserved. 
# Copyright (c) 2016 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import pecan from watcher.api import acl from watcher.api import config as api_config from watcher.api.middleware import parsable_error from watcher import conf CONF = conf.CONF def get_pecan_config(): # Set up the pecan configuration return pecan.configuration.conf_from_dict(api_config.PECAN_CONFIG) def setup_app(config=None): if not config: config = get_pecan_config() app_conf = dict(config.app) app = pecan.make_app( app_conf.pop('root'), logging=getattr(config, 'logging', {}), debug=CONF.debug, wrap_app=parsable_error.ParsableErrorMiddleware, **app_conf ) return acl.install(app, CONF, config.app.acl_public_routes) class VersionSelectorApplication(object): def __init__(self): pc = get_pecan_config() self.v1 = setup_app(config=pc) def __call__(self, environ, start_response): return self.v1(environ, start_response) python-watcher-4.0.0/watcher/api/scheduling.py0000664000175000017500000001227313656752270021440 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import itertools from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import six from watcher.common import context as watcher_context from watcher.common import scheduling from watcher import notifications from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) class APISchedulingService(scheduling.BackgroundSchedulerService): def __init__(self, gconfig={}, **options): self.services_status = {} super(APISchedulingService, self).__init__(gconfig, **options) def get_services_status(self, context): services = objects.service.Service.list(context) active_s = objects.service.ServiceStatus.ACTIVE failed_s = objects.service.ServiceStatus.FAILED for service in services: result = self.get_service_status(context, service.id) if service.id not in self.services_status: self.services_status[service.id] = result continue if self.services_status[service.id] != result: self.services_status[service.id] = result notifications.service.send_service_update(context, service, state=result) if (result == failed_s) and ( service.name == 'watcher-decision-engine'): audit_filters = { 'audit_type': objects.audit.AuditType.CONTINUOUS.value, 'state': objects.audit.State.ONGOING, 'hostname': service.host } ongoing_audits = objects.Audit.list( context, filters=audit_filters, eager=True) alive_services = [ s.host for s in services if (self.services_status[s.id] == active_s and s.name == 'watcher-decision-engine')] round_robin = itertools.cycle(alive_services) for audit in ongoing_audits: audit.hostname = round_robin.__next__() 
audit.save() LOG.info('Audit %(audit)s has been migrated to ' '%(host)s since %(failed_host)s is in' ' %(state)s', {'audit': audit.uuid, 'host': audit.hostname, 'failed_host': service.host, 'state': failed_s}) def get_service_status(self, context, service_id): service = objects.Service.get(context, service_id) last_heartbeat = (service.last_seen_up or service.updated_at or service.created_at) if isinstance(last_heartbeat, six.string_types): # NOTE(russellb) If this service came in over rpc via # conductor, then the timestamp will be a string and needs to be # converted back to a datetime. last_heartbeat = timeutils.parse_strtime(last_heartbeat) else: # Objects have proper UTC timezones, but the timeutils comparison # below does not (and will fail) last_heartbeat = last_heartbeat.replace(tzinfo=None) elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) is_up = abs(elapsed) <= CONF.service_down_time if not is_up: LOG.warning('Seems service %(name)s on host %(host)s is down. ' 'Last heartbeat was %(lhb)s. Elapsed time is %(el)s', {'name': service.name, 'host': service.host, 'lhb': str(last_heartbeat), 'el': str(elapsed)}) return objects.service.ServiceStatus.FAILED return objects.service.ServiceStatus.ACTIVE def start(self): """Start service.""" context = watcher_context.make_context(is_admin=True) self.add_job(self.get_services_status, name='service_status', trigger='interval', jobstore='default', args=[context], next_run_time=datetime.datetime.now(), seconds=CONF.periodic_interval) super(APISchedulingService, self).start() def stop(self): """Stop service.""" self.shutdown() def wait(self): """Wait for service to complete.""" def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. 
""" python-watcher-4.0.0/watcher/api/controllers/0000775000175000017500000000000013656752352021303 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/api/controllers/root.py0000664000175000017500000000710213656752270022637 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import importlib import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers import v1 class APIStatus(object): CURRENT = "CURRENT" SUPPORTED = "SUPPORTED" DEPRECATED = "DEPRECATED" EXPERIMENTAL = "EXPERIMENTAL" class Version(base.APIBase): """An API version representation.""" id = wtypes.text """The ID of the version, also acts as the release number""" status = wtypes.text """The state of this API version""" max_version = wtypes.text """The maximum version supported""" min_version = wtypes.text """The minimum version supported""" links = [link.Link] """A Link that point to a specific version of the API""" @staticmethod def convert(id, status=APIStatus.CURRENT): v = importlib.import_module('watcher.api.controllers.%s.versions' % id) version = Version() version.id = id version.status = status version.max_version = v.max_version_string() version.min_version = v.min_version_string() version.links = [link.Link.make_link('self', 
pecan.request.application_url, id, '', bookmark=True)] return version class Root(base.APIBase): name = wtypes.text """The name of the API""" description = wtypes.text """Some information about this API""" versions = [Version] """Links to all the versions available in this API""" default_version = Version """A link to the default version of the API""" @staticmethod def convert(): root = Root() root.name = "OpenStack Watcher API" root.description = ("Watcher is an OpenStack project which aims to " "improve physical resources usage through " "better VM placement.") root.versions = [Version.convert('v1')] root.default_version = Version.convert('v1') return root class RootController(rest.RestController): _versions = ['v1'] """All supported API versions""" _default_version = 'v1' """The default API version""" v1 = v1.Controller() @wsme_pecan.wsexpose(Root) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the links. return Root.convert() @pecan.expose() def _route(self, args): """Overrides the default routing behavior. It redirects the request to the default version of the watcher API if the version number is not specified in the url. """ if args[0] and args[0] not in self._versions: args = [self._default_version] + args return super(RootController, self)._route(args) python-watcher-4.0.0/watcher/api/controllers/__init__.py0000664000175000017500000000000013656752270023401 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/api/controllers/link.py0000664000175000017500000000376313656752270022622 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import pecan from wsme import types as wtypes from watcher.api.controllers import base def build_url(resource, resource_args, bookmark=False, base_url=None): if base_url is None: base_url = pecan.request.application_url template = '%(url)s/%(res)s' if bookmark else '%(url)s/v1/%(res)s' # FIXME(lucasagomes): I'm getting a 404 when doing a GET on # a nested resource that the URL ends with a '/'. # https://groups.google.com/forum/#!topic/pecan-dev/QfSeviLg5qs template += '%(args)s' if resource_args.startswith('?') else '/%(args)s' return template % {'url': base_url, 'res': resource, 'args': resource_args} class Link(base.APIBase): """A link representation.""" href = wtypes.text """The url of a link.""" rel = wtypes.text """The name of a link.""" type = wtypes.text """Indicates the type of document/link.""" @staticmethod def make_link(rel_name, url, resource, resource_args, bookmark=False, type=wtypes.Unset): href = build_url(resource, resource_args, bookmark=bookmark, base_url=url) return Link(href=href, rel=rel_name, type=type) @classmethod def sample(cls): sample = cls(href="http://localhost:6385/chassis/" "eaaca217-e7d8-47b4-bb41-3f99f20eed89", rel="bookmark") return sample python-watcher-4.0.0/watcher/api/controllers/v1/0000775000175000017500000000000013656752352021631 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/api/controllers/v1/utils.py0000664000175000017500000001374013656752270023347 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from operator import attrgetter import jsonpatch from oslo_config import cfg from oslo_utils import reflection from oslo_utils import uuidutils import pecan import wsme from watcher._i18n import _ from watcher.api.controllers.v1 import versions from watcher.common import utils from watcher import objects CONF = cfg.CONF JSONPATCH_EXCEPTIONS = (jsonpatch.JsonPatchException, jsonpatch.JsonPointerException, KeyError) def validate_limit(limit): if limit is None: return CONF.api.max_limit if limit <= 0: # Case where we don't a valid limit value raise wsme.exc.ClientSideError(_("Limit must be positive")) if limit and not CONF.api.max_limit: # Case where we don't have an upper limit return limit return min(CONF.api.max_limit, limit) def validate_sort_dir(sort_dir): if sort_dir not in ['asc', 'desc']: raise wsme.exc.ClientSideError(_("Invalid sort direction: %s. " "Acceptable values are " "'asc' or 'desc'") % sort_dir) def validate_sort_key(sort_key, allowed_fields): # Very lightweight validation for now if sort_key not in allowed_fields: raise wsme.exc.ClientSideError( _("Invalid sort key: %s") % sort_key) def validate_search_filters(filters, allowed_fields): # Very lightweight validation for now # todo: improve this (e.g. 
https://www.parse.com/docs/rest/guide/#queries) for filter_name in filters: if filter_name not in allowed_fields: raise wsme.exc.ClientSideError( _("Invalid filter: %s") % filter_name) def check_need_api_sort(sort_key, additional_fields): return sort_key in additional_fields def make_api_sort(sorting_list, sort_key, sort_dir): # First sort by uuid field, than sort by sort_key # sort() ensures stable sorting, so we could # make lexicographical sort reverse_direction = (sort_dir == 'desc') sorting_list.sort(key=attrgetter('uuid'), reverse=reverse_direction) sorting_list.sort(key=attrgetter(sort_key), reverse=reverse_direction) def apply_jsonpatch(doc, patch): for p in patch: if p['op'] == 'add' and p['path'].count('/') == 1: if p['path'].lstrip('/') not in doc: msg = _('Adding a new attribute (%s) to the root of ' ' the resource is not allowed') raise wsme.exc.ClientSideError(msg % p['path']) return jsonpatch.apply_patch(doc, jsonpatch.JsonPatch(patch)) def get_patch_value(patch, key): for p in patch: if p['op'] == 'replace' and p['path'] == '/%s' % key: return p['value'] def set_patch_value(patch, key, value): for p in patch: if p['op'] == 'replace' and p['path'] == '/%s' % key: p['value'] = value def get_patch_key(patch, key): for p in patch: if p['op'] == 'replace' and key in p.keys(): return p[key][1:] def check_audit_state_transition(patch, initial): is_transition_valid = True state_value = get_patch_value(patch, "state") if state_value is not None: is_transition_valid = objects.audit.AuditStateTransitionManager( ).check_transition(initial, state_value) return is_transition_valid def as_filters_dict(**filters): filters_dict = {} for filter_name, filter_value in filters.items(): if filter_value: filters_dict[filter_name] = filter_value return filters_dict def get_resource(resource, resource_id, eager=False): """Get the resource from the uuid, id or logical name. :param resource: the resource type. :param resource_id: the UUID, ID or logical name of the resource. 
:returns: The resource. """ resource = getattr(objects, resource) _get = None if utils.is_int_like(resource_id): resource_id = int(resource_id) _get = resource.get elif uuidutils.is_uuid_like(resource_id): _get = resource.get_by_uuid else: _get = resource.get_by_name method_signature = reflection.get_signature(_get) if 'eager' in method_signature.parameters: return _get(pecan.request.context, resource_id, eager=eager) return _get(pecan.request.context, resource_id) def allow_start_end_audit_time(): """Check if we should support optional start/end attributes for Audit. Version 1.1 of the API added support for start and end time of continuous audits. """ return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_1_START_END_TIMING.value) def allow_force(): """Check if we should support optional force attribute for Audit. Version 1.2 of the API added support for forced audits that allows to launch audit when other action plan is ongoing. """ return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_2_FORCE.value) def allow_list_datamodel(): """Check if we should support list data model API. Version 1.3 of the API added support to list data model. """ return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_3_DATAMODEL.value) def allow_webhook_api(): """Check if we should support webhook API. Version 1.4 of the API added support to trigger webhook. """ return pecan.request.version.minor >= ( versions.VERSIONS.MINOR_4_WEBHOOK_API.value) python-watcher-4.0.0/watcher/api/controllers/v1/__init__.py0000664000175000017500000002525613656752270023753 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Version 1 of the Watcher API NOTE: IN PROGRESS AND NOT FULLY IMPLEMENTED. """ import datetime import pecan from pecan import rest from webob import exc import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import action from watcher.api.controllers.v1 import action_plan from watcher.api.controllers.v1 import audit from watcher.api.controllers.v1 import audit_template from watcher.api.controllers.v1 import data_model from watcher.api.controllers.v1 import goal from watcher.api.controllers.v1 import scoring_engine from watcher.api.controllers.v1 import service from watcher.api.controllers.v1 import strategy from watcher.api.controllers.v1 import utils from watcher.api.controllers.v1 import versions from watcher.api.controllers.v1 import webhooks def min_version(): return base.Version( {base.Version.string: ' '.join([versions.service_type_string(), versions.min_version_string()])}, versions.min_version_string(), versions.max_version_string()) def max_version(): return base.Version( {base.Version.string: ' '.join([versions.service_type_string(), versions.max_version_string()])}, versions.min_version_string(), versions.max_version_string()) class APIBase(wtypes.Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" deleted_at = 
wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is deleted""" def as_dict(self): """Render this object as a dict of its fields.""" return dict((k, getattr(self, k)) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset) def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. """ if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) class MediaType(APIBase): """A media type representation.""" base = wtypes.text type = wtypes.text def __init__(self, base, type): self.base = base self.type = type class V1(APIBase): """The representation of the version 1 of the API.""" id = wtypes.text """The ID of the version, also acts as the release number""" media_types = [MediaType] """An array of supcontainersed media types for this version""" audit_templates = [link.Link] """Links to the audit templates resource""" audits = [link.Link] """Links to the audits resource""" data_model = [link.Link] """Links to the data model resource""" actions = [link.Link] """Links to the actions resource""" action_plans = [link.Link] """Links to the action plans resource""" scoring_engines = [link.Link] """Links to the Scoring Engines resource""" services = [link.Link] """Links to the services resource""" webhooks = [link.Link] """Links to the webhooks resource""" links = [link.Link] """Links that point to a specific URL for this version and documentation""" @staticmethod def convert(): v1 = V1() v1.id = "v1" base_url = pecan.request.application_url v1.links = [link.Link.make_link('self', base_url, 'v1', '', bookmark=True), link.Link.make_link('describedby', 'http://docs.openstack.org', 'developer/watcher/dev', 'api-spec-v1.html', bookmark=True, type='text/html') ] v1.media_types = [MediaType('application/json', 'application/vnd.openstack.watcher.v1+json')] 
v1.audit_templates = [link.Link.make_link('self', base_url, 'audit_templates', ''), link.Link.make_link('bookmark', base_url, 'audit_templates', '', bookmark=True) ] v1.audits = [link.Link.make_link('self', base_url, 'audits', ''), link.Link.make_link('bookmark', base_url, 'audits', '', bookmark=True) ] if utils.allow_list_datamodel(): v1.data_model = [link.Link.make_link('self', base_url, 'data_model', ''), link.Link.make_link('bookmark', base_url, 'data_model', '', bookmark=True) ] v1.actions = [link.Link.make_link('self', base_url, 'actions', ''), link.Link.make_link('bookmark', base_url, 'actions', '', bookmark=True) ] v1.action_plans = [link.Link.make_link( 'self', base_url, 'action_plans', ''), link.Link.make_link('bookmark', base_url, 'action_plans', '', bookmark=True) ] v1.scoring_engines = [link.Link.make_link( 'self', base_url, 'scoring_engines', ''), link.Link.make_link('bookmark', base_url, 'scoring_engines', '', bookmark=True) ] v1.services = [link.Link.make_link( 'self', base_url, 'services', ''), link.Link.make_link('bookmark', base_url, 'services', '', bookmark=True) ] if utils.allow_webhook_api(): v1.webhooks = [link.Link.make_link( 'self', base_url, 'webhooks', ''), link.Link.make_link('bookmark', base_url, 'webhooks', '', bookmark=True) ] return v1 class Controller(rest.RestController): """Version 1 API controller root.""" audits = audit.AuditsController() audit_templates = audit_template.AuditTemplatesController() actions = action.ActionsController() action_plans = action_plan.ActionPlansController() goals = goal.GoalsController() scoring_engines = scoring_engine.ScoringEngineController() services = service.ServicesController() strategies = strategy.StrategiesController() data_model = data_model.DataModelController() webhooks = webhooks.WebhookController() @wsme_pecan.wsexpose(V1) def get(self): # NOTE: The reason why convert() it's being called for every # request is because we need to get the host url from # the request object to make the 
links. return V1.convert() def _check_version(self, version, headers=None): if headers is None: headers = {} # ensure that major version in the URL matches the header if version.major != versions.BASE_VERSION: raise exc.HTTPNotAcceptable( "Mutually exclusive versions requested. Version %(ver)s " "requested but not supported by this service. The supported " "version range is: [%(min)s, %(max)s]." % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) # ensure the minor version is within the supported range if version < min_version() or version > max_version(): raise exc.HTTPNotAcceptable( "Version %(ver)s was requested but the minor version is not " "supported by this service. The supported version range is: " "[%(min)s, %(max)s]." % {'ver': version, 'min': versions.min_version_string(), 'max': versions.max_version_string()}, headers=headers) @pecan.expose() def _route(self, args, request=None): v = base.Version(pecan.request.headers, versions.min_version_string(), versions.max_version_string()) # The Vary header is used as a hint to caching proxies and user agents # that the response is also dependent on the OpenStack-API-Version and # not just the body and query parameters. See RFC 7231 for details. 
pecan.response.headers['Vary'] = base.Version.string # Always set the min and max headers pecan.response.headers[base.Version.min_string] = ( versions.min_version_string()) pecan.response.headers[base.Version.max_string] = ( versions.max_version_string()) # assert that requested version is supported self._check_version(v, pecan.response.headers) pecan.response.headers[base.Version.string] = ( ' '.join([versions.service_type_string(), str(v)])) pecan.request.version = v return super(Controller, self)._route(args, request) __all__ = ("Controller", ) python-watcher-4.0.0/watcher/api/controllers/v1/collection.py0000664000175000017500000000333413656752270024340 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import pecan from wsme import types as wtypes from watcher.api.controllers import base from watcher.api.controllers import link class Collection(base.APIBase): next = wtypes.text """A link to retrieve the next subset of the collection""" @property def collection(self): return getattr(self, self._type) def has_next(self, limit): """Return whether collection has more items.""" return len(self.collection) and len(self.collection) == limit def get_next(self, limit, url=None, marker_field="uuid", **kwargs): """Return a link to the next subset of the collection.""" if not self.has_next(limit): return wtypes.Unset resource_url = url or self._type q_args = ''.join(['%s=%s&' % (key, kwargs[key]) for key in kwargs]) next_args = '?%(args)slimit=%(limit)d&marker=%(marker)s' % { 'args': q_args, 'limit': limit, 'marker': getattr(self.collection[-1], marker_field)} return link.Link.make_link('next', pecan.request.host_url, resource_url, next_args).href python-watcher-4.0.0/watcher/api/controllers/v1/efficacy_indicator.py0000664000175000017500000000523713656752270026016 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An efficacy indicator is a single value that gives an indication on how the :ref:`solution ` produced by a given :ref:`strategy ` performed. 
These efficacy indicators are specific to a given :ref:`goal ` and are usually used to compute the :ref:`global efficacy ` of the resulting :ref:`action plan `. In Watcher, these efficacy indicators are specified alongside the goal they relate to. When a strategy (which always relates to a goal) is executed, it produces a solution containing the efficacy indicators specified by the goal. This solution, which has been translated by the :ref:`Watcher Planner ` into an action plan, will see its indicators and global efficacy stored and would now be accessible through the :ref:`Watcher API `. """ import numbers from wsme import types as wtypes from watcher.api.controllers import base from watcher import objects class EfficacyIndicator(base.APIBase): """API representation of a efficacy indicator. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an efficacy indicator. """ name = wtypes.wsattr(wtypes.text, mandatory=True) """Name of this efficacy indicator""" description = wtypes.wsattr(wtypes.text, mandatory=False) """Description of this efficacy indicator""" unit = wtypes.wsattr(wtypes.text, mandatory=False) """Unit of this efficacy indicator""" value = wtypes.wsattr(numbers.Number, mandatory=True) """Value of this efficacy indicator""" def __init__(self, **kwargs): super(EfficacyIndicator, self).__init__() self.fields = [] fields = list(objects.EfficacyIndicator.fields) for field in fields: # Skip fields we do not expose. if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) python-watcher-4.0.0/watcher/api/controllers/v1/action_plan.py0000664000175000017500000005636413656752270024507 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action Plan ` specifies a flow of :ref:`Actions ` that should be executed in order to satisfy a given :ref:`Goal `. It also contains an estimated :ref:`global efficacy ` alongside a set of :ref:`efficacy indicators `. An :ref:`Action Plan ` is generated by Watcher when an :ref:`Audit ` is successful which implies that the :ref:`Strategy ` which was used has found a :ref:`Solution ` to achieve the :ref:`Goal ` of this :ref:`Audit `. In the default implementation of Watcher, an action plan is composed of a list of successive :ref:`Actions ` (i.e., a Workflow of :ref:`Actions ` belonging to a unique branch). However, Watcher provides abstract interfaces for many of its components, allowing other implementations to generate and handle more complex :ref:`Action Plan(s) ` composed of two types of Action Item(s): - simple :ref:`Actions `: atomic tasks, which means it can not be split into smaller tasks or commands from an OpenStack point of view. - composite Actions: which are composed of several simple :ref:`Actions ` ordered in sequential and/or parallel flows. An :ref:`Action Plan ` may be described using standard workflow model description formats such as `Business Process Model and Notation 2.0 (BPMN 2.0) `_ or `Unified Modeling Language (UML) `_. To see the life-cycle and description of :ref:`Action Plan ` states, visit :ref:`the Action Plan state machine `. 
""" import datetime from oslo_log import log import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import efficacy_indicator as efficacyindicator from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.applier import rpcapi from watcher.common import exception from watcher.common import policy from watcher.common import utils from watcher import objects from watcher.objects import action_plan as ap_objects LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class ActionPlanPatchType(types.JsonPatchType): @staticmethod def _validate_state(patch): serialized_patch = {'path': patch.path, 'op': patch.op} if patch.value is not wtypes.Unset: serialized_patch['value'] = patch.value # todo: use state machines to handle state transitions state_value = patch.value if state_value and not hasattr(ap_objects.State, state_value): msg = _("Invalid state: %(state)s") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(state=state_value)) @staticmethod def validate(patch): if patch.path == "/state": ActionPlanPatchType._validate_state(patch) return types.JsonPatchType.validate(patch) @staticmethod def internal_attrs(): return types.JsonPatchType.internal_attrs() @staticmethod def mandatory_attrs(): return ["audit_id", "state"] class ActionPlan(base.APIBase): """API representation of a action plan. 
This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an action plan. """ _audit_uuid = None _strategy_uuid = None _strategy_name = None _efficacy_indicators = None def _get_audit_uuid(self): return self._audit_uuid def _set_audit_uuid(self, value): if value == wtypes.Unset: self._audit_uuid = wtypes.Unset elif value and self._audit_uuid != value: try: audit = objects.Audit.get(pecan.request.context, value) self._audit_uuid = audit.uuid self.audit_id = audit.id except exception.AuditNotFound: self._audit_uuid = None def _get_efficacy_indicators(self): if self._efficacy_indicators is None: self._set_efficacy_indicators(wtypes.Unset) return self._efficacy_indicators def _set_efficacy_indicators(self, value): efficacy_indicators = [] if value == wtypes.Unset and not self._efficacy_indicators: try: _efficacy_indicators = objects.EfficacyIndicator.list( pecan.request.context, filters={"action_plan_uuid": self.uuid}) for indicator in _efficacy_indicators: efficacy_indicator = efficacyindicator.EfficacyIndicator( context=pecan.request.context, name=indicator.name, description=indicator.description, unit=indicator.unit, value=float(indicator.value), ) efficacy_indicators.append(efficacy_indicator.as_dict()) self._efficacy_indicators = efficacy_indicators except exception.EfficacyIndicatorNotFound as exc: LOG.exception(exc) elif value and self._efficacy_indicators != value: self._efficacy_indicators = value def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and 
self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) """Unique UUID for this action plan""" audit_uuid = wtypes.wsproperty(types.uuid, _get_audit_uuid, _set_audit_uuid, mandatory=True) """The UUID of the audit this port belongs to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the action plan refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this action plan refers to""" efficacy_indicators = wtypes.wsproperty( types.jsontype, _get_efficacy_indicators, _set_efficacy_indicators, mandatory=True) """The list of efficacy indicators associated to this action plan""" global_efficacy = wtypes.wsattr(types.jsontype, readonly=True) """The global efficacy of this action plan""" state = wtypes.text """This action plan state""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated action links""" hostname = wtypes.wsattr(wtypes.text, mandatory=False) """Hostname the actionplan is running on""" def __init__(self, **kwargs): super(ActionPlan, self).__init__() self.fields = [] fields = list(objects.ActionPlan.fields) for field in fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('audit_uuid') self.fields.append('efficacy_indicators') setattr(self, 'audit_uuid', kwargs.get('audit_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(action_plan, url, expand=True): if not expand: action_plan.unset_fields_except( ['uuid', 'state', 'efficacy_indicators', 'global_efficacy', 'updated_at', 'audit_uuid', 'strategy_uuid', 'strategy_name']) action_plan.links = [ link.Link.make_link( 'self', url, 'action_plans', action_plan.uuid), link.Link.make_link( 'bookmark', url, 'action_plans', action_plan.uuid, bookmark=True)] return action_plan @classmethod def convert_with_links(cls, rpc_action_plan, expand=True): action_plan = ActionPlan(**rpc_action_plan.as_dict()) hide_fields_in_newer_versions(action_plan) return cls._convert_with_links(action_plan, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='9ef4d84c-41e8-4418-9220-ce55be0436af', state='ONGOING', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow()) sample._audit_uuid = 'abcee106-14d3-4515-b744-5a26885cf6f6' sample._efficacy_indicators = [{'description': 'Test indicator', 'name': 'test_indicator', 'unit': '%'}] sample._global_efficacy = {'description': 'Global efficacy', 'name': 'test_global_efficacy', 'unit': '%'} return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionPlanCollection(collection.Collection): """API representation of a collection of action_plans.""" action_plans = [ActionPlan] """A list containing action_plans objects""" def __init__(self, **kwargs): self._type = 'action_plans' @staticmethod def convert_with_links(rpc_action_plans, 
limit, url=None, expand=False, **kwargs): ap_collection = ActionPlanCollection() ap_collection.action_plans = [ActionPlan.convert_with_links( p, expand) for p in rpc_action_plans] ap_collection.next = ap_collection.get_next(limit, url=url, **kwargs) return ap_collection @classmethod def sample(cls): sample = cls() sample.action_plans = [ActionPlan.sample(expand=False)] return sample class ActionPlansController(rest.RestController): """REST controller for Actions.""" def __init__(self): super(ActionPlansController, self).__init__() self.applier_client = rpcapi.ApplierAPI() from_actionsPlans = False """A flag to indicate if the requests to this controller are coming from the top-level resource ActionPlan.""" _custom_actions = { 'start': ['POST'], 'detail': ['GET'] } def _get_action_plans_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, audit_uuid=None, strategy=None): additional_fields = ['audit_uuid', 'strategy_uuid', 'strategy_name'] api_utils.validate_sort_key( sort_key, list(objects.ActionPlan.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ActionPlan.get_by_uuid( pecan.request.context, marker) filters = {} if audit_uuid: filters['audit_uuid'] = audit_uuid if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) action_plans = objects.ActionPlan.list( pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) action_plans_collection = ActionPlanCollection.convert_with_links( action_plans, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(action_plans_collection.action_plans, sort_key, sort_dir) return 
action_plans_collection @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): """Retrieve a list of action plans. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param audit_uuid: Optional UUID of an audit, to get only actions for that audit. :param strategy: strategy UUID or name to filter by """ context = pecan.request.context policy.enforce(context, 'action_plan:get_all', action='action_plan:get_all') return self._get_action_plans_collection( marker, limit, sort_key, sort_dir, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlanCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc', audit_uuid=None, strategy=None): """Retrieve a list of action_plans with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param audit_uuid: Optional UUID of an audit, to get only actions for that audit. 
:param strategy: strategy UUID or name to filter by """ context = pecan.request.context policy.enforce(context, 'action_plan:detail', action='action_plan:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "action_plans": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['action_plans', 'detail']) return self._get_action_plans_collection( marker, limit, sort_key, sort_dir, expand, resource_url, audit_uuid=audit_uuid, strategy=strategy) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def get_one(self, action_plan_uuid): """Retrieve information about the given action plan. :param action_plan_uuid: UUID of a action plan. """ if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan = api_utils.get_resource('ActionPlan', action_plan_uuid) policy.enforce( context, 'action_plan:get', action_plan, action='action_plan:get') return ActionPlan.convert_with_links(action_plan) @wsme_pecan.wsexpose(None, types.uuid, status_code=204) def delete(self, action_plan_uuid): """Delete an action plan. :param action_plan_uuid: UUID of a action. """ context = pecan.request.context action_plan = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:delete', action_plan, action='action_plan:delete') allowed_states = (ap_objects.State.SUCCEEDED, ap_objects.State.RECOMMENDED, ap_objects.State.FAILED, ap_objects.State.SUPERSEDED, ap_objects.State.CANCELLED) if action_plan.state not in allowed_states: raise exception.DeleteError( state=action_plan.state) action_plan.soft_delete() @wsme.validate(types.uuid, [ActionPlanPatchType]) @wsme_pecan.wsexpose(ActionPlan, types.uuid, body=[ActionPlanPatchType]) def patch(self, action_plan_uuid, patch): """Update an existing action plan. :param action_plan_uuid: UUID of a action plan. :param patch: a json PATCH document to apply to this action plan. 
""" if self.from_actionsPlans: raise exception.OperationNotPermitted context = pecan.request.context action_plan_to_update = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) policy.enforce(context, 'action_plan:update', action_plan_to_update, action='action_plan:update') try: action_plan_dict = action_plan_to_update.as_dict() action_plan = ActionPlan(**api_utils.apply_jsonpatch( action_plan_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) launch_action_plan = False cancel_action_plan = False # transitions that are allowed via PATCH allowed_patch_transitions = [ (ap_objects.State.RECOMMENDED, ap_objects.State.PENDING), (ap_objects.State.RECOMMENDED, ap_objects.State.CANCELLED), (ap_objects.State.ONGOING, ap_objects.State.CANCELLING), (ap_objects.State.PENDING, ap_objects.State.CANCELLED), ] # todo: improve this in blueprint watcher-api-validation if hasattr(action_plan, 'state'): transition = (action_plan_to_update.state, action_plan.state) if transition not in allowed_patch_transitions: error_message = _("State transition not allowed: " "(%(initial_state)s -> %(new_state)s)") raise exception.PatchError( patch=patch, reason=error_message % dict( initial_state=action_plan_to_update.state, new_state=action_plan.state)) if action_plan.state == ap_objects.State.PENDING: launch_action_plan = True if action_plan.state == ap_objects.State.CANCELLED: cancel_action_plan = True # Update only the fields that have changed for field in objects.ActionPlan.fields: try: patch_val = getattr(action_plan, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if action_plan_to_update[field] != patch_val: action_plan_to_update[field] = patch_val if (field == 'state' and patch_val == objects.action_plan.State.PENDING): launch_action_plan = True action_plan_to_update.save() # NOTE: if action plan is cancelled from pending or 
recommended # state update action state here only if cancel_action_plan: filters = {'action_plan_uuid': action_plan.uuid} actions = objects.Action.list(pecan.request.context, filters=filters, eager=True) for a in actions: a.state = objects.action.State.CANCELLED a.save() if launch_action_plan: self.applier_client.launch_action_plan(pecan.request.context, action_plan.uuid) action_plan_to_update = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_update) @wsme_pecan.wsexpose(ActionPlan, types.uuid) def start(self, action_plan_uuid, **kwargs): """Start an action_plan :param action_plan_uuid: UUID of an action_plan. """ action_plan_to_start = api_utils.get_resource( 'ActionPlan', action_plan_uuid, eager=True) context = pecan.request.context policy.enforce(context, 'action_plan:start', action_plan_to_start, action='action_plan:start') if action_plan_to_start['state'] != \ objects.action_plan.State.RECOMMENDED: raise exception.StartError( state=action_plan_to_start.state) action_plan_to_start['state'] = objects.action_plan.State.PENDING action_plan_to_start.save() self.applier_client.launch_action_plan(pecan.request.context, action_plan_uuid) action_plan_to_start = objects.ActionPlan.get_by_uuid( pecan.request.context, action_plan_uuid) return ActionPlan.convert_with_links(action_plan_to_start) python-watcher-4.0.0/watcher/api/controllers/v1/scoring_engine.py0000664000175000017500000002205613656752270025200 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2016 Intel # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Scoring Engine ` is an executable that has a well-defined input, a well-defined output, and performs a purely mathematical task. That is, the calculation does not depend on the environment in which it is running - it would produce the same result anywhere. Because there might be multiple algorithms used to build a particular data model (and therefore a scoring engine), the usage of scoring engine might vary. A metainfo field is supposed to contain any information which might be needed by the user of a given scoring engine. """ import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class ScoringEngine(base.APIBase): """API representation of a scoring engine. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a scoring engine. 
""" uuid = types.uuid """Unique UUID of the scoring engine""" name = wtypes.text """The name of the scoring engine""" description = wtypes.text """A human readable description of the Scoring Engine""" metainfo = wtypes.text """A metadata associated with the scoring engine""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated action links""" def __init__(self, **kwargs): super(ScoringEngine, self).__init__() self.fields = [] self.fields.append('uuid') self.fields.append('name') self.fields.append('description') self.fields.append('metainfo') setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) setattr(self, 'name', kwargs.get('name', wtypes.Unset)) setattr(self, 'description', kwargs.get('description', wtypes.Unset)) setattr(self, 'metainfo', kwargs.get('metainfo', wtypes.Unset)) @staticmethod def _convert_with_links(se, url, expand=True): if not expand: se.unset_fields_except( ['uuid', 'name', 'description']) se.links = [link.Link.make_link('self', url, 'scoring_engines', se.uuid), link.Link.make_link('bookmark', url, 'scoring_engines', se.uuid, bookmark=True)] return se @classmethod def convert_with_links(cls, scoring_engine, expand=True): scoring_engine = ScoringEngine(**scoring_engine.as_dict()) hide_fields_in_newer_versions(scoring_engine) return cls._convert_with_links( scoring_engine, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='81bbd3c7-3b08-4d12-a268-99354dbf7b71', name='sample-se-123', description='Sample Scoring Engine 123 just for testing') return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ScoringEngineCollection(collection.Collection): """API representation of a collection of scoring engines.""" scoring_engines = [ScoringEngine] """A list containing scoring engine objects""" def __init__(self, **kwargs): super(ScoringEngineCollection, self).__init__() self._type = 'scoring_engines' @staticmethod def 
convert_with_links(scoring_engines, limit, url=None, expand=False, **kwargs): collection = ScoringEngineCollection() collection.scoring_engines = [ScoringEngine.convert_with_links( se, expand) for se in scoring_engines] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.scoring_engines = [ScoringEngine.sample(expand=False)] return sample class ScoringEngineController(rest.RestController): """REST controller for Scoring Engines.""" def __init__(self): super(ScoringEngineController, self).__init__() from_scoring_engines = False """A flag to indicate if the requests to this controller are coming from the top-level resource Scoring Engines.""" _custom_actions = { 'detail': ['GET'], } def _get_scoring_engines_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): api_utils.validate_sort_key( sort_key, list(objects.ScoringEngine.fields)) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.ScoringEngine.get_by_uuid( pecan.request.context, marker) filters = {} sort_db_key = (sort_key if sort_key in objects.ScoringEngine.fields else None) scoring_engines = objects.ScoringEngine.list( context=pecan.request.context, limit=limit, marker=marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) return ScoringEngineCollection.convert_with_links( scoring_engines, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of Scoring Engines. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: name. :param sort_dir: direction to sort. "asc" or "desc". 
Default: asc. """ context = pecan.request.context policy.enforce(context, 'scoring_engine:get_all', action='scoring_engine:get_all') return self._get_scoring_engines_collection( marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(ScoringEngineCollection, wtypes.text, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of Scoring Engines with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: name. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'scoring_engine:detail', action='scoring_engine:detail') parent = pecan.request.path.split('/')[:-1][-1] if parent != "scoring_engines": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['scoring_engines', 'detail']) return self._get_scoring_engines_collection( marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(ScoringEngine, wtypes.text) def get_one(self, scoring_engine): """Retrieve information about the given Scoring Engine. :param scoring_engine_name: The name of the Scoring Engine. """ context = pecan.request.context policy.enforce(context, 'scoring_engine:get', action='scoring_engine:get') if self.from_scoring_engines: raise exception.OperationNotPermitted rpc_scoring_engine = api_utils.get_resource( 'ScoringEngine', scoring_engine) return ScoringEngine.convert_with_links(rpc_scoring_engine) python-watcher-4.0.0/watcher/api/controllers/v1/data_model.py0000664000175000017500000000514513656752270024300 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An Interface for users and admin to List Data Model. """ import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils from watcher.common import exception from watcher.common import policy from watcher.decision_engine import rpcapi class DataModelController(rest.RestController): """REST controller for data model""" def __init__(self): super(DataModelController, self).__init__() from_data_model = False """A flag to indicate if the requests to this controller are coming from the top-level resource DataModel.""" @wsme_pecan.wsexpose(wtypes.text, wtypes.text, types.uuid) def get_all(self, data_model_type='compute', audit_uuid=None): """Retrieve information about the given data model. :param data_model_type: The type of data model user wants to list. Supported values: compute. Future support values: storage, baremetal. The default value is compute. :param audit_uuid: The UUID of the audit, used to filter data model by the scope in audit. 
""" if not utils.allow_list_datamodel(): raise exception.NotAcceptable if self.from_data_model: raise exception.OperationNotPermitted allowed_data_model_type = [ 'compute', ] if data_model_type not in allowed_data_model_type: raise exception.DataModelTypeNotFound( data_model_type=data_model_type) context = pecan.request.context de_client = rpcapi.DecisionEngineAPI() policy.enforce(context, 'data_model:get_all', action='data_model:get_all') rpc_all_data_model = de_client.get_data_model_info( context, data_model_type, audit_uuid) return rpc_all_data_model python-watcher-4.0.0/watcher/api/controllers/v1/action.py0000664000175000017500000004034713656752270023467 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action ` is what enables Watcher to transform the current state of a :ref:`Cluster ` after an :ref:`Audit `. An :ref:`Action ` is an atomic task which changes the current state of a target :ref:`Managed resource ` of the OpenStack :ref:`Cluster ` such as: - Live migration of an instance from one compute node to another compute node with Nova - Changing the power level of a compute node (ACPI level, ...) - Changing the current state of a compute node (enable or disable) with Nova In most cases, an :ref:`Action ` triggers some concrete commands on an existing OpenStack module (Nova, Neutron, Cinder, Ironic, etc.). 
An :ref:`Action ` has a life-cycle and its current state may be one of the following: - **PENDING** : the :ref:`Action ` has not been executed yet by the :ref:`Watcher Applier ` - **ONGOING** : the :ref:`Action ` is currently being processed by the :ref:`Watcher Applier ` - **SUCCEEDED** : the :ref:`Action ` has been executed successfully - **FAILED** : an error occurred while trying to execute the :ref:`Action ` - **DELETED** : the :ref:`Action ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. - **CANCELLED** : the :ref:`Action ` was in **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` :ref:`Some default implementations are provided `, but it is possible to :ref:`develop new implementations ` which are dynamically loaded by Watcher at launch time. """ import datetime import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class ActionPatchType(types.JsonPatchType): @staticmethod def mandatory_attrs(): return [] class Action(base.APIBase): """API representation of a action. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a action. 
""" _action_plan_uuid = None def _get_action_plan_uuid(self): return self._action_plan_uuid def _set_action_plan_uuid(self, value): if value == wtypes.Unset: self._action_plan_uuid = wtypes.Unset elif value and self._action_plan_uuid != value: try: action_plan = objects.ActionPlan.get( pecan.request.context, value) self._action_plan_uuid = action_plan.uuid self.action_plan_id = action_plan.id except exception.ActionPlanNotFound: self._action_plan_uuid = None uuid = wtypes.wsattr(types.uuid, readonly=True) """Unique UUID for this action""" action_plan_uuid = wtypes.wsproperty(types.uuid, _get_action_plan_uuid, _set_action_plan_uuid, mandatory=True) """The action plan this action belongs to """ state = wtypes.text """This audit state""" action_type = wtypes.text """Action type""" description = wtypes.text """Action description""" input_parameters = types.jsontype """One or more key/value pairs """ parents = wtypes.wsattr(types.jsontype, readonly=True) """UUIDs of parent actions""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated action links""" def __init__(self, **kwargs): super(Action, self).__init__() self.fields = [] fields = list(objects.Action.fields) fields.append('action_plan_uuid') for field in fields: # Skip fields we do not expose. 
if not hasattr(self, field): continue self.fields.append(field) setattr(self, field, kwargs.get(field, wtypes.Unset)) self.fields.append('action_plan_id') self.fields.append('description') setattr(self, 'action_plan_uuid', kwargs.get('action_plan_id', wtypes.Unset)) @staticmethod def _convert_with_links(action, url, expand=True): if not expand: action.unset_fields_except(['uuid', 'state', 'action_plan_uuid', 'action_plan_id', 'action_type', 'parents']) action.links = [link.Link.make_link('self', url, 'actions', action.uuid), link.Link.make_link('bookmark', url, 'actions', action.uuid, bookmark=True) ] return action @classmethod def convert_with_links(cls, action, expand=True): action = Action(**action.as_dict()) try: obj_action_desc = objects.ActionDescription.get_by_type( pecan.request.context, action.action_type) description = obj_action_desc.description except exception.ActionDescriptionNotFound: description = "" setattr(action, 'description', description) hide_fields_in_newer_versions(action) return cls._convert_with_links(action, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', description='action description', state='PENDING', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow(), parents=[]) sample._action_plan_uuid = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ActionCollection(collection.Collection): """API representation of a collection of actions.""" actions = [Action] """A list containing actions objects""" def __init__(self, **kwargs): self._type = 'actions' @staticmethod def convert_with_links(actions, limit, url=None, expand=False, **kwargs): collection = ActionCollection() collection.actions = [Action.convert_with_links(p, expand) for p in actions] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def 
sample(cls): sample = cls() sample.actions = [Action.sample(expand=False)] return sample class ActionsController(rest.RestController): """REST controller for Actions.""" def __init__(self): super(ActionsController, self).__init__() from_actions = False """A flag to indicate if the requests to this controller are coming from the top-level resource Actions.""" _custom_actions = { 'detail': ['GET'], } def _get_actions_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, action_plan_uuid=None, audit_uuid=None): additional_fields = ['action_plan_uuid'] api_utils.validate_sort_key(sort_key, list(objects.Action.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Action.get_by_uuid(pecan.request.context, marker) filters = {} if action_plan_uuid: filters['action_plan_uuid'] = action_plan_uuid if audit_uuid: filters['audit_uuid'] = audit_uuid need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) actions = objects.Action.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) actions_collection = ActionCollection.convert_with_links( actions, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(actions_collection.actions, sort_key, sort_dir) return actions_collection @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, types.uuid) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', action_plan_uuid=None, audit_uuid=None): """Retrieve a list of actions. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". 
Default: asc. :param action_plan_uuid: Optional UUID of an action plan, to get only actions for that action plan. :param audit_uuid: Optional UUID of an audit, to get only actions for that audit. """ context = pecan.request.context policy.enforce(context, 'action:get_all', action='action:get_all') if action_plan_uuid and audit_uuid: raise exception.ActionFilterCombinationProhibited return self._get_actions_collection( marker, limit, sort_key, sort_dir, action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) @wsme_pecan.wsexpose(ActionCollection, types.uuid, int, wtypes.text, wtypes.text, types.uuid, types.uuid) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc', action_plan_uuid=None, audit_uuid=None): """Retrieve a list of actions with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param action_plan_uuid: Optional UUID of an action plan, to get only actions for that action plan. :param audit_uuid: Optional UUID of an audit, to get only actions for that audit. """ context = pecan.request.context policy.enforce(context, 'action:detail', action='action:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "actions": raise exception.HTTPNotFound if action_plan_uuid and audit_uuid: raise exception.ActionFilterCombinationProhibited expand = True resource_url = '/'.join(['actions', 'detail']) return self._get_actions_collection( marker, limit, sort_key, sort_dir, expand, resource_url, action_plan_uuid=action_plan_uuid, audit_uuid=audit_uuid) @wsme_pecan.wsexpose(Action, types.uuid) def get_one(self, action_uuid): """Retrieve information about the given action. :param action_uuid: UUID of a action. 
""" if self.from_actions: raise exception.OperationNotPermitted context = pecan.request.context action = api_utils.get_resource('Action', action_uuid) policy.enforce(context, 'action:get', action, action='action:get') return Action.convert_with_links(action) @wsme_pecan.wsexpose(Action, body=Action, status_code=201) def post(self, action): """Create a new action(forbidden). :param action: a action within the request body. """ # FIXME: blueprint edit-action-plan-flow raise exception.OperationNotPermitted( _("Cannot create an action directly")) if self.from_actions: raise exception.OperationNotPermitted action_dict = action.as_dict() context = pecan.request.context new_action = objects.Action(context, **action_dict) new_action.create() # Set the HTTP Location Header pecan.response.location = link.build_url('actions', new_action.uuid) return Action.convert_with_links(new_action) @wsme.validate(types.uuid, [ActionPatchType]) @wsme_pecan.wsexpose(Action, types.uuid, body=[ActionPatchType]) def patch(self, action_uuid, patch): """Update an existing action(forbidden). :param action_uuid: UUID of a action. :param patch: a json PATCH document to apply to this action. 
""" # FIXME: blueprint edit-action-plan-flow raise exception.OperationNotPermitted( _("Cannot modify an action directly")) if self.from_actions: raise exception.OperationNotPermitted action_to_update = objects.Action.get_by_uuid(pecan.request.context, action_uuid) try: action_dict = action_to_update.as_dict() action = Action(**api_utils.apply_jsonpatch(action_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.Action.fields: try: patch_val = getattr(action, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if action_to_update[field] != patch_val: action_to_update[field] = patch_val action_to_update.save() return Action.convert_with_links(action_to_update) @wsme_pecan.wsexpose(None, types.uuid, status_code=204) def delete(self, action_uuid): """Delete a action(forbidden). :param action_uuid: UUID of a action. """ # FIXME: blueprint edit-action-plan-flow raise exception.OperationNotPermitted( _("Cannot delete an action directly")) action_to_delete = objects.Action.get_by_uuid( pecan.request.context, action_uuid) action_to_delete.soft_delete() python-watcher-4.0.0/watcher/api/controllers/v1/strategy.py0000664000175000017500000003046313656752270024052 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" A :ref:`Strategy ` is an algorithm implementation which is able to find a :ref:`Solution ` for a given :ref:`Goal `. There may be several potential strategies which are able to achieve the same :ref:`Goal `. This is why it is possible to configure which specific :ref:`Strategy ` should be used for each goal. Some strategies may provide better optimization results but may take more time to find an optimal :ref:`Solution `. """ import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher.common import utils as common_utils from watcher.decision_engine import rpcapi from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class Strategy(base.APIBase): """API representation of a strategy. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a strategy. 
""" _goal_uuid = None _goal_name = None def _get_goal(self, value): if value == wtypes.Unset: return None goal = None try: if (common_utils.is_uuid_like(value) or common_utils.is_int_like(value)): goal = objects.Goal.get(pecan.request.context, value) else: goal = objects.Goal.get_by_name(pecan.request.context, value) except exception.GoalNotFound: pass if goal: self.goal_id = goal.id return goal def _get_goal_uuid(self): return self._goal_uuid def _set_goal_uuid(self, value): if value and self._goal_uuid != value: self._goal_uuid = None goal = self._get_goal(value) if goal: self._goal_uuid = goal.uuid def _get_goal_name(self): return self._goal_name def _set_goal_name(self, value): if value and self._goal_name != value: self._goal_name = None goal = self._get_goal(value) if goal: self._goal_name = goal.name uuid = types.uuid """Unique UUID for this strategy""" name = wtypes.text """Name of the strategy""" display_name = wtypes.text """Localized name of the strategy""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated goal links""" goal_uuid = wtypes.wsproperty(wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) """The UUID of the goal this audit refers to""" goal_name = wtypes.wsproperty(wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) """The name of the goal this audit refers to""" parameters_spec = {wtypes.text: types.jsontype} """Parameters spec dict""" def __init__(self, **kwargs): super(Strategy, self).__init__() self.fields = [] self.fields.append('uuid') self.fields.append('name') self.fields.append('display_name') self.fields.append('goal_uuid') self.fields.append('goal_name') self.fields.append('parameters_spec') setattr(self, 'uuid', kwargs.get('uuid', wtypes.Unset)) setattr(self, 'name', kwargs.get('name', wtypes.Unset)) setattr(self, 'display_name', kwargs.get('display_name', wtypes.Unset)) setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'goal_name', 
kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'parameters_spec', kwargs.get('parameters_spec', wtypes.Unset)) @staticmethod def _convert_with_links(strategy, url, expand=True): if not expand: strategy.unset_fields_except( ['uuid', 'name', 'display_name', 'goal_uuid', 'goal_name']) strategy.links = [ link.Link.make_link('self', url, 'strategies', strategy.uuid), link.Link.make_link('bookmark', url, 'strategies', strategy.uuid, bookmark=True)] return strategy @classmethod def convert_with_links(cls, strategy, expand=True): strategy = Strategy(**strategy.as_dict()) hide_fields_in_newer_versions(strategy) return cls._convert_with_links( strategy, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='DUMMY', display_name='Dummy strategy') return cls._convert_with_links(sample, 'http://localhost:9322', expand) class StrategyCollection(collection.Collection): """API representation of a collection of strategies.""" strategies = [Strategy] """A list containing strategies objects""" def __init__(self, **kwargs): super(StrategyCollection, self).__init__() self._type = 'strategies' @staticmethod def convert_with_links(strategies, limit, url=None, expand=False, **kwargs): strategy_collection = StrategyCollection() strategy_collection.strategies = [ Strategy.convert_with_links(g, expand) for g in strategies] strategy_collection.next = strategy_collection.get_next( limit, url=url, **kwargs) return strategy_collection @classmethod def sample(cls): sample = cls() sample.strategies = [Strategy.sample(expand=False)] return sample class StrategiesController(rest.RestController): """REST controller for Strategies.""" def __init__(self): super(StrategiesController, self).__init__() from_strategies = False """A flag to indicate if the requests to this controller are coming from the top-level resource Strategies.""" _custom_actions = { 'detail': ['GET'], 'state': ['GET'], } def 
_get_strategies_collection(self, filters, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): additional_fields = ["goal_uuid", "goal_name"] api_utils.validate_sort_key( sort_key, list(objects.Strategy.fields) + additional_fields) api_utils.validate_search_filters( filters, list(objects.Strategy.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Strategy.get_by_uuid( pecan.request.context, marker) need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) strategies = objects.Strategy.list( pecan.request.context, limit, marker_obj, filters=filters, sort_key=sort_db_key, sort_dir=sort_dir) strategies_collection = StrategyCollection.convert_with_links( strategies, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(strategies_collection.strategies, sort_key, sort_dir) return strategies_collection @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, int, wtypes.text, wtypes.text) def get_all(self, goal=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of strategies. :param goal: goal UUID or name to filter by. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" context = pecan.request.context policy.enforce(context, 'strategy:get_all', action='strategy:get_all') filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal return self._get_strategies_collection( filters, marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(StrategyCollection, wtypes.text, wtypes.text, int, wtypes.text, wtypes.text) def detail(self, goal=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of strategies with detail. :param goal: goal UUID or name to filter by. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'strategy:detail', action='strategy:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "strategies": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['strategies', 'detail']) filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal return self._get_strategies_collection( filters, marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(wtypes.text, wtypes.text) def state(self, strategy): """Retrieve an information about strategy requirements. :param strategy: name of the strategy. 
""" context = pecan.request.context policy.enforce(context, 'strategy:state', action='strategy:state') parents = pecan.request.path.split('/')[:-1] if parents[-2] != "strategies": raise exception.HTTPNotFound rpc_strategy = api_utils.get_resource('Strategy', strategy) de_client = rpcapi.DecisionEngineAPI() strategy_state = de_client.get_strategy_info(context, rpc_strategy.name) strategy_state.extend([{ 'type': 'Name', 'state': rpc_strategy.name, 'mandatory': '', 'comment': ''}]) return strategy_state @wsme_pecan.wsexpose(Strategy, wtypes.text) def get_one(self, strategy): """Retrieve information about the given strategy. :param strategy: UUID or name of the strategy. """ if self.from_strategies: raise exception.OperationNotPermitted context = pecan.request.context rpc_strategy = api_utils.get_resource('Strategy', strategy) policy.enforce(context, 'strategy:get', rpc_strategy, action='strategy:get') return Strategy.convert_with_links(rpc_strategy) python-watcher-4.0.0/watcher/api/controllers/v1/goal.py0000664000175000017500000002133413656752270023127 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Goal ` is a human readable, observable and measurable end result having one objective to be achieved. 
Here are some examples of :ref:`Goals `: - minimize the energy consumption - minimize the number of compute nodes (consolidation) - balance the workload among compute nodes - minimize the license cost (some softwares have a licensing model which is based on the number of sockets or cores where the software is deployed) - find the most appropriate moment for a planned maintenance on a given group of host (which may be an entire availability zone): power supply replacement, cooling system replacement, hardware modification, ... """ import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class Goal(base.APIBase): """API representation of a goal. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a goal. 
""" uuid = types.uuid """Unique UUID for this goal""" name = wtypes.text """Name of the goal""" display_name = wtypes.text """Localized name of the goal""" efficacy_specification = wtypes.wsattr(types.jsontype, readonly=True) """Efficacy specification for this goal""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated audit template links""" def __init__(self, **kwargs): self.fields = [] fields = list(objects.Goal.fields) for k in fields: # Skip fields we do not expose. if not hasattr(self, k): continue self.fields.append(k) setattr(self, k, kwargs.get(k, wtypes.Unset)) @staticmethod def _convert_with_links(goal, url, expand=True): if not expand: goal.unset_fields_except(['uuid', 'name', 'display_name', 'efficacy_specification']) goal.links = [link.Link.make_link('self', url, 'goals', goal.uuid), link.Link.make_link('bookmark', url, 'goals', goal.uuid, bookmark=True)] return goal @classmethod def convert_with_links(cls, goal, expand=True): goal = Goal(**goal.as_dict()) hide_fields_in_newer_versions(goal) return cls._convert_with_links(goal, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls( uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='DUMMY', display_name='Dummy strategy', efficacy_specification=[ {'description': 'Dummy indicator', 'name': 'dummy', 'schema': 'Range(min=0, max=100, min_included=True, ' 'max_included=True, msg=None)', 'unit': '%'} ]) return cls._convert_with_links(sample, 'http://localhost:9322', expand) class GoalCollection(collection.Collection): """API representation of a collection of goals.""" goals = [Goal] """A list containing goals objects""" def __init__(self, **kwargs): super(GoalCollection, self).__init__() self._type = 'goals' @staticmethod def convert_with_links(goals, limit, url=None, expand=False, **kwargs): goal_collection = GoalCollection() goal_collection.goals = [ Goal.convert_with_links(g, expand) for g in goals] goal_collection.next = 
goal_collection.get_next( limit, url=url, **kwargs) return goal_collection @classmethod def sample(cls): sample = cls() sample.goals = [Goal.sample(expand=False)] return sample class GoalsController(rest.RestController): """REST controller for Goals.""" def __init__(self): super(GoalsController, self).__init__() from_goals = False """A flag to indicate if the requests to this controller are coming from the top-level resource Goals.""" _custom_actions = { 'detail': ['GET'], } def _get_goals_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): api_utils.validate_sort_key( sort_key, list(objects.Goal.fields)) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Goal.get_by_uuid( pecan.request.context, marker) sort_db_key = (sort_key if sort_key in objects.Goal.fields else None) goals = objects.Goal.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir) return GoalCollection.convert_with_links(goals, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @wsme_pecan.wsexpose(GoalCollection, wtypes.text, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of goals. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'goal:get_all', action='goal:get_all') return self._get_goals_collection(marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(GoalCollection, wtypes.text, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of goals with detail. :param marker: pagination marker for large data sets. 
:param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'goal:detail', action='goal:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "goals": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['goals', 'detail']) return self._get_goals_collection(marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(Goal, wtypes.text) def get_one(self, goal): """Retrieve information about the given goal. :param goal: UUID or name of the goal. """ if self.from_goals: raise exception.OperationNotPermitted context = pecan.request.context rpc_goal = api_utils.get_resource('Goal', goal) policy.enforce(context, 'goal:get', rpc_goal, action='goal:get') return Goal.convert_with_links(rpc_goal) python-watcher-4.0.0/watcher/api/controllers/v1/versions.py0000664000175000017500000000344313656752270024056 0ustar zuulzuul00000000000000# Copyright (c) 2015 Intel Corporation # Copyright (c) 2018 SBCloud # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import enum class VERSIONS(enum.Enum): MINOR_0_ROCKY = 0 # v1.0: corresponds to Rocky API MINOR_1_START_END_TIMING = 1 # v1.1: Add start/end timei for audit MINOR_2_FORCE = 2 # v1.2: Add force field to audit MINOR_3_DATAMODEL = 3 # v1.3: Add list datamodel API MINOR_4_WEBHOOK_API = 4 # v1.4: Add webhook trigger API MINOR_MAX_VERSION = 4 # This is the version 1 API BASE_VERSION = 1 # String representations of the minor and maximum versions _MIN_VERSION_STRING = '{}.{}'.format(BASE_VERSION, VERSIONS.MINOR_0_ROCKY.value) _MAX_VERSION_STRING = '{}.{}'.format(BASE_VERSION, VERSIONS.MINOR_MAX_VERSION.value) def service_type_string(): return 'infra-optim' def min_version_string(): """Returns the minimum supported API version (as a string)""" return _MIN_VERSION_STRING def max_version_string(): """Returns the maximum supported API version (as a string). If the service is pinned, the maximum API version is the pinned version. Otherwise, it is the maximum supported API version. """ return _MAX_VERSION_STRING python-watcher-4.0.0/watcher/api/controllers/v1/service.py0000664000175000017500000002270613656752270023651 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Service mechanism provides ability to monitor Watcher services state. 
""" import datetime import six from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import utils as api_utils from watcher.common import context from watcher.common import exception from watcher.common import policy from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. """ pass class Service(base.APIBase): """API representation of a service. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of a service. """ _status = None _context = context.RequestContext(is_admin=True) def _get_status(self): return self._status def _set_status(self, id): service = objects.Service.get(pecan.request.context, id) last_heartbeat = (service.last_seen_up or service.updated_at or service.created_at) if isinstance(last_heartbeat, six.string_types): # NOTE(russellb) If this service came in over rpc via # conductor, then the timestamp will be a string and needs to be # converted back to a datetime. last_heartbeat = timeutils.parse_strtime(last_heartbeat) else: # Objects have proper UTC timezones, but the timeutils comparison # below does not (and will fail) last_heartbeat = last_heartbeat.replace(tzinfo=None) elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow()) is_up = abs(elapsed) <= CONF.service_down_time if not is_up: LOG.warning('Seems service %(name)s on host %(host)s is down. 
' 'Last heartbeat was %(lhb)s.' 'Elapsed time is %(el)s', {'name': service.name, 'host': service.host, 'lhb': str(last_heartbeat), 'el': str(elapsed)}) self._status = objects.service.ServiceStatus.FAILED else: self._status = objects.service.ServiceStatus.ACTIVE id = wtypes.wsattr(int, readonly=True) """ID for this service.""" name = wtypes.text """Name of the service.""" host = wtypes.text """Host where service is placed on.""" last_seen_up = wtypes.wsattr(datetime.datetime, readonly=True) """Time when Watcher service sent latest heartbeat.""" status = wtypes.wsproperty(wtypes.text, _get_status, _set_status, mandatory=True) links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link.""" def __init__(self, **kwargs): super(Service, self).__init__() fields = list(objects.Service.fields) + ['status'] self.fields = [] for field in fields: self.fields.append(field) setattr(self, field, kwargs.get( field if field != 'status' else 'id', wtypes.Unset)) @staticmethod def _convert_with_links(service, url, expand=True): if not expand: service.unset_fields_except( ['id', 'name', 'host', 'status']) service.links = [ link.Link.make_link('self', url, 'services', str(service.id)), link.Link.make_link('bookmark', url, 'services', str(service.id), bookmark=True)] return service @classmethod def convert_with_links(cls, service, expand=True): service = Service(**service.as_dict()) hide_fields_in_newer_versions(service) return cls._convert_with_links( service, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(id=1, name='watcher-applier', host='Controller', last_seen_up=datetime.datetime(2016, 1, 1)) return cls._convert_with_links(sample, 'http://localhost:9322', expand) class ServiceCollection(collection.Collection): """API representation of a collection of services.""" services = [Service] """A list containing services objects""" def __init__(self, **kwargs): super(ServiceCollection, self).__init__() self._type = 'services' 
@staticmethod def convert_with_links(services, limit, url=None, expand=False, **kwargs): service_collection = ServiceCollection() service_collection.services = [ Service.convert_with_links(g, expand) for g in services] service_collection.next = service_collection.get_next( limit, url=url, marker_field='id', **kwargs) return service_collection @classmethod def sample(cls): sample = cls() sample.services = [Service.sample(expand=False)] return sample class ServicesController(rest.RestController): """REST controller for Services.""" def __init__(self): super(ServicesController, self).__init__() from_services = False """A flag to indicate if the requests to this controller are coming from the top-level resource Services.""" _custom_actions = { 'detail': ['GET'], } def _get_services_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): api_utils.validate_sort_key( sort_key, list(objects.Service.fields)) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Service.get( pecan.request.context, marker) sort_db_key = (sort_key if sort_key in objects.Service.fields else None) services = objects.Service.list( pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir) return ServiceCollection.convert_with_links( services, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of services. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" context = pecan.request.context policy.enforce(context, 'service:get_all', action='service:get_all') return self._get_services_collection(marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(ServiceCollection, int, int, wtypes.text, wtypes.text) def detail(self, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of services with detail. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'service:detail', action='service:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "services": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['services', 'detail']) return self._get_services_collection( marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(Service, wtypes.text) def get_one(self, service): """Retrieve information about the given service. :param service: ID or name of the service. """ if self.from_services: raise exception.OperationNotPermitted context = pecan.request.context rpc_service = api_utils.get_resource('Service', service) policy.enforce(context, 'service:get', rpc_service, action='service:get') return Service.convert_with_links(rpc_service) python-watcher-4.0.0/watcher/api/controllers/v1/audit.py0000664000175000017500000006647413656752270023331 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ In the Watcher system, an :ref:`Audit ` is a request for optimizing a :ref:`Cluster `. The optimization is done in order to satisfy one :ref:`Goal ` on a given :ref:`Cluster `. For each :ref:`Audit `, the Watcher system generates an :ref:`Action Plan `. To see the life-cycle and description of an :ref:`Audit ` states, visit :ref:`the Audit State machine `. """ import datetime from dateutil import tz import pecan from pecan import rest import wsme from wsme import types as wtypes from wsme import utils as wutils import wsmeext.pecan as wsme_pecan from oslo_log import log from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import exception from watcher.common import policy from watcher.common import utils from watcher.decision_engine import rpcapi from watcher import objects LOG = log.getLogger(__name__) def _get_object_by_value(context, class_name, value): if utils.is_uuid_like(value) or utils.is_int_like(value): return class_name.get(context, value) else: return class_name.get_by_name(context, value) def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. 
""" if not api_utils.allow_start_end_audit_time(): obj.start_time = wtypes.Unset obj.end_time = wtypes.Unset if not api_utils.allow_force(): obj.force = wtypes.Unset class AuditPostType(wtypes.Base): name = wtypes.wsattr(wtypes.text, mandatory=False) audit_template_uuid = wtypes.wsattr(types.uuid, mandatory=False) goal = wtypes.wsattr(wtypes.text, mandatory=False) strategy = wtypes.wsattr(wtypes.text, mandatory=False) audit_type = wtypes.wsattr(wtypes.text, mandatory=True) state = wtypes.wsattr(wtypes.text, readonly=True, default=objects.audit.State.PENDING) parameters = wtypes.wsattr({wtypes.text: types.jsontype}, mandatory=False, default={}) interval = wtypes.wsattr(types.interval_or_cron, mandatory=False) scope = wtypes.wsattr(types.jsontype, readonly=True) auto_trigger = wtypes.wsattr(bool, mandatory=False) hostname = wtypes.wsattr(wtypes.text, readonly=True, mandatory=False) start_time = wtypes.wsattr(datetime.datetime, mandatory=False) end_time = wtypes.wsattr(datetime.datetime, mandatory=False) force = wtypes.wsattr(bool, mandatory=False) def as_audit(self, context): audit_type_values = [val.value for val in objects.audit.AuditType] if self.audit_type not in audit_type_values: raise exception.AuditTypeNotFound(audit_type=self.audit_type) if (self.audit_type == objects.audit.AuditType.ONESHOT.value and self.interval not in (wtypes.Unset, None)): raise exception.AuditIntervalNotAllowed(audit_type=self.audit_type) if (self.audit_type == objects.audit.AuditType.CONTINUOUS.value and self.interval in (wtypes.Unset, None)): raise exception.AuditIntervalNotSpecified( audit_type=self.audit_type) if self.audit_template_uuid and self.goal: raise exception.Invalid('Either audit_template_uuid ' 'or goal should be provided.') if (self.audit_type == objects.audit.AuditType.ONESHOT.value and (self.start_time not in (wtypes.Unset, None) or self.end_time not in (wtypes.Unset, None))): raise exception.AuditStartEndTimeNotAllowed( audit_type=self.audit_type) if not 
api_utils.allow_start_end_audit_time(): for field in ('start_time', 'end_time'): if getattr(self, field) not in (wtypes.Unset, None): raise exception.NotAcceptable() # If audit_template_uuid was provided, we will provide any # variables not included in the request, but not override # those variables that were included. if self.audit_template_uuid: try: audit_template = objects.AuditTemplate.get( context, self.audit_template_uuid) except exception.AuditTemplateNotFound: raise exception.Invalid( message=_('The audit template UUID or name specified is ' 'invalid')) at2a = { 'goal': 'goal_id', 'strategy': 'strategy_id', 'scope': 'scope', } to_string_fields = set(['goal', 'strategy']) for k in at2a: if not getattr(self, k): try: at_attr = getattr(audit_template, at2a[k]) if at_attr and (k in to_string_fields): at_attr = str(at_attr) setattr(self, k, at_attr) except AttributeError: pass # Note: If audit name was not provided, used a default name if not self.name: if self.strategy: strategy = _get_object_by_value(context, objects.Strategy, self.strategy) self.name = "%s-%s" % (strategy.name, datetime.datetime.utcnow().isoformat()) elif self.audit_template_uuid: audit_template = objects.AuditTemplate.get( context, self.audit_template_uuid) self.name = "%s-%s" % (audit_template.name, datetime.datetime.utcnow().isoformat()) else: goal = _get_object_by_value(context, objects.Goal, self.goal) self.name = "%s-%s" % (goal.name, datetime.datetime.utcnow().isoformat()) # No more than 63 characters if len(self.name) > 63: LOG.warning("Audit: %s length exceeds 63 characters", self.name) self.name = self.name[0:63] return Audit( name=self.name, audit_type=self.audit_type, parameters=self.parameters, goal_id=self.goal, strategy_id=self.strategy, interval=self.interval, scope=self.scope, auto_trigger=self.auto_trigger, start_time=self.start_time, end_time=self.end_time, force=self.force) class AuditPatchType(types.JsonPatchType): @staticmethod def mandatory_attrs(): return 
['/audit_template_uuid', '/type'] @staticmethod def validate(patch): def is_new_state_none(p): return p.path == '/state' and p.op == 'replace' and p.value is None serialized_patch = {'path': patch.path, 'op': patch.op, 'value': patch.value} if (patch.path in AuditPatchType.mandatory_attrs() or is_new_state_none(patch)): msg = _("%(field)s can't be updated.") raise exception.PatchError( patch=serialized_patch, reason=msg % dict(field=patch.path)) return types.JsonPatchType.validate(patch) class Audit(base.APIBase): """API representation of an audit. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an audit. """ _goal_uuid = None _goal_name = None _strategy_uuid = None _strategy_name = None def _get_goal(self, value): if value == wtypes.Unset: return None goal = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): goal = objects.Goal.get( pecan.request.context, value) else: goal = objects.Goal.get_by_name( pecan.request.context, value) except exception.GoalNotFound: pass if goal: self.goal_id = goal.id return goal def _get_goal_uuid(self): return self._goal_uuid def _set_goal_uuid(self, value): if value and self._goal_uuid != value: self._goal_uuid = None goal = self._get_goal(value) if goal: self._goal_uuid = goal.uuid def _get_goal_name(self): return self._goal_name def _set_goal_name(self, value): if value and self._goal_name != value: self._goal_name = None goal = self._get_goal(value) if goal: self._goal_name = goal.name def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if utils.is_uuid_like(value) or utils.is_int_like(value): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_strategy_uuid(self): return self._strategy_uuid def 
_set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = types.uuid """Unique UUID for this audit""" name = wtypes.text """Name of this audit""" audit_type = wtypes.text """Type of this audit""" state = wtypes.text """This audit state""" goal_uuid = wtypes.wsproperty( wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) """Goal UUID the audit refers to""" goal_name = wtypes.wsproperty( wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) """The name of the goal this audit refers to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the audit refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this audit refers to""" parameters = {wtypes.text: types.jsontype} """The strategy parameters for this audit""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated audit links""" interval = wtypes.wsattr(wtypes.text, mandatory=False) """Launch audit periodically (in seconds)""" scope = wtypes.wsattr(types.jsontype, mandatory=False) """Audit Scope""" auto_trigger = wtypes.wsattr(bool, mandatory=False, default=False) """Autoexecute action plan once audit is succeeded""" next_run_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The next time audit launch""" hostname = wtypes.wsattr(wtypes.text, mandatory=False) """Hostname the audit is running on""" start_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The start time for continuous audit launch""" 
end_time = wtypes.wsattr(datetime.datetime, mandatory=False) """The end time that stopping continuous audit""" force = wsme.wsattr(bool, mandatory=False, default=False) """Allow Action Plan of this Audit be executed in parallel with other Action Plan""" def __init__(self, **kwargs): self.fields = [] fields = list(objects.Audit.fields) for k in fields: # Skip fields we do not expose. if not hasattr(self, k): continue self.fields.append(k) setattr(self, k, kwargs.get(k, wtypes.Unset)) self.fields.append('goal_id') self.fields.append('strategy_id') fields.append('goal_uuid') setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) fields.append('goal_name') setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) fields.append('strategy_uuid') setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) fields.append('strategy_name') setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(audit, url, expand=True): if not expand: audit.unset_fields_except(['uuid', 'name', 'audit_type', 'state', 'goal_uuid', 'interval', 'scope', 'strategy_uuid', 'goal_name', 'strategy_name', 'auto_trigger', 'next_run_time']) audit.links = [link.Link.make_link('self', url, 'audits', audit.uuid), link.Link.make_link('bookmark', url, 'audits', audit.uuid, bookmark=True) ] return audit @classmethod def convert_with_links(cls, rpc_audit, expand=True): audit = Audit(**rpc_audit.as_dict()) hide_fields_in_newer_versions(audit) return cls._convert_with_links(audit, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='My Audit', audit_type='ONESHOT', state='PENDING', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow(), interval='7200', scope=[], auto_trigger=False, next_run_time=datetime.datetime.utcnow(), start_time=datetime.datetime.utcnow(), end_time=datetime.datetime.utcnow()) 
sample.goal_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ae' sample.strategy_id = '7ae81bb3-dec3-4289-8d6c-da80bd8001ff' return cls._convert_with_links(sample, 'http://localhost:9322', expand) class AuditCollection(collection.Collection): """API representation of a collection of audits.""" audits = [Audit] """A list containing audits objects""" def __init__(self, **kwargs): super(AuditCollection, self).__init__() self._type = 'audits' @staticmethod def convert_with_links(rpc_audits, limit, url=None, expand=False, **kwargs): collection = AuditCollection() collection.audits = [Audit.convert_with_links(p, expand) for p in rpc_audits] collection.next = collection.get_next(limit, url=url, **kwargs) return collection @classmethod def sample(cls): sample = cls() sample.audits = [Audit.sample(expand=False)] return sample class AuditsController(rest.RestController): """REST controller for Audits.""" def __init__(self): super(AuditsController, self).__init__() self.dc_client = rpcapi.DecisionEngineAPI() from_audits = False """A flag to indicate if the requests to this controller are coming from the top-level resource Audits.""" _custom_actions = { 'detail': ['GET'], } def _get_audits_collection(self, marker, limit, sort_key, sort_dir, expand=False, resource_url=None, goal=None, strategy=None): additional_fields = ["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"] api_utils.validate_sort_key( sort_key, list(objects.Audit.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.Audit.get_by_uuid(pecan.request.context, marker) filters = {} if goal: if utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: # TODO(michaelgugino): add method to get goal by name. filters['goal_name'] = goal if strategy: if utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: # TODO(michaelgugino): add method to get goal by name. 
filters['strategy_name'] = strategy need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) audits = objects.Audit.list(pecan.request.context, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir, filters=filters) audits_collection = AuditCollection.convert_with_links( audits, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort(audits_collection.audits, sort_key, sort_dir) return audits_collection @wsme_pecan.wsexpose(AuditCollection, types.uuid, int, wtypes.text, wtypes.text, wtypes.text, wtypes.text) def get_all(self, marker=None, limit=None, sort_key='id', sort_dir='asc', goal=None, strategy=None): """Retrieve a list of audits. :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. :param goal: goal UUID or name to filter by :param strategy: strategy UUID or name to filter by """ context = pecan.request.context policy.enforce(context, 'audit:get_all', action='audit:get_all') return self._get_audits_collection(marker, limit, sort_key, sort_dir, goal=goal, strategy=strategy) @wsme_pecan.wsexpose(AuditCollection, wtypes.text, types.uuid, int, wtypes.text, wtypes.text) def detail(self, goal=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of audits with detail. :param goal: goal UUID or name to filter by :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" context = pecan.request.context policy.enforce(context, 'audit:detail', action='audit:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "audits": raise exception.HTTPNotFound expand = True resource_url = '/'.join(['audits', 'detail']) return self._get_audits_collection(marker, limit, sort_key, sort_dir, expand, resource_url, goal=goal) @wsme_pecan.wsexpose(Audit, wtypes.text) def get_one(self, audit): """Retrieve information about the given audit. :param audit: UUID or name of an audit. """ if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context rpc_audit = api_utils.get_resource('Audit', audit) policy.enforce(context, 'audit:get', rpc_audit, action='audit:get') return Audit.convert_with_links(rpc_audit) @wsme_pecan.wsexpose(Audit, body=AuditPostType, status_code=201) def post(self, audit_p): """Create a new audit. :param audit_p: an audit within the request body. """ context = pecan.request.context policy.enforce(context, 'audit:create', action='audit:create') audit = audit_p.as_audit(context) if self.from_audits: raise exception.OperationNotPermitted if not audit._goal_uuid: raise exception.Invalid( message=_('A valid goal_id or audit_template_id ' 'must be provided')) strategy_uuid = audit.strategy_uuid no_schema = True if strategy_uuid is not None: # validate parameter when predefined strategy in audit template strategy = objects.Strategy.get(pecan.request.context, strategy_uuid) schema = strategy.parameters_spec if schema: # validate input parameter with default value feedback no_schema = False utils.StrictDefaultValidatingDraft4Validator(schema).validate( audit.parameters) if no_schema and audit.parameters: raise exception.Invalid(_('Specify parameters but no predefined ' 'strategy for audit, or no ' 'parameter spec in predefined strategy')) audit_dict = audit.as_dict() # convert local time to UTC time start_time_value = 
audit_dict.get('start_time') end_time_value = audit_dict.get('end_time') if start_time_value: audit_dict['start_time'] = start_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) if end_time_value: audit_dict['end_time'] = end_time_value.replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) new_audit = objects.Audit(context, **audit_dict) new_audit.create() # Set the HTTP Location Header pecan.response.location = link.build_url('audits', new_audit.uuid) # trigger decision-engine to run the audit if new_audit.audit_type == objects.audit.AuditType.ONESHOT.value: self.dc_client.trigger_audit(context, new_audit.uuid) return Audit.convert_with_links(new_audit) @wsme.validate(types.uuid, [AuditPatchType]) @wsme_pecan.wsexpose(Audit, wtypes.text, body=[AuditPatchType]) def patch(self, audit, patch): """Update an existing audit. :param audit: UUID or name of an audit. :param patch: a json PATCH document to apply to this audit. """ if self.from_audits: raise exception.OperationNotPermitted context = pecan.request.context audit_to_update = api_utils.get_resource( 'Audit', audit, eager=True) policy.enforce(context, 'audit:update', audit_to_update, action='audit:update') try: audit_dict = audit_to_update.as_dict() initial_state = audit_dict['state'] new_state = api_utils.get_patch_value(patch, 'state') if not api_utils.check_audit_state_transition( patch, initial_state): error_message = _("State transition not allowed: " "(%(initial_state)s -> %(new_state)s)") raise exception.PatchError( patch=patch, reason=error_message % dict( initial_state=initial_state, new_state=new_state)) patch_path = api_utils.get_patch_key(patch, 'path') if patch_path in ('start_time', 'end_time'): patch_value = api_utils.get_patch_value(patch, patch_path) # convert string format to UTC time new_patch_value = wutils.parse_isodatetime( patch_value).replace( tzinfo=tz.tzlocal()).astimezone( tz.tzutc()).replace(tzinfo=None) 
api_utils.set_patch_value(patch, patch_path, new_patch_value) audit = Audit(**api_utils.apply_jsonpatch(audit_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.Audit.fields: try: patch_val = getattr(audit, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if audit_to_update[field] != patch_val: audit_to_update[field] = patch_val audit_to_update.save() return Audit.convert_with_links(audit_to_update) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, audit): """Delete an audit. :param audit: UUID or name of an audit. """ context = pecan.request.context audit_to_delete = api_utils.get_resource( 'Audit', audit, eager=True) policy.enforce(context, 'audit:delete', audit_to_delete, action='audit:delete') initial_state = audit_to_delete.state new_state = objects.audit.State.DELETED if not objects.audit.AuditStateTransitionManager( ).check_transition(initial_state, new_state): raise exception.DeleteError( state=initial_state) audit_to_delete.soft_delete() python-watcher-4.0.0/watcher/api/controllers/v1/audit_template.py0000664000175000017500000006555413656752270025222 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" An :ref:`Audit ` may be launched several times with the same settings (:ref:`Goal `, thresholds, ...). Therefore it makes sense to save those settings in some sort of Audit preset object, which is known as an :ref:`Audit Template `. An :ref:`Audit Template ` contains at least the :ref:`Goal ` of the :ref:`Audit `. It may also contain some error handling settings indicating whether: - :ref:`Watcher Applier ` stops the entire operation - :ref:`Watcher Applier ` performs a rollback and how many retries should be attempted before failure occurs (also the latter can be complex: for example the scenario in which there are many first-time failures on ultimately successful :ref:`Actions `). Moreover, an :ref:`Audit Template ` may contain some settings related to the level of automation for the :ref:`Action Plan ` that will be generated by the :ref:`Audit `. A flag will indicate whether the :ref:`Action Plan ` will be launched automatically or will need a manual confirmation from the :ref:`Administrator `. """ import datetime import pecan from pecan import rest import wsme from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher._i18n import _ from watcher.api.controllers import base from watcher.api.controllers import link from watcher.api.controllers.v1 import collection from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils as api_utils from watcher.common import context as context_utils from watcher.common import exception from watcher.common import policy from watcher.common import utils as common_utils from watcher.decision_engine.loading import default as default_loading from watcher import objects def hide_fields_in_newer_versions(obj): """This method hides fields that were added in newer API versions. Certain node fields were introduced at certain API versions. These fields are only made available when the request's API version matches or exceeds the versions when these fields were introduced. 
""" pass class AuditTemplatePostType(wtypes.Base): _ctx = context_utils.make_context() name = wtypes.wsattr(wtypes.text, mandatory=True) """Name of this audit template""" description = wtypes.wsattr(wtypes.text, mandatory=False) """Short description of this audit template""" goal = wtypes.wsattr(wtypes.text, mandatory=True) """Goal UUID or name of the audit template""" strategy = wtypes.wsattr(wtypes.text, mandatory=False) """Strategy UUID or name of the audit template""" scope = wtypes.wsattr(types.jsontype, mandatory=False, default=[]) """Audit Scope""" def as_audit_template(self): return AuditTemplate( name=self.name, description=self.description, goal_id=self.goal, # Dirty trick ... goal=self.goal, strategy_id=self.strategy, # Dirty trick ... strategy_uuid=self.strategy, scope=self.scope, ) @staticmethod def _build_schema(): SCHEMA = { "$schema": "http://json-schema.org/draft-04/schema#", "type": "array", "items": { "type": "object", "properties": AuditTemplatePostType._get_schemas(), "additionalProperties": False } } return SCHEMA @staticmethod def _get_schemas(): collectors = default_loading.ClusterDataModelCollectorLoader( ).list_available() schemas = {k: c.SCHEMA for k, c in collectors.items() if hasattr(c, "SCHEMA")} return schemas @staticmethod def validate(audit_template): available_goals = objects.Goal.list(AuditTemplatePostType._ctx) available_goal_uuids_map = {g.uuid: g for g in available_goals} available_goal_names_map = {g.name: g for g in available_goals} if audit_template.goal in available_goal_uuids_map: goal = available_goal_uuids_map[audit_template.goal] elif audit_template.goal in available_goal_names_map: goal = available_goal_names_map[audit_template.goal] else: raise exception.InvalidGoal(goal=audit_template.goal) if audit_template.scope: common_utils.Draft4Validator( AuditTemplatePostType._build_schema() ).validate(audit_template.scope) include_host_aggregates = False exclude_host_aggregates = False for rule in 
audit_template.scope[0]['compute']: if 'host_aggregates' in rule: include_host_aggregates = True elif 'exclude' in rule: for resource in rule['exclude']: if 'host_aggregates' in resource: exclude_host_aggregates = True if include_host_aggregates and exclude_host_aggregates: raise exception.Invalid( message=_( "host_aggregates can't be " "included and excluded together")) if audit_template.strategy: available_strategies = objects.Strategy.list( AuditTemplatePostType._ctx) available_strategies_map = { s.uuid: s for s in available_strategies} if audit_template.strategy not in available_strategies_map: raise exception.InvalidStrategy( strategy=audit_template.strategy) strategy = available_strategies_map[audit_template.strategy] # Check that the strategy we indicate is actually related to the # specified goal if strategy.goal_id != goal.id: choices = ["'%s' (%s)" % (s.uuid, s.name) for s in available_strategies] raise exception.InvalidStrategy( message=_( "'%(strategy)s' strategy does relate to the " "'%(goal)s' goal. 
Possible choices: %(choices)s") % dict(strategy=strategy.name, goal=goal.name, choices=", ".join(choices))) audit_template.strategy = strategy.uuid # We force the UUID so that we do not need to query the DB with the # name afterwards audit_template.goal = goal.uuid return audit_template class AuditTemplatePatchType(types.JsonPatchType): _ctx = context_utils.make_context() @staticmethod def mandatory_attrs(): return [] @staticmethod def validate(patch): if patch.path == "/goal" and patch.op != "remove": AuditTemplatePatchType._validate_goal(patch) elif patch.path == "/goal" and patch.op == "remove": raise exception.OperationNotPermitted( _("Cannot remove 'goal' attribute " "from an audit template")) if patch.path == "/strategy": AuditTemplatePatchType._validate_strategy(patch) return types.JsonPatchType.validate(patch) @staticmethod def _validate_goal(patch): patch.path = "/goal_id" goal = patch.value if goal: available_goals = objects.Goal.list( AuditTemplatePatchType._ctx) available_goal_uuids_map = {g.uuid: g for g in available_goals} available_goal_names_map = {g.name: g for g in available_goals} if goal in available_goal_uuids_map: patch.value = available_goal_uuids_map[goal].id elif goal in available_goal_names_map: patch.value = available_goal_names_map[goal].id else: raise exception.InvalidGoal(goal=goal) @staticmethod def _validate_strategy(patch): patch.path = "/strategy_id" strategy = patch.value if strategy: available_strategies = objects.Strategy.list( AuditTemplatePatchType._ctx) available_strategy_uuids_map = { s.uuid: s for s in available_strategies} available_strategy_names_map = { s.name: s for s in available_strategies} if strategy in available_strategy_uuids_map: patch.value = available_strategy_uuids_map[strategy].id elif strategy in available_strategy_names_map: patch.value = available_strategy_names_map[strategy].id else: raise exception.InvalidStrategy(strategy=strategy) class AuditTemplate(base.APIBase): """API representation of a audit 
template. This class enforces type checking and value constraints, and converts between the internal object model and the API representation of an audit template. """ _goal_uuid = None _goal_name = None _strategy_uuid = None _strategy_name = None def _get_goal(self, value): if value == wtypes.Unset: return None goal = None try: if (common_utils.is_uuid_like(value) or common_utils.is_int_like(value)): goal = objects.Goal.get( pecan.request.context, value) else: goal = objects.Goal.get_by_name( pecan.request.context, value) except exception.GoalNotFound: pass if goal: self.goal_id = goal.id return goal def _get_strategy(self, value): if value == wtypes.Unset: return None strategy = None try: if (common_utils.is_uuid_like(value) or common_utils.is_int_like(value)): strategy = objects.Strategy.get( pecan.request.context, value) else: strategy = objects.Strategy.get_by_name( pecan.request.context, value) except exception.StrategyNotFound: pass if strategy: self.strategy_id = strategy.id return strategy def _get_goal_uuid(self): return self._goal_uuid def _set_goal_uuid(self, value): if value and self._goal_uuid != value: self._goal_uuid = None goal = self._get_goal(value) if goal: self._goal_uuid = goal.uuid def _get_strategy_uuid(self): return self._strategy_uuid def _set_strategy_uuid(self, value): if value and self._strategy_uuid != value: self._strategy_uuid = None strategy = self._get_strategy(value) if strategy: self._strategy_uuid = strategy.uuid def _get_goal_name(self): return self._goal_name def _set_goal_name(self, value): if value and self._goal_name != value: self._goal_name = None goal = self._get_goal(value) if goal: self._goal_name = goal.name def _get_strategy_name(self): return self._strategy_name def _set_strategy_name(self, value): if value and self._strategy_name != value: self._strategy_name = None strategy = self._get_strategy(value) if strategy: self._strategy_name = strategy.name uuid = wtypes.wsattr(types.uuid, readonly=True) """Unique UUID for 
this audit template""" name = wtypes.text """Name of this audit template""" description = wtypes.wsattr(wtypes.text, mandatory=False) """Short description of this audit template""" goal_uuid = wtypes.wsproperty( wtypes.text, _get_goal_uuid, _set_goal_uuid, mandatory=True) """Goal UUID the audit template refers to""" goal_name = wtypes.wsproperty( wtypes.text, _get_goal_name, _set_goal_name, mandatory=False) """The name of the goal this audit template refers to""" strategy_uuid = wtypes.wsproperty( wtypes.text, _get_strategy_uuid, _set_strategy_uuid, mandatory=False) """Strategy UUID the audit template refers to""" strategy_name = wtypes.wsproperty( wtypes.text, _get_strategy_name, _set_strategy_name, mandatory=False) """The name of the strategy this audit template refers to""" audits = wtypes.wsattr([link.Link], readonly=True) """Links to the collection of audits contained in this audit template""" links = wtypes.wsattr([link.Link], readonly=True) """A list containing a self link and associated audit template links""" scope = wtypes.wsattr(types.jsontype, mandatory=False) """Audit Scope""" def __init__(self, **kwargs): super(AuditTemplate, self).__init__() self.fields = [] fields = list(objects.AuditTemplate.fields) for k in fields: # Skip fields we do not expose. if not hasattr(self, k): continue self.fields.append(k) setattr(self, k, kwargs.get(k, wtypes.Unset)) self.fields.append('goal_id') self.fields.append('strategy_id') setattr(self, 'strategy_id', kwargs.get('strategy_id', wtypes.Unset)) # goal_uuid & strategy_uuid are not part of # objects.AuditTemplate.fields because they're API-only attributes. 
self.fields.append('goal_uuid') self.fields.append('goal_name') self.fields.append('strategy_uuid') self.fields.append('strategy_name') setattr(self, 'goal_uuid', kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'goal_name', kwargs.get('goal_id', wtypes.Unset)) setattr(self, 'strategy_uuid', kwargs.get('strategy_id', wtypes.Unset)) setattr(self, 'strategy_name', kwargs.get('strategy_id', wtypes.Unset)) @staticmethod def _convert_with_links(audit_template, url, expand=True): if not expand: audit_template.unset_fields_except( ['uuid', 'name', 'goal_uuid', 'goal_name', 'scope', 'strategy_uuid', 'strategy_name']) # The numeric ID should not be exposed to # the user, it's internal only. audit_template.goal_id = wtypes.Unset audit_template.strategy_id = wtypes.Unset audit_template.links = [link.Link.make_link('self', url, 'audit_templates', audit_template.uuid), link.Link.make_link('bookmark', url, 'audit_templates', audit_template.uuid, bookmark=True)] return audit_template @classmethod def convert_with_links(cls, rpc_audit_template, expand=True): audit_template = AuditTemplate(**rpc_audit_template.as_dict()) hide_fields_in_newer_versions(audit_template) return cls._convert_with_links(audit_template, pecan.request.host_url, expand) @classmethod def sample(cls, expand=True): sample = cls(uuid='27e3153e-d5bf-4b7e-b517-fb518e17f34c', name='My Audit Template', description='Description of my audit template', goal_uuid='83e44733-b640-40e2-8d8a-7dd3be7134e6', strategy_uuid='367d826e-b6a4-4b70-bc44-c3f6fe1c9986', created_at=datetime.datetime.utcnow(), deleted_at=None, updated_at=datetime.datetime.utcnow(), scope=[],) return cls._convert_with_links(sample, 'http://localhost:9322', expand) class AuditTemplateCollection(collection.Collection): """API representation of a collection of audit templates.""" audit_templates = [AuditTemplate] """A list containing audit templates objects""" def __init__(self, **kwargs): super(AuditTemplateCollection, self).__init__() self._type = 
'audit_templates' @staticmethod def convert_with_links(rpc_audit_templates, limit, url=None, expand=False, **kwargs): at_collection = AuditTemplateCollection() at_collection.audit_templates = [ AuditTemplate.convert_with_links(p, expand) for p in rpc_audit_templates] at_collection.next = at_collection.get_next(limit, url=url, **kwargs) return at_collection @classmethod def sample(cls): sample = cls() sample.audit_templates = [AuditTemplate.sample(expand=False)] return sample class AuditTemplatesController(rest.RestController): """REST controller for AuditTemplates.""" def __init__(self): super(AuditTemplatesController, self).__init__() from_audit_templates = False """A flag to indicate if the requests to this controller are coming from the top-level resource AuditTemplates.""" _custom_actions = { 'detail': ['GET'], } def _get_audit_templates_collection(self, filters, marker, limit, sort_key, sort_dir, expand=False, resource_url=None): additional_fields = ["goal_uuid", "goal_name", "strategy_uuid", "strategy_name"] api_utils.validate_sort_key( sort_key, list(objects.AuditTemplate.fields) + additional_fields) api_utils.validate_search_filters( filters, list(objects.AuditTemplate.fields) + additional_fields) limit = api_utils.validate_limit(limit) api_utils.validate_sort_dir(sort_dir) marker_obj = None if marker: marker_obj = objects.AuditTemplate.get_by_uuid( pecan.request.context, marker) need_api_sort = api_utils.check_need_api_sort(sort_key, additional_fields) sort_db_key = (sort_key if not need_api_sort else None) audit_templates = objects.AuditTemplate.list( pecan.request.context, filters, limit, marker_obj, sort_key=sort_db_key, sort_dir=sort_dir) audit_templates_collection = \ AuditTemplateCollection.convert_with_links( audit_templates, limit, url=resource_url, expand=expand, sort_key=sort_key, sort_dir=sort_dir) if need_api_sort: api_utils.make_api_sort( audit_templates_collection.audit_templates, sort_key, sort_dir) return audit_templates_collection 
@wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, types.uuid, int, wtypes.text, wtypes.text) def get_all(self, goal=None, strategy=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of audit templates. :param goal: goal UUID or name to filter by :param strategy: strategy UUID or name to filter by :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. """ context = pecan.request.context policy.enforce(context, 'audit_template:get_all', action='audit_template:get_all') filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal if strategy: if common_utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy return self._get_audit_templates_collection( filters, marker, limit, sort_key, sort_dir) @wsme_pecan.wsexpose(AuditTemplateCollection, wtypes.text, wtypes.text, types.uuid, int, wtypes.text, wtypes.text) def detail(self, goal=None, strategy=None, marker=None, limit=None, sort_key='id', sort_dir='asc'): """Retrieve a list of audit templates with detail. :param goal: goal UUID or name to filter by :param strategy: strategy UUID or name to filter by :param marker: pagination marker for large data sets. :param limit: maximum number of resources to return in a single result. :param sort_key: column to sort results by. Default: id. :param sort_dir: direction to sort. "asc" or "desc". Default: asc. 
""" context = pecan.request.context policy.enforce(context, 'audit_template:detail', action='audit_template:detail') # NOTE(lucasagomes): /detail should only work agaist collections parent = pecan.request.path.split('/')[:-1][-1] if parent != "audit_templates": raise exception.HTTPNotFound filters = {} if goal: if common_utils.is_uuid_like(goal): filters['goal_uuid'] = goal else: filters['goal_name'] = goal if strategy: if common_utils.is_uuid_like(strategy): filters['strategy_uuid'] = strategy else: filters['strategy_name'] = strategy expand = True resource_url = '/'.join(['audit_templates', 'detail']) return self._get_audit_templates_collection(filters, marker, limit, sort_key, sort_dir, expand, resource_url) @wsme_pecan.wsexpose(AuditTemplate, wtypes.text) def get_one(self, audit_template): """Retrieve information about the given audit template. :param audit_template: UUID or name of an audit template. """ if self.from_audit_templates: raise exception.OperationNotPermitted context = pecan.request.context rpc_audit_template = api_utils.get_resource('AuditTemplate', audit_template) policy.enforce(context, 'audit_template:get', rpc_audit_template, action='audit_template:get') return AuditTemplate.convert_with_links(rpc_audit_template) @wsme.validate(types.uuid, AuditTemplatePostType) @wsme_pecan.wsexpose(AuditTemplate, body=AuditTemplatePostType, status_code=201) def post(self, audit_template_postdata): """Create a new audit template. :param audit_template_postdata: the audit template POST data from the request body. 
""" if self.from_audit_templates: raise exception.OperationNotPermitted context = pecan.request.context policy.enforce(context, 'audit_template:create', action='audit_template:create') context = pecan.request.context audit_template = audit_template_postdata.as_audit_template() audit_template_dict = audit_template.as_dict() new_audit_template = objects.AuditTemplate(context, **audit_template_dict) new_audit_template.create() # Set the HTTP Location Header pecan.response.location = link.build_url( 'audit_templates', new_audit_template.uuid) return AuditTemplate.convert_with_links(new_audit_template) @wsme.validate(types.uuid, [AuditTemplatePatchType]) @wsme_pecan.wsexpose(AuditTemplate, wtypes.text, body=[AuditTemplatePatchType]) def patch(self, audit_template, patch): """Update an existing audit template. :param template_uuid: UUID of a audit template. :param patch: a json PATCH document to apply to this audit template. """ if self.from_audit_templates: raise exception.OperationNotPermitted context = pecan.request.context audit_template_to_update = api_utils.get_resource('AuditTemplate', audit_template) policy.enforce(context, 'audit_template:update', audit_template_to_update, action='audit_template:update') if common_utils.is_uuid_like(audit_template): audit_template_to_update = objects.AuditTemplate.get_by_uuid( pecan.request.context, audit_template) else: audit_template_to_update = objects.AuditTemplate.get_by_name( pecan.request.context, audit_template) try: audit_template_dict = audit_template_to_update.as_dict() audit_template = AuditTemplate(**api_utils.apply_jsonpatch( audit_template_dict, patch)) except api_utils.JSONPATCH_EXCEPTIONS as e: raise exception.PatchError(patch=patch, reason=e) # Update only the fields that have changed for field in objects.AuditTemplate.fields: try: patch_val = getattr(audit_template, field) except AttributeError: # Ignore fields that aren't exposed in the API continue if patch_val == wtypes.Unset: patch_val = None if 
audit_template_to_update[field] != patch_val: audit_template_to_update[field] = patch_val audit_template_to_update.save() return AuditTemplate.convert_with_links(audit_template_to_update) @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) def delete(self, audit_template): """Delete a audit template. :param template_uuid: UUID or name of an audit template. """ context = pecan.request.context audit_template_to_delete = api_utils.get_resource('AuditTemplate', audit_template) policy.enforce(context, 'audit_template:delete', audit_template_to_delete, action='audit_template:delete') audit_template_to_delete.soft_delete() python-watcher-4.0.0/watcher/api/controllers/v1/webhooks.py0000664000175000017500000000417313656752270024030 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Webhook endpoint for Watcher v1 REST API. 
""" from oslo_log import log import pecan from pecan import rest from wsme import types as wtypes import wsmeext.pecan as wsme_pecan from watcher.api.controllers.v1 import types from watcher.api.controllers.v1 import utils from watcher.common import exception from watcher.decision_engine import rpcapi from watcher import objects LOG = log.getLogger(__name__) class WebhookController(rest.RestController): """REST controller for webhooks resource.""" def __init__(self): super(WebhookController, self).__init__() self.dc_client = rpcapi.DecisionEngineAPI() @wsme_pecan.wsexpose(None, wtypes.text, body=types.jsontype, status_code=202) def post(self, audit_ident, body): """Trigger the given audit. :param audit_ident: UUID or name of an audit. """ LOG.debug("Webhook trigger Audit: %s.", audit_ident) context = pecan.request.context audit = utils.get_resource('Audit', audit_ident) if audit is None: raise exception.AuditNotFound(audit=audit_ident) if audit.audit_type != objects.audit.AuditType.EVENT.value: raise exception.AuditTypeNotAllowed(audit_type=audit.audit_type) allowed_state = ( objects.audit.State.PENDING, objects.audit.State.SUCCEEDED, ) if audit.state not in allowed_state: raise exception.AuditStateNotAllowed(state=audit.state) # trigger decision-engine to run the audit self.dc_client.trigger_audit(context, audit.uuid) python-watcher-4.0.0/watcher/api/controllers/v1/types.py0000664000175000017500000001462713656752270023360 0ustar zuulzuul00000000000000# Copyright 2013 Red Hat, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from oslo_serialization import jsonutils from oslo_utils import strutils import six import wsme from wsme import types as wtypes from watcher._i18n import _ from watcher.common import exception from watcher.common import utils class UuidOrNameType(wtypes.UserType): """A simple UUID or logical name type.""" basetype = wtypes.text name = 'uuid_or_name' @staticmethod def validate(value): if not (utils.is_uuid_like(value) or utils.is_hostname_safe(value)): raise exception.InvalidUuidOrName(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return UuidOrNameType.validate(value) class IntervalOrCron(wtypes.UserType): """A simple int value or cron syntax type""" basetype = wtypes.text name = 'interval_or_cron' @staticmethod def validate(value): if not (utils.is_int_like(value) or utils.is_cron_like(value)): raise exception.InvalidIntervalOrCron(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return IntervalOrCron.validate(value) interval_or_cron = IntervalOrCron() class NameType(wtypes.UserType): """A simple logical name type.""" basetype = wtypes.text name = 'name' @staticmethod def validate(value): if not utils.is_hostname_safe(value): raise exception.InvalidName(name=value) return value @staticmethod def frombasetype(value): if value is None: return None return NameType.validate(value) class UuidType(wtypes.UserType): """A simple UUID type.""" basetype = wtypes.text name = 'uuid' @staticmethod def validate(value): if not utils.is_uuid_like(value): raise exception.InvalidUUID(uuid=value) return value @staticmethod def frombasetype(value): if value is None: return None return UuidType.validate(value) class BooleanType(wtypes.UserType): """A simple boolean type.""" basetype = wtypes.text name = 'boolean' @staticmethod def validate(value): try: return strutils.bool_from_string(value, 
strict=True) except ValueError as e: # raise Invalid to return 400 (BadRequest) in the API raise exception.Invalid(e) @staticmethod def frombasetype(value): if value is None: return None return BooleanType.validate(value) class JsonType(wtypes.UserType): """A simple JSON type.""" basetype = wtypes.text name = 'json' def __str__(self): # These are the json serializable native types return ' | '.join(map(str, (wtypes.text, six.integer_types, float, BooleanType, list, dict, None))) @staticmethod def validate(value): try: jsonutils.dumps(value, default=None) except TypeError: raise exception.Invalid(_('%s is not JSON serializable') % value) else: return value @staticmethod def frombasetype(value): return JsonType.validate(value) uuid = UuidType() boolean = BooleanType() jsontype = JsonType() class MultiType(wtypes.UserType): """A complex type that represents one or more types. Used for validating that a value is an instance of one of the types. :param types: Variable-length list of types. """ def __init__(self, *types): self.types = types def __str__(self): return ' | '.join(map(str, self.types)) def validate(self, value): for t in self.types: if t is wsme.types.text and isinstance(value, wsme.types.bytes): value = value.decode() if isinstance(value, t): return value else: raise ValueError( _("Wrong type. Expected '%(type)s', got '%(value)s'"), type=self.types, value=type(value) ) class JsonPatchType(wtypes.Base): """A complex type that represents a single json-patch operation.""" path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'), mandatory=True) op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'), mandatory=True) value = wsme.wsattr(jsontype, default=wtypes.Unset) @staticmethod def internal_attrs(): """Returns a list of internal attributes. Internal attributes can't be added, replaced or removed. This method may be overwritten by derived class. 
""" return ['/created_at', '/id', '/links', '/updated_at', '/deleted_at', '/uuid'] @staticmethod def mandatory_attrs(): """Returns a list of mandatory attributes. Mandatory attributes can't be removed from the document. This method should be overwritten by derived class. """ return [] @staticmethod def validate(patch): _path = '/{0}'.format(patch.path.split('/')[1]) if _path in patch.internal_attrs(): msg = _("'%s' is an internal attribute and can not be updated") raise wsme.exc.ClientSideError(msg % patch.path) if patch.path in patch.mandatory_attrs() and patch.op == 'remove': msg = _("'%s' is a mandatory attribute and can not be removed") raise wsme.exc.ClientSideError(msg % patch.path) if patch.op != 'remove': if patch.value is wsme.Unset: msg = _("'add' and 'replace' operations needs value") raise wsme.exc.ClientSideError(msg) ret = {'path': patch.path, 'op': patch.op} if patch.value is not wsme.Unset: ret['value'] = patch.value return ret python-watcher-4.0.0/watcher/api/controllers/rest_api_version_history.rst0000664000175000017500000000200213656752270027162 0ustar zuulzuul00000000000000REST API Version History ======================== This documents the changes made to the REST API with every microversion change. The description for each version should be a verbose one which has enough information to be suitable for use in user documentation. 1.0 (Initial version) ----------------------- This is the initial version of the Watcher API which supports microversions. A user can specify a header in the API request:: OpenStack-API-Version: infra-optim where ```` is any valid api version for this API. If no version is specified then the API will behave as if version 1.0 was requested. 1.1 --- Added the parameters ``start_time`` and ``end_time`` to create audit request. Supported for start and end time of continuous audits. 1.2 --- Added ``force`` into create audit request. If ``force`` is true, audit will be executed despite of ongoing actionplan. 
1.3 --- Added list data model API. 1.4 --- Added Watcher webhook API. It can be used to trigger audit with ``event`` type. python-watcher-4.0.0/watcher/api/controllers/base.py0000664000175000017500000001045513656752270022573 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime import functools import microversion_parse from webob import exc import wsme from wsme import types as wtypes class APIBase(wtypes.Base): created_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is created""" updated_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is updated""" deleted_at = wsme.wsattr(datetime.datetime, readonly=True) """The time in UTC at which the object is deleted""" def as_dict(self): """Render this object as a dict of its fields.""" return dict((k, getattr(self, k)) for k in self.fields if hasattr(self, k) and getattr(self, k) != wsme.Unset) def unset_fields_except(self, except_list=None): """Unset fields so they don't appear in the message body. :param except_list: A list of fields that won't be touched. 
""" if except_list is None: except_list = [] for k in self.as_dict(): if k not in except_list: setattr(self, k, wsme.Unset) @functools.total_ordering class Version(object): """API Version object.""" string = 'OpenStack-API-Version' """HTTP Header string carrying the requested version""" min_string = 'OpenStack-API-Minimum-Version' """HTTP response header""" max_string = 'OpenStack-API-Maximum-Version' """HTTP response header""" def __init__(self, headers, default_version, latest_version): """Create an API Version object from the supplied headers. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :raises: webob.HTTPNotAcceptable """ (self.major, self.minor) = Version.parse_headers( headers, default_version, latest_version) def __repr__(self): return '%s.%s' % (self.major, self.minor) @staticmethod def parse_headers(headers, default_version, latest_version): """Determine the API version requested based on the headers supplied. :param headers: webob headers :param default_version: version to use if not specified in headers :param latest_version: version to use if latest is requested :returns: a tuple of (major, minor) version numbers :raises: webob.HTTPNotAcceptable """ version_str = microversion_parse.get_version( headers, service_type='infra-optim') minimal_version = (1, 0) if version_str is None: # If requested header is wrong, Watcher answers with the minimal # supported version. return minimal_version if version_str.lower() == 'latest': parse_str = latest_version else: parse_str = version_str try: version = tuple(int(i) for i in parse_str.split('.')) except ValueError: version = minimal_version # NOTE (alexchadin): Old python-watcherclient sends requests with # value of version header is "1". It should be transformed to 1.0 as # it was supposed to be. 
if len(version) == 1 and version[0] == 1: version = minimal_version if len(version) != 2: raise exc.HTTPNotAcceptable( "Invalid value for %s header" % Version.string) return version def __gt__(self, other): return (self.major, self.minor) > (other.major, other.minor) def __eq__(self, other): return (self.major, self.minor) == (other.major, other.minor) def __ne__(self, other): return not self.__eq__(other) python-watcher-4.0.0/watcher/api/hooks.py0000664000175000017500000000763013656752270020437 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_config import cfg from pecan import hooks from six.moves import http_client from watcher.common import context class ContextHook(hooks.PecanHook): """Configures a request context and attaches it to the request. The following HTTP request headers are used: X-User: Used for context.user. X-User-Id: Used for context.user_id. X-Project-Name: Used for context.project. X-Project-Id: Used for context.project_id. X-Auth-Token: Used for context.auth_token. 
""" def before(self, state): headers = state.request.headers user = headers.get('X-User') user_id = headers.get('X-User-Id') project = headers.get('X-Project-Name') project_id = headers.get('X-Project-Id') domain_id = headers.get('X-User-Domain-Id') domain_name = headers.get('X-User-Domain-Name') auth_token = headers.get('X-Storage-Token') auth_token = headers.get('X-Auth-Token', auth_token) show_deleted = headers.get('X-Show-Deleted') auth_token_info = state.request.environ.get('keystone.token_info') roles = (headers.get('X-Roles', None) and headers.get('X-Roles').split(',')) state.request.context = context.make_context( auth_token=auth_token, auth_token_info=auth_token_info, user=user, user_id=user_id, project=project, project_id=project_id, domain_id=domain_id, domain_name=domain_name, show_deleted=show_deleted, roles=roles) class NoExceptionTracebackHook(hooks.PecanHook): """Workaround rpc.common: deserialize_remote_exception. deserialize_remote_exception builds rpc exception traceback into error message which is then sent to the client. Such behavior is a security concern so this hook is aimed to cut-off traceback from the error message. """ # NOTE(max_lobur): 'after' hook used instead of 'on_error' because # 'on_error' never fired for wsme+pecan pair. wsme @wsexpose decorator # catches and handles all the errors, so 'on_error' dedicated for unhandled # exceptions never fired. def after(self, state): # Omit empty body. Some errors may not have body at this level yet. if not state.response.body: return # Do nothing if there is no error. # Status codes in the range 200 (OK) to 399 (400 = BAD_REQUEST) are not # an error. 
if (http_client.OK <= state.response.status_int < http_client.BAD_REQUEST): return json_body = state.response.json # Do not remove traceback when traceback config is set if cfg.CONF.debug: return faultstring = json_body.get('faultstring') traceback_marker = 'Traceback (most recent call last):' if faultstring and traceback_marker in faultstring: # Cut-off traceback. faultstring = faultstring.split(traceback_marker, 1)[0] # Remove trailing newlines and spaces if any. json_body['faultstring'] = faultstring.rstrip() # Replace the whole json. Cannot change original one because it's # generated on the fly. state.response.json = json_body python-watcher-4.0.0/watcher/api/acl.py0000664000175000017500000000270213656752270020046 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # Copyright (c) 2016 Intel Corp # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Access Control Lists (ACL's) control access the API server.""" from watcher.api.middleware import auth_token from watcher import conf CONF = conf.CONF def install(app, conf, public_routes): """Install ACL check on application. :param app: A WSGI application. :param conf: Settings. Dict'ified and passed to keystonemiddleware :param public_routes: The list of the routes which will be allowed to access without authentication. :return: The same WSGI application with ACL installed. 
""" if not CONF.get('enable_authentication'): return app return auth_token.AuthTokenMiddleware(app, conf=dict(conf.keystone_authtoken), public_api_routes=public_routes) python-watcher-4.0.0/watcher/notifications/0000775000175000017500000000000013656752352021035 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/notifications/__init__.py0000664000175000017500000000237613656752270023155 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Note(gibi): Importing publicly called functions so the caller code does not # need to be changed after we moved these function inside the package # Todo(gibi): remove these imports after legacy notifications using these are # transformed to versioned notifications from watcher.notifications import action # noqa from watcher.notifications import action_plan # noqa from watcher.notifications import audit # noqa from watcher.notifications import exception # noqa from watcher.notifications import goal # noqa from watcher.notifications import service # noqa from watcher.notifications import strategy # noqa python-watcher-4.0.0/watcher/notifications/action_plan.py0000664000175000017500000003241513656752270023702 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from watcher.common import context as wcontext from watcher.common import exception from watcher.notifications import audit as audit_notifications from watcher.notifications import base as notificationbase from watcher.notifications import exception as exception_notifications from watcher.notifications import strategy as strategy_notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields CONF = cfg.CONF @base.WatcherObjectRegistry.register_notification class TerseActionPlanPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('action_plan', 'uuid'), 'state': ('action_plan', 'state'), 'global_efficacy': ('action_plan', 'global_efficacy'), 'created_at': ('action_plan', 'created_at'), 'updated_at': ('action_plan', 'updated_at'), 'deleted_at': ('action_plan', 'deleted_at'), } # Version 1.0: Initial version # Version 1.1: Changed 'global_efficacy' type Dictionary to List VERSION = '1.1' fields = { 'uuid': wfields.UUIDField(), 'state': wfields.StringField(), 'global_efficacy': wfields.FlexibleListOfDictField(nullable=True), 'audit_uuid': wfields.UUIDField(), 'strategy_uuid': wfields.UUIDField(nullable=True), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, action_plan, audit=None, strategy=None, **kwargs): super(TerseActionPlanPayload, self).__init__(audit=audit, strategy=strategy, **kwargs) self.populate_schema(action_plan=action_plan) @base.WatcherObjectRegistry.register_notification class ActionPlanPayload(TerseActionPlanPayload): SCHEMA = { 'uuid': ('action_plan', 'uuid'), 'state': ('action_plan', 'state'), 'global_efficacy': ('action_plan', 'global_efficacy'), 'created_at': ('action_plan', 'created_at'), 'updated_at': ('action_plan', 'updated_at'), 'deleted_at': ('action_plan', 'deleted_at'), } # Version 1.0: Initial version # Vesrsion 
1.1: changed global_efficacy type VERSION = '1.1' fields = { 'audit': wfields.ObjectField('TerseAuditPayload'), 'strategy': wfields.ObjectField('StrategyPayload'), } def __init__(self, action_plan, audit, strategy, **kwargs): if not kwargs.get('audit_uuid'): kwargs['audit_uuid'] = audit.uuid if strategy and not kwargs.get('strategy_uuid'): kwargs['strategy_uuid'] = strategy.uuid super(ActionPlanPayload, self).__init__( action_plan, audit=audit, strategy=strategy, **kwargs) @base.WatcherObjectRegistry.register_notification class ActionPlanStateUpdatePayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': wfields.StringField(nullable=True), 'state': wfields.StringField(nullable=True), } @base.WatcherObjectRegistry.register_notification class ActionPlanCreatePayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = {} def __init__(self, action_plan, audit, strategy): super(ActionPlanCreatePayload, self).__init__( action_plan=action_plan, audit=audit, strategy=strategy) @base.WatcherObjectRegistry.register_notification class ActionPlanUpdatePayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = { 'state_update': wfields.ObjectField('ActionPlanStateUpdatePayload'), } def __init__(self, action_plan, state_update, audit, strategy): super(ActionPlanUpdatePayload, self).__init__( action_plan=action_plan, state_update=state_update, audit=audit, strategy=strategy) @base.WatcherObjectRegistry.register_notification class ActionPlanActionPayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action_plan, audit, strategy, **kwargs): super(ActionPlanActionPayload, self).__init__( action_plan=action_plan, audit=audit, 
strategy=strategy, **kwargs) @base.WatcherObjectRegistry.register_notification class ActionPlanDeletePayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = {} def __init__(self, action_plan, audit, strategy): super(ActionPlanDeletePayload, self).__init__( action_plan=action_plan, audit=audit, strategy=strategy) @base.WatcherObjectRegistry.register_notification class ActionPlanCancelPayload(ActionPlanPayload): # Version 1.0: Initial version # Version 1.1: Changed global_efficacy_type VERSION = '1.1' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action_plan, audit, strategy, **kwargs): super(ActionPlanCancelPayload, self).__init__( action_plan=action_plan, audit=audit, strategy=strategy, **kwargs) @notificationbase.notification_sample('action_plan-execution-error.json') @notificationbase.notification_sample('action_plan-execution-end.json') @notificationbase.notification_sample('action_plan-execution-start.json') @base.WatcherObjectRegistry.register_notification class ActionPlanActionNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanActionPayload') } @notificationbase.notification_sample('action_plan-create.json') @base.WatcherObjectRegistry.register_notification class ActionPlanCreateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanCreatePayload') } @notificationbase.notification_sample('action_plan-update.json') @base.WatcherObjectRegistry.register_notification class ActionPlanUpdateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanUpdatePayload') } @notificationbase.notification_sample('action_plan-delete.json') 
@base.WatcherObjectRegistry.register_notification class ActionPlanDeleteNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanDeletePayload') } @notificationbase.notification_sample('action_plan-cancel-error.json') @notificationbase.notification_sample('action_plan-cancel-end.json') @notificationbase.notification_sample('action_plan-cancel-start.json') @base.WatcherObjectRegistry.register_notification class ActionPlanCancelNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionPlanCancelPayload') } def _get_common_payload(action_plan): audit = None strategy = None try: audit = action_plan.audit strategy = action_plan.strategy except NotImplementedError: raise exception.EagerlyLoadedActionPlanRequired( action_plan=action_plan.uuid) goal = objects.Goal.get( wcontext.make_context(show_deleted=True), audit.goal_id) audit_payload = audit_notifications.TerseAuditPayload( audit=audit, goal_uuid=goal.uuid) strategy_payload = strategy_notifications.StrategyPayload( strategy=strategy) return audit_payload, strategy_payload def send_create(context, action_plan, service='infra-optim', host=None): """Emit an action_plan.create notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) versioned_payload = ActionPlanCreatePayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, ) notification = ActionPlanCreateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action_plan', action=wfields.NotificationAction.CREATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_update(context, action_plan, service='infra-optim', host=None, old_state=None): """Emit an action_plan.update notification.""" 
audit_payload, strategy_payload = _get_common_payload(action_plan) state_update = ActionPlanStateUpdatePayload( old_state=old_state, state=action_plan.state if old_state else None) versioned_payload = ActionPlanUpdatePayload( action_plan=action_plan, state_update=state_update, audit=audit_payload, strategy=strategy_payload, ) notification = ActionPlanUpdateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action_plan', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_delete(context, action_plan, service='infra-optim', host=None): """Emit an action_plan.delete notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) versioned_payload = ActionPlanDeletePayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, ) notification = ActionPlanDeleteNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action_plan', action=wfields.NotificationAction.DELETE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_action_notification(context, action_plan, action, phase=None, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action_plan action notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionPlanActionPayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, fault=fault, ) notification = ActionPlanActionNotification( priority=priority, event_type=notificationbase.EventType( object='action_plan', action=action, phase=phase), 
publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_cancel_notification(context, action_plan, action, phase=None, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action_plan cancel notification.""" audit_payload, strategy_payload = _get_common_payload(action_plan) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionPlanCancelPayload( action_plan=action_plan, audit=audit_payload, strategy=strategy_payload, fault=fault, ) notification = ActionPlanCancelNotification( priority=priority, event_type=notificationbase.EventType( object='action_plan', action=action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) python-watcher-4.0.0/watcher/notifications/action.py0000664000175000017500000002666313656752270022700 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from watcher.common import context as wcontext from watcher.common import exception from watcher.notifications import action_plan as ap_notifications from watcher.notifications import base as notificationbase from watcher.notifications import exception as exception_notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields CONF = cfg.CONF @base.WatcherObjectRegistry.register_notification class ActionPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('action', 'uuid'), 'action_type': ('action', 'action_type'), 'input_parameters': ('action', 'input_parameters'), 'state': ('action', 'state'), 'parents': ('action', 'parents'), 'created_at': ('action', 'created_at'), 'updated_at': ('action', 'updated_at'), 'deleted_at': ('action', 'deleted_at'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'uuid': wfields.UUIDField(), 'action_type': wfields.StringField(nullable=False), 'input_parameters': wfields.DictField(nullable=False, default={}), 'state': wfields.StringField(nullable=False), 'parents': wfields.ListOfUUIDsField(nullable=False, default=[]), 'action_plan_uuid': wfields.UUIDField(), 'action_plan': wfields.ObjectField('TerseActionPlanPayload'), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, action, **kwargs): super(ActionPayload, self).__init__(**kwargs) self.populate_schema(action=action) @base.WatcherObjectRegistry.register_notification class ActionStateUpdatePayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': wfields.StringField(nullable=True), 'state': wfields.StringField(nullable=True), } @base.WatcherObjectRegistry.register_notification class ActionCreatePayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = {} def 
__init__(self, action, action_plan): super(ActionCreatePayload, self).__init__( action=action, action_plan=action_plan) @base.WatcherObjectRegistry.register_notification class ActionUpdatePayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'state_update': wfields.ObjectField('ActionStateUpdatePayload'), } def __init__(self, action, state_update, action_plan): super(ActionUpdatePayload, self).__init__( action=action, state_update=state_update, action_plan=action_plan) @base.WatcherObjectRegistry.register_notification class ActionExecutionPayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action, action_plan, **kwargs): super(ActionExecutionPayload, self).__init__( action=action, action_plan=action_plan, **kwargs) @base.WatcherObjectRegistry.register_notification class ActionCancelPayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'fault': wfields.ObjectField('ExceptionPayload', nullable=True), } def __init__(self, action, action_plan, **kwargs): super(ActionCancelPayload, self).__init__( action=action, action_plan=action_plan, **kwargs) @base.WatcherObjectRegistry.register_notification class ActionDeletePayload(ActionPayload): # Version 1.0: Initial version VERSION = '1.0' fields = {} def __init__(self, action, action_plan): super(ActionDeletePayload, self).__init__( action=action, action_plan=action_plan) @notificationbase.notification_sample('action-execution-error.json') @notificationbase.notification_sample('action-execution-end.json') @notificationbase.notification_sample('action-execution-start.json') @base.WatcherObjectRegistry.register_notification class ActionExecutionNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionExecutionPayload') } 
@notificationbase.notification_sample('action-create.json') @base.WatcherObjectRegistry.register_notification class ActionCreateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionCreatePayload') } @notificationbase.notification_sample('action-update.json') @base.WatcherObjectRegistry.register_notification class ActionUpdateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionUpdatePayload') } @notificationbase.notification_sample('action-delete.json') @base.WatcherObjectRegistry.register_notification class ActionDeleteNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionDeletePayload') } @notificationbase.notification_sample('action-cancel-error.json') @notificationbase.notification_sample('action-cancel-end.json') @notificationbase.notification_sample('action-cancel-start.json') @base.WatcherObjectRegistry.register_notification class ActionCancelNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ActionCancelPayload') } def _get_action_plan_payload(action): action_plan = None strategy_uuid = None audit = None try: action_plan = action.action_plan audit = objects.Audit.get(wcontext.make_context(show_deleted=True), action_plan.audit_id) if audit.strategy_id: strategy_uuid = objects.Strategy.get( wcontext.make_context(show_deleted=True), audit.strategy_id).uuid except NotImplementedError: raise exception.EagerlyLoadedActionRequired(action=action.uuid) action_plan_payload = ap_notifications.TerseActionPlanPayload( action_plan=action_plan, audit_uuid=audit.uuid, strategy_uuid=strategy_uuid) return action_plan_payload def send_create(context, action, service='infra-optim', host=None): """Emit an action.create 
notification.""" action_plan_payload = _get_action_plan_payload(action) versioned_payload = ActionCreatePayload( action=action, action_plan=action_plan_payload, ) notification = ActionCreateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action', action=wfields.NotificationAction.CREATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_update(context, action, service='infra-optim', host=None, old_state=None): """Emit an action.update notification.""" action_plan_payload = _get_action_plan_payload(action) state_update = ActionStateUpdatePayload( old_state=old_state, state=action.state if old_state else None) versioned_payload = ActionUpdatePayload( action=action, state_update=state_update, action_plan=action_plan_payload, ) notification = ActionUpdateNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_delete(context, action, service='infra-optim', host=None): """Emit an action.delete notification.""" action_plan_payload = _get_action_plan_payload(action) versioned_payload = ActionDeletePayload( action=action, action_plan=action_plan_payload, ) notification = ActionDeleteNotification( priority=wfields.NotificationPriority.INFO, event_type=notificationbase.EventType( object='action', action=wfields.NotificationAction.DELETE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_execution_notification(context, action, notification_action, phase, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action execution 
notification.""" action_plan_payload = _get_action_plan_payload(action) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionExecutionPayload( action=action, action_plan=action_plan_payload, fault=fault, ) notification = ActionExecutionNotification( priority=priority, event_type=notificationbase.EventType( object='action', action=notification_action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) def send_cancel_notification(context, action, notification_action, phase, priority=wfields.NotificationPriority.INFO, service='infra-optim', host=None): """Emit an action cancel notification.""" action_plan_payload = _get_action_plan_payload(action) fault = None if phase == wfields.NotificationPhase.ERROR: fault = exception_notifications.ExceptionPayload.from_exception() versioned_payload = ActionCancelPayload( action=action, action_plan=action_plan_payload, fault=fault, ) notification = ActionCancelNotification( priority=priority, event_type=notificationbase.EventType( object='action', action=notification_action, phase=phase), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) python-watcher-4.0.0/watcher/notifications/exception.py0000664000175000017500000000370613656752270023412 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import inspect import sys from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register_notification class ExceptionPayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'module_name': wfields.StringField(), 'function_name': wfields.StringField(), 'exception': wfields.StringField(), 'exception_message': wfields.StringField() } @classmethod def from_exception(cls, fault=None): fault = fault or sys.exc_info()[1] trace = inspect.trace()[-1] # TODO(gibi): apply strutils.mask_password on exception_message and # consider emitting the exception_message only if the safe flag is # true in the exception like in the REST API return cls( function_name=trace[3], module_name=inspect.getmodule(trace[0]).__name__, exception=fault.__class__.__name__, exception_message=str(fault)) @notificationbase.notification_sample('infra-optim-exception.json') @base.WatcherObjectRegistry.register_notification class ExceptionNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ExceptionPayload') } python-watcher-4.0.0/watcher/notifications/strategy.py0000664000175000017500000000352513656752270023255 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register_notification class StrategyPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('strategy', 'uuid'), 'name': ('strategy', 'name'), 'display_name': ('strategy', 'display_name'), 'parameters_spec': ('strategy', 'parameters_spec'), 'created_at': ('strategy', 'created_at'), 'updated_at': ('strategy', 'updated_at'), 'deleted_at': ('strategy', 'deleted_at'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'parameters_spec': wfields.FlexibleDictField(nullable=True), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, strategy, **kwargs): super(StrategyPayload, self).__init__(**kwargs) self.populate_schema(strategy=strategy) python-watcher-4.0.0/watcher/notifications/goal.py0000664000175000017500000000346313656752270022336 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register_notification class GoalPayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'uuid': ('goal', 'uuid'), 'name': ('goal', 'name'), 'display_name': ('goal', 'display_name'), 'efficacy_specification': ('goal', 'efficacy_specification'), 'created_at': ('goal', 'created_at'), 'updated_at': ('goal', 'updated_at'), 'deleted_at': ('goal', 'deleted_at'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'efficacy_specification': wfields.FlexibleListOfDictField(), 'created_at': wfields.DateTimeField(nullable=True), 'updated_at': wfields.DateTimeField(nullable=True), 'deleted_at': wfields.DateTimeField(nullable=True), } def __init__(self, goal, **kwargs): super(GoalPayload, self).__init__(**kwargs) self.populate_schema(goal=goal) python-watcher-4.0.0/watcher/notifications/service.py0000664000175000017500000000745513656752270023061 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from watcher.notifications import base as notificationbase from watcher.objects import base from watcher.objects import fields as wfields from watcher.objects import service as o_service CONF = cfg.CONF @base.WatcherObjectRegistry.register_notification class ServicePayload(notificationbase.NotificationPayloadBase): SCHEMA = { 'sevice_host': ('failed_service', 'host'), 'name': ('failed_service', 'name'), 'last_seen_up': ('failed_service', 'last_seen_up'), } # Version 1.0: Initial version VERSION = '1.0' fields = { 'sevice_host': wfields.StringField(), 'name': wfields.StringField(), 'last_seen_up': wfields.DateTimeField(nullable=True), } def __init__(self, failed_service, status_update, **kwargs): super(ServicePayload, self).__init__( failed_service=failed_service, status_update=status_update, **kwargs) self.populate_schema(failed_service=failed_service) @base.WatcherObjectRegistry.register_notification class ServiceStatusUpdatePayload(notificationbase.NotificationPayloadBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'old_state': wfields.StringField(nullable=True), 'state': wfields.StringField(nullable=True), } @base.WatcherObjectRegistry.register_notification class ServiceUpdatePayload(ServicePayload): # Version 1.0: Initial version VERSION = '1.0' fields = { 'status_update': wfields.ObjectField('ServiceStatusUpdatePayload'), } def __init__(self, failed_service, status_update): super(ServiceUpdatePayload, self).__init__( failed_service=failed_service, status_update=status_update) @notificationbase.notification_sample('service-update.json') @base.WatcherObjectRegistry.register_notification class ServiceUpdateNotification(notificationbase.NotificationBase): # Version 1.0: Initial version VERSION = '1.0' fields = { 'payload': wfields.ObjectField('ServiceUpdatePayload') } def send_service_update(context, failed_service, state, service='infra-optim', host=None): """Emit an service failed notification.""" if state == 
o_service.ServiceStatus.FAILED: priority = wfields.NotificationPriority.WARNING status_update = ServiceStatusUpdatePayload( old_state=o_service.ServiceStatus.ACTIVE, state=o_service.ServiceStatus.FAILED) else: priority = wfields.NotificationPriority.INFO status_update = ServiceStatusUpdatePayload( old_state=o_service.ServiceStatus.FAILED, state=o_service.ServiceStatus.ACTIVE) versioned_payload = ServiceUpdatePayload( failed_service=failed_service, status_update=status_update ) notification = ServiceUpdateNotification( priority=priority, event_type=notificationbase.EventType( object='service', action=wfields.NotificationAction.UPDATE), publisher=notificationbase.NotificationPublisher( host=host or CONF.host, binary=service), payload=versioned_payload) notification.emit(context) python-watcher-4.0.0/watcher/notifications/audit.py0000664000175000017500000002744713656752270022532 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# Versioned notification payloads and emit helpers for Audit objects.
from oslo_config import cfg

from watcher.common import exception
from watcher.notifications import base as notificationbase
from watcher.notifications import exception as exception_notifications
from watcher.notifications import goal as goal_notifications
from watcher.notifications import strategy as strategy_notifications
from watcher.objects import base
from watcher.objects import fields as wfields

CONF = cfg.CONF


@base.WatcherObjectRegistry.register_notification
class TerseAuditPayload(notificationbase.NotificationPayloadBase):
    # Maps payload fields to attributes of the 'audit' object given to
    # populate_schema(); goal_uuid/strategy_uuid are set via __init__ kwargs.
    SCHEMA = {
        'uuid': ('audit', 'uuid'),
        'name': ('audit', 'name'),
        'audit_type': ('audit', 'audit_type'),
        'state': ('audit', 'state'),
        'parameters': ('audit', 'parameters'),
        'interval': ('audit', 'interval'),
        'scope': ('audit', 'scope'),
        'auto_trigger': ('audit', 'auto_trigger'),
        'next_run_time': ('audit', 'next_run_time'),
        'created_at': ('audit', 'created_at'),
        'updated_at': ('audit', 'updated_at'),
        'deleted_at': ('audit', 'deleted_at'),
    }

    # Version 1.0: Initial version
    # Version 1.1: Added 'auto_trigger' boolean field,
    #              Added 'next_run_time' DateTime field,
    #              'interval' type has been changed from Integer to String
    # Version 1.2: Added 'name' string field
    VERSION = '1.2'

    fields = {
        'uuid': wfields.UUIDField(),
        'name': wfields.StringField(),
        'audit_type': wfields.StringField(),
        'state': wfields.StringField(),
        'parameters': wfields.FlexibleDictField(nullable=True),
        'interval': wfields.StringField(nullable=True),
        'scope': wfields.FlexibleListOfDictField(nullable=True),
        'goal_uuid': wfields.UUIDField(),
        'strategy_uuid': wfields.UUIDField(nullable=True),
        'auto_trigger': wfields.BooleanField(),
        'next_run_time': wfields.DateTimeField(nullable=True),
        'created_at': wfields.DateTimeField(nullable=True),
        'updated_at': wfields.DateTimeField(nullable=True),
        'deleted_at': wfields.DateTimeField(nullable=True),
    }

    def __init__(self, audit, goal_uuid, strategy_uuid=None, **kwargs):
        super(TerseAuditPayload, self).__init__(
            goal_uuid=goal_uuid, strategy_uuid=strategy_uuid, **kwargs)
        self.populate_schema(audit=audit)


@base.WatcherObjectRegistry.register_notification
class AuditPayload(TerseAuditPayload):
    # Same SCHEMA as TerseAuditPayload; adds embedded goal/strategy payloads.
    SCHEMA = {
        'uuid': ('audit', 'uuid'),
        'name': ('audit', 'name'),
        'audit_type': ('audit', 'audit_type'),
        'state': ('audit', 'state'),
        'parameters': ('audit', 'parameters'),
        'interval': ('audit', 'interval'),
        'scope': ('audit', 'scope'),
        'auto_trigger': ('audit', 'auto_trigger'),
        'next_run_time': ('audit', 'next_run_time'),
        'created_at': ('audit', 'created_at'),
        'updated_at': ('audit', 'updated_at'),
        'deleted_at': ('audit', 'deleted_at'),
    }

    # Version 1.0: Initial version
    # Version 1.1: Added 'auto_trigger' field,
    #              Added 'next_run_time' field
    # Version 1.2: Added 'name' string field
    VERSION = '1.2'

    fields = {
        'goal': wfields.ObjectField('GoalPayload'),
        'strategy': wfields.ObjectField('StrategyPayload', nullable=True),
    }

    def __init__(self, audit, goal, strategy=None, **kwargs):
        # Derive the *_uuid fields from the payload objects when the caller
        # did not provide them explicitly.
        if not kwargs.get('goal_uuid'):
            kwargs['goal_uuid'] = goal.uuid
        if strategy and not kwargs.get('strategy_uuid'):
            kwargs['strategy_uuid'] = strategy.uuid
        super(AuditPayload, self).__init__(
            audit=audit, goal=goal,
            strategy=strategy, **kwargs)


@base.WatcherObjectRegistry.register_notification
class AuditStateUpdatePayload(notificationbase.NotificationPayloadBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'old_state': wfields.StringField(nullable=True),
        'state': wfields.StringField(nullable=True),
    }


@base.WatcherObjectRegistry.register_notification
class AuditCreatePayload(AuditPayload):
    # Version 1.0: Initial version
    # Version 1.1: Added 'auto_trigger' field,
    #              Added 'next_run_time' field
    VERSION = '1.1'
    fields = {}

    def __init__(self, audit, goal, strategy):
        super(AuditCreatePayload, self).__init__(
            audit=audit,
            goal=goal,
            goal_uuid=goal.uuid,
            strategy=strategy)


@base.WatcherObjectRegistry.register_notification
class AuditUpdatePayload(AuditPayload):
    # Version 1.0: Initial version
    # Version 1.1: Added 'auto_trigger' field,
    #              Added 'next_run_time' field
    VERSION = '1.1'

    fields = {
        'state_update': wfields.ObjectField('AuditStateUpdatePayload'),
    }

    def __init__(self, audit, state_update, goal, strategy):
        super(AuditUpdatePayload, self).__init__(
            audit=audit,
            state_update=state_update,
            goal=goal,
            goal_uuid=goal.uuid,
            strategy=strategy)


@base.WatcherObjectRegistry.register_notification
class AuditActionPayload(AuditPayload):
    # Version 1.0: Initial version
    # Version 1.1: Added 'auto_trigger' field,
    #              Added 'next_run_time' field
    VERSION = '1.1'

    fields = {
        'fault': wfields.ObjectField('ExceptionPayload', nullable=True),
    }

    def __init__(self, audit, goal, strategy, **kwargs):
        super(AuditActionPayload, self).__init__(
            audit=audit,
            goal=goal,
            goal_uuid=goal.uuid,
            strategy=strategy,
            **kwargs)


@base.WatcherObjectRegistry.register_notification
class AuditDeletePayload(AuditPayload):
    # Version 1.0: Initial version
    # Version 1.1: Added 'auto_trigger' field,
    #              Added 'next_run_time' field
    VERSION = '1.1'
    fields = {}

    def __init__(self, audit, goal, strategy):
        super(AuditDeletePayload, self).__init__(
            audit=audit,
            goal=goal,
            goal_uuid=goal.uuid,
            strategy=strategy)


@notificationbase.notification_sample('audit-strategy-error.json')
@notificationbase.notification_sample('audit-strategy-end.json')
@notificationbase.notification_sample('audit-strategy-start.json')
@base.WatcherObjectRegistry.register_notification
class AuditActionNotification(notificationbase.NotificationBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'payload': wfields.ObjectField('AuditActionPayload')
    }


@notificationbase.notification_sample('audit-create.json')
@base.WatcherObjectRegistry.register_notification
class AuditCreateNotification(notificationbase.NotificationBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'payload': wfields.ObjectField('AuditCreatePayload')
    }


@notificationbase.notification_sample('audit-update.json')
@base.WatcherObjectRegistry.register_notification
class AuditUpdateNotification(notificationbase.NotificationBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'payload': wfields.ObjectField('AuditUpdatePayload')
    }


@notificationbase.notification_sample('audit-delete.json')
@base.WatcherObjectRegistry.register_notification
class AuditDeleteNotification(notificationbase.NotificationBase):
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'payload': wfields.ObjectField('AuditDeletePayload')
    }


def _get_common_payload(audit):
    # Build the (goal, strategy) sub-payloads shared by every audit
    # notification; requires the audit to have been eagerly loaded.
    goal = None
    strategy = None
    try:
        goal = audit.goal
        if audit.strategy_id:
            strategy = audit.strategy
    except NotImplementedError:
        raise exception.EagerlyLoadedAuditRequired(audit=audit.uuid)

    goal_payload = goal_notifications.GoalPayload(goal=goal)

    strategy_payload = None
    if strategy:
        strategy_payload = strategy_notifications.StrategyPayload(
            strategy=strategy)

    return goal_payload, strategy_payload


def send_create(context, audit, service='infra-optim', host=None):
    """Emit an audit.create notification."""
    goal_payload, strategy_payload = _get_common_payload(audit)

    versioned_payload = AuditCreatePayload(
        audit=audit,
        goal=goal_payload,
        strategy=strategy_payload,
    )

    notification = AuditCreateNotification(
        priority=wfields.NotificationPriority.INFO,
        event_type=notificationbase.EventType(
            object='audit',
            action=wfields.NotificationAction.CREATE),
        publisher=notificationbase.NotificationPublisher(
            host=host or CONF.host,
            binary=service),
        payload=versioned_payload)

    notification.emit(context)


def send_update(context, audit, service='infra-optim',
                host=None, old_state=None):
    """Emit an audit.update notification."""
    goal_payload, strategy_payload = _get_common_payload(audit)

    # When no old_state is supplied the transition is unknown, so both
    # sides of the state_update are left as None/old_state only.
    state_update = AuditStateUpdatePayload(
        old_state=old_state,
        state=audit.state if old_state else None)

    versioned_payload = AuditUpdatePayload(
        audit=audit,
        state_update=state_update,
        goal=goal_payload,
        strategy=strategy_payload,
    )

    notification = AuditUpdateNotification(
        priority=wfields.NotificationPriority.INFO,
        event_type=notificationbase.EventType(
            object='audit',
            action=wfields.NotificationAction.UPDATE),
        publisher=notificationbase.NotificationPublisher(
            host=host or CONF.host,
            binary=service),
        payload=versioned_payload)

    notification.emit(context)


def send_delete(context, audit, service='infra-optim', host=None):
    # Emit an audit.delete notification.
    goal_payload, strategy_payload = _get_common_payload(audit)

    versioned_payload = AuditDeletePayload(
        audit=audit,
        goal=goal_payload,
        strategy=strategy_payload,
    )

    notification = AuditDeleteNotification(
        priority=wfields.NotificationPriority.INFO,
        event_type=notificationbase.EventType(
            object='audit',
            action=wfields.NotificationAction.DELETE),
        publisher=notificationbase.NotificationPublisher(
            host=host or CONF.host,
            binary=service),
        payload=versioned_payload)

    notification.emit(context)


def send_action_notification(context, audit, action, phase=None,
                             priority=wfields.NotificationPriority.INFO,
                             service='infra-optim', host=None):
    """Emit an audit action notification."""
    goal_payload, strategy_payload = _get_common_payload(audit)

    # On the ERROR phase, capture the currently handled exception so the
    # notification carries the fault details.
    fault = None
    if phase == wfields.NotificationPhase.ERROR:
        fault = exception_notifications.ExceptionPayload.from_exception()

    versioned_payload = AuditActionPayload(
        audit=audit,
        goal=goal_payload,
        strategy=strategy_payload,
        fault=fault,
    )

    notification = AuditActionNotification(
        priority=priority,
        event_type=notificationbase.EventType(
            object='audit',
            action=action,
            phase=phase),
        publisher=notificationbase.NotificationPublisher(
            host=host or CONF.host,
            binary=service),
        payload=versioned_payload)

    notification.emit(context)
python-watcher-4.0.0/watcher/notifications/base.py0000664000175000017500000001763613656752270022325 0ustar zuulzuul00000000000000
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_log import log

from watcher.common import exception
from watcher.common import rpc
from watcher.objects import base
from watcher.objects import fields as wfields

CONF = cfg.CONF
LOG = log.getLogger(__name__)

# Definition of notification levels in increasing order of severity
NOTIFY_LEVELS = {
    wfields.NotificationPriority.DEBUG: 0,
    wfields.NotificationPriority.INFO: 1,
    wfields.NotificationPriority.WARNING: 2,
    wfields.NotificationPriority.ERROR: 3,
    wfields.NotificationPriority.CRITICAL: 4
}


@base.WatcherObjectRegistry.register_if(False)
class NotificationObject(base.WatcherObject):
    """Base class for every notification related versioned object."""
    # Version 1.0: Initial version
    VERSION = '1.0'

    def __init__(self, **kwargs):
        super(NotificationObject, self).__init__(**kwargs)
        # The notification objects are created on the fly when watcher emits
        # the notification. This causes that every object shows every field as
        # changed. We don't want to send this meaningless information so we
        # reset the object after creation.
        self.obj_reset_changes(recursive=False)

    def save(self, context):
        # Notifications are transient; persisting them is not supported.
        raise exception.UnsupportedError()

    def obj_load_attr(self, attrname):
        # Lazy-loading is not supported on notification objects.
        raise exception.UnsupportedError()


@base.WatcherObjectRegistry.register_notification
class EventType(NotificationObject):

    # Version 1.0: Initial version
    # Version 1.1: Added STRATEGY action in NotificationAction enum
    # Version 1.2: Added PLANNER action in NotificationAction enum
    # Version 1.3: Added EXECUTION action in NotificationAction enum
    VERSION = '1.3'

    fields = {
        'object': wfields.StringField(),
        'action': wfields.NotificationActionField(),
        'phase': wfields.NotificationPhaseField(nullable=True),
    }

    def to_notification_event_type_field(self):
        """Serialize the object to the wire format."""
        # Produces "<object>.<action>" or "<object>.<action>.<phase>".
        s = '%s.%s' % (self.object, self.action)
        if self.obj_attr_is_set('phase'):
            s += '.%s' % self.phase
        return s


@base.WatcherObjectRegistry.register_if(False)
class NotificationPayloadBase(NotificationObject):
    """Base class for the payload of versioned notifications."""
    # SCHEMA defines how to populate the payload fields. It is a dictionary
    # where every key value pair has the following format:
    # <payload_field_name>: (<data_source_name>, <data_source_field_name>)
    # The <payload_field_name> is the name where the data will be stored in
    # the payload object; this field has to be defined as a field of the
    # payload. The <data_source_name> shall refer to name of the parameter
    # passed as kwarg to the payload's populate_schema() call and this object
    # will be used as the source of the data. The <data_source_field_name>
    # shall be a valid field of the passed argument.
    # The SCHEMA needs to be applied with the populate_schema() call before
    # the notification can be emitted.
    # The value of the payload.<payload_field_name> field will be set by the
    # <data_source_name>.<data_source_field_name> field. The
    # <data_source_name> will not be part of the payload object internal or
    # external representation.
    # Payload fields that are not set by the SCHEMA can be filled in the same
    # way as in any versioned object.
    SCHEMA = {}
    # Version 1.0: Initial version
    VERSION = '1.0'

    def __init__(self, **kwargs):
        super(NotificationPayloadBase, self).__init__(**kwargs)
        # A payload with an empty SCHEMA needs no population step.
        self.populated = not self.SCHEMA

    def populate_schema(self, **kwargs):
        """Populate the object based on the SCHEMA and the source objects

        :param kwargs: A dict contains the source object at the key defined in
                       the SCHEMA
        """
        for key, (obj, field) in self.SCHEMA.items():
            source = kwargs[obj]
            if source.obj_attr_is_set(field):
                setattr(self, key, getattr(source, field))
        self.populated = True
        # the schema population will create changed fields but we don't need
        # this information in the notification
        self.obj_reset_changes(recursive=False)


@base.WatcherObjectRegistry.register_notification
class NotificationPublisher(NotificationObject):

    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'host': wfields.StringField(nullable=False),
        'binary': wfields.StringField(nullable=False),
    }


@base.WatcherObjectRegistry.register_if(False)
class NotificationBase(NotificationObject):
    """Base class for versioned notifications.

    Every subclass shall define a 'payload' field.
    """
    # Version 1.0: Initial version
    VERSION = '1.0'

    fields = {
        'priority': wfields.NotificationPriorityField(),
        'event_type': wfields.ObjectField('EventType'),
        'publisher': wfields.ObjectField('NotificationPublisher'),
    }

    def save(self, context):
        raise exception.UnsupportedError()

    def obj_load_attr(self, attrname):
        raise exception.UnsupportedError()

    def _should_notify(self):
        """Determine whether the notification should be sent.

        A notification is sent when the level of the notification is
        greater than or equal to the level specified in the
        configuration, in the increasing order of DEBUG, INFO, WARNING,
        ERROR, CRITICAL.

        :return: True if notification should be sent, False otherwise.
        """
        if not CONF.notification_level:
            return False
        return (NOTIFY_LEVELS[self.priority] >=
                NOTIFY_LEVELS[CONF.notification_level])

    def _emit(self, context, event_type, publisher_id, payload):
        # Dispatch via the oslo.messaging notifier method matching this
        # notification's priority (debug/info/warning/...).
        notifier = rpc.get_notifier(publisher_id)
        notify = getattr(notifier, self.priority)
        LOG.debug("Emitting notification `%s`", event_type)
        notify(context, event_type=event_type, payload=payload)

    def emit(self, context):
        """Send the notification."""
        if not self._should_notify():
            return
        if not self.payload.populated:
            raise exception.NotificationPayloadError(
                class_name=self.__class__.__name__)
        # Note(gibi): notification payload will be a newly populated object
        # therefore every field of it will look changed so this does not carry
        # any extra information so we drop this from the payload.
        self.payload.obj_reset_changes(recursive=False)
        self._emit(
            context,
            event_type=self.event_type.to_notification_event_type_field(),
            publisher_id='%s:%s' % (self.publisher.binary,
                                    self.publisher.host),
            payload=self.payload.obj_to_primitive())


def notification_sample(sample):
    """Provide a notification sample of the decorated notification.

    Class decorator to attach the notification sample information to the
    notification object for documentation generation purposes.

    :param sample: the path of the sample json file relative to the
                   doc/notification_samples/ directory in the watcher
                   repository root.
    """
    def wrap(cls):
        if not getattr(cls, 'samples', None):
            cls.samples = [sample]
        else:
            cls.samples.append(sample)
        return cls
    return wrap
python-watcher-4.0.0/watcher/decision_engine/0000775000175000017500000000000013656752352021306 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/__init__.py0000664000175000017500000000000013656752270023404 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/scoring/0000775000175000017500000000000013656752352022752 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/scoring/__init__.py0000664000175000017500000000000013656752270025050 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/scoring/scoring_factory.py0000664000175000017500000000711713656752270026524 0ustar zuulzuul00000000000000
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 Intel
#
# Authors: Tomasz Kaczynski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
A module providing helper methods to work with Scoring Engines.
"""

from oslo_log import log

from watcher._i18n import _
from watcher.decision_engine.loading import default

LOG = log.getLogger(__name__)

# Process-wide cache of scoring engines, keyed by engine name; lazily
# filled by _reload_scoring_engines().
_scoring_engine_map = None


def get_scoring_engine(scoring_engine_name):
    """Returns a Scoring Engine by its name.

    Method retrieves a Scoring Engine instance by its name. Scoring Engine
    instances are being cached in memory to avoid enumerating the Stevedore
    plugins on each call.

    When called for the first time, it reloads the cache.

    :return: A Scoring Engine instance with a given name
    :rtype: :class:
        `watcher.decision_engine.scoring.scoring_engine.ScoringEngine`
    """
    global _scoring_engine_map

    _reload_scoring_engines()
    scoring_engine = _scoring_engine_map.get(scoring_engine_name)
    if scoring_engine is None:
        raise KeyError(_('Scoring Engine with name=%s not found')
                       % scoring_engine_name)

    return scoring_engine


def get_scoring_engine_list():
    """Returns a list of Scoring Engine instances.

    The main use case for this method is discoverability, so the Scoring
    Engine list is always reloaded before returning any results.

    Frequent calling of this method might have a negative performance impact.

    :return: A list of all available Scoring Engine instances
    :rtype: List of :class:
        `watcher.decision_engine.scoring.scoring_engine.ScoringEngine`
    """
    global _scoring_engine_map

    _reload_scoring_engines(True)
    return _scoring_engine_map.values()


def _reload_scoring_engines(refresh=False):
    """Reloads Scoring Engines from Stevedore plugins to memory.

    Please note that two Stevedore entry points are used:
    - watcher_scoring_engines: for simple plugin implementations
    - watcher_scoring_engine_containers: for container plugins, which enable
      the dynamic scenarios (its get_scoring_engine_list method might return
      different values on each call)
    """
    global _scoring_engine_map

    if _scoring_engine_map is None or refresh:
        LOG.debug("Reloading Scoring Engine plugins")
        engines = default.DefaultScoringLoader().list_available()
        _scoring_engine_map = dict()
        for name in engines.keys():
            se_impl = default.DefaultScoringLoader().load(name)
            LOG.debug("Found Scoring Engine plugin: %s", se_impl.get_name())
            _scoring_engine_map[se_impl.get_name()] = se_impl

        engine_containers = \
            default.DefaultScoringContainerLoader().list_available()
        for container_id, container_cls in engine_containers.items():
            LOG.debug("Found Scoring Engine container plugin: %s",
                      container_id)
            for se in container_cls.get_scoring_engine_list():
                LOG.debug("Found Scoring Engine plugin: %s",
                          se.get_name())
                _scoring_engine_map[se.get_name()] = se
python-watcher-4.0.0/watcher/decision_engine/scoring/dummy_scorer.py0000664000175000017500000001456713656752270026044 0ustar zuulzuul00000000000000
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 Intel
#
# Authors: Tomasz Kaczynski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import units from watcher._i18n import _ from watcher.decision_engine.scoring import base LOG = log.getLogger(__name__) class DummyScorer(base.ScoringEngine): """Sample Scoring Engine implementing simplified workload classification. Typically a scoring engine would be implemented using machine learning techniques. For example, for workload classification problem the solution could consist of the following steps: 1. Define a problem to solve: we want to detect the workload on the machine based on the collected metrics like power consumption, temperature, CPU load, memory usage, disk usage, network usage, etc. 2. The workloads could be predefined, e.g. IDLE, CPU-INTENSIVE, MEMORY-INTENSIVE, IO-BOUND, ... Or we could let the ML algorithm to find the workloads based on the learning data provided. The decision here leads to learning algorithm used (supervised vs. non-supervised learning). 3. Collect metrics from sample servers (learning data). 4. Define the analytical model, pick ML framework and algorithm. 5. Apply learning data to the data model. Once taught, the data model becomes a scoring engine and can start doing predictions or classifications. 6. Wrap up the scoring engine with the class like this one, so it has a standard interface and can be used inside Watcher. This class is a greatly very simplified version of the above model. The goal is to provide an example how such class could be implemented and used in Watcher, without adding additional dependencies like machine learning frameworks (which can be quite heavy) or over-complicating it's internal implementation, which can distract from looking at the overall picture. That said, this class implements a workload classification "manually" (in plain python code) and is not intended to be used in production. 
""" # Constants defining column indices for the input data PROCESSOR_TIME_PERC = 0 MEM_TOTAL_BYTES = 1 MEM_AVAIL_BYTES = 2 MEM_PAGE_READS_PER_SEC = 3 MEM_PAGE_WRITES_PER_SEC = 4 DISK_READ_BYTES_PER_SEC = 5 DISK_WRITE_BYTES_PER_SEC = 6 NET_BYTES_RECEIVED_PER_SEC = 7 NET_BYTES_SENT_PER_SEC = 8 # Types of workload WORKLOAD_IDLE = 0 WORKLOAD_CPU = 1 WORKLOAD_MEM = 2 WORKLOAD_DISK = 3 def get_name(self): return 'dummy_scorer' def get_description(self): return 'Dummy workload classifier' def get_metainfo(self): """Metadata about input/output format of this scoring engine. This information is used in strategy using this scoring engine to prepare the input information and to understand the results. """ return """{ "feature_columns": [ "proc-processor-time-%", "mem-total-bytes", "mem-avail-bytes", "mem-page-reads/sec", "mem-page-writes/sec", "disk-read-bytes/sec", "disk-write-bytes/sec", "net-bytes-received/sec", "net-bytes-sent/sec"], "result_columns": [ "workload", "idle-probability", "cpu-probability", "memory-probability", "disk-probability"], "workloads": [ "idle", "cpu-intensive", "memory-intensive", "disk-intensive"] }""" def calculate_score(self, features): """Arbitrary algorithm calculating the score. It demonstrates how to parse the input data (features) and serialize the results. It detects the workload type based on the metrics and also returns the probabilities of each workload detection (again, the arbitrary values are returned, just for demonstration how the "real" machine learning algorithm could work. 
For example, the Gradient Boosting Machine from H2O framework is using exactly the same format: http://www.h2o.ai/verticals/algos/gbm/ """ LOG.debug('Calculating score, features: %s', features) # By default IDLE workload will be returned workload = self.WORKLOAD_IDLE idle_prob = 0.0 cpu_prob = 0.0 mem_prob = 0.0 disk_prob = 0.0 # Basic input validation try: flist = jsonutils.loads(features) except Exception as e: raise ValueError(_('Unable to parse features: ') % e) if type(flist) is not list: raise ValueError(_('JSON list expected in feature argument')) if len(flist) != 9: raise ValueError(_('Invalid number of features, expected 9')) # Simple logic for workload classification if flist[self.PROCESSOR_TIME_PERC] >= 80: workload = self.WORKLOAD_CPU cpu_prob = 100.0 elif flist[self.MEM_PAGE_READS_PER_SEC] >= 1000 \ and flist[self.MEM_PAGE_WRITES_PER_SEC] >= 1000: workload = self.WORKLOAD_MEM mem_prob = 100.0 elif flist[self.DISK_READ_BYTES_PER_SEC] >= 50*units.Mi \ and flist[self.DISK_WRITE_BYTES_PER_SEC] >= 50*units.Mi: workload = self.WORKLOAD_DISK disk_prob = 100.0 else: idle_prob = 100.0 if flist[self.PROCESSOR_TIME_PERC] >= 40: cpu_prob = 50.0 if flist[self.MEM_PAGE_READS_PER_SEC] >= 500 \ or flist[self.MEM_PAGE_WRITES_PER_SEC] >= 500: mem_prob = 50.0 return jsonutils.dumps( [workload, idle_prob, cpu_prob, mem_prob, disk_prob]) python-watcher-4.0.0/watcher/decision_engine/scoring/dummy_scoring_container.py0000664000175000017500000000651013656752270030246 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from oslo_log import log
from oslo_serialization import jsonutils

from watcher._i18n import _
from watcher.decision_engine.scoring import base

LOG = log.getLogger(__name__)


class DummyScoringContainer(base.ScoringEngineContainer):
    """Sample Scoring Engine container returning a list of scoring engines.

    Please note that it can be used in dynamic scenarios and the returned
    list might return instances based on some external configuration (e.g.
    in database). In order for these scoring engines to become discoverable
    in Watcher API and Watcher CLI, a database re-sync is required. It can be
    executed using watcher-sync tool for example.
    """

    @classmethod
    def get_scoring_engine_list(cls):
        # Three fixed aggregation engines, all built on the same generic
        # scorer class but configured with different aggregate functions.
        return [
            SimpleFunctionScorer(
                'dummy_min_scorer',
                'Dummy Scorer calculating the minimum value',
                min),
            SimpleFunctionScorer(
                'dummy_max_scorer',
                'Dummy Scorer calculating the maximum value',
                max),
            SimpleFunctionScorer(
                'dummy_avg_scorer',
                'Dummy Scorer calculating the average value',
                lambda x: float(sum(x)) / len(x)),
        ]


class SimpleFunctionScorer(base.ScoringEngine):
    """A simple generic scoring engine for demonstration purposes only.

    A generic scoring engine implementation, which is expecting a JSON
    formatted array of numbers to be passed as an input for score
    calculation. It then executes the aggregate function on this array and
    returns an array with a single aggregated number (also JSON formatted).
    """

    def __init__(self, name, description, aggregate_function):
        super(SimpleFunctionScorer, self).__init__(config=None)
        self._name = name
        self._description = description
        self._aggregate_function = aggregate_function

    def get_name(self):
        return self._name

    def get_description(self):
        return self._description

    def get_metainfo(self):
        # No metadata needed for this trivial engine.
        return ''

    def calculate_score(self, features):
        """Apply the configured aggregate function to a JSON list of numbers.

        :param features: JSON-encoded non-empty list of numbers
        :return: JSON-encoded single-element list with the aggregate
        :raises ValueError: on unparsable or malformed input
        """
        LOG.debug('Calculating score, features: %s', features)

        # Basic input validation
        try:
            flist = jsonutils.loads(features)
        except Exception as e:
            raise ValueError(_('Unable to parse features: %s') % e)
        if type(flist) is not list:
            raise ValueError(_('JSON list expected in feature argument'))
        if len(flist) < 1:
            raise ValueError(_('At least one feature is required'))

        # Calculate the result
        result = self._aggregate_function(flist)

        # Return the aggregated result
        return jsonutils.dumps([result])
python-watcher-4.0.0/watcher/decision_engine/scoring/base.py0000664000175000017500000001042213656752270024234 0ustar zuulzuul00000000000000
# -*- encoding: utf-8 -*-
# Copyright (c) 2016 Intel
#
# Authors: Tomasz Kaczynski
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc

import six

from watcher.common.loader import loadable


@six.add_metaclass(abc.ABCMeta)
class ScoringEngine(loadable.Loadable):
    """A base class for all the Scoring Engines.

    A Scoring Engine is an instance of a data model, to which the learning
    data was applied.
Please note that this class contains non-static and non-class methods by design, so that it's easy to create multiple Scoring Engine instances using a single class (possibly configured differently). """ @abc.abstractmethod def get_name(self): """Returns the name of the Scoring Engine. The name should be unique across all Scoring Engines. :return: A Scoring Engine name :rtype: str """ @abc.abstractmethod def get_description(self): """Returns the description of the Scoring Engine. The description might contain any human readable information, which might be useful for Strategy developers planning to use this Scoring Engine. It will be also visible in the Watcher API and CLI. :return: A Scoring Engine description :rtype: str """ @abc.abstractmethod def get_metainfo(self): """Returns the metadata information about Scoring Engine. The metadata might contain a machine-friendly (e.g. in JSON format) information needed to use this Scoring Engine. For example, some Scoring Engines require to pass the array of features in particular order to be able to calculate the score value. This order can be defined in metadata and used in Strategy. :return: A Scoring Engine metadata :rtype: str """ @abc.abstractmethod def calculate_score(self, features): """Calculates a score value based on arguments passed. Scoring Engines might be very different to each other. They might solve different problems or use different algorithms or frameworks internally. To enable this kind of flexibility, the method takes only one argument (string) and produces the results in the same format (string). The consumer of the Scoring Engine is ultimately responsible for providing the right arguments and parsing the result. 
:param features: Input data for Scoring Engine :type features: str :return: A score result :rtype: str """ @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @six.add_metaclass(abc.ABCMeta) class ScoringEngineContainer(loadable.Loadable): """A base class for all the Scoring Engines Containers. A Scoring Engine Container is an abstraction which allows to plugin multiple Scoring Engines as a single Stevedore plugin. This enables some more advanced scenarios like dynamic reloading of Scoring Engine implementations without having to restart any Watcher services. """ @classmethod @abc.abstractmethod def get_scoring_engine_list(self): """Returns a list of Scoring Engine instances. :return: A list of Scoring Engine instances :rtype: :class: `~.scoring_engine.ScoringEngine` """ @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] python-watcher-4.0.0/watcher/decision_engine/gmr.py0000664000175000017500000000274013656752270022447 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_reports import guru_meditation_report as gmr from watcher._i18n import _ from watcher.decision_engine.model.collector import manager def register_gmr_plugins(): """Register GMR plugins that are specific to watcher-decision-engine.""" gmr.TextGuruMeditation.register_section(_('CDMCs'), show_models) def show_models(): """Create a formatted output of all the CDMs Mainly used as a Guru Meditation Report (GMR) plugin """ mgr = manager.CollectorManager() output = [] for name, cdmc in mgr.get_collectors().items(): output.append("") output.append("~" * len(name)) output.append(name) output.append("~" * len(name)) output.append("") cdmc_struct = cdmc.cluster_data_model.to_string() output.append(cdmc_struct) return "\n".join(output) python-watcher-4.0.0/watcher/decision_engine/messaging/0000775000175000017500000000000013656752352023263 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/messaging/__init__.py0000664000175000017500000000000013656752270025361 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/messaging/audit_endpoint.py0000664000175000017500000000414013656752270026641 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import futurist from oslo_config import cfg from oslo_log import log from watcher.decision_engine.audit import continuous as c_handler from watcher.decision_engine.audit import event as e_handler from watcher.decision_engine.audit import oneshot as o_handler from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) class AuditEndpoint(object): def __init__(self, messaging): self._messaging = messaging self._executor = futurist.GreenThreadPoolExecutor( max_workers=CONF.watcher_decision_engine.max_audit_workers) self._oneshot_handler = o_handler.OneShotAuditHandler() self._continuous_handler = c_handler.ContinuousAuditHandler().start() self._event_handler = e_handler.EventAuditHandler() @property def executor(self): return self._executor def do_trigger_audit(self, context, audit_uuid): audit = objects.Audit.get_by_uuid(context, audit_uuid, eager=True) if audit.audit_type == objects.audit.AuditType.ONESHOT.value: self._oneshot_handler.execute(audit, context) if audit.audit_type == objects.audit.AuditType.EVENT.value: self._event_handler.execute(audit, context) def trigger_audit(self, context, audit_uuid): LOG.debug("Trigger audit %s", audit_uuid) self.executor.submit(self.do_trigger_audit, context, audit_uuid) return audit_uuid python-watcher-4.0.0/watcher/decision_engine/messaging/data_model_endpoint.py0000664000175000017500000000420413656752270027625 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2019 ZTE corporation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # from watcher.common import exception from watcher.common import utils from watcher.decision_engine.model.collector import manager from watcher import objects class DataModelEndpoint(object): def __init__(self, messaging): self._messaging = messaging def get_audit_scope(self, context, audit=None): scope = None try: if utils.is_uuid_like(audit) or utils.is_int_like(audit): audit = objects.Audit.get( context, audit) else: audit = objects.Audit.get_by_name( context, audit) except exception.AuditNotFound: raise exception.InvalidIdentity(identity=audit) if audit: scope = audit.scope else: scope = [] return scope def get_data_model_info(self, context, data_model_type='compute', audit=None): if audit is not None: scope = self.get_audit_scope(context, audit) else: scope = [] collector_manager = manager.CollectorManager() collector = collector_manager.get_cluster_model_collector( data_model_type) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=scope) available_data_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not available_data_model: return {"context": []} return {"context": available_data_model.to_list()} python-watcher-4.0.0/watcher/decision_engine/planner/0000775000175000017500000000000013656752352022745 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/planner/__init__.py0000664000175000017500000000000013656752270025043 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/planner/manager.py0000664000175000017500000000200213656752270024722 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher.decision_engine.loading import default as loader LOG = log.getLogger(__name__) class PlannerManager(object): def __init__(self): self._loader = loader.DefaultPlannerLoader() @property def loader(self): return self._loader def load(self, planner_name): LOG.debug("Loading %s", planner_name) return self.loader.load(name=planner_name) python-watcher-4.0.0/watcher/decision_engine/planner/workload_stabilization.py0000664000175000017500000002567013656752270030106 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
#

import abc

from oslo_config import cfg
from oslo_log import log

from watcher.common import clients
from watcher.common import exception
from watcher.common import nova_helper
from watcher.common import utils
from watcher.decision_engine.planner import base
from watcher import objects

LOG = log.getLogger(__name__)


class WorkloadStabilizationPlanner(base.BasePlanner):
    """Workload Stabilization planner implementation

    This implementation comes with basic rules with a set of action types
    that are weighted. An action having a lower weight will be scheduled
    before the other ones. The set of action types can be specified by
    'weights' in the ``watcher.conf``. You need to associate a different
    weight to all available actions into the configuration file, otherwise
    you will get an error when the new action will be referenced in the
    solution produced by a strategy.

    *Limitations*

    - This is a proof of concept that is not meant to be used in production
    """

    def __init__(self, config):
        super(WorkloadStabilizationPlanner, self).__init__(config)
        self._osc = clients.OpenStackClients()

    @property
    def osc(self):
        return self._osc

    # Default weight per action type; lower weight => scheduled earlier.
    weights_dict = {
        'turn_host_to_acpi_s3_state': 0,
        'resize': 1,
        'migrate': 2,
        'sleep': 3,
        'change_nova_service_state': 4,
        'nop': 5,
    }

    @classmethod
    def get_config_opts(cls):
        return [
            cfg.DictOpt(
                'weights',
                help="These weights are used to schedule the actions",
                default=cls.weights_dict),
        ]

    def create_action(self, action_plan_id, action_type,
                      input_parameters=None):
        # Build the plain-dict representation of a PENDING Action row.
        uuid = utils.generate_uuid()
        action = {
            'uuid': uuid,
            'action_plan_id': int(action_plan_id),
            'action_type': action_type,
            'input_parameters': input_parameters,
            'state': objects.action.State.PENDING,
            'parents': None
        }
        return action

    def load_child_class(self, child_name):
        # Find the validator subclass registered for this action type,
        # or None if the action type is unsupported.
        for c in BaseActionValidator.__subclasses__():
            if child_name == c.action_name:
                return c()
        return None

    def schedule(self, context, audit_id, solution):
        LOG.debug('Creating an action plan for the audit uuid: %s', audit_id)
        weights = self.config.weights
        action_plan = self._create_action_plan(context, audit_id, solution)

        actions = list(solution.actions)
        to_schedule = []
        for action in actions:
            json_action = self.create_action(
                action_plan_id=action_plan.id,
                action_type=action.get('action_type'),
                input_parameters=action.get('input_parameters'))
            to_schedule.append((weights[action.get('action_type')],
                                json_action))

        self._create_efficacy_indicators(
            context, action_plan.id, solution.efficacy_indicators)

        # scheduling
        scheduled = sorted(to_schedule, key=lambda weight: (weight[0]),
                           reverse=True)
        if len(scheduled) == 0:
            LOG.warning("The action plan is empty")
            action_plan.state = objects.action_plan.State.SUCCEEDED
            action_plan.save()
        else:
            # Maps a resource id to the (action_uuid, action_type) tuples
            # already scheduled against that resource; filled in by the
            # per-action-type validators.
            resource_action_map = {}
            scheduled_actions = [x[1] for x in scheduled]
            for action in scheduled_actions:
                a_type = action['action_type']
                if a_type != 'turn_host_to_acpi_s3_state':
                    plugin_action = self.load_child_class(
                        action.get("action_type"))
                    if not plugin_action:
                        raise exception.UnsupportedActionType(
                            action_type=action.get("action_type"))
                    db_action = self._create_action(context, action)
                    parents = plugin_action.validate_parents(
                        resource_action_map, action)
                    if parents:
                        db_action.parents = parents
                        db_action.save()
                # if we have an action that will make host unreachable, we need
                # to complete all actions (resize and migration type)
                # related to the host.
                # Note(alexchadin): turn_host_to_acpi_s3_state doesn't
                # actually exist. Placed code shows relations between
                # action types.
                # TODO(alexchadin): add turn_host_to_acpi_s3_state action type.
                else:
                    host_to_acpi_s3 = action['input_parameters']['resource_id']
                    host_actions = resource_action_map.get(host_to_acpi_s3)
                    action_parents = []
                    if host_actions:
                        resize_actions = [x[0] for x in host_actions
                                          if x[1] == 'resize']
                        migrate_actions = [x[0] for x in host_actions
                                           if x[1] == 'migrate']
                        resize_migration_parents = [
                            x.parents for x in
                            [objects.Action.get_by_uuid(context, resize_action)
                             for resize_action in resize_actions]]
                        # resize_migration_parents should be one level list
                        resize_migration_parents = [
                            parent for sublist in resize_migration_parents
                            for parent in sublist]
                        action_parents.extend([uuid for uuid
                                               in resize_actions])
                        # migrations already acting as a resize's parent are
                        # excluded to avoid redundant dependency edges.
                        action_parents.extend([uuid for uuid
                                               in migrate_actions
                                               if uuid not in
                                               resize_migration_parents])
                    db_action = self._create_action(context, action)
                    db_action.parents = action_parents
                    db_action.save()
        return action_plan

    def _create_action_plan(self, context, audit_id, solution):
        strategy = objects.Strategy.get_by_name(
            context, solution.strategy.name)

        action_plan_dict = {
            'uuid': utils.generate_uuid(),
            'audit_id': audit_id,
            'strategy_id': strategy.id,
            'state': objects.action_plan.State.RECOMMENDED,
            'global_efficacy': solution.global_efficacy,
        }

        new_action_plan = objects.ActionPlan(context, **action_plan_dict)
        new_action_plan.create()

        return new_action_plan

    def _create_efficacy_indicators(self, context, action_plan_id,
                                    indicators):
        efficacy_indicators = []
        for indicator in indicators:
            efficacy_indicator_dict = {
                'uuid': utils.generate_uuid(),
                'name': indicator.name,
                'description': indicator.description,
                'unit': indicator.unit,
                'value': indicator.value,
                'action_plan_id': action_plan_id,
            }
            new_efficacy_indicator = objects.EfficacyIndicator(
                context, **efficacy_indicator_dict)
            new_efficacy_indicator.create()

            efficacy_indicators.append(new_efficacy_indicator)

        return efficacy_indicators

    def _create_action(self, context, _action):
        try:
            LOG.debug("Creating the %s in the Watcher database",
                      _action.get("action_type"))

            new_action = objects.Action(context, **_action)
            new_action.create()
            return new_action
        except Exception as exc:
            LOG.exception(exc)
            raise


class BaseActionValidator(object):
    # Action type this validator handles; set by each subclass.
    action_name = None

    def __init__(self):
        super(BaseActionValidator, self).__init__()
        self._osc = None

    @property
    def osc(self):
        # Lazily create the OpenStack clients wrapper on first use.
        if not self._osc:
            self._osc = clients.OpenStackClients()
        return self._osc

    @abc.abstractmethod
    def validate_parents(self, resource_action_map, action):
        raise NotImplementedError()

    def _mapping(self, resource_action_map, resource_id, action_uuid,
                 action_type):
        # Record (action_uuid, action_type) against resource_id.
        if resource_id not in resource_action_map:
            resource_action_map[resource_id] = [(action_uuid,
                                                 action_type,)]
        else:
            resource_action_map[resource_id].append((action_uuid,
                                                     action_type,))


class MigrationActionValidator(BaseActionValidator):
    action_name = "migrate"

    def validate_parents(self, resource_action_map, action):
        # A migration is tracked against both the instance and its
        # source node; it declares no parents of its own.
        instance_uuid = action['input_parameters']['resource_id']
        host_name = action['input_parameters']['source_node']
        self._mapping(resource_action_map, instance_uuid, action['uuid'],
                      'migrate')
        self._mapping(resource_action_map, host_name, action['uuid'],
                      'migrate')


class ResizeActionValidator(BaseActionValidator):
    action_name = "resize"

    def validate_parents(self, resource_action_map, action):
        # A resize must run after any actions already scheduled for the
        # same instance; it is tracked against the instance's current host.
        nova = nova_helper.NovaHelper(osc=self.osc)
        instance_uuid = action['input_parameters']['resource_id']
        parent_actions = resource_action_map.get(instance_uuid)
        host_of_instance = nova.get_hostname(
            nova.get_instance_by_uuid(instance_uuid)[0])
        self._mapping(resource_action_map, host_of_instance, action['uuid'],
                      'resize')
        if parent_actions:
            return [x[0] for x in parent_actions]
        else:
            return []


class ChangeNovaServiceStateActionValidator(BaseActionValidator):
    action_name = "change_nova_service_state"

    def validate_parents(self, resource_action_map, action):
        host_name = action['input_parameters']['resource_id']
        self._mapping(resource_action_map, host_name, action['uuid'],
                      'change_nova_service_state')
        return []


class SleepActionValidator(BaseActionValidator):
    action_name = "sleep"

    def validate_parents(self, resource_action_map, action):
        return []


class NOPActionValidator(BaseActionValidator):
    action_name = "nop"

    def validate_parents(self, resource_action_map, action):
        return []
python-watcher-4.0.0/watcher/decision_engine/planner/weight.py0000664000175000017500000002013213656752270024603 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*-
#
# Authors: Vincent Francoise
#          Alexander Chadin
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import collections

import networkx as nx
from oslo_config import cfg
from oslo_log import log

from watcher.common import utils
from watcher.decision_engine.planner import base
from watcher import objects

LOG = log.getLogger(__name__)


class WeightPlanner(base.BasePlanner):
    """Weight planner implementation

    This implementation builds actions with parents in accordance with
    weights. Set of actions having a higher weight will be scheduled before
    the other ones. There are two config options to configure:
    action_weights and parallelization.

    *Limitations*

    - This planner requires to have action_weights and parallelization
      configs tuned well.
    """

    def __init__(self, config):
        super(WeightPlanner, self).__init__(config)

    # Default per-action-type weights; higher weight => scheduled earlier.
    action_weights = {
        'nop': 70,
        'volume_migrate': 60,
        'change_nova_service_state': 50,
        'sleep': 40,
        'migrate': 30,
        'resize': 20,
        'turn_host_to_acpi_s3_state': 10,
        'change_node_power_state': 9,
    }

    # Default number of actions of each type allowed in one parallel layer.
    parallelization = {
        'turn_host_to_acpi_s3_state': 2,
        'resize': 2,
        'migrate': 2,
        'sleep': 1,
        'change_nova_service_state': 1,
        'nop': 1,
        'change_node_power_state': 2,
        'volume_migrate': 2
    }

    @classmethod
    def get_config_opts(cls):
        return [
            cfg.DictOpt(
                'weights',
                help="These weights are used to schedule the actions. "
                     "Action Plan will be build in accordance with sets of "
                     "actions ordered by descending weights."
                     "Two action types cannot have the same weight. ",
                default=cls.action_weights),
            cfg.DictOpt(
                'parallelization',
                help="Number of actions to be run in parallel on a per "
                     "action type basis.",
                default=cls.parallelization),
        ]

    @staticmethod
    def chunkify(lst, n):
        """Yield successive n-sized chunks from lst."""
        n = int(n)
        if n < 1:
            # Just to make sure the number is valid
            n = 1

        # Split a flat list in a list of chunks of size n.
        # e.g. chunkify([0, 1, 2, 3, 4], 2) -> [[0, 1], [2, 3], [4]]
        for i in range(0, len(lst), n):
            yield lst[i:i + n]

    def compute_action_graph(self, sorted_weighted_actions):
        # Invert the weights config so a weight maps back to its action type.
        reverse_weights = {v: k for k, v in self.config.weights.items()}
        # leaf_groups contains a list of list of nodes called groups
        # each group is a set of nodes from which a future node will
        # branch off (parent nodes).

        # START --> migrate-1 --> migrate-3
        #       \             \--> resize-1 --> FINISH
        #        \--> migrate-2 -------------/
        # In the above case migrate-1 will be the only member of the leaf
        # group that migrate-3 will use as parent group, whereas
        # resize-1 will have both migrate-2 and migrate-3 in its
        # parent/leaf group
        leaf_groups = []
        action_graph = nx.DiGraph()
        # We iterate through each action type category (sorted by weight) to
        # insert them in a Directed Acyclic Graph
        for idx, (weight, actions) in enumerate(sorted_weighted_actions):
            action_chunks = self.chunkify(
                actions, self.config.parallelization[reverse_weights[weight]])

            # We split the actions into chunks/layers that will have to be
            # spread across all the available branches of the graph
            for chunk_idx, actions_chunk in enumerate(action_chunks):
                for action in actions_chunk:
                    action_graph.add_node(action)

                    # all other actions
                    parent_nodes = []
                    if not idx and not chunk_idx:
                        parent_nodes = []
                    elif leaf_groups:
                        parent_nodes = leaf_groups

                    for parent_node in parent_nodes:
                        action_graph.add_edge(parent_node, action)
                        action.parents.append(parent_node.uuid)

                # The current chunk becomes the parent group for the next one.
                if leaf_groups:
                    leaf_groups = []
                leaf_groups.extend([a for a in actions_chunk])

        return action_graph

    def schedule(self, context, audit_id, solution):
        LOG.debug('Creating an action plan for the audit uuid: %s', audit_id)
        action_plan = self.create_action_plan(context, audit_id, solution)

        sorted_weighted_actions = self.get_sorted_actions_by_weight(
            context, action_plan, solution)
        action_graph = self.compute_action_graph(sorted_weighted_actions)

        self._create_efficacy_indicators(
            context, action_plan.id, solution.efficacy_indicators)

        if len(action_graph.nodes()) == 0:
            LOG.warning("The action plan is empty")
            action_plan.state = objects.action_plan.State.SUCCEEDED
            action_plan.save()

        self.create_scheduled_actions(action_graph)
        return action_plan

    def get_sorted_actions_by_weight(self, context, action_plan, solution):
        # We need to make them immutable to add them to the graph
        action_objects = list([
            objects.Action(
                context, uuid=utils.generate_uuid(), parents=[],
                action_plan_id=action_plan.id, **a)
            for a in solution.actions])
        # This is a dict of list with each being a weight and the list being
        # all the actions associated to this weight
        weighted_actions = collections.defaultdict(list)
        for action in action_objects:
            action_weight = self.config.weights[action.action_type]
            weighted_actions[action_weight].append(action)

        # Highest weight first.
        return reversed(sorted(weighted_actions.items(),
                               key=lambda x: x[0]))

    def create_scheduled_actions(self, graph):
        for action in graph.nodes():
            LOG.debug("Creating the %s in the Watcher database",
                      action.action_type)
            try:
                action.create()
            except Exception as exc:
                LOG.exception(exc)
                raise

    def create_action_plan(self, context, audit_id, solution):
        strategy = objects.Strategy.get_by_name(
            context, solution.strategy.name)

        action_plan_dict = {
            'uuid': utils.generate_uuid(),
            'audit_id': audit_id,
            'strategy_id': strategy.id,
            'state': objects.action_plan.State.RECOMMENDED,
            'global_efficacy': solution.global_efficacy,
        }

        new_action_plan = objects.ActionPlan(context, **action_plan_dict)
        new_action_plan.create()

        return new_action_plan

    def _create_efficacy_indicators(self, context, action_plan_id,
                                    indicators):
        efficacy_indicators = []
        for indicator in indicators:
            efficacy_indicator_dict = {
                'uuid': utils.generate_uuid(),
                'name': indicator.name,
                'description': indicator.description,
                'unit': indicator.unit,
                'value': indicator.value,
                'action_plan_id': action_plan_id,
            }
            new_efficacy_indicator = objects.EfficacyIndicator(
                context, **efficacy_indicator_dict)
            new_efficacy_indicator.create()

            efficacy_indicators.append(new_efficacy_indicator)

        return efficacy_indicators
python-watcher-4.0.0/watcher/decision_engine/planner/base.py0000664000175000017500000000602113656752270024227 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright (c) 2015 b<>com
#
# Authors: Jean-Emile DARTOIS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use
this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
The :ref:`Watcher Planner ` is part of the
:ref:`Watcher Decision Engine `.

This module takes the set of :ref:`Actions ` generated by a
:ref:`Strategy ` and builds the design of a workflow which
defines how-to schedule in time those different
:ref:`Actions ` and for each
:ref:`Action ` what are the prerequisite conditions.

It is important to schedule :ref:`Actions ` in time in order
to prevent overload of the :ref:`Cluster ` while applying
the :ref:`Action Plan `. For example, it is important
not to migrate too many instances at the same time in order to avoid a network
congestion which may decrease the :ref:`SLA ` for
:ref:`Customers `.

It is also important to schedule :ref:`Actions ` in order to
avoid security issues such as denial of service on core OpenStack services.

:ref:`Some default implementations are provided `, but it is
possible to :ref:`develop new implementations `
which are dynamically loaded by Watcher at launch time.

See :doc:`../architecture` for more details on this component.
"""

import abc

import six

from watcher.common.loader import loadable


@six.add_metaclass(abc.ABCMeta)
class BasePlanner(loadable.Loadable):
    # Abstract base for all planner plugins; concrete planners implement
    # schedule() to turn a strategy solution into an ordered action plan.
    @classmethod
    def get_config_opts(cls):
        """Defines the configuration options to be associated to this loadable

        :return: A list of configuration options relative to this Loadable
        :rtype: list of :class:`oslo_config.cfg.Opt` instances
        """
        return []

    @abc.abstractmethod
    def schedule(self, context, audit_uuid, solution):
        """The planner receives a solution to schedule

        :param solution: A solution provided by a strategy for scheduling
        :type solution: :py:class:`~.BaseSolution` subclass instance
        :param audit_uuid: the audit uuid
        :type audit_uuid: str
        :return: Action plan with an ordered sequence of actions such that all
                 security, dependency, and performance requirements are met.
        :rtype: :py:class:`watcher.objects.ActionPlan` instance
        """
        # example: directed acyclic graph
        raise NotImplementedError()
python-watcher-4.0.0/watcher/decision_engine/planner/node_resource_consolidation.py0000664000175000017500000001343413656752270031104 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from oslo_log import log

from watcher.common import exception
from watcher.common import utils
from watcher.decision_engine.model import element
from watcher.decision_engine.planner import base
from watcher import objects

LOG = log.getLogger(__name__)


class NodeResourceConsolidationPlanner(base.BasePlanner):
    """Node Resource Consolidation planner implementation

    This implementation preserves the original order of actions in the
    solution and try to parallelize actions which have the same action type.

    *Limitations*

    - This is a proof of concept that is not meant to be used in production
    """

    def create_action(self, action_plan_id, action_type,
                      input_parameters=None):
        # Build the plain-dict representation of a PENDING Action row.
        uuid = utils.generate_uuid()
        action = {
            'uuid': uuid,
            'action_plan_id': int(action_plan_id),
            'action_type': action_type,
            'input_parameters': input_parameters,
            'state': objects.action.State.PENDING,
            'parents': None
        }

        return action

    def schedule(self, context, audit_id, solution):
        LOG.debug('Creating an action plan for the audit uuid: %s', audit_id)
        action_plan = self._create_action_plan(context, audit_id, solution)

        actions = list(solution.actions)
        if len(actions) == 0:
            LOG.warning("The action plan is empty")
            action_plan.state = objects.action_plan.State.SUCCEEDED
            action_plan.save()
            return action_plan

        # Sort actions into three buckets: service-disable actions run
        # first, then per-node migration chains, then service-enable
        # actions. Only these three action types are supported.
        node_disabled_actions = []
        node_enabled_actions = []
        node_migrate_actions = {}
        for action in actions:
            action_type = action.get('action_type')
            parameters = action.get('input_parameters')
            json_action = self.create_action(
                action_plan_id=action_plan.id,
                action_type=action_type,
                input_parameters=parameters)
            # classing actions
            if action_type == 'change_nova_service_state':
                if parameters.get('state') == (
                        element.ServiceState.DISABLED.value):
                    node_disabled_actions.append(json_action)
                else:
                    node_enabled_actions.append(json_action)
            elif action_type == 'migrate':
                source_node = parameters.get('source_node')
                if source_node in node_migrate_actions:
                    node_migrate_actions[source_node].append(json_action)
                else:
                    node_migrate_actions[source_node] = [json_action]
            else:
                raise exception.UnsupportedActionType(
                    action_type=action.get("action_type"))

        # creating actions
        mig_parents = []
        for action in node_disabled_actions:
            mig_parents.append(action['uuid'])
            self._create_action(context, action)

        enabled_parents = []
        for actions in node_migrate_actions.values():
            # The last migration of each node gates the enable actions.
            enabled_parents.append(actions[-1].get('uuid'))
            pre_action_uuid = []
            for action in actions:
                # Chain migrations on the same node: each one depends on all
                # disable actions plus the previous migration in the chain.
                action['parents'] = mig_parents + pre_action_uuid
                pre_action_uuid = [action['uuid']]
                self._create_action(context, action)

        for action in node_enabled_actions:
            action['parents'] = enabled_parents
            self._create_action(context, action)

        self._create_efficacy_indicators(
            context, action_plan.id, solution.efficacy_indicators)

        return action_plan

    def _create_action_plan(self, context, audit_id, solution):
        strategy = objects.Strategy.get_by_name(
            context, solution.strategy.name)

        action_plan_dict = {
            'uuid': utils.generate_uuid(),
            'audit_id': audit_id,
            'strategy_id': strategy.id,
            'state': objects.action_plan.State.RECOMMENDED,
            'global_efficacy': solution.global_efficacy,
        }

        new_action_plan = objects.ActionPlan(context, **action_plan_dict)
        new_action_plan.create()

        return new_action_plan

    def _create_efficacy_indicators(self, context, action_plan_id,
                                    indicators):
        efficacy_indicators = []
        for indicator in indicators:
            efficacy_indicator_dict = {
                'uuid': utils.generate_uuid(),
                'name': indicator.name,
                'description': indicator.description,
                'unit': indicator.unit,
                'value': indicator.value,
                'action_plan_id': action_plan_id,
            }
            new_efficacy_indicator = objects.EfficacyIndicator(
                context, **efficacy_indicator_dict)
            new_efficacy_indicator.create()

            efficacy_indicators.append(new_efficacy_indicator)

        return efficacy_indicators

    def _create_action(self, context, _action):
        try:
            LOG.debug("Creating the %s in the Watcher database",
                      _action.get("action_type"))

            new_action = objects.Action(context, **_action)
            new_action.create()
            return new_action
        except Exception as exc:
            LOG.exception(exc)
            raise
python-watcher-4.0.0/watcher/decision_engine/goal/0000775000175000017500000000000013656752352022230 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/goal/__init__.py0000664000175000017500000000224513656752270024343 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from watcher.decision_engine.goal import goals

Dummy = goals.Dummy
ServerConsolidation = goals.ServerConsolidation
ThermalOptimization = goals.ThermalOptimization
Unclassified = goals.Unclassified
WorkloadBalancing = goals.WorkloadBalancing
NoisyNeighborOptimization = goals.NoisyNeighborOptimization
SavingEnergy = goals.SavingEnergy
HardwareMaintenance = goals.HardwareMaintenance

__all__ = ("Dummy", "ServerConsolidation", "ThermalOptimization",
           "Unclassified", "WorkloadBalancing", "NoisyNeighborOptimization",
           "SavingEnergy", "HardwareMaintenance")
python-watcher-4.0.0/watcher/decision_engine/goal/goals.py0000664000175000017500000001520513656752270023711 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright (c) 2016 b<>com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from watcher._i18n import _
from watcher.decision_engine.goal import base
from watcher.decision_engine.goal.efficacy import specs


class Dummy(base.Goal):
    """Dummy

    Reserved goal that is used for testing purposes.
    """

    @classmethod
    def get_name(cls):
        return "dummy"

    @classmethod
    def get_display_name(cls):
        return _("Dummy goal")

    @classmethod
    def get_translatable_display_name(cls):
        return "Dummy goal"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class Unclassified(base.Goal):
    """Unclassified

    This goal is used to ease the development process of a strategy. Containing
    no actual indicator specification, this goal can be used whenever a
    strategy has yet to be formally associated with an existing goal. If the
    goal achieve has been identified but there is no available implementation,
    this Goal can also be used as a transitional stage.
    """

    @classmethod
    def get_name(cls):
        return "unclassified"

    @classmethod
    def get_display_name(cls):
        return _("Unclassified")

    @classmethod
    def get_translatable_display_name(cls):
        return "Unclassified"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class ServerConsolidation(base.Goal):
    """ServerConsolidation

    This goal is for efficient usage of compute server resources in order to
    reduce the total number of servers.
    """

    @classmethod
    def get_name(cls):
        return "server_consolidation"

    @classmethod
    def get_display_name(cls):
        return _("Server Consolidation")

    @classmethod
    def get_translatable_display_name(cls):
        return "Server Consolidation"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.ServerConsolidation()


class ThermalOptimization(base.Goal):
    """ThermalOptimization

    This goal is used to balance the temperature across different servers.
    """

    @classmethod
    def get_name(cls):
        return "thermal_optimization"

    @classmethod
    def get_display_name(cls):
        return _("Thermal Optimization")

    @classmethod
    def get_translatable_display_name(cls):
        return "Thermal Optimization"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class WorkloadBalancing(base.Goal):
    """WorkloadBalancing

    This goal is used to evenly distribute workloads across different servers.
    """

    @classmethod
    def get_name(cls):
        return "workload_balancing"

    @classmethod
    def get_display_name(cls):
        return _("Workload Balancing")

    @classmethod
    def get_translatable_display_name(cls):
        return "Workload Balancing"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.WorkloadBalancing()


class AirflowOptimization(base.Goal):
    """AirflowOptimization

    This goal is used to optimize the airflow within a cloud infrastructure.
    """

    @classmethod
    def get_name(cls):
        return "airflow_optimization"

    @classmethod
    def get_display_name(cls):
        return _("Airflow Optimization")

    @classmethod
    def get_translatable_display_name(cls):
        return "Airflow Optimization"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class NoisyNeighborOptimization(base.Goal):
    """NoisyNeighborOptimization

    This goal is used to identify and migrate a Noisy Neighbor - a low priority
    VM that negatively affects performance of a high priority VM in terms of
    IPC by over utilizing Last Level Cache.
    """

    @classmethod
    def get_name(cls):
        return "noisy_neighbor"

    @classmethod
    def get_display_name(cls):
        return _("Noisy Neighbor")

    @classmethod
    def get_translatable_display_name(cls):
        return "Noisy Neighbor"

    @classmethod
    def get_efficacy_specification(cls):
        """The efficacy spec for the current goal"""
        return specs.Unclassified()


class SavingEnergy(base.Goal):
    """SavingEnergy

    This goal is used to reduce power consumption within a data center.
""" @classmethod def get_name(cls): return "saving_energy" @classmethod def get_display_name(cls): return _("Saving Energy") @classmethod def get_translatable_display_name(cls): return "Saving Energy" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() class HardwareMaintenance(base.Goal): """HardwareMaintenance This goal is to migrate instances and volumes on a set of compute nodes and storage from nodes under maintenance """ @classmethod def get_name(cls): return "hardware_maintenance" @classmethod def get_display_name(cls): return _("Hardware Maintenance") @classmethod def get_translatable_display_name(cls): return "Hardware Maintenance" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.HardwareMaintenance() class ClusterMaintaining(base.Goal): """ClusterMaintenance This goal is used to maintain compute nodes without having the user's application being interrupted. """ @classmethod def get_name(cls): return "cluster_maintaining" @classmethod def get_display_name(cls): return _("Cluster Maintaining") @classmethod def get_translatable_display_name(cls): return "Cluster Maintaining" @classmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" return specs.Unclassified() python-watcher-4.0.0/watcher/decision_engine/goal/efficacy/0000775000175000017500000000000013656752352024001 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/goal/efficacy/__init__.py0000664000175000017500000000000013656752270026077 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/goal/efficacy/specs.py0000664000175000017500000001360013656752270025467 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher._i18n import _ from watcher.decision_engine.goal.efficacy import base from watcher.decision_engine.goal.efficacy import indicators from watcher.decision_engine.solution import efficacy class Unclassified(base.EfficacySpecification): def get_indicators_specifications(self): return () def get_global_efficacy_indicator(self, indicators_map): return None class ServerConsolidation(base.EfficacySpecification): def get_indicators_specifications(self): return [ indicators.ComputeNodesCount(), indicators.ReleasedComputeNodesCount(), indicators.InstanceMigrationsCount(), ] def get_global_efficacy_indicator(self, indicators_map=None): value = 0 global_efficacy = [] if indicators_map and indicators_map.compute_nodes_count > 0: value = (float(indicators_map.released_compute_nodes_count) / float(indicators_map.compute_nodes_count)) * 100 global_efficacy.append(efficacy.Indicator( name="released_nodes_ratio", description=_("Ratio of released compute nodes divided by the " "total number of enabled compute nodes."), unit='%', value=value, )) return global_efficacy class WorkloadBalancing(base.EfficacySpecification): def get_indicators_specifications(self): return [ indicators.InstanceMigrationsCount(), indicators.InstancesCount(), indicators.StandardDeviationValue(), indicators.OriginalStandardDeviationValue() ] def get_global_efficacy_indicator(self, indicators_map=None): gl_indicators = [] mig_value = 0 if indicators_map and indicators_map.instance_migrations_count > 0: mig_value = ( indicators_map.instance_migrations_count / float(indicators_map.instances_count) * 
100) gl_indicators.append(efficacy.Indicator( name="live_migrations_count", description=_("Ratio of migrated virtual machines to audited " "virtual machines"), unit='%', value=mig_value)) return gl_indicators class HardwareMaintenance(base.EfficacySpecification): def get_indicators_specifications(self): return [ indicators.LiveInstanceMigrateCount(), indicators.PlannedLiveInstanceMigrateCount(), indicators.ColdInstanceMigrateCount(), indicators.PlannedColdInstanceMigrateCount(), indicators.VolumeMigrateCount(), indicators.PlannedVolumeMigrateCount(), indicators.VolumeUpdateCount(), indicators.PlannedVolumeUpdateCount() ] def get_global_efficacy_indicator(self, indicators_map=None): li_value = 0 if (indicators_map and indicators_map.planned_live_migrate_instance_count > 0): li_value = ( float(indicators_map.planned_live_migrate_instance_count) / float(indicators_map.live_migrate_instance_count) * 100 ) li_indicator = efficacy.Indicator( name="live_instance_migrate_ratio", description=_("Ratio of actual live migrated instances " "to planned live migrate instances."), unit='%', value=li_value) ci_value = 0 if (indicators_map and indicators_map.planned_cold_migrate_instance_count > 0): ci_value = ( float(indicators_map.planned_cold_migrate_instance_count) / float(indicators_map.cold_migrate_instance_count) * 100 ) ci_indicator = efficacy.Indicator( name="cold_instance_migrate_ratio", description=_("Ratio of actual cold migrated instances " "to planned cold migrate instances."), unit='%', value=ci_value) dv_value = 0 if (indicators_map and indicators_map.planned_volume_migrate_count > 0): dv_value = (float(indicators_map.planned_volume_migrate_count) / float(indicators_map. 
volume_migrate_count) * 100) dv_indicator = efficacy.Indicator( name="volume_migrate_ratio", description=_("Ratio of actual detached volumes migrated to" " planned detached volumes migrate."), unit='%', value=dv_value) av_value = 0 if (indicators_map and indicators_map.planned_volume_update_count > 0): av_value = (float(indicators_map.planned_volume_update_count) / float(indicators_map. volume_update_count) * 100) av_indicator = efficacy.Indicator( name="volume_update_ratio", description=_("Ratio of actual attached volumes migrated to" " planned attached volumes migrate."), unit='%', value=av_value) return [li_indicator, ci_indicator, dv_indicator, av_indicator] python-watcher-4.0.0/watcher/decision_engine/goal/efficacy/indicators.py0000664000175000017500000002010713656752270026511 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import jsonschema from jsonschema import SchemaError from jsonschema import ValidationError import six from oslo_log import log from oslo_serialization import jsonutils from watcher._i18n import _ from watcher.common import exception LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class IndicatorSpecification(object): def __init__(self, name=None, description=None, unit=None, required=True): self.name = name self.description = description self.unit = unit self.required = required @abc.abstractproperty def schema(self): """JsonSchema used to validate the indicator value :return: A Schema """ raise NotImplementedError() @classmethod def validate(cls, solution): """Validate the given solution :raises: :py:class:`~.InvalidIndicatorValue` when the validation fails """ indicator = cls() value = None try: value = getattr(solution, indicator.name) jsonschema.validate(value, cls.schema) except (SchemaError, ValidationError) as exc: LOG.exception(exc) raise except Exception as exc: LOG.exception(exc) raise exception.InvalidIndicatorValue( name=indicator.name, value=value, spec_type=type(indicator)) def to_dict(self): return { "name": self.name, "description": self.description, "unit": self.unit, "schema": jsonutils.dumps(self.schema) if self.schema else None, } def __str__(self): return str(self.to_dict()) class ComputeNodesCount(IndicatorSpecification): def __init__(self): super(ComputeNodesCount, self).__init__( name="compute_nodes_count", description=_("The total number of enabled compute nodes."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class ReleasedComputeNodesCount(IndicatorSpecification): def __init__(self): super(ReleasedComputeNodesCount, self).__init__( name="released_compute_nodes_count", description=_("The number of compute nodes to be released."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class InstancesCount(IndicatorSpecification): def 
__init__(self): super(InstancesCount, self).__init__( name="instances_count", description=_("The total number of audited instances in " "strategy."), unit=None, required=False, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class InstanceMigrationsCount(IndicatorSpecification): def __init__(self): super(InstanceMigrationsCount, self).__init__( name="instance_migrations_count", description=_("The number of VM migrations to be performed."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class LiveInstanceMigrateCount(IndicatorSpecification): def __init__(self): super(LiveInstanceMigrateCount, self).__init__( name="live_migrate_instance_count", description=_("The number of instances actually live migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedLiveInstanceMigrateCount(IndicatorSpecification): def __init__(self): super(PlannedLiveInstanceMigrateCount, self).__init__( name="planned_live_migrate_instance_count", description=_("The number of instances planned to live migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class ColdInstanceMigrateCount(IndicatorSpecification): def __init__(self): super(ColdInstanceMigrateCount, self).__init__( name="cold_migrate_instance_count", description=_("The number of instances actually cold migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedColdInstanceMigrateCount(IndicatorSpecification): def __init__(self): super(PlannedColdInstanceMigrateCount, self).__init__( name="planned_cold_migrate_instance_count", description=_("The number of instances planned to cold migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class VolumeMigrateCount(IndicatorSpecification): def __init__(self): super(VolumeMigrateCount, self).__init__( name="volume_migrate_count", description=_("The 
number of detached volumes actually migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedVolumeMigrateCount(IndicatorSpecification): def __init__(self): super(PlannedVolumeMigrateCount, self).__init__( name="planned_volume_migrate_count", description=_("The number of detached volumes planned" " to migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class VolumeUpdateCount(IndicatorSpecification): def __init__(self): super(VolumeUpdateCount, self).__init__( name="volume_update_count", description=_("The number of attached volumes actually" " migrated."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class PlannedVolumeUpdateCount(IndicatorSpecification): def __init__(self): super(PlannedVolumeUpdateCount, self).__init__( name="planned_volume_update_count", description=_("The number of attached volumes planned to" " migrate."), unit=None, ) @property def schema(self): return { "type": "integer", "minimum": 0 } class StandardDeviationValue(IndicatorSpecification): def __init__(self): super(StandardDeviationValue, self).__init__( name="standard_deviation_after_audit", description=_("The value of resulted standard deviation."), unit=None, required=False, ) @property def schema(self): return { "type": "number", "minimum": 0 } class OriginalStandardDeviationValue(IndicatorSpecification): def __init__(self): super(OriginalStandardDeviationValue, self).__init__( name="standard_deviation_before_audit", description=_("The value of original standard deviation."), unit=None, required=False, ) @property def schema(self): return { "type": "number", "minimum": 0 } python-watcher-4.0.0/watcher/decision_engine/goal/efficacy/base.py0000664000175000017500000000612413656752270025267 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An efficacy specification is a contract that is associated to each :ref:`Goal ` that defines the various :ref:`efficacy indicators ` a strategy achieving the associated goal should provide within its :ref:`solution `. Indeed, each solution proposed by a strategy will be validated against this contract before calculating its :ref:`global efficacy `. """ import abc import jsonschema from oslo_serialization import jsonutils import six @six.add_metaclass(abc.ABCMeta) class EfficacySpecification(object): def __init__(self): self._indicators_specs = self.get_indicators_specifications() @property def indicators_specs(self): return self._indicators_specs @abc.abstractmethod def get_indicators_specifications(self): """List the specifications of the indicator for this efficacy spec :return: Tuple of indicator specifications :rtype: Tuple of :py:class:`~.IndicatorSpecification` instances """ raise NotImplementedError() @abc.abstractmethod def get_global_efficacy_indicator(self, indicators_map): """Compute the global efficacy for the goal it achieves :param indicators_map: dict-like object containing the efficacy indicators related to this spec :type indicators_map: :py:class:`~.IndicatorsMap` instance :raises: NotImplementedError :returns: :py:class:`~.Indicator` instance list, each instance specify global efficacy for different openstack resource. 
""" raise NotImplementedError() @property def schema(self): """Combined schema from the schema of the indicators""" schema = { "type": "object", "properties": {}, "required": [] } for indicator in self.indicators_specs: schema["properties"][indicator.name] = indicator.schema if indicator.required: schema["required"].append(indicator.name) return schema def validate_efficacy_indicators(self, indicators_map): if indicators_map: jsonschema.validate(indicators_map, self.schema) else: True def get_indicators_specs_dicts(self): return [indicator.to_dict() for indicator in self.indicators_specs] def serialize_indicators_specs(self): return jsonutils.dumps(self.get_indicators_specs_dicts()) python-watcher-4.0.0/watcher/decision_engine/goal/base.py0000664000175000017500000000375713656752270023527 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import six from watcher.common.loader import loadable @six.add_metaclass(abc.ABCMeta) class Goal(loadable.Loadable): def __init__(self, config): super(Goal, self).__init__(config) self.name = self.get_name() self.display_name = self.get_display_name() self.efficacy_specification = self.get_efficacy_specification() @classmethod @abc.abstractmethod def get_name(cls): """Name of the goal: should be identical to the related entry point""" raise NotImplementedError() @classmethod @abc.abstractmethod def get_display_name(cls): """The goal display name for the goal""" raise NotImplementedError() @classmethod @abc.abstractmethod def get_translatable_display_name(cls): """The translatable msgid of the goal""" # Note(v-francoise): Defined here to be used as the translation key for # other services raise NotImplementedError() @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @abc.abstractmethod def get_efficacy_specification(cls): """The efficacy spec for the current goal""" raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/solution/0000775000175000017500000000000013656752352023162 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/solution/__init__.py0000664000175000017500000000000013656752270025260 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/solution/efficacy.py0000664000175000017500000000706413656752270025313 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import numbers from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.common import utils LOG = log.getLogger(__name__) class IndicatorsMap(utils.Struct): pass class Indicator(utils.Struct): def __init__(self, name, description, unit, value): super(Indicator, self).__init__() self.name = name self.description = description self.unit = unit if not isinstance(value, numbers.Number): raise exception.InvalidIndicatorValue( _("An indicator value should be a number")) self.value = value class Efficacy(object): """Solution efficacy""" def __init__(self, goal, strategy): """Solution efficacy :param goal: Goal associated to this solution :type goal: :py:class:`~.base.Goal` instance :param strategy: Strategy associated to this solution :type strategy: :py:class:`~.BaseStrategy` instance """ self.goal = goal self.strategy = strategy self._efficacy_spec = self.goal.efficacy_specification # Used to store in DB the info related to the efficacy indicators self.indicators = [] # Used to compute the global efficacy self._indicators_mapping = IndicatorsMap() self.global_efficacy = [] def set_efficacy_indicators(self, **indicators_map): """Set the efficacy indicators :param indicators_map: kwargs where the key is the name of the efficacy indicator as defined in the associated :py:class:`~.IndicatorSpecification` and the value is a number. 
:type indicators_map: dict {str: numerical value} """ self._indicators_mapping.update(indicators_map) def compute_global_efficacy(self): self._efficacy_spec.validate_efficacy_indicators( self._indicators_mapping) try: self.global_efficacy = ( self._efficacy_spec.get_global_efficacy_indicator( self._indicators_mapping)) indicators_specs_map = { indicator_spec.name: indicator_spec for indicator_spec in self._efficacy_spec.indicators_specs} indicators = [] for indicator_name, value in self._indicators_mapping.items(): related_indicator_spec = indicators_specs_map[indicator_name] indicators.append( Indicator( name=related_indicator_spec.name, description=related_indicator_spec.description, unit=related_indicator_spec.unit, value=value)) self.indicators = indicators except Exception as exc: LOG.exception(exc) raise exception.GlobalEfficacyComputationError( goal=self.goal.name, strategy=self.strategy.name) python-watcher-4.0.0/watcher/decision_engine/solution/default.py0000664000175000017500000000466013656752270025165 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from oslo_log import log from watcher.applier.actions import base as baction from watcher.common import exception from watcher.decision_engine.solution import base LOG = log.getLogger(__name__) class DefaultSolution(base.BaseSolution): def __init__(self, goal, strategy): """Stores a set of actions generated by a strategy The DefaultSolution class store a set of actions generated by a strategy in order to achieve the goal. :param goal: Goal associated to this solution :type goal: :py:class:`~.base.Goal` instance :param strategy: Strategy associated to this solution :type strategy: :py:class:`~.BaseStrategy` instance """ super(DefaultSolution, self).__init__(goal, strategy) self._actions = [] def add_action(self, action_type, input_parameters=None, resource_id=None): if input_parameters is not None: if baction.BaseAction.RESOURCE_ID in input_parameters.keys(): raise exception.ReservedWord(name=baction.BaseAction. RESOURCE_ID) else: input_parameters = {} if resource_id is not None: input_parameters[baction.BaseAction.RESOURCE_ID] = resource_id action = { 'action_type': action_type, 'input_parameters': input_parameters } if action not in self._actions: self._actions.append(action) else: LOG.warning('Action %s has been added into the solution, ' 'duplicate action will be dropped.', str(action)) def __str__(self): return "\n".join(self._actions) @property def actions(self): """Get the current actions of the solution""" return self._actions python-watcher-4.0.0/watcher/decision_engine/solution/solution_comparator.py0000664000175000017500000000152413656752270027640 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import six @six.add_metaclass(abc.ABCMeta) class BaseSolutionComparator(object): @abc.abstractmethod def compare(self, sol1, sol2): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/solution/base.py0000664000175000017500000001116513656752270024451 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A :ref:`Solution ` is the result of execution of a :ref:`strategy ` (i.e., an algorithm). Each solution is composed of many pieces of information: - A set of :ref:`actions ` generated by the strategy in order to achieve the :ref:`goal ` of an associated :ref:`audit `. - A set of :ref:`efficacy indicators ` as defined by the associated goal - A :ref:`global efficacy ` which is computed by the associated goal using the aforementioned efficacy indicators. A :ref:`Solution ` is different from an :ref:`Action Plan ` because it contains the non-scheduled list of :ref:`Actions ` which is produced by a :ref:`Strategy `. 
In other words, the list of Actions in a :ref:`Solution ` has not yet been re-ordered by the :ref:`Watcher Planner `. Note that some algorithms (i.e. :ref:`Strategies `) may generate several :ref:`Solutions `. This gives rise to the problem of determining which :ref:`Solution ` should be applied. Two approaches to dealing with this can be envisaged: - **fully automated mode**: only the :ref:`Solution ` with the highest ranking (i.e., the highest :ref:`Optimization Efficacy `) will be sent to the :ref:`Watcher Planner ` and translated into concrete :ref:`Actions `. - **manual mode**: several :ref:`Solutions ` are proposed to the :ref:`Administrator ` with a detailed measurement of the estimated :ref:`Optimization Efficacy ` and he/she decides which one will be launched. """ import abc import six from watcher.decision_engine.solution import efficacy @six.add_metaclass(abc.ABCMeta) class BaseSolution(object): def __init__(self, goal, strategy): """Base Solution constructor :param goal: Goal associated to this solution :type goal: :py:class:`~.base.Goal` instance :param strategy: Strategy associated to this solution :type strategy: :py:class:`~.BaseStrategy` instance """ self.goal = goal self._strategy = strategy self.origin = None self.model = None self.efficacy = efficacy.Efficacy(self.goal, self.strategy) @property def global_efficacy(self): return self.efficacy.global_efficacy @property def efficacy_indicators(self): return self.efficacy.indicators @property def strategy(self): return self._strategy def compute_global_efficacy(self): """Compute the global efficacy given a map of efficacy indicators""" self.efficacy.compute_global_efficacy() def set_efficacy_indicators(self, **indicators_map): """Set the efficacy indicators mapping (no validation) :param indicators_map: mapping between the indicator name and its value :type indicators_map: dict {`str`: `object`} """ self.efficacy.set_efficacy_indicators(**indicators_map) @abc.abstractmethod def add_action(self, 
action_type, resource_id, input_parameters=None): """Add a new Action in the Solution :param action_type: the unique id of an action type defined in entry point 'watcher_actions' :param resource_id: the unique id of the resource to which the `Action` applies. :param input_parameters: An array of input parameters provided as key-value pairs of strings. Each key-pair contains names and values that match what was previously defined in the `Action` type schema. """ raise NotImplementedError() @abc.abstractproperty def actions(self): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/solution/solution_evaluator.py0000664000175000017500000000152213656752270027471 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc import six @six.add_metaclass(abc.ABCMeta) class BaseSolutionEvaluator(object): @abc.abstractmethod def evaluate(self, solution): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/scope/0000775000175000017500000000000013656752352022417 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/scope/__init__.py0000664000175000017500000000000013656752270024515 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/scope/compute.py0000664000175000017500000002215113656752270024445 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_log import log from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.scope import base LOG = log.getLogger(__name__) class ComputeScope(base.BaseScope): """Compute Audit Scope Handler""" def __init__(self, scope, config, osc=None): super(ComputeScope, self).__init__(scope, config) self._osc = osc self.wrapper = nova_helper.NovaHelper(osc=self._osc) def remove_instance(self, cluster_model, instance, node_uuid): node = cluster_model.get_node_by_uuid(node_uuid) cluster_model.delete_instance(instance, node) def update_exclude_instance(self, cluster_model, instance, node_uuid): node = cluster_model.get_node_by_uuid(node_uuid) cluster_model.unmap_instance(instance, node) instance.update({"watcher_exclude": True}) cluster_model.map_instance(instance, node) def _check_wildcard(self, aggregate_list): if '*' in aggregate_list: if len(aggregate_list) == 1: return True else: raise exception.WildcardCharacterIsUsed( resource="host aggregates") return False def _collect_aggregates(self, host_aggregates, compute_nodes): aggregate_list = self.wrapper.get_aggregate_list() aggregate_ids = [aggregate['id'] for aggregate in host_aggregates if 'id' in aggregate] aggregate_names = [aggregate['name'] for aggregate in host_aggregates if 'name' in aggregate] include_all_nodes = any(self._check_wildcard(field) for field in (aggregate_ids, aggregate_names)) for aggregate in aggregate_list: if (aggregate.id in aggregate_ids or aggregate.name in aggregate_names or include_all_nodes): compute_nodes.extend(aggregate.hosts) def _collect_zones(self, availability_zones, allowed_nodes): service_list = self.wrapper.get_service_list() zone_names = [zone['name'] for zone in availability_zones] include_all_nodes = False if '*' in zone_names: if len(zone_names) == 1: include_all_nodes = True else: raise exception.WildcardCharacterIsUsed( resource="availability zones") for service in service_list: if service.zone in zone_names or 
include_all_nodes: allowed_nodes.extend(service.host) def exclude_resources(self, resources, **kwargs): instances_to_exclude = kwargs.get('instances') nodes_to_exclude = kwargs.get('nodes') instance_metadata = kwargs.get('instance_metadata') projects_to_exclude = kwargs.get('projects') for resource in resources: if 'instances' in resource: instances_to_exclude.extend( [instance['uuid'] for instance in resource['instances']]) elif 'compute_nodes' in resource: nodes_to_exclude.extend( [host['name'] for host in resource['compute_nodes']]) elif 'host_aggregates' in resource: prohibited_nodes = [] self._collect_aggregates(resource['host_aggregates'], prohibited_nodes) nodes_to_exclude.extend(prohibited_nodes) elif 'instance_metadata' in resource: instance_metadata.extend( [metadata for metadata in resource['instance_metadata']]) elif 'projects' in resource: projects_to_exclude.extend( [project['uuid'] for project in resource['projects']]) def remove_nodes_from_model(self, nodes_to_remove, cluster_model): for node_name in nodes_to_remove: node = cluster_model.get_node_by_name(node_name) instances = cluster_model.get_node_instances(node) for instance in instances: self.remove_instance(cluster_model, instance, node.uuid) cluster_model.remove_node(node) def update_exclude_instance_in_model( self, instances_to_exclude, cluster_model): for instance_uuid in instances_to_exclude: try: node_uuid = cluster_model.get_node_by_instance_uuid( instance_uuid).uuid except exception.ComputeResourceNotFound: LOG.warning("The following instance %s cannot be found. 
" "It might be deleted from CDM along with node" " instance was hosted on.", instance_uuid) continue self.update_exclude_instance( cluster_model, cluster_model.get_instance_by_uuid(instance_uuid), node_uuid) def exclude_instances_with_given_metadata( self, instance_metadata, cluster_model, instances_to_remove): metadata_dict = { key: val for d in instance_metadata for key, val in d.items()} instances = cluster_model.get_all_instances() for uuid, instance in instances.items(): metadata = instance.metadata common_metadata = set(metadata_dict) & set(metadata) if common_metadata and len(common_metadata) == len(metadata_dict): for key, value in metadata_dict.items(): if str(value).lower() == str(metadata.get(key)).lower(): instances_to_remove.add(uuid) def exclude_instances_with_given_project( self, projects_to_exclude, cluster_model, instances_to_exclude): all_instances = cluster_model.get_all_instances() for uuid, instance in all_instances.items(): if instance.project_id in projects_to_exclude: instances_to_exclude.add(uuid) def get_scoped_model(self, cluster_model): """Leave only nodes and instances proposed in the audit scope""" if not cluster_model: return None allowed_nodes = [] nodes_to_exclude = [] nodes_to_remove = set() instances_to_exclude = [] instance_metadata = [] projects_to_exclude = [] compute_scope = [] found_nothing_flag = False model_hosts = [n.hostname for n in cluster_model.get_all_compute_nodes().values()] if not self.scope: return cluster_model for scope in self.scope: compute_scope = scope.get('compute') if compute_scope: break if not compute_scope: return cluster_model for rule in compute_scope: if 'host_aggregates' in rule: self._collect_aggregates(rule['host_aggregates'], allowed_nodes) if not allowed_nodes: found_nothing_flag = True elif 'availability_zones' in rule: self._collect_zones(rule['availability_zones'], allowed_nodes) if not allowed_nodes: found_nothing_flag = True elif 'exclude' in rule: self.exclude_resources( rule['exclude'], 
instances=instances_to_exclude, nodes=nodes_to_exclude, instance_metadata=instance_metadata, projects=projects_to_exclude) instances_to_exclude = set(instances_to_exclude) if allowed_nodes: nodes_to_remove = set(model_hosts) - set(allowed_nodes) # This branch means user set host_aggregates and/or availability_zones # but can't find any nodes, so we should remove all nodes. elif found_nothing_flag: nodes_to_remove = set(model_hosts) nodes_to_remove.update(nodes_to_exclude) self.remove_nodes_from_model(nodes_to_remove, cluster_model) if instance_metadata and self.config.check_optimize_metadata: self.exclude_instances_with_given_metadata( instance_metadata, cluster_model, instances_to_exclude) if projects_to_exclude: self.exclude_instances_with_given_project( projects_to_exclude, cluster_model, instances_to_exclude) self.update_exclude_instance_in_model(instances_to_exclude, cluster_model) return cluster_model python-watcher-4.0.0/watcher/decision_engine/scope/storage.py0000664000175000017500000001442213656752270024437 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.scope import base class StorageScope(base.BaseScope): """Storage Audit Scope Handler""" def __init__(self, scope, config, osc=None): super(StorageScope, self).__init__(scope, config) self._osc = osc self.wrapper = cinder_helper.CinderHelper(osc=self._osc) def _collect_vtype(self, volume_types, allowed_nodes): service_list = self.wrapper.get_storage_node_list() vt_names = [volume_type['name'] for volume_type in volume_types] include_all_nodes = False if '*' in vt_names: if len(vt_names) == 1: include_all_nodes = True else: raise exception.WildcardCharacterIsUsed( resource="volume_types") for service in service_list: if include_all_nodes: allowed_nodes.append(service.host) continue backend = service.host.split('@')[1] v_types = self.wrapper.get_volume_type_by_backendname( backend) for volume_type in v_types: if volume_type in vt_names: # Note(adisky): It can generate duplicate values # but it will later converted to set allowed_nodes.append(service.host) def _collect_zones(self, availability_zones, allowed_nodes): service_list = self.wrapper.get_storage_node_list() zone_names = [zone['name'] for zone in availability_zones] include_all_nodes = False if '*' in zone_names: if len(zone_names) == 1: include_all_nodes = True else: raise exception.WildcardCharacterIsUsed( resource="availability zones") for service in service_list: if service.zone in zone_names or include_all_nodes: allowed_nodes.append(service.host) def exclude_resources(self, resources, **kwargs): pools_to_exclude = kwargs.get('pools') volumes_to_exclude = kwargs.get('volumes') projects_to_exclude = kwargs.get('projects') for resource in resources: if 'storage_pools' in resource: pools_to_exclude.extend( [storage_pool['name'] for storage_pool in resource['storage_pools']]) elif 'volumes' in resource: volumes_to_exclude.extend( [volume['uuid'] for volume in resource['volumes']]) elif 'projects' in 
resource: projects_to_exclude.extend( [project['uuid'] for project in resource['projects']]) def exclude_pools(self, pools_to_exclude, cluster_model): for pool_name in pools_to_exclude: pool = cluster_model.get_pool_by_pool_name(pool_name) volumes = cluster_model.get_pool_volumes(pool) for volume in volumes: cluster_model.remove_volume(volume) cluster_model.remove_pool(pool) def exclude_volumes(self, volumes_to_exclude, cluster_model): for volume_uuid in volumes_to_exclude: volume = cluster_model.get_volume_by_uuid(volume_uuid) cluster_model.remove_volume(volume) def exclude_projects(self, projects_to_exclude, cluster_model): all_volumes = cluster_model.get_all_volumes() for volume_uuid in all_volumes: volume = all_volumes.get(volume_uuid) if volume.project_id in projects_to_exclude: cluster_model.remove_volume(volume) def remove_nodes_from_model(self, nodes_to_remove, cluster_model): for hostname in nodes_to_remove: node = cluster_model.get_node_by_name(hostname) pools = cluster_model.get_node_pools(node) for pool in pools: volumes = cluster_model.get_pool_volumes(pool) for volume in volumes: cluster_model.remove_volume(volume) cluster_model.remove_pool(pool) cluster_model.remove_node(node) def get_scoped_model(self, cluster_model): """Leave only nodes, pools and volumes proposed in the audit scope""" if not cluster_model: return None allowed_nodes = [] nodes_to_remove = set() volumes_to_exclude = [] projects_to_exclude = [] pools_to_exclude = [] model_hosts = list(cluster_model.get_all_storage_nodes().keys()) storage_scope = [] for scope in self.scope: storage_scope = scope.get('storage') if storage_scope: break if not storage_scope: return cluster_model for rule in storage_scope: if 'volume_types' in rule: self._collect_vtype(rule['volume_types'], allowed_nodes, cluster_model) elif 'availability_zones' in rule: self._collect_zones(rule['availability_zones'], allowed_nodes) elif 'exclude' in rule: self.exclude_resources( rule['exclude'], pools=pools_to_exclude, 
volumes=volumes_to_exclude, projects=projects_to_exclude) if allowed_nodes: nodes_to_remove = set(model_hosts) - set(allowed_nodes) self.remove_nodes_from_model(nodes_to_remove, cluster_model) self.exclude_pools(pools_to_exclude, cluster_model) self.exclude_volumes(volumes_to_exclude, cluster_model) self.exclude_projects(projects_to_exclude, cluster_model) return cluster_model python-watcher-4.0.0/watcher/decision_engine/scope/baremetal.py0000664000175000017500000000420613656752270024726 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2018 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.scope import base class BaremetalScope(base.BaseScope): """Baremetal Audit Scope Handler""" def __init__(self, scope, config, osc=None): super(BaremetalScope, self).__init__(scope, config) self._osc = osc def exclude_resources(self, resources, **kwargs): nodes_to_exclude = kwargs.get('nodes') for resource in resources: if 'ironic_nodes' in resource: nodes_to_exclude.extend( [node['uuid'] for node in resource['ironic_nodes']]) def remove_nodes_from_model(self, nodes_to_exclude, cluster_model): for node_uuid in nodes_to_exclude: node = cluster_model.get_node_by_uuid(node_uuid) cluster_model.remove_node(node) def get_scoped_model(self, cluster_model): """Leave only nodes and instances proposed in the audit scope""" if not cluster_model: return None nodes_to_exclude = [] baremetal_scope = [] if not self.scope: return cluster_model for scope in self.scope: baremetal_scope = scope.get('baremetal') if baremetal_scope: break if not baremetal_scope: return cluster_model for rule in baremetal_scope: if 'exclude' in rule: self.exclude_resources( rule['exclude'], nodes=nodes_to_exclude) self.remove_nodes_from_model(nodes_to_exclude, cluster_model) return cluster_model python-watcher-4.0.0/watcher/decision_engine/scope/base.py0000664000175000017500000000227413656752270023707 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc import six from watcher.common import context @six.add_metaclass(abc.ABCMeta) class BaseScope(object): """A base class for Scope mechanism Child of this class is called when audit launches strategy. This strategy requires Cluster Data Model which can be segregated to achieve audit scope. """ def __init__(self, scope, config): self.ctx = context.make_context() self.scope = scope self.config = config @abc.abstractmethod def get_scoped_model(self, cluster_model): """Leave only nodes and instances proposed in the audit scope""" python-watcher-4.0.0/watcher/decision_engine/model/0000775000175000017500000000000013656752352022406 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/model/__init__.py0000664000175000017500000000000013656752270024504 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/model/notification/0000775000175000017500000000000013656752352025074 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/model/notification/__init__.py0000664000175000017500000000000013656752270027172 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/model/notification/filtering.py0000664000175000017500000000553713656752270027442 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import re import oslo_messaging as om import six class NotificationFilter(om.NotificationFilter): """Notification Endpoint base class This class is responsible for handling incoming notifications. Depending on the priority level of the incoming, you may need to implement one or more of the following methods: .. code: py def audit(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def info(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def warn(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def error(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) def critical(self, ctxt, publisher_id, event_type, payload, metadata): do_something(payload) """ def _build_regex_dict(self, regex_list): if regex_list is None: return {} regex_mapping = {} for key, value in regex_list.items(): if isinstance(value, dict): regex_mapping[key] = self._build_regex_dict(value) else: if callable(value): regex_mapping[key] = value elif value is not None: regex_mapping[key] = re.compile(value) else: regex_mapping[key] = None return regex_mapping def _check_for_mismatch(self, data, regex): if isinstance(regex, dict): mismatch_results = [ k not in data or not self._check_for_mismatch(data[k], v) for k, v in regex.items() ] if not mismatch_results: return False return all(mismatch_results) elif callable(regex): # The filter is a callable that should return True # if there is a mismatch return regex(data) elif regex is not None and data is None: return True elif (regex is not None and isinstance(data, six.string_types) and not regex.match(data)): return True return False python-watcher-4.0.0/watcher/decision_engine/model/notification/nova.py0000664000175000017500000003456513656752270026425 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you 
may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os_resource_classes as orc from oslo_log import log from watcher.common import exception from watcher.common import nova_helper from watcher.common import placement_helper from watcher.common import utils from watcher.decision_engine.model import element from watcher.decision_engine.model.notification import base from watcher.decision_engine.model.notification import filtering LOG = log.getLogger(__name__) class NovaNotification(base.NotificationEndpoint): def __init__(self, collector): super(NovaNotification, self).__init__(collector) self._nova = None self._placement_helper = None @property def nova(self): if self._nova is None: self._nova = nova_helper.NovaHelper() return self._nova @property def placement_helper(self): if self._placement_helper is None: self._placement_helper = placement_helper.PlacementHelper() return self._placement_helper def get_or_create_instance(self, instance_uuid, node_name=None): try: node = None if node_name: node = self.get_or_create_node(node_name) except exception.ComputeNodeNotFound: LOG.warning("Could not find compute node %(node)s for " "instance %(instance)s", dict(node=node_name, instance=instance_uuid)) try: instance = self.cluster_data_model.get_instance_by_uuid( instance_uuid) except exception.InstanceNotFound: # The instance didn't exist yet so we create a new instance object LOG.debug("New instance created: %s", instance_uuid) instance = element.Instance(uuid=instance_uuid) self.cluster_data_model.add_instance(instance) if node: 
self.cluster_data_model.map_instance(instance, node) return instance def update_instance(self, instance, data): n_version = float(data['nova_object.version']) instance_data = data['nova_object.data'] instance_flavor_data = instance_data['flavor']['nova_object.data'] memory_mb = instance_flavor_data['memory_mb'] num_cores = instance_flavor_data['vcpus'] disk_gb = instance_flavor_data['root_gb'] instance_metadata = data['nova_object.data']['metadata'] instance.update({ 'state': instance_data['state'], 'hostname': instance_data['host_name'], # this is the user-provided display name of the server which is not # guaranteed to be unique nor is it immutable. 'name': instance_data['display_name'], 'memory': memory_mb, 'vcpus': num_cores, 'disk': disk_gb, 'metadata': instance_metadata, 'project_id': instance_data['tenant_id'] }) # locked was added in nova notification payload version 1.1 if n_version > 1.0: instance.update({'locked': instance_data['locked']}) try: node = self.get_or_create_node(instance_data['host']) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) # If we can't create the node, we consider the instance as unmapped node = None self.update_instance_mapping(instance, node) def update_compute_node(self, node, data): """Update the compute node using the notification data.""" node_data = data['nova_object.data'] node_state = ( element.ServiceState.OFFLINE.value if node_data['forced_down'] else element.ServiceState.ONLINE.value) node_status = ( element.ServiceState.DISABLED.value if node_data['disabled'] else element.ServiceState.ENABLED.value) disabled_reason = ( node_data['disabled_reason'] if node_data['disabled'] else None) node.update({ 'hostname': node_data['host'], 'state': node_state, 'status': node_status, 'disabled_reason': disabled_reason, }) def create_compute_node(self, uuid_or_name): """Create the computeNode node.""" try: if utils.is_uuid_like(uuid_or_name): _node = self.nova.get_compute_node_by_uuid(uuid_or_name) else: _node = 
self.nova.get_compute_node_by_hostname(uuid_or_name) inventories = self.placement_helper.get_inventories(_node.id) if inventories and orc.VCPU in inventories: vcpus = inventories[orc.VCPU]['total'] vcpu_reserved = inventories[orc.VCPU]['reserved'] vcpu_ratio = inventories[orc.VCPU]['allocation_ratio'] else: vcpus = _node.vcpus vcpu_reserved = 0 vcpu_ratio = 1.0 if inventories and orc.MEMORY_MB in inventories: memory_mb = inventories[orc.MEMORY_MB]['total'] memory_mb_reserved = inventories[orc.MEMORY_MB]['reserved'] memory_ratio = inventories[orc.MEMORY_MB]['allocation_ratio'] else: memory_mb = _node.memory_mb memory_mb_reserved = 0 memory_ratio = 1.0 # NOTE(licanwei): A BP support-shared-storage-resource-provider # will move DISK_GB from compute node to shared storage RP. # Here may need to be updated when the nova BP released. if inventories and orc.DISK_GB in inventories: disk_capacity = inventories[orc.DISK_GB]['total'] disk_gb_reserved = inventories[orc.DISK_GB]['reserved'] disk_ratio = inventories[orc.DISK_GB]['allocation_ratio'] else: disk_capacity = _node.local_gb disk_gb_reserved = 0 disk_ratio = 1.0 # build up the compute node. node_attributes = { # The id of the hypervisor as a UUID from version 2.53. 
"uuid": _node.id, "hostname": _node.service["host"], "memory": memory_mb, "memory_ratio": memory_ratio, "memory_mb_reserved": memory_mb_reserved, "disk": disk_capacity, "disk_gb_reserved": disk_gb_reserved, "disk_ratio": disk_ratio, "vcpus": vcpus, "vcpu_reserved": vcpu_reserved, "vcpu_ratio": vcpu_ratio, "state": _node.state, "status": _node.status, "disabled_reason": _node.service["disabled_reason"]} node = element.ComputeNode(**node_attributes) self.cluster_data_model.add_node(node) LOG.debug("New compute node mapped: %s", node.uuid) return node except Exception as exc: LOG.exception(exc) LOG.debug("Could not refresh the node %s.", uuid_or_name) raise exception.ComputeNodeNotFound(name=uuid_or_name) def get_or_create_node(self, uuid_or_name): if uuid_or_name is None: LOG.debug("Compute node UUID or name not provided: skipping") return try: if utils.is_uuid_like(uuid_or_name): return self.cluster_data_model.get_node_by_uuid(uuid_or_name) else: return self.cluster_data_model.get_node_by_name(uuid_or_name) except exception.ComputeNodeNotFound: # The node didn't exist yet so we create a new node object node = self.create_compute_node(uuid_or_name) LOG.debug("New compute node created: %s", uuid_or_name) return node def update_instance_mapping(self, instance, node): if node is None: self.cluster_data_model.add_instance(instance) LOG.debug("Instance %s not yet attached to any node: skipping", instance.uuid) return try: try: current_node = ( self.cluster_data_model.get_node_by_instance_uuid( instance.uuid)) except exception.ComputeResourceNotFound as exc: LOG.exception(exc) # If we can't create the node, # we consider the instance as unmapped current_node = None LOG.debug("Mapped node %s found", node.uuid) if current_node and node != current_node: LOG.debug("Unmapping instance %s from %s", instance.uuid, node.uuid) self.cluster_data_model.unmap_instance(instance, current_node) except exception.InstanceNotFound: # The instance didn't exist yet so we map it for the first 
time LOG.debug("New instance: mapping it to %s", node.uuid) finally: if node: self.cluster_data_model.map_instance(instance, node) LOG.debug("Mapped instance %s to %s", instance.uuid, node.uuid) def delete_instance(self, instance, node): try: self.cluster_data_model.delete_instance(instance, node) except Exception: LOG.info("Instance %s already deleted", instance.uuid) def delete_node(self, node): try: self.cluster_data_model.remove_node(node) except Exception: LOG.info("Node %s already deleted", node.uuid) class VersionedNotification(NovaNotification): publisher_id_regex = r'^nova-.*' def service_updated(self, payload): node_data = payload['nova_object.data'] node_name = node_data['host'] try: node = self.get_or_create_node(node_name) self.update_compute_node(node, payload) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) def service_deleted(self, payload): node_data = payload['nova_object.data'] node_name = node_data['host'] try: node = self.get_or_create_node(node_name) self.delete_node(node) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) def instance_updated(self, payload): instance_data = payload['nova_object.data'] instance_uuid = instance_data['uuid'] instance_state = instance_data['state'] node_name = instance_data.get('host') # if instance state is building, don't update data model if instance_state == 'building': return instance = self.get_or_create_instance(instance_uuid, node_name) self.update_instance(instance, payload) def instance_created(self, payload): instance_data = payload['nova_object.data'] instance_uuid = instance_data['uuid'] instance = element.Instance(uuid=instance_uuid) self.cluster_data_model.add_instance(instance) node_name = instance_data.get('host') if node_name: node = self.get_or_create_node(node_name) self.cluster_data_model.map_instance(instance, node) self.update_instance(instance, payload) def instance_deleted(self, payload): instance_data = payload['nova_object.data'] instance_uuid = 
instance_data['uuid'] node_name = instance_data.get('host') instance = self.get_or_create_instance(instance_uuid, node_name) try: node = self.get_or_create_node(instance_data['host']) except exception.ComputeNodeNotFound as exc: LOG.exception(exc) # If we can't create the node, we consider the instance as unmapped node = None self.delete_instance(instance, node) notification_mapping = { 'instance.create.end': instance_created, 'instance.lock': instance_updated, 'instance.unlock': instance_updated, 'instance.pause.end': instance_updated, 'instance.power_off.end': instance_updated, 'instance.power_on.end': instance_updated, 'instance.resize_confirm.end': instance_updated, 'instance.restore.end': instance_updated, 'instance.resume.end': instance_updated, 'instance.shelve.end': instance_updated, 'instance.shutdown.end': instance_updated, 'instance.suspend.end': instance_updated, 'instance.unpause.end': instance_updated, 'instance.unrescue.end': instance_updated, 'instance.unshelve.end': instance_updated, 'instance.rebuild.end': instance_updated, 'instance.rescue.end': instance_updated, 'instance.update': instance_updated, 'instance.live_migration_force_complete.end': instance_updated, 'instance.live_migration_post.end': instance_updated, 'instance.delete.end': instance_deleted, 'instance.soft_delete.end': instance_deleted, 'service.create': service_updated, 'service.delete': service_deleted, 'service.update': service_updated, } @property def filter_rule(self): """Nova notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, ) def info(self, ctxt, publisher_id, event_type, payload, metadata): LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) func = self.notification_mapping.get(event_type) if func: # The nova CDM is not built until an audit is performed. 
if self.cluster_data_model: LOG.debug(payload) func(self, payload) else: LOG.debug('Nova CDM has not yet been built; ignoring ' 'notifications until an audit is performed.') python-watcher-4.0.0/watcher/decision_engine/model/notification/cinder.py0000664000175000017500000003352313656752270026717 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six from oslo_log import log from watcher.common import cinder_helper from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.model.notification import base from watcher.decision_engine.model.notification import filtering LOG = log.getLogger(__name__) class CinderNotification(base.NotificationEndpoint): def __init__(self, collector): super(CinderNotification, self).__init__(collector) self._cinder = None @property def cinder(self): if self._cinder is None: self._cinder = cinder_helper.CinderHelper() return self._cinder def update_pool(self, pool, data): """Update the storage pool using the notification data.""" pool.update({ "total_capacity_gb": data['total'], "free_capacity_gb": data['free'], "provisioned_capacity_gb": data['provisioned'], "allocated_capacity_gb": data['allocated'], "virtual_free": data['virtual_free'] }) node_name = pool.name.split("#")[0] node = self.get_or_create_node(node_name) self.cluster_data_model.map_pool(pool, node) LOG.debug("Mapped pool %s to %s", pool.name, 
node.host) def update_pool_by_api(self, pool): """Update the storage pool using the API data.""" if not pool: return _pool = self.cinder.get_storage_pool_by_name(pool.name) pool.update({ "total_volumes": _pool.total_volumes, "total_capacity_gb": _pool.total_capacity_gb, "free_capacity_gb": _pool.free_capacity_gb, "provisioned_capacity_gb": _pool.provisioned_capacity_gb, "allocated_capacity_gb": _pool.allocated_capacity_gb }) node_name = pool.name.split("#")[0] node = self.get_or_create_node(node_name) self.cluster_data_model.map_pool(pool, node) LOG.debug("Mapped pool %s to %s", pool.name, node.host) def create_storage_node(self, name): """Create the storage node by querying the Cinder API.""" try: _node = self.cinder.get_storage_node_by_name(name) _volume_type = self.cinder.get_volume_type_by_backendname( # name is formatted as host@backendname name.split('@')[1]) storage_node = element.StorageNode( host=_node.host, zone=_node.zone, state=_node.state, status=_node.status, volume_type=_volume_type) return storage_node except Exception as exc: LOG.exception(exc) LOG.debug("Could not create storage node %s.", name) raise exception.StorageNodeNotFound(name=name) def get_or_create_node(self, name): """Get storage node by name, otherwise create storage node""" if name is None: LOG.debug("Storage node name not provided: skipping") return try: return self.cluster_data_model.get_node_by_name(name) except exception.StorageNodeNotFound: # The node didn't exist yet so we create a new node object node = self.create_storage_node(name) LOG.debug("New storage node created: %s", name) self.cluster_data_model.add_node(node) LOG.debug("New storage node added: %s", name) return node def create_pool(self, pool_name): """Create the storage pool by querying the Cinder API.""" try: _pool = self.cinder.get_storage_pool_by_name(pool_name) pool = element.Pool( name=_pool.name, total_volumes=_pool.total_volumes, total_capacity_gb=_pool.total_capacity_gb, 
free_capacity_gb=_pool.free_capacity_gb, provisioned_capacity_gb=_pool.provisioned_capacity_gb, allocated_capacity_gb=_pool.allocated_capacity_gb) return pool except Exception as exc: LOG.exception(exc) LOG.debug("Could not refresh the pool %s.", pool_name) raise exception.PoolNotFound(name=pool_name) def get_or_create_pool(self, name): if not name: LOG.debug("Pool name not provided: skipping") return try: return self.cluster_data_model.get_pool_by_pool_name(name) except exception.PoolNotFound: # The pool didn't exist yet so we create a new pool object pool = self.create_pool(name) LOG.debug("New storage pool created: %s", name) self.cluster_data_model.add_pool(pool) LOG.debug("New storage pool added: %s", name) return pool def get_or_create_volume(self, volume_id, pool_name=None): try: if pool_name: self.get_or_create_pool(pool_name) except exception.PoolNotFound: LOG.warning("Could not find storage pool %(pool)s for " "volume %(volume)s", dict(pool=pool_name, volume=volume_id)) try: return self.cluster_data_model.get_volume_by_uuid(volume_id) except exception.VolumeNotFound: # The volume didn't exist yet so we create a new volume object volume = element.Volume(uuid=volume_id) self.cluster_data_model.add_volume(volume) return volume def update_volume(self, volume, data): """Update the volume using the notification data.""" def _keyReplace(key): if key == 'instance_uuid': return 'server_id' if key == 'id': return 'attachment_id' attachments = [ {_keyReplace(k): v for k, v in six.iteritems(d) if k in ('instance_uuid', 'id')} for d in data['volume_attachment'] ] # glance_metadata is provided if volume is bootable bootable = False if 'glance_metadata' in data: bootable = True volume.update({ "name": data['display_name'] or "", "size": data['size'], "status": data['status'], "attachments": attachments, "snapshot_id": data['snapshot_id'] or "", "project_id": data['tenant_id'], "metadata": data['metadata'], "bootable": bootable }) try: # if volume is under pool, let's 
update pool element. # get existing pool or create pool by cinder api pool = self.get_or_create_pool(data['host']) self.update_pool_by_api(pool) except exception.PoolNotFound as exc: LOG.exception(exc) pool = None self.update_volume_mapping(volume, pool) def update_volume_mapping(self, volume, pool): if pool is None: self.cluster_data_model.add_volume(volume) LOG.debug("Volume %s not yet attached to any pool: skipping", volume.uuid) return try: try: current_pool = ( self.cluster_data_model.get_pool_by_volume( volume) or self.get_or_create_pool(pool.name)) except exception.PoolNotFound as exc: LOG.exception(exc) # If we can't create the pool, # we consider the volume as unmapped current_pool = None LOG.debug("Mapped pool %s found", pool.name) if current_pool and pool != current_pool: LOG.debug("Unmapping volume %s from %s", volume.uuid, pool.name) self.cluster_data_model.unmap_volume(volume, current_pool) except exception.VolumeNotFound: # The instance didn't exist yet so we map it for the first time LOG.debug("New volume: mapping it to %s", pool.name) finally: if pool: self.cluster_data_model.map_volume(volume, pool) LOG.debug("Mapped volume %s to %s", volume.uuid, pool.name) def delete_volume(self, volume, pool): try: self.cluster_data_model.delete_volume(volume) except Exception: LOG.info("Volume %s already deleted", volume.uuid) try: if pool: # if volume is under pool, let's update pool element. 
# get existing pool or create pool by cinder api pool = self.get_or_create_pool(pool.name) self.update_pool_by_api(pool) except exception.PoolNotFound as exc: LOG.exception(exc) pool = None class CapacityNotificationEndpoint(CinderNotification): @property def filter_rule(self): """Cinder capacity notification filter""" return filtering.NotificationFilter( publisher_id=r'capacity.*', event_type='capacity.pool', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) name = payload['name_to_id'] try: pool = self.get_or_create_pool(name) self.update_pool(pool, payload) except exception.PoolNotFound as exc: LOG.exception(exc) class VolumeNotificationEndpoint(CinderNotification): publisher_id_regex = r'^volume.*' class VolumeCreateEnd(VolumeNotificationEndpoint): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.create.end', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) volume_id = payload['volume_id'] poolname = payload['host'] volume = self.get_or_create_volume(volume_id, poolname) self.update_volume(volume, payload) class VolumeUpdateEnd(VolumeNotificationEndpoint): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.update.end', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = 
metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) volume_id = payload['volume_id'] poolname = payload['host'] volume = self.get_or_create_volume(volume_id, poolname) self.update_volume(volume, payload) class VolumeAttachEnd(VolumeUpdateEnd): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.attach.end', ) class VolumeDetachEnd(VolumeUpdateEnd): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.detach.end', ) class VolumeResizeEnd(VolumeUpdateEnd): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.resize.end', ) class VolumeDeleteEnd(VolumeNotificationEndpoint): @property def filter_rule(self): """Cinder volume notification filter""" return filtering.NotificationFilter( publisher_id=self.publisher_id_regex, event_type='volume.delete.end', ) def info(self, ctxt, publisher_id, event_type, payload, metadata): ctxt.request_id = metadata['message_id'] ctxt.project_domain = event_type LOG.info("Event '%(event)s' received from %(publisher)s " "with metadata %(metadata)s", dict(event=event_type, publisher=publisher_id, metadata=metadata)) LOG.debug(payload) volume_id = payload['volume_id'] poolname = payload['host'] volume = self.get_or_create_volume(volume_id, poolname) try: pool = self.get_or_create_pool(poolname) except exception.PoolNotFound as exc: LOG.exception(exc) pool = None self.delete_volume(volume, pool) python-watcher-4.0.0/watcher/decision_engine/model/notification/base.py0000664000175000017500000000215013656752270026355 0ustar 
zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six @six.add_metaclass(abc.ABCMeta) class NotificationEndpoint(object): def __init__(self, collector): super(NotificationEndpoint, self).__init__() self.collector = collector self._notifier = None @abc.abstractproperty def filter_rule(self): """Notification Filter""" raise NotImplementedError() @property def cluster_data_model(self): return self.collector.cluster_data_model python-watcher-4.0.0/watcher/decision_engine/model/element/0000775000175000017500000000000013656752352024037 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/model/element/__init__.py0000664000175000017500000000245013656752270026150 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.model.element import instance from watcher.decision_engine.model.element import node from watcher.decision_engine.model.element import volume ServiceState = node.ServiceState ComputeNode = node.ComputeNode StorageNode = node.StorageNode IronicNode = node.IronicNode Pool = node.Pool InstanceState = instance.InstanceState Instance = instance.Instance VolumeState = volume.VolumeState Volume = volume.Volume __all__ = ['ServiceState', 'ComputeNode', 'InstanceState', 'Instance', 'StorageNode', 'Pool', 'VolumeState', 'Volume', 'IronicNode'] python-watcher-4.0.0/watcher/decision_engine/model/element/baremetal_resource.py0000664000175000017500000000166013656752270030256 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from watcher.decision_engine.model.element import base from watcher.objects import fields as wfields @six.add_metaclass(abc.ABCMeta) class BaremetalResource(base.Element): VERSION = '1.0' fields = { "uuid": wfields.StringField(), "human_id": wfields.StringField(default=""), } python-watcher-4.0.0/watcher/decision_engine/model/element/volume.py0000664000175000017500000000350013656752270025715 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from watcher.decision_engine.model.element import storage_resource from watcher.objects import base from watcher.objects import fields as wfields class VolumeState(enum.Enum): # https://docs.openstack.org/api-ref/block-storage/v3/#volumes-volumes CREATING = 'creating' AVAILABLE = 'available' ATTACHING = 'attaching' IN_USE = 'in-use' DELETING = 'deleting' ERROR = 'error' ERROR_DELETING = 'error_deleting' BACKING_UP = 'backing-up' RESTORING_BACKUP = 'restoring-backup' ERROR_RESTORING = 'error_restoring' ERROR_EXTENDING = 'error_extending' @base.WatcherObjectRegistry.register_if(False) class Volume(storage_resource.StorageResource): fields = { "size": wfields.NonNegativeIntegerField(), "status": wfields.StringField(default=VolumeState.AVAILABLE.value), "attachments": wfields.FlexibleListOfDictField(), "name": wfields.StringField(), "multiattach": wfields.BooleanField(), "snapshot_id": wfields.UUIDField(nullable=True), "project_id": wfields.UUIDField(), "metadata": wfields.JsonField(), "bootable": wfields.BooleanField() } def accept(self, visitor): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/model/element/node.py0000664000175000017500000000730113656752270025336 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import enum from watcher.decision_engine.model.element import baremetal_resource from watcher.decision_engine.model.element import compute_resource from watcher.decision_engine.model.element import storage_resource from watcher.objects import base from watcher.objects import fields as wfields class ServiceState(enum.Enum): ONLINE = 'up' OFFLINE = 'down' ENABLED = 'enabled' DISABLED = 'disabled' @base.WatcherObjectRegistry.register_if(False) class ComputeNode(compute_resource.ComputeResource): fields = { "hostname": wfields.StringField(), "status": wfields.StringField(default=ServiceState.ENABLED.value), "disabled_reason": wfields.StringField(nullable=True), "state": wfields.StringField(default=ServiceState.ONLINE.value), "memory": wfields.NonNegativeIntegerField(), "memory_mb_reserved": wfields.NonNegativeIntegerField(), "disk": wfields.NonNegativeIntegerField(), "disk_gb_reserved": wfields.NonNegativeIntegerField(), "vcpus": wfields.NonNegativeIntegerField(), "vcpu_reserved": wfields.NonNegativeIntegerField(), "memory_ratio": wfields.NonNegativeFloatField(), "vcpu_ratio": wfields.NonNegativeFloatField(), "disk_ratio": wfields.NonNegativeFloatField(), } def accept(self, visitor): raise NotImplementedError() @property def memory_mb_capacity(self): return (self.memory-self.memory_mb_reserved)*self.memory_ratio @property def disk_gb_capacity(self): return (self.disk-self.disk_gb_reserved)*self.disk_ratio @property def vcpu_capacity(self): return (self.vcpus-self.vcpu_reserved)*self.vcpu_ratio @base.WatcherObjectRegistry.register_if(False) class 
StorageNode(storage_resource.StorageResource): fields = { "host": wfields.StringField(), "zone": wfields.StringField(), "status": wfields.StringField(default=ServiceState.ENABLED.value), "state": wfields.StringField(default=ServiceState.ONLINE.value), "volume_type": wfields.ListOfStringsField() } def accept(self, visitor): raise NotImplementedError() @base.WatcherObjectRegistry.register_if(False) class Pool(storage_resource.StorageResource): fields = { "name": wfields.StringField(), "total_volumes": wfields.NonNegativeIntegerField(), "total_capacity_gb": wfields.NonNegativeIntegerField(), "free_capacity_gb": wfields.NonNegativeIntegerField(), "provisioned_capacity_gb": wfields.NonNegativeIntegerField(), "allocated_capacity_gb": wfields.NonNegativeIntegerField(), "virtual_free": wfields.NonNegativeIntegerField(default=0), } def accept(self, visitor): raise NotImplementedError() @base.WatcherObjectRegistry.register_if(False) class IronicNode(baremetal_resource.BaremetalResource): fields = { "power_state": wfields.StringField(), "maintenance": wfields.BooleanField(), "maintenance_reason": wfields.StringField(), "extra": wfields.DictField() } def accept(self, visitor): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/model/element/instance.py0000664000175000017500000000416213656752270026217 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import enum from watcher.decision_engine.model.element import compute_resource from watcher.objects import base from watcher.objects import fields as wfields class InstanceState(enum.Enum): ACTIVE = 'active' # Instance is running BUILDING = 'building' # Instance only exists in DB PAUSED = 'paused' SUSPENDED = 'suspended' # Instance is suspended to disk. STOPPED = 'stopped' # Instance is shut off, the disk image is still there. RESCUED = 'rescued' # A rescue image is running with the original image # attached. RESIZED = 'resized' # an Instance with the new size is active. SHELVED = 'shelved' SOFT_DELETED = 'soft-delete' # still available to restore. DELETED = 'deleted' # Instance is permanently deleted. ERROR = 'error' @base.WatcherObjectRegistry.register_if(False) class Instance(compute_resource.ComputeResource): fields = { # If the resource is excluded by the scope, # 'watcher_exclude' property will be set True. "watcher_exclude": wfields.BooleanField(default=False), "name": wfields.StringField(), "state": wfields.StringField(default=InstanceState.ACTIVE.value), "memory": wfields.NonNegativeIntegerField(), "disk": wfields.NonNegativeIntegerField(), "vcpus": wfields.NonNegativeIntegerField(), "metadata": wfields.JsonField(), "project_id": wfields.UUIDField(), "locked": wfields.BooleanField(default=False), } def accept(self, visitor): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/model/element/compute_resource.py0000664000175000017500000000156013656752270027775 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import six from watcher.decision_engine.model.element import base from watcher.objects import fields as wfields @six.add_metaclass(abc.ABCMeta) class ComputeResource(base.Element): VERSION = '1.0' fields = { "uuid": wfields.StringField(), } python-watcher-4.0.0/watcher/decision_engine/model/element/storage_resource.py0000664000175000017500000000166413656752270027772 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import six from watcher.decision_engine.model.element import base from watcher.objects import fields as wfields @six.add_metaclass(abc.ABCMeta) class StorageResource(base.Element): VERSION = '1.0' fields = { "uuid": wfields.StringField(default=""), "human_id": wfields.StringField(default=""), } python-watcher-4.0.0/watcher/decision_engine/model/element/base.py0000664000175000017500000000404313656752270025323 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import collections from lxml import etree from oslo_log import log import six from watcher.objects import base from watcher.objects import fields as wfields LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) class Element(base.WatcherObject, base.WatcherObjectDictCompat, base.WatcherComparableObject): # Initial version VERSION = '1.0' fields = {} def __init__(self, context=None, **kwargs): for name, field in self.fields.items(): # The idea here is to force the initialization of unspecified # fields that have a default value if (name not in kwargs and not field.nullable and field.default != wfields.UnspecifiedDefault): kwargs[name] = field.default super(Element, self).__init__(context, **kwargs) @abc.abstractmethod def accept(self, visitor): raise NotImplementedError() def as_xml_element(self): sorted_fieldmap = [] for field in self.fields: try: value = str(self[field]) sorted_fieldmap.append((field, value)) except Exception as exc: LOG.exception(exc) attrib = collections.OrderedDict(sorted_fieldmap) element_name = self.__class__.__name__ instance_el = etree.Element(element_name, attrib=attrib) return instance_el python-watcher-4.0.0/watcher/decision_engine/model/model_root.py0000664000175000017500000005711413656752270025132 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Innovation and Research Ireland Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Openstack implementation of the cluster graph. 
""" import ast from lxml import etree import networkx as nx from oslo_concurrency import lockutils from oslo_log import log import six from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import base from watcher.decision_engine.model import element LOG = log.getLogger(__name__) class ModelRoot(nx.DiGraph, base.Model): """Cluster graph for an Openstack cluster.""" def __init__(self, stale=False): super(ModelRoot, self).__init__() self.stale = stale def __nonzero__(self): return not self.stale __bool__ = __nonzero__ @staticmethod def assert_node(obj): if not isinstance(obj, element.ComputeNode): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @staticmethod def assert_instance(obj): if not isinstance(obj, element.Instance): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid")) @lockutils.synchronized("model_root") def add_node(self, node): self.assert_node(node) super(ModelRoot, self).add_node(node.uuid, attr=node) @lockutils.synchronized("model_root") def remove_node(self, node): self.assert_node(node) try: super(ModelRoot, self).remove_node(node.uuid) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.ComputeNodeNotFound(name=node.uuid) @lockutils.synchronized("model_root") def add_instance(self, instance): self.assert_instance(instance) try: super(ModelRoot, self).add_node(instance.uuid, attr=instance) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.InstanceNotFound(name=instance.uuid) @lockutils.synchronized("model_root") def remove_instance(self, instance): self.assert_instance(instance) super(ModelRoot, self).remove_node(instance.uuid) @lockutils.synchronized("model_root") def map_instance(self, instance, node): """Map a newly created instance to a node :param instance: :py:class:`~.instance.Instance` object or instance UUID :type instance: str or :py:class:`~.instance.Instance` :param 
node: :py:class:`~.node.ComputeNode` object or node UUID :type node: str or :py:class:`~.instance.Instance` """ if isinstance(instance, six.string_types): instance = self.get_instance_by_uuid(instance) if isinstance(node, six.string_types): node = self.get_node_by_uuid(node) self.assert_node(node) self.assert_instance(instance) self.add_edge(instance.uuid, node.uuid) @lockutils.synchronized("model_root") def unmap_instance(self, instance, node): if isinstance(instance, six.string_types): instance = self.get_instance_by_uuid(instance) if isinstance(node, six.string_types): node = self.get_node_by_uuid(node) self.remove_edge(instance.uuid, node.uuid) def delete_instance(self, instance, node=None): self.assert_instance(instance) self.remove_instance(instance) @lockutils.synchronized("model_root") def migrate_instance(self, instance, source_node, destination_node): """Migrate single instance from source_node to destination_node :param instance: :param source_node: :param destination_node: :return: """ self.assert_instance(instance) self.assert_node(source_node) self.assert_node(destination_node) if source_node == destination_node: return False # unmap self.remove_edge(instance.uuid, source_node.uuid) # map self.add_edge(instance.uuid, destination_node.uuid) return True @lockutils.synchronized("model_root") def get_all_compute_nodes(self): return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True) if isinstance(cn['attr'], element.ComputeNode)} @lockutils.synchronized("model_root") def get_node_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except exception.ComputeResourceNotFound: raise exception.ComputeNodeNotFound(name=uuid) @lockutils.synchronized("model_root") def get_node_by_name(self, name): try: node_list = [cn['attr'] for uuid, cn in self.nodes(data=True) if (isinstance(cn['attr'], element.ComputeNode) and cn['attr']['hostname'] == name)] if node_list: return node_list[0] else: raise exception.ComputeResourceNotFound except 
exception.ComputeResourceNotFound: raise exception.ComputeNodeNotFound(name=name) @lockutils.synchronized("model_root") def get_instance_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except exception.ComputeResourceNotFound: raise exception.InstanceNotFound(name=uuid) def _get_by_uuid(self, uuid): try: return self.nodes[uuid]['attr'] except Exception as exc: LOG.exception(exc) raise exception.ComputeResourceNotFound(name=uuid) @lockutils.synchronized("model_root") def get_node_by_instance_uuid(self, instance_uuid): instance = self._get_by_uuid(instance_uuid) for node_uuid in self.neighbors(instance.uuid): node = self._get_by_uuid(node_uuid) if isinstance(node, element.ComputeNode): return node raise exception.InstanceNotMapped(uuid=instance_uuid) @lockutils.synchronized("model_root") def get_all_instances(self): return {uuid: inst['attr'] for uuid, inst in self.nodes(data=True) if isinstance(inst['attr'], element.Instance)} @lockutils.synchronized("model_root") def get_node_instances(self, node): self.assert_node(node) node_instances = [] for instance_uuid in self.predecessors(node.uuid): instance = self._get_by_uuid(instance_uuid) if isinstance(instance, element.Instance): node_instances.append(instance) return node_instances def get_node_used_resources(self, node): vcpu_used = 0 memory_used = 0 disk_used = 0 for instance in self.get_node_instances(node): vcpu_used += instance.vcpus memory_used += instance.memory disk_used += instance.disk return dict(vcpu=vcpu_used, memory=memory_used, disk=disk_used) def get_node_free_resources(self, node): resources_used = self.get_node_used_resources(node) vcpu_free = node.vcpu_capacity-resources_used.get('vcpu') memory_free = node.memory_mb_capacity-resources_used.get('memory') disk_free = node.disk_gb_capacity-resources_used.get('disk') return dict(vcpu=vcpu_free, memory=memory_free, disk=disk_free) def to_string(self): return self.to_xml() def to_xml(self): root = etree.Element("ModelRoot") # Build compute node 
tree for cn in sorted(self.get_all_compute_nodes().values(), key=lambda cn: cn.uuid): compute_node_el = cn.as_xml_element() # Build mapped instance tree node_instances = self.get_node_instances(cn) for instance in sorted(node_instances, key=lambda x: x.uuid): instance_el = instance.as_xml_element() compute_node_el.append(instance_el) root.append(compute_node_el) # Build unmapped instance tree (i.e. not assigned to any compute node) for instance in sorted(self.get_all_instances().values(), key=lambda inst: inst.uuid): try: self.get_node_by_instance_uuid(instance.uuid) except exception.ComputeResourceNotFound: root.append(instance.as_xml_element()) return etree.tostring(root, pretty_print=True).decode('utf-8') def to_list(self): ret_list = [] for cn in sorted(self.get_all_compute_nodes().values(), key=lambda cn: cn.uuid): in_dict = {} for field in cn.fields: new_name = "node_"+str(field) in_dict[new_name] = cn[field] node_instances = self.get_node_instances(cn) if not node_instances: deep_in_dict = in_dict.copy() ret_list.append(deep_in_dict) continue for instance in sorted(node_instances, key=lambda x: x.uuid): for field in instance.fields: new_name = "server_"+str(field) in_dict[new_name] = instance[field] if in_dict != {}: deep_in_dict = in_dict.copy() ret_list.append(deep_in_dict) return ret_list @classmethod def from_xml(cls, data): model = cls() root = etree.fromstring(data) for cn in root.findall('.//ComputeNode'): node = element.ComputeNode(**cn.attrib) model.add_node(node) for inst in root.findall('.//Instance'): instance = element.Instance(**inst.attrib) instance.watcher_exclude = ast.literal_eval( inst.attrib["watcher_exclude"]) model.add_instance(instance) parent = inst.getparent() if parent.tag == 'ComputeNode': node = model.get_node_by_uuid(parent.get('uuid')) model.map_instance(instance, node) else: model.add_instance(instance) return model @classmethod def is_isomorphic(cls, G1, G2): def node_match(node1, node2): return node1['attr'].as_dict() == 
node2['attr'].as_dict() return nx.algorithms.isomorphism.isomorph.is_isomorphic( G1, G2, node_match=node_match) class StorageModelRoot(nx.DiGraph, base.Model): """Cluster graph for an Openstack cluster.""" def __init__(self, stale=False): super(StorageModelRoot, self).__init__() self.stale = stale def __nonzero__(self): return not self.stale __bool__ = __nonzero__ @staticmethod def assert_node(obj): if not isinstance(obj, element.StorageNode): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @staticmethod def assert_pool(obj): if not isinstance(obj, element.Pool): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @staticmethod def assert_volume(obj): if not isinstance(obj, element.Volume): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @lockutils.synchronized("storage_model") def add_node(self, node): self.assert_node(node) super(StorageModelRoot, self).add_node(node.host, attr=node) @lockutils.synchronized("storage_model") def add_pool(self, pool): self.assert_pool(pool) super(StorageModelRoot, self).add_node(pool.name, attr=pool) @lockutils.synchronized("storage_model") def remove_node(self, node): self.assert_node(node) try: super(StorageModelRoot, self).remove_node(node.host) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.StorageNodeNotFound(name=node.host) @lockutils.synchronized("storage_model") def remove_pool(self, pool): self.assert_pool(pool) try: super(StorageModelRoot, self).remove_node(pool.name) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.PoolNotFound(name=pool.name) @lockutils.synchronized("storage_model") def map_pool(self, pool, node): """Map a newly created pool to a node :param pool: :py:class:`~.node.Pool` object or pool name :param node: :py:class:`~.node.StorageNode` object or node host """ if isinstance(pool, six.string_types): pool = 
self.get_pool_by_pool_name(pool) if isinstance(node, six.string_types): node = self.get_node_by_name(node) self.assert_node(node) self.assert_pool(pool) self.add_edge(pool.name, node.host) @lockutils.synchronized("storage_model") def unmap_pool(self, pool, node): """Unmap a pool from a node :param pool: :py:class:`~.node.Pool` object or pool name :param node: :py:class:`~.node.StorageNode` object or node name """ if isinstance(pool, six.string_types): pool = self.get_pool_by_pool_name(pool) if isinstance(node, six.string_types): node = self.get_node_by_name(node) self.remove_edge(pool.name, node.host) @lockutils.synchronized("storage_model") def add_volume(self, volume): self.assert_volume(volume) super(StorageModelRoot, self).add_node(volume.uuid, attr=volume) @lockutils.synchronized("storage_model") def remove_volume(self, volume): self.assert_volume(volume) try: super(StorageModelRoot, self).remove_node(volume.uuid) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.VolumeNotFound(name=volume.uuid) @lockutils.synchronized("storage_model") def map_volume(self, volume, pool): """Map a newly created volume to a pool :param volume: :py:class:`~.volume.Volume` object or volume UUID :param pool: :py:class:`~.node.Pool` object or pool name """ if isinstance(volume, six.string_types): volume = self.get_volume_by_uuid(volume) if isinstance(pool, six.string_types): pool = self.get_pool_by_pool_name(pool) self.assert_pool(pool) self.assert_volume(volume) self.add_edge(volume.uuid, pool.name) @lockutils.synchronized("storage_model") def unmap_volume(self, volume, pool): """Unmap a volume from a pool :param volume: :py:class:`~.volume.Volume` object or volume UUID :param pool: :py:class:`~.node.Pool` object or pool name """ if isinstance(volume, six.string_types): volume = self.get_volume_by_uuid(volume) if isinstance(pool, six.string_types): pool = self.get_pool_by_pool_name(pool) self.remove_edge(volume.uuid, pool.name) def delete_volume(self, volume): 
self.assert_volume(volume) self.remove_volume(volume) @lockutils.synchronized("storage_model") def get_all_storage_nodes(self): return {host: cn['attr'] for host, cn in self.nodes(data=True) if isinstance(cn['attr'], element.StorageNode)} @lockutils.synchronized("storage_model") def get_node_by_name(self, name): try: return self._get_by_name(name.split("#")[0]) except exception.StorageResourceNotFound: raise exception.StorageNodeNotFound(name=name) @lockutils.synchronized("storage_model") def get_pool_by_pool_name(self, name): try: return self._get_by_name(name) except exception.StorageResourceNotFound: raise exception.PoolNotFound(name=name) @lockutils.synchronized("storage_model") def get_volume_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except exception.StorageResourceNotFound: raise exception.VolumeNotFound(name=uuid) def _get_by_uuid(self, uuid): try: return self.nodes[uuid]['attr'] except Exception as exc: LOG.exception(exc) raise exception.StorageResourceNotFound(name=uuid) def _get_by_name(self, name): try: return self.nodes[name]['attr'] except Exception as exc: LOG.exception(exc) raise exception.StorageResourceNotFound(name=name) @lockutils.synchronized("storage_model") def get_node_by_pool_name(self, pool_name): pool = self._get_by_name(pool_name) for node_name in self.neighbors(pool.name): node = self._get_by_name(node_name) if isinstance(node, element.StorageNode): return node raise exception.StorageNodeNotFound(name=pool_name) @lockutils.synchronized("storage_model") def get_node_pools(self, node): self.assert_node(node) node_pools = [] for pool_name in self.predecessors(node.host): pool = self._get_by_name(pool_name) if isinstance(pool, element.Pool): node_pools.append(pool) return node_pools @lockutils.synchronized("storage_model") def get_pool_by_volume(self, volume): self.assert_volume(volume) volume = self._get_by_uuid(volume.uuid) for p in self.neighbors(volume.uuid): pool = self._get_by_name(p) if isinstance(pool, element.Pool): 
return pool raise exception.PoolNotFound(name=volume.uuid) @lockutils.synchronized("storage_model") def get_all_volumes(self): return {name: vol['attr'] for name, vol in self.nodes(data=True) if isinstance(vol['attr'], element.Volume)} @lockutils.synchronized("storage_model") def get_pool_volumes(self, pool): self.assert_pool(pool) volumes = [] for vol in self.predecessors(pool.name): volume = self._get_by_uuid(vol) if isinstance(volume, element.Volume): volumes.append(volume) return volumes def to_string(self): return self.to_xml() def to_xml(self): root = etree.Element("ModelRoot") # Build storage node tree for cn in sorted(self.get_all_storage_nodes().values(), key=lambda cn: cn.host): storage_node_el = cn.as_xml_element() # Build mapped pool tree node_pools = self.get_node_pools(cn) for pool in sorted(node_pools, key=lambda x: x.name): pool_el = pool.as_xml_element() storage_node_el.append(pool_el) # Build mapped volume tree pool_volumes = self.get_pool_volumes(pool) for volume in sorted(pool_volumes, key=lambda x: x.uuid): volume_el = volume.as_xml_element() pool_el.append(volume_el) root.append(storage_node_el) # Build unmapped volume tree (i.e. 
not assigned to any pool) for volume in sorted(self.get_all_volumes().values(), key=lambda vol: vol.uuid): try: self.get_pool_by_volume(volume) except (exception.VolumeNotFound, exception.PoolNotFound): root.append(volume.as_xml_element()) return etree.tostring(root, pretty_print=True).decode('utf-8') @classmethod def from_xml(cls, data): model = cls() root = etree.fromstring(data) for cn in root.findall('.//StorageNode'): ndata = {} for attr, val in cn.items(): ndata[attr] = val volume_type = ndata.get('volume_type') if volume_type: ndata['volume_type'] = [volume_type] node = element.StorageNode(**ndata) model.add_node(node) for p in root.findall('.//Pool'): pool = element.Pool(**p.attrib) model.add_pool(pool) parent = p.getparent() if parent.tag == 'StorageNode': node = model.get_node_by_name(parent.get('host')) model.map_pool(pool, node) else: model.add_pool(pool) for vol in root.findall('.//Volume'): volume = element.Volume(**vol.attrib) model.add_volume(volume) parent = vol.getparent() if parent.tag == 'Pool': pool = model.get_pool_by_pool_name(parent.get('name')) model.map_volume(volume, pool) else: model.add_volume(volume) return model @classmethod def is_isomorphic(cls, G1, G2): return nx.algorithms.isomorphism.isomorph.is_isomorphic( G1, G2) class BaremetalModelRoot(nx.DiGraph, base.Model): """Cluster graph for an Openstack cluster: Baremetal Cluster.""" def __init__(self, stale=False): super(BaremetalModelRoot, self).__init__() self.stale = stale def __nonzero__(self): return not self.stale __bool__ = __nonzero__ @staticmethod def assert_node(obj): if not isinstance(obj, element.IronicNode): raise exception.IllegalArgumentException( message=_("'obj' argument type is not valid: %s") % type(obj)) @lockutils.synchronized("baremetal_model") def add_node(self, node): self.assert_node(node) super(BaremetalModelRoot, self).add_node(node.uuid, attr=node) @lockutils.synchronized("baremetal_model") def remove_node(self, node): self.assert_node(node) try: 
super(BaremetalModelRoot, self).remove_node(node.uuid) except nx.NetworkXError as exc: LOG.exception(exc) raise exception.IronicNodeNotFound(name=node.uuid) @lockutils.synchronized("baremetal_model") def get_all_ironic_nodes(self): return {uuid: cn['attr'] for uuid, cn in self.nodes(data=True) if isinstance(cn['attr'], element.IronicNode)} @lockutils.synchronized("baremetal_model") def get_node_by_uuid(self, uuid): try: return self._get_by_uuid(uuid) except exception.BaremetalResourceNotFound: raise exception.IronicNodeNotFound(name=uuid) def _get_by_uuid(self, uuid): try: return self.nodes[uuid]['attr'] except Exception as exc: LOG.exception(exc) raise exception.BaremetalResourceNotFound(name=uuid) def to_string(self): return self.to_xml() def to_xml(self): root = etree.Element("ModelRoot") # Build Ironic node tree for cn in sorted(self.get_all_ironic_nodes().values(), key=lambda cn: cn.uuid): ironic_node_el = cn.as_xml_element() root.append(ironic_node_el) return etree.tostring(root, pretty_print=True).decode('utf-8') @classmethod def from_xml(cls, data): model = cls() root = etree.fromstring(data) for cn in root.findall('.//IronicNode'): node = element.IronicNode(**cn.attrib) model.add_node(node) return model @classmethod def is_isomorphic(cls, G1, G2): return nx.algorithms.isomorphism.isomorph.is_isomorphic( G1, G2) python-watcher-4.0.0/watcher/decision_engine/model/base.py0000664000175000017500000000217713656752270023700 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ This component is in charge of executing the :ref:`Action Plan ` built by the :ref:`Watcher Decision Engine `. See: :doc:`../architecture` for more details on this component. """ import abc import six @six.add_metaclass(abc.ABCMeta) class Model(object): @abc.abstractmethod def to_string(self): raise NotImplementedError() @abc.abstractmethod def to_xml(self): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/model/collector/0000775000175000017500000000000013656752352024374 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/model/collector/__init__.py0000664000175000017500000000000013656752270026472 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/model/collector/ironic.py0000664000175000017500000000747513656752270026245 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Authors:Yumeng Bao # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_log import log from watcher.common import ironic_helper from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root from watcher.decision_engine.scope import baremetal as baremetal_scope LOG = log.getLogger(__name__) class BaremetalClusterDataModelCollector(base.BaseClusterDataModelCollector): """Baremetal cluster data model collector The Baremetal cluster data model collector creates an in-memory representation of the resources exposed by the baremetal service. """ def __init__(self, config, osc=None): super(BaremetalClusterDataModelCollector, self).__init__(config, osc) @property def notification_endpoints(self): """Associated notification endpoints :return: Associated notification endpoints :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances """ return None def get_audit_scope_handler(self, audit_scope): self._audit_scope_handler = baremetal_scope.BaremetalScope( audit_scope, self.config) if self._data_model_scope is None or ( len(self._data_model_scope) > 0 and ( self._data_model_scope != audit_scope)): self._data_model_scope = audit_scope self._cluster_data_model = None LOG.debug("audit scope %s", audit_scope) return self._audit_scope_handler def execute(self): """Build the baremetal cluster data model""" LOG.debug("Building latest Baremetal cluster data model") if self._audit_scope_handler is None: LOG.debug("No audit, Don't Build Baremetal data model") return builder = BareMetalModelBuilder(self.osc) return builder.execute(self._data_model_scope) class BareMetalModelBuilder(base.BaseModelBuilder): """Build the graph-based model This model builder adds the following data" - Baremetal-related knowledge (Ironic) """ def __init__(self, osc): self.osc = osc self.model = model_root.BaremetalModelRoot() self.ironic_helper = ironic_helper.IronicHelper(osc=self.osc) def add_ironic_node(self, node): # Build and add base node. 
ironic_node = self.build_ironic_node(node) self.model.add_node(ironic_node) def build_ironic_node(self, node): """Build a Baremetal node from a Ironic node :param node: A ironic node :type node: :py:class:`~ironicclient.v1.node.Node` """ # build up the ironic node. node_attributes = { "uuid": node.uuid, "power_state": node.power_state, "maintenance": node.maintenance, "maintenance_reason": node.maintenance_reason, "extra": {"compute_node_id": node.extra.compute_node_id} } ironic_node = element.IronicNode(**node_attributes) return ironic_node def execute(self, model_scope): # TODO(Dantali0n): Use scope to limit size of model for node in self.call_retry(self.ironic_helper.get_ironic_node_list): self.add_ironic_node(node) return self.model python-watcher-4.0.0/watcher/decision_engine/model/collector/nova.py0000664000175000017500000005164013656752270025716 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Intel Innovation and Research Ireland Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
import os_resource_classes as orc from oslo_log import log from futurist import waiters from watcher.common import nova_helper from watcher.common import placement_helper from watcher.decision_engine.model.collector import base from watcher.decision_engine.model import element from watcher.decision_engine.model import model_root from watcher.decision_engine.model.notification import nova from watcher.decision_engine.scope import compute as compute_scope from watcher.decision_engine import threading LOG = log.getLogger(__name__) class NovaClusterDataModelCollector(base.BaseClusterDataModelCollector): """Nova cluster data model collector The Nova cluster data model collector creates an in-memory representation of the resources exposed by the compute service. """ HOST_AGGREGATES = "#/items/properties/compute/host_aggregates/" SCHEMA = { "$schema": "http://json-schema.org/draft-04/schema#", "type": "array", "items": { "type": "object", "properties": { "host_aggregates": { "type": "array", "items": { "anyOf": [ {"$ref": HOST_AGGREGATES + "id"}, {"$ref": HOST_AGGREGATES + "name"}, ] } }, "availability_zones": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "exclude": { "type": "array", "items": { "type": "object", "properties": { "instances": { "type": "array", "items": { "type": "object", "properties": { "uuid": { "type": "string" } }, "additionalProperties": False } }, "compute_nodes": { "type": "array", "items": { "type": "object", "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "host_aggregates": { "type": "array", "items": { "anyOf": [ {"$ref": HOST_AGGREGATES + "id"}, {"$ref": HOST_AGGREGATES + "name"}, ] } }, "instance_metadata": { "type": "array", "items": { "type": "object" } }, "projects": { "type": "array", "items": { "type": "object", "properties": { "uuid": { "type": "string" } }, "additionalProperties": False } } }, "additionalProperties": 
False } } }, "additionalProperties": False }, "host_aggregates": { "id": { "properties": { "id": { "oneOf": [ {"type": "integer"}, {"enum": ["*"]} ] } }, "additionalProperties": False }, "name": { "properties": { "name": { "type": "string" } }, "additionalProperties": False } }, "additionalProperties": False } def __init__(self, config, osc=None): super(NovaClusterDataModelCollector, self).__init__(config, osc) @property def notification_endpoints(self): """Associated notification endpoints :return: Associated notification endpoints :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances """ return [ nova.VersionedNotification(self), ] def get_audit_scope_handler(self, audit_scope): self._audit_scope_handler = compute_scope.ComputeScope( audit_scope, self.config) if self._data_model_scope is None or ( len(self._data_model_scope) > 0 and ( self._data_model_scope != audit_scope)): self._data_model_scope = audit_scope self._cluster_data_model = None LOG.debug("audit scope %s", audit_scope) return self._audit_scope_handler def execute(self): """Build the compute cluster data model""" LOG.debug("Building latest Nova cluster data model") if self._audit_scope_handler is None: LOG.debug("No audit, Don't Build compute data model") return builder = NovaModelBuilder(self.osc) return builder.execute(self._data_model_scope) class NovaModelBuilder(base.BaseModelBuilder): """Build the graph-based model This model builder adds the following data" - Compute-related knowledge (Nova) - TODO(v-francoise): Network-related knowledge (Neutron) NOTE(v-francoise): This model builder is meant to be extended in the future to also include both storage and network information respectively coming from Cinder and Neutron. 
Some prelimary work has been done in this direction in https://review.opendev.org/#/c/362730 but since we cannot guarantee a sufficient level of consistency for neither the storage nor the network part before the end of the Ocata cycle, this work has been re-scheduled for Pike. In the meantime, all the associated code has been commented out. """ def __init__(self, osc): self.osc = osc self.model = None self.model_scope = dict() self.no_model_scope_flag = False self.nova = osc.nova() self.nova_helper = nova_helper.NovaHelper(osc=self.osc) self.placement_helper = placement_helper.PlacementHelper(osc=self.osc) self.executor = threading.DecisionEngineThreadPool() def _collect_aggregates(self, host_aggregates, _nodes): if not host_aggregates: return aggregate_list = self.call_retry(f=self.nova_helper.get_aggregate_list) aggregate_ids = [aggregate['id'] for aggregate in host_aggregates if 'id' in aggregate] aggregate_names = [aggregate['name'] for aggregate in host_aggregates if 'name' in aggregate] include_all_nodes = any('*' in field for field in (aggregate_ids, aggregate_names)) for aggregate in aggregate_list: if (aggregate.id in aggregate_ids or aggregate.name in aggregate_names or include_all_nodes): _nodes.update(aggregate.hosts) def _collect_zones(self, availability_zones, _nodes): if not availability_zones: return service_list = self.call_retry(f=self.nova_helper.get_service_list) zone_names = [zone['name'] for zone in availability_zones] include_all_nodes = False if '*' in zone_names: include_all_nodes = True for service in service_list: if service.zone in zone_names or include_all_nodes: _nodes.add(service.host) def _compute_node_future(self, future, future_instances): """Add compute node information to model and schedule instance info job :param future: The future from the finished execution :rtype future: :py:class:`futurist.GreenFuture` :param future_instances: list of futures for instance jobs :rtype future_instances: list :py:class:`futurist.GreenFuture` 
""" try: node_info = future.result()[0] # filter out baremetal node if node_info.hypervisor_type == 'ironic': LOG.debug("filtering out baremetal node: %s", node_info) return self.add_compute_node(node_info) # node.servers is a list of server objects # New in nova version 2.53 instances = getattr(node_info, "servers", None) # Do not submit job if there are no instances on compute node if instances is None: LOG.info("No instances on compute_node: {0}".format(node_info)) return future_instances.append( self.executor.submit( self.add_instance_node, node_info, instances) ) except Exception: LOG.error("compute node from aggregate / " "availability_zone could not be found") def _add_physical_layer(self): """Collects all information on compute nodes and instances Will collect all required compute node and instance information based on the host aggregates and availability zones. If aggregates and zones do not specify any compute nodes all nodes are retrieved instead. The collection of information happens concurrently using the DecisionEngineThreadpool. The collection is parallelized in three steps first information about aggregates and zones is gathered. Secondly, for each of the compute nodes a tasks is submitted to get detailed information about the compute node. Finally, Each of these submitted tasks will submit an additional task if the compute node contains instances. Before returning from this function all instance tasks are waited upon to complete. """ compute_nodes = set() host_aggregates = self.model_scope.get("host_aggregates") availability_zones = self.model_scope.get("availability_zones") """Submit tasks to gather compute nodes from availability zones and host aggregates. 
Each task adds compute nodes to the set, this set is threadsafe under the assumption that CPython is used with the GIL enabled.""" zone_aggregate_futures = { self.executor.submit( self._collect_aggregates, host_aggregates, compute_nodes), self.executor.submit( self._collect_zones, availability_zones, compute_nodes) } waiters.wait_for_all(zone_aggregate_futures) # if zones and aggregates did not contain any nodes get every node. if not compute_nodes: self.no_model_scope_flag = True all_nodes = self.call_retry( f=self.nova_helper.get_compute_node_list) compute_nodes = set( [node.hypervisor_hostname for node in all_nodes]) LOG.debug("compute nodes: %s", compute_nodes) node_futures = [self.executor.submit( self.nova_helper.get_compute_node_by_name, node, servers=True, detailed=True) for node in compute_nodes] LOG.debug("submitted {0} jobs".format(len(compute_nodes))) # Futures will concurrently be added, only safe with CPython GIL future_instances = [] self.executor.do_while_futures_modify( node_futures, self._compute_node_future, future_instances) # Wait for all instance jobs to finish waiters.wait_for_all(future_instances) def add_compute_node(self, node): # Build and add base node. LOG.debug("node info: %s", node) compute_node = self.build_compute_node(node) self.model.add_node(compute_node) # NOTE(v-francoise): we can encapsulate capabilities of the node # (special instruction sets of CPUs) in the attributes; as well as # sub-nodes can be added re-presenting e.g. GPUs/Accelerators etc. # # Build & add disk, memory, network and cpu nodes. 
# disk_id, disk_node = self.build_disk_compute_node(base_id, node) # self.add_node(disk_id, disk_node) # mem_id, mem_node = self.build_memory_compute_node(base_id, node) # self.add_node(mem_id, mem_node) # net_id, net_node = self._build_network_compute_node(base_id) # self.add_node(net_id, net_node) # cpu_id, cpu_node = self.build_cpu_compute_node(base_id, node) # self.add_node(cpu_id, cpu_node) # # Connect the base compute node to the dependent nodes. # self.add_edges_from([(base_id, disk_id), (base_id, mem_id), # (base_id, cpu_id), (base_id, net_id)], # label="contains") def build_compute_node(self, node): """Build a compute node from a Nova compute node :param node: A node hypervisor instance :type node: :py:class:`~novaclient.v2.hypervisors.Hypervisor` """ inventories = self.placement_helper.get_inventories(node.id) if inventories and orc.VCPU in inventories: vcpus = inventories[orc.VCPU]['total'] vcpu_reserved = inventories[orc.VCPU]['reserved'] vcpu_ratio = inventories[orc.VCPU]['allocation_ratio'] else: vcpus = node.vcpus vcpu_reserved = 0 vcpu_ratio = 1.0 if inventories and orc.MEMORY_MB in inventories: memory_mb = inventories[orc.MEMORY_MB]['total'] memory_mb_reserved = inventories[orc.MEMORY_MB]['reserved'] memory_ratio = inventories[orc.MEMORY_MB]['allocation_ratio'] else: memory_mb = node.memory_mb memory_mb_reserved = 0 memory_ratio = 1.0 # NOTE(licanwei): A nova BP support-shared-storage-resource-provider # will move DISK_GB from compute node to shared storage RP. # Here may need to be updated when the nova BP released. if inventories and orc.DISK_GB in inventories: disk_capacity = inventories[orc.DISK_GB]['total'] disk_gb_reserved = inventories[orc.DISK_GB]['reserved'] disk_ratio = inventories[orc.DISK_GB]['allocation_ratio'] else: disk_capacity = node.local_gb disk_gb_reserved = 0 disk_ratio = 1.0 # build up the compute node. node_attributes = { # The id of the hypervisor as a UUID from version 2.53. 
"uuid": node.id, "hostname": node.service["host"], "memory": memory_mb, "memory_ratio": memory_ratio, "memory_mb_reserved": memory_mb_reserved, "disk": disk_capacity, "disk_gb_reserved": disk_gb_reserved, "disk_ratio": disk_ratio, "vcpus": vcpus, "vcpu_reserved": vcpu_reserved, "vcpu_ratio": vcpu_ratio, "state": node.state, "status": node.status, "disabled_reason": node.service["disabled_reason"]} compute_node = element.ComputeNode(**node_attributes) # compute_node = self._build_node("physical", "compute", "hypervisor", # node_attributes) return compute_node def add_instance_node(self, node, instances): if instances is None: LOG.info("no instances on compute_node: {0}".format(node)) return host = node.service["host"] compute_node = self.model.get_node_by_uuid(node.id) filters = {'host': host} limit = len(instances) if len(instances) <= 1000 else -1 # Get all servers on this compute host. # Note that the advantage of passing the limit parameter is # that it can speed up the call time of novaclient. 1000 is # the default maximum number of return servers provided by # compute API. If we need to request more than 1000 servers, # we can set limit=-1. For details, please see: # https://bugs.launchpad.net/watcher/+bug/1834679 instances = self.call_retry(f=self.nova_helper.get_instance_list, filters=filters, limit=limit) for inst in instances: # skip deleted instance if getattr(inst, "OS-EXT-STS:vm_state") == ( element.InstanceState.DELETED.value): continue # Add Node instance = self._build_instance_node(inst) self.model.add_instance(instance) # Connect the instance to its compute node self.model.map_instance(instance, compute_node) def _build_instance_node(self, instance): """Build an instance node Create an instance node for the graph using nova and the `server` nova object. :param instance: Nova VM object. :return: An instance node for the graph. 
""" flavor = instance.flavor instance_attributes = { "uuid": instance.id, "name": instance.name, "memory": flavor["ram"], "disk": flavor["disk"], "vcpus": flavor["vcpus"], "state": getattr(instance, "OS-EXT-STS:vm_state"), "metadata": instance.metadata, "project_id": instance.tenant_id, "locked": instance.locked} # node_attributes = dict() # node_attributes["layer"] = "virtual" # node_attributes["category"] = "compute" # node_attributes["type"] = "compute" # node_attributes["attributes"] = instance_attributes return element.Instance(**instance_attributes) def _merge_compute_scope(self, compute_scope): model_keys = self.model_scope.keys() update_flag = False role_keys = ("host_aggregates", "availability_zones") for role in compute_scope: role_key = list(role.keys())[0] if role_key not in role_keys: continue role_values = list(role.values())[0] if role_key in model_keys: for value in role_values: if value not in self.model_scope[role_key]: self.model_scope[role_key].append(value) update_flag = True else: self.model_scope[role_key] = role_values update_flag = True return update_flag def _check_model_scope(self, model_scope): compute_scope = [] update_flag = False for _scope in model_scope: if 'compute' in _scope: compute_scope = _scope['compute'] break if self.no_model_scope_flag is False: if compute_scope: update_flag = self._merge_compute_scope(compute_scope) else: self.model_scope = dict() update_flag = True return update_flag def execute(self, model_scope): """Instantiates the graph with the openstack cluster data.""" updata_model_flag = self._check_model_scope(model_scope) if self.model is None or updata_model_flag: self.model = self.model or model_root.ModelRoot() self._add_physical_layer() return self.model python-watcher-4.0.0/watcher/decision_engine/model/collector/manager.py0000664000175000017500000000442713656752270026366 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # 
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.common import utils from watcher.decision_engine.loading import default class CollectorManager(object): def __init__(self): self.collector_loader = default.ClusterDataModelCollectorLoader() self._collectors = None self._notification_endpoints = None def get_collectors(self): if self._collectors is None: collectors = utils.Struct() collector_plugins = cfg.CONF.collector.collector_plugins for collector_name in collector_plugins: collector = self.collector_loader.load(collector_name) collectors[collector_name] = collector self._collectors = collectors return self._collectors def get_notification_endpoints(self): if self._notification_endpoints is None: endpoints = [] for collector in self.get_collectors().values(): endpoints.extend(collector.notification_endpoints) self._notification_endpoints = endpoints return self._notification_endpoints def get_cluster_model_collector(self, name, osc=None): """Retrieve cluster data model collector :param name: name of the cluster data model collector plugin :type name: str :param osc: an OpenStackClients instance :type osc: :py:class:`~.OpenStackClients` instance :returns: cluster data model collector plugin :rtype: :py:class:`~.BaseClusterDataModelCollector` """ return self.collector_loader.load( name, osc=osc) python-watcher-4.0.0/watcher/decision_engine/model/collector/cinder.py0000664000175000017500000002645213656752270026222 0ustar 
zuulzuul00000000000000# -*- encoding: utf-8 -*-
# Copyright 2017 NEC Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import six

from oslo_log import log

from watcher.common import cinder_helper
from watcher.common import exception
from watcher.decision_engine.model.collector import base
from watcher.decision_engine.model import element
from watcher.decision_engine.model import model_root
from watcher.decision_engine.model.notification import cinder
from watcher.decision_engine.scope import storage as storage_scope

LOG = log.getLogger(__name__)


class CinderClusterDataModelCollector(base.BaseClusterDataModelCollector):
    """Cinder cluster data model collector

    The Cinder cluster data model collector creates an in-memory
    representation of the resources exposed by the storage service.
    """

    SCHEMA = {
        "$schema": "http://json-schema.org/draft-04/schema#",
        "type": "array",
        "items": {
            "type": "object",
            "properties": {
                "availability_zones": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string"
                            }
                        },
                        "additionalProperties": False
                    }
                },
                "volume_types": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "name": {
                                "type": "string"
                            }
                        },
                        "additionalProperties": False
                    }
                },
                "exclude": {
                    "type": "array",
                    "items": {
                        "type": "object",
                        "properties": {
                            "storage_pools": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "name": {
                                            "type": "string"
                                        }
                                    },
                                    "additionalProperties": False
                                }
                            },
                            "volumes": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "uuid": {
                                            "type": "string"
                                        }
                                    },
                                    "additionalProperties": False
                                }
                            },
                            "projects": {
                                "type": "array",
                                "items": {
                                    "type": "object",
                                    "properties": {
                                        "uuid": {
                                            "type": "string"
                                        }
                                    },
                                    "additionalProperties": False
                                }
                            },
                            "additionalProperties": False
                        }
                    }
                }
            },
            "additionalProperties": False
        }
    }

    def __init__(self, config, osc=None):
        super(CinderClusterDataModelCollector, self).__init__(config, osc)

    @property
    def notification_endpoints(self):
        """Associated notification endpoints

        :return: Associated notification endpoints
        :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances
        """
        endpoint_classes = (
            cinder.CapacityNotificationEndpoint,
            cinder.VolumeCreateEnd,
            cinder.VolumeDeleteEnd,
            cinder.VolumeUpdateEnd,
            cinder.VolumeAttachEnd,
            cinder.VolumeDetachEnd,
            cinder.VolumeResizeEnd,
        )
        return [endpoint_cls(self) for endpoint_cls in endpoint_classes]

    def get_audit_scope_handler(self, audit_scope):
        """Build the storage scope handler; reset the cached model on change."""
        self._audit_scope_handler = storage_scope.StorageScope(
            audit_scope, self.config)
        scope_changed = (
            self._data_model_scope is None
            or (len(self._data_model_scope) > 0
                and self._data_model_scope != audit_scope))
        if scope_changed:
            # A different scope invalidates the cached data model.
            self._data_model_scope = audit_scope
            self._cluster_data_model = None
        LOG.debug("audit scope %s", audit_scope)
        return self._audit_scope_handler

    def execute(self):
        """Build the storage cluster data model"""
        LOG.debug("Building latest Cinder cluster data model")
        if self._audit_scope_handler is None:
            LOG.debug("No audit, Don't Build storage data model")
            return
        return CinderModelBuilder(self.osc).execute(self._data_model_scope)


class CinderModelBuilder(base.BaseModelBuilder):
    """Build the graph-based model.

    This model builder adds storage-related knowledge (Cinder).
    """

    def __init__(self, osc):
        self.osc = osc
        self.model = model_root.StorageModelRoot()
        self.cinder = osc.cinder()
        self.cinder_helper = cinder_helper.CinderHelper(osc=self.osc)

    def _add_physical_layer(self):
        """Add the physical layer of the graph.

        This includes components which represent actual infrastructure
        hardware.
        """
        for service_node in self.call_retry(
                self.cinder_helper.get_storage_node_list):
            self.add_storage_node(service_node)
        for raw_pool in self.call_retry(
                self.cinder_helper.get_storage_pool_list):
            storage_pool = self._build_storage_pool(raw_pool)
            self.model.add_pool(storage_pool)
            pool_host = getattr(storage_pool, 'name')
            try:
                owner_node = self.model.get_node_by_name(pool_host)
                # Connect the pool to its storage node
                self.model.map_pool(storage_pool, owner_node)
            except exception.StorageNodeNotFound:
                continue

    def add_storage_node(self, node):
        # Build and add base node.
        self.model.add_node(self.build_storage_node(node))

    def add_storage_pool(self, pool):
        self.model.add_pool(self._build_storage_pool(pool))

    def build_storage_node(self, node):
        """Build a storage node from a Cinder storage node

        :param node: A storage node
        :type node: :py:class:`~cinderclient.v2.services.Service`
        """
        # node.host is formatted as host@backendname since ocata,
        # or may be only host as of ocata
        host_parts = node.host.split('@')
        backend = host_parts[1] if len(host_parts) > 1 else ""
        volume_type = self.call_retry(
            self.cinder_helper.get_volume_type_by_backendname,
            backend)
        # build up the storage node.
node_attributes = { "host": node.host, "zone": node.zone, "state": node.state, "status": node.status, "volume_type": volume_type} storage_node = element.StorageNode(**node_attributes) return storage_node def _build_storage_pool(self, pool): """Build a storage pool from a Cinder storage pool :param pool: A storage pool :type pool: :py:class:`~cinderclient.v2.pools.Pool` :raises: exception.InvalidPoolAttributeValue """ # build up the storage pool. attrs = ["total_volumes", "total_capacity_gb", "free_capacity_gb", "provisioned_capacity_gb", "allocated_capacity_gb"] node_attributes = {"name": pool.name} for attr in attrs: try: node_attributes[attr] = int(getattr(pool, attr)) except AttributeError: LOG.debug("Attribute %s for pool %s is not provided", attr, pool.name) except ValueError: raise exception.InvalidPoolAttributeValue( name=pool.name, attribute=attr) storage_pool = element.Pool(**node_attributes) return storage_pool def _add_virtual_layer(self): """Add the virtual layer to the graph. This layer is the virtual components of the infrastructure. """ self._add_virtual_storage() def _add_virtual_storage(self): volumes = self.call_retry(self.cinder_helper.get_volume_list) for vol in volumes: volume = self._build_volume_node(vol) self.model.add_volume(volume) pool_name = getattr(vol, 'os-vol-host-attr:host') if pool_name is None: # The volume is not attached to any pool continue try: pool = self.model.get_pool_by_pool_name( pool_name) self.model.map_volume(volume, pool) except exception.PoolNotFound: continue def _build_volume_node(self, volume): """Build an volume node Create an volume node for the graph using cinder and the `volume` cinder object. :param instance: Cinder Volume object. :return: A volume node for the graph. 
""" attachments = [{k: v for k, v in six.iteritems(d) if k in ( 'server_id', 'attachment_id')} for d in volume.attachments] volume_attributes = { "uuid": volume.id, "size": volume.size, "status": volume.status, "attachments": attachments, "name": volume.name or "", "multiattach": volume.multiattach, "snapshot_id": volume.snapshot_id or "", "project_id": getattr(volume, 'os-vol-tenant-attr:tenant_id'), "metadata": volume.metadata, "bootable": volume.bootable} return element.Volume(**volume_attributes) def execute(self, model_scope): """Instantiates the graph with the openstack cluster data. The graph is populated along 2 layers: virtual and physical. As each new layer is built connections are made back to previous layers. """ # TODO(Dantali0n): Use scope to limit size of model self._add_physical_layer() self._add_virtual_layer() return self.model python-watcher-4.0.0/watcher/decision_engine/model/collector/base.py0000664000175000017500000002256713656752270025673 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # """ A :ref:`Cluster Data Model ` (or CDM) is a logical representation of the current state and topology of the :ref:`Cluster ` :ref:`Managed resources `. 
It is represented as a set of :ref:`Managed resources ` (which may be a
simple tree or a flat list of key-value pairs) which enables Watcher
:ref:`Strategies ` to know the current relationships between the different
:ref:`resources ` of the :ref:`Cluster ` during an :ref:`Audit ` and
enables the :ref:`Strategy ` to request information such as:

- What compute nodes are in a given :ref:`Audit Scope `?
- What :ref:`Instances ` are hosted on a given compute node?
- What is the current load of a compute node?
- What is the current free memory of a compute node?
- What is the network link between two compute nodes?
- What is the available bandwidth on a given network link?
- What is the current space available on a given virtual disk of a given
  :ref:`Instance ` ?
- What is the current state of a given :ref:`Instance `?
- ...

In a word, this data model enables the :ref:`Strategy ` to know:

- the current topology of the :ref:`Cluster `
- the current capacity for each :ref:`Managed resource `
- the current amount of used/free space for each :ref:`Managed resource `
- the current state of each :ref:`Managed resources `

In the Watcher project, we aim at providing some generic and basic
:ref:`Cluster Data Model ` for each :ref:`Goal `, usable in the associated
:ref:`Strategies ` through a plugin-based mechanism which are called
cluster data model collectors (or CDMCs). These CDMCs are responsible for
loading and keeping up-to-date their associated CDM by listening to events
and also periodically rebuilding themselves from the ground up. They are
also directly accessible from the strategies classes.
These CDMs are used to: - simplify the development of a new :ref:`Strategy ` for a given :ref:`Goal ` when there already are some existing :ref:`Strategies ` associated to the same :ref:`Goal ` - avoid duplicating the same code in several :ref:`Strategies ` associated to the same :ref:`Goal ` - have a better consistency between the different :ref:`Strategies ` for a given :ref:`Goal ` - avoid any strong coupling with any external :ref:`Cluster Data Model ` (the proposed data model acts as a pivot data model) There may be various :ref:`generic and basic Cluster Data Models ` proposed in Watcher helpers, each of them being adapted to achieving a given :ref:`Goal `: - For example, for a :ref:`Goal ` which aims at optimizing the network :ref:`resources ` the :ref:`Strategy ` may need to know which :ref:`resources ` are communicating together. - Whereas for a :ref:`Goal ` which aims at optimizing thermal and power conditions, the :ref:`Strategy ` may need to know the location of each compute node in the racks and the location of each rack in the room. Note however that a developer can use his/her own :ref:`Cluster Data Model ` if the proposed data model does not fit his/her needs as long as the :ref:`Strategy ` is able to produce a :ref:`Solution ` for the requested :ref:`Goal `. For example, a developer could rely on the Nova Data Model to optimize some compute resources. The :ref:`Cluster Data Model ` may be persisted in any appropriate storage system (SQL database, NoSQL database, JSON file, XML File, In Memory Database, ...). As of now, an in-memory model is built and maintained in the background in order to accelerate the execution of strategies. 
""" import abc import copy import threading import time from oslo_config import cfg from oslo_log import log import six from watcher.common import clients from watcher.common.loader import loadable from watcher.decision_engine.model import model_root LOG = log.getLogger(__name__) CONF = cfg.CONF @six.add_metaclass(abc.ABCMeta) class BaseClusterDataModelCollector(loadable.LoadableSingleton): STALE_MODEL = model_root.ModelRoot(stale=True) def __init__(self, config, osc=None): super(BaseClusterDataModelCollector, self).__init__(config) self.osc = osc if osc else clients.OpenStackClients() self.lock = threading.RLock() self._audit_scope_handler = None self._cluster_data_model = None self._data_model_scope = None @property def cluster_data_model(self): if self._cluster_data_model is None: self.lock.acquire() self._cluster_data_model = self.execute() self.lock.release() return self._cluster_data_model @cluster_data_model.setter def cluster_data_model(self, model): self.lock.acquire() self._cluster_data_model = model self.lock.release() @abc.abstractproperty def notification_endpoints(self): """Associated notification endpoints :return: Associated notification endpoints :rtype: List of :py:class:`~.EventsNotificationEndpoint` instances """ raise NotImplementedError() def set_cluster_data_model_as_stale(self): self.cluster_data_model = self.STALE_MODEL @abc.abstractmethod def get_audit_scope_handler(self, audit_scope): """Get audit scope handler""" raise NotImplementedError() @abc.abstractmethod def execute(self): """Build a cluster data model""" raise NotImplementedError() @classmethod def get_config_opts(cls): return [ cfg.IntOpt( 'period', default=3600, help='The time interval (in seconds) between each ' 'synchronization of the model'), ] def get_latest_cluster_data_model(self): LOG.debug("Creating copy") LOG.debug(self.cluster_data_model.to_xml()) return copy.deepcopy(self.cluster_data_model) def synchronize(self): """Synchronize the cluster data model Whenever called 
this synchronization will perform a drop-in replacement with the existing cluster data model """ self.cluster_data_model = self.execute() class BaseModelBuilder(object): def call_retry(self, f, *args, **kwargs): """Attempts to call external service Attempts to access data from the external service and handles exceptions. The retrieval should be retried in accordance to the value of api_call_retries :param f: The method that performs the actual querying for metrics :param args: Array of arguments supplied to the method :param kwargs: The amount of arguments supplied to the method :return: The value as retrieved from the external service """ num_retries = CONF.collector.api_call_retries timeout = CONF.collector.api_query_timeout for i in range(num_retries): try: return f(*args, **kwargs) except Exception as e: LOG.exception(e) self.call_retry_reset(e) LOG.warning("Retry {0} of {1}, error while calling service " "retry in {2} seconds".format(i+1, num_retries, timeout)) time.sleep(timeout) raise @abc.abstractmethod def call_retry_reset(self, exc): """Attempt to recover after encountering an error Recover from errors while calling external services, the exception can be used to make a better decision on how to best recover. """ pass @abc.abstractmethod def execute(self, model_scope): """Build the cluster data model limited to the scope and return it Builds the cluster data model with respect to the supplied scope. The schema of this scope will depend on the type of ModelBuilder. """ raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/manager.py0000664000175000017500000000552513656752270023300 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ This component is responsible for computing a set of potential optimization :ref:`Actions ` in order to fulfill the :ref:`Goal ` of an :ref:`Audit `. It first reads the parameters of the :ref:`Audit ` from the associated :ref:`Audit Template ` and knows the :ref:`Goal ` to achieve. It then selects the most appropriate :ref:`Strategy ` depending on how Watcher was configured for this :ref:`Goal `. The :ref:`Strategy ` is then executed and generates a set of :ref:`Actions ` which are scheduled in time by the :ref:`Watcher Planner ` (i.e., it generates an :ref:`Action Plan `). See :doc:`../architecture` for more details on this component. 
""" from watcher.common import service_manager from watcher import conf from watcher.decision_engine.messaging import audit_endpoint from watcher.decision_engine.messaging import data_model_endpoint from watcher.decision_engine.model.collector import manager from watcher.decision_engine.strategy.strategies import base \ as strategy_endpoint CONF = conf.CONF class DecisionEngineManager(service_manager.ServiceManager): @property def service_name(self): return 'watcher-decision-engine' @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_decision_engine.publisher_id @property def conductor_topic(self): return CONF.watcher_decision_engine.conductor_topic @property def notification_topics(self): return CONF.watcher_decision_engine.notification_topics @property def conductor_endpoints(self): return [audit_endpoint.AuditEndpoint, strategy_endpoint.StrategyEndpoint, data_model_endpoint.DataModelEndpoint] @property def notification_endpoints(self): return self.collector_manager.get_notification_endpoints() @property def collector_manager(self): return manager.CollectorManager() python-watcher-4.0.0/watcher/decision_engine/strategy/0000775000175000017500000000000013656752352023150 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/__init__.py0000664000175000017500000000000013656752270025246 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/context/0000775000175000017500000000000013656752352024634 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/context/__init__.py0000664000175000017500000000000013656752270026732 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/context/default.py0000664000175000017500000000503713656752270026636 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this 
file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher.common import clients from watcher.common import utils from watcher.decision_engine.strategy.context import base from watcher.decision_engine.strategy.selection import default from watcher import objects LOG = log.getLogger(__name__) class DefaultStrategyContext(base.StrategyContext): def __init__(self): super(DefaultStrategyContext, self).__init__() LOG.debug("Initializing Strategy Context") @staticmethod def select_strategy(audit, request_context): osc = clients.OpenStackClients() # todo(jed) retrieve in audit parameters (threshold,...) # todo(jed) create ActionPlan goal = objects.Goal.get_by_id(request_context, audit.goal_id) # NOTE(jed56) In the audit object, the 'strategy_id' attribute # is optional. If the admin wants to force the trigger of a Strategy # it could specify the Strategy uuid in the Audit. 
strategy_name = None if audit.strategy_id: strategy = objects.Strategy.get_by_id( request_context, audit.strategy_id) strategy_name = strategy.name strategy_selector = default.DefaultStrategySelector( goal_name=goal.name, strategy_name=strategy_name, osc=osc) return strategy_selector.select() def do_execute_strategy(self, audit, request_context): selected_strategy = self.select_strategy(audit, request_context) selected_strategy.audit_scope = audit.scope schema = selected_strategy.get_schema() if not audit.parameters and schema: # Default value feedback if no predefined strategy utils.StrictDefaultValidatingDraft4Validator(schema).validate( audit.parameters) selected_strategy.input_parameters.update({ name: value for name, value in audit.parameters.items() }) return selected_strategy.execute(audit=audit) python-watcher-4.0.0/watcher/decision_engine/strategy/context/base.py0000664000175000017500000000516213656752270026123 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc import six from watcher import notifications from watcher.objects import fields @six.add_metaclass(abc.ABCMeta) class StrategyContext(object): def execute_strategy(self, audit, request_context): """Execute the strategy for the given an audit :param audit: Audit object :type audit: :py:class:`~.objects.audit.Audit` instance :param request_context: Current request context :type request_context: :py:class:`~.RequestContext` instance :returns: The computed solution :rtype: :py:class:`~.BaseSolution` instance """ try: notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.STRATEGY, phase=fields.NotificationPhase.START) solution = self.do_execute_strategy(audit, request_context) notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.STRATEGY, phase=fields.NotificationPhase.END) return solution except Exception: notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.STRATEGY, priority=fields.NotificationPriority.ERROR, phase=fields.NotificationPhase.ERROR) raise @abc.abstractmethod def do_execute_strategy(self, audit, request_context): """Execute the strategy for the given an audit :param audit: Audit object :type audit: :py:class:`~.objects.audit.Audit` instance :param request_context: Current request context :type request_context: :py:class:`~.RequestContext` instance :returns: The computed solution :rtype: :py:class:`~.BaseSolution` instance """ raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/strategy/selection/0000775000175000017500000000000013656752352025135 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/selection/__init__.py0000664000175000017500000000000013656752270027233 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/selection/default.py0000664000175000017500000000530413656752270027134 0ustar zuulzuul00000000000000# 
-*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.loading import default from watcher.decision_engine.strategy.selection import base LOG = log.getLogger(__name__) class DefaultStrategySelector(base.BaseSelector): def __init__(self, goal_name, strategy_name=None, osc=None): """Default strategy selector :param goal_name: Name of the goal :param strategy_name: Name of the strategy :param osc: an OpenStackClients instance """ super(DefaultStrategySelector, self).__init__() self.goal_name = goal_name self.strategy_name = strategy_name self.osc = osc self.strategy_loader = default.DefaultStrategyLoader() def select(self): """Selects a strategy :raises: :py:class:`~.LoadingError` if it failed to load a strategy :returns: A :py:class:`~.BaseStrategy` instance """ strategy_to_load = None try: if self.strategy_name: strategy_to_load = self.strategy_name else: available_strategies = self.strategy_loader.list_available() available_strategies_for_goal = list( key for key, strat in available_strategies.items() if strat.get_goal_name() == self.goal_name) if not available_strategies_for_goal: raise exception.NoAvailableStrategyForGoal( goal=self.goal_name) # TODO(v-francoise): We should do some more work here to select # a strategy out of a given goal instead of just choosing the # 1st one strategy_to_load = 
available_strategies_for_goal[0] return self.strategy_loader.load(strategy_to_load, osc=self.osc) except exception.NoAvailableStrategyForGoal: raise except Exception as exc: LOG.exception(exc) raise exception.LoadingError( _("Could not load any strategy for goal %(goal)s"), goal=self.goal_name) python-watcher-4.0.0/watcher/decision_engine/strategy/selection/base.py0000664000175000017500000000147613656752270026430 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import abc import six @six.add_metaclass(abc.ABCMeta) class BaseSelector(object): @abc.abstractmethod def select(self): raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/strategy/common/0000775000175000017500000000000013656752352024440 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/common/__init__.py0000664000175000017500000000000013656752270026536 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/common/level.py0000664000175000017500000000146613656752270026127 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import enum class StrategyLevel(enum.Enum): conservative = "conservative" balanced = "balanced" growth = "growth" aggressive = "aggressive" python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/0000775000175000017500000000000013656752352025322 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/workload_balance.py0000664000175000017500000003403513656752270031167 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import division from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class WorkloadBalance(base.WorkloadStabilizationBaseStrategy): """[PoC]Workload balance using live migration *Description* It is a migration strategy based on the VM workload of physical servers. 
It generates solutions to move a workload whenever a server's CPU or RAM utilization % is higher than the specified threshold. The VM to be moved should make the host close to average workload of all compute nodes. *Requirements* * Hardware: compute node should use the same physical CPUs/RAMs * Software: Ceilometer component ceilometer-agent-compute running in each compute node, and Ceilometer API can report such telemetry "instance_cpu_usage" and "instance_ram_usage" successfully. * You must have at least 2 physical compute nodes to run this strategy. *Limitations* - This is a proof of concept that is not meant to be used in production - We cannot forecast how many servers should be migrated. This is the reason why we only plan a single virtual machine migration at a time. So it's better to use this algorithm with `CONTINUOUS` audits. - It assume that live migrations are possible """ # The meter to report CPU utilization % of VM in ceilometer # Unit: %, value range is [0 , 100] # The meter to report memory resident of VM in ceilometer # Unit: MB DATASOURCE_METRICS = ['instance_cpu_usage', 'instance_ram_usage'] def __init__(self, config, osc=None): """Workload balance using live migration :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(WorkloadBalance, self).__init__(config, osc) # the migration plan will be triggered when the CPU or RAM # utilization % reaches threshold self._meter = None self.instance_migrations_count = 0 @classmethod def get_name(cls): return "workload_balance" @classmethod def get_display_name(cls): return _("Workload Balance Migration Strategy") @classmethod def get_translatable_display_name(cls): return "Workload Balance Migration Strategy" @property def granularity(self): return self.input_parameters.get('granularity', 300) @classmethod def get_schema(cls): # Mandatory default setting for each element return { 
"properties": { "metrics": { "description": "Workload balance based on metrics: " "cpu or ram utilization", "type": "string", "choice": ["instance_cpu_usage", "instance_ram_usage"], "default": "instance_cpu_usage" }, "threshold": { "description": "workload threshold for migration", "type": "number", "default": 25.0 }, "period": { "description": "aggregate time period of ceilometer", "type": "number", "default": 300 }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "default": 300 }, }, } def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache): """Pick up an active instance to migrate from provided hosts :param hosts: the array of dict which contains node object :param avg_workload: the average workload value of all nodes :param workload_cache: the map contains instance to workload mapping """ for instance_data in hosts: source_node = instance_data['compute_node'] source_instances = self.compute_model.get_node_instances( source_node) if source_instances: delta_workload = instance_data['workload'] - avg_workload min_delta = 1000000 instance_id = None for instance in source_instances: try: # NOTE: skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue # select the first active VM to migrate if (instance.state != element.InstanceState.ACTIVE.value): LOG.debug("Instance not active, skipped: %s", instance.uuid) continue current_delta = ( delta_workload - workload_cache[instance.uuid]) if 0 <= current_delta < min_delta: min_delta = current_delta instance_id = instance.uuid except exception.InstanceNotFound: 
LOG.error("Instance not found; error: %s", instance_id) if instance_id: return (source_node, self.compute_model.get_instance_by_uuid( instance_id)) else: LOG.info("VM not found from compute_node: %s", source_node.uuid) def filter_destination_hosts(self, hosts, instance_to_migrate, avg_workload, workload_cache): """Only return hosts with sufficient available resources""" required_cores = instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_mem = instance_to_migrate.memory # filter nodes without enough resource destination_hosts = [] src_instance_workload = workload_cache[instance_to_migrate.uuid] for instance_data in hosts: host = instance_data['compute_node'] workload = instance_data['workload'] # calculate the available resources free_res = self.compute_model.get_node_free_resources(host) if (free_res['vcpu'] >= required_cores and free_res['memory'] >= required_mem and free_res['disk'] >= required_disk): if (self._meter == 'instance_cpu_usage' and ((src_instance_workload + workload) < self.threshold / 100 * host.vcpus)): destination_hosts.append(instance_data) if (self._meter == 'instance_ram_usage' and ((src_instance_workload + workload) < self.threshold / 100 * host.memory)): destination_hosts.append(instance_data) return destination_hosts def group_hosts_by_cpu_or_ram_util(self): """Calculate the workloads of each compute_node try to find out the nodes which have reached threshold and the nodes which are under threshold. and also calculate the average workload value of all nodes. and also generate the instance workload map. 
""" nodes = self.get_available_compute_nodes() cluster_size = len(nodes) overload_hosts = [] nonoverload_hosts = [] # total workload of cluster cluster_workload = 0.0 # use workload_cache to store the workload of VMs for reuse purpose workload_cache = {} for node_id in nodes: node = self.compute_model.get_node_by_uuid(node_id) instances = self.compute_model.get_node_instances(node) node_workload = 0.0 for instance in instances: util = None try: util = self.datasource_backend.statistic_aggregation( instance, 'instance', self._meter, self._period, 'mean', self._granularity) except Exception as exc: LOG.exception(exc) LOG.error("Can not get %s from %s", self._meter, self.datasource_backend.NAME) continue if util is None: LOG.debug("Instance (%s): %s is None", instance.uuid, self._meter) continue if self._meter == 'instance_cpu_usage': workload_cache[instance.uuid] = (util * instance.vcpus / 100) else: workload_cache[instance.uuid] = util node_workload += workload_cache[instance.uuid] LOG.debug("VM (%s): %s %f", instance.uuid, self._meter, util) cluster_workload += node_workload if self._meter == 'instance_cpu_usage': node_util = node_workload / node.vcpus * 100 else: node_util = node_workload / node.memory * 100 instance_data = { 'compute_node': node, self._meter: node_util, 'workload': node_workload} if node_util >= self.threshold: # mark the node to release resources overload_hosts.append(instance_data) else: nonoverload_hosts.append(instance_data) avg_workload = 0 if cluster_size != 0: avg_workload = cluster_workload / cluster_size return overload_hosts, nonoverload_hosts, avg_workload, workload_cache def pre_execute(self): self._pre_execute() self.threshold = self.input_parameters.threshold self._period = self.input_parameters.period self._meter = self.input_parameters.metrics self._granularity = self.input_parameters.granularity def do_execute(self, audit=None): """Strategy execution phase This phase is where you should put the main logic of your strategy. 
""" source_nodes, target_nodes, avg_workload, workload_cache = ( self.group_hosts_by_cpu_or_ram_util()) if not source_nodes: LOG.debug("No hosts require optimization") return self.solution if not target_nodes: LOG.warning("No hosts current have CPU utilization under %s " "percent, therefore there are no possible target " "hosts for any migration", self.threshold) return self.solution # choose the server with largest cpu_util source_nodes = sorted(source_nodes, reverse=True, key=lambda x: (x[self._meter])) instance_to_migrate = self.choose_instance_to_migrate( source_nodes, avg_workload, workload_cache) if not instance_to_migrate: return self.solution source_node, instance_src = instance_to_migrate # find the hosts that have enough resource for the VM to be migrated destination_hosts = self.filter_destination_hosts( target_nodes, instance_src, avg_workload, workload_cache) # sort the filtered result by workload # pick up the lowest one as dest server if not destination_hosts: # for instance. 
LOG.warning("No proper target host could be found, it might " "be because of there's no enough CPU/Memory/DISK") return self.solution destination_hosts = sorted(destination_hosts, key=lambda x: (x[self._meter])) # always use the host with lowerest CPU utilization mig_destination_node = destination_hosts[0]['compute_node'] # generate solution to migrate the instance to the dest server, if self.compute_model.migrate_instance( instance_src, source_node, mig_destination_node): self.add_action_migrate( instance_src, 'live', source_node, mig_destination_node) self.instance_migrations_count += 1 def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.solution.model = self.compute_model self.solution.set_efficacy_indicators( instance_migrations_count=self.instance_migrations_count, instances_count=len(self.compute_model.get_all_instances()) ) LOG.debug(self.compute_model.to_string()) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/__init__.py0000664000175000017500000000575513656752270027446 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.strategy.strategies import actuation from watcher.decision_engine.strategy.strategies import base from watcher.decision_engine.strategy.strategies import basic_consolidation from watcher.decision_engine.strategy.strategies import dummy_strategy from watcher.decision_engine.strategy.strategies import dummy_with_scorer from watcher.decision_engine.strategy.strategies import host_maintenance from watcher.decision_engine.strategy.strategies import \ node_resource_consolidation from watcher.decision_engine.strategy.strategies import noisy_neighbor from watcher.decision_engine.strategy.strategies import outlet_temp_control from watcher.decision_engine.strategy.strategies import saving_energy from watcher.decision_engine.strategy.strategies import \ storage_capacity_balance from watcher.decision_engine.strategy.strategies import uniform_airflow from watcher.decision_engine.strategy.strategies import \ vm_workload_consolidation from watcher.decision_engine.strategy.strategies import workload_balance from watcher.decision_engine.strategy.strategies import workload_stabilization from watcher.decision_engine.strategy.strategies import zone_migration Actuator = actuation.Actuator BaseStrategy = base.BaseStrategy BasicConsolidation = basic_consolidation.BasicConsolidation OutletTempControl = outlet_temp_control.OutletTempControl DummyStrategy = dummy_strategy.DummyStrategy DummyWithScorer = dummy_with_scorer.DummyWithScorer SavingEnergy = saving_energy.SavingEnergy StorageCapacityBalance = storage_capacity_balance.StorageCapacityBalance VMWorkloadConsolidation = vm_workload_consolidation.VMWorkloadConsolidation WorkloadBalance = workload_balance.WorkloadBalance WorkloadStabilization = workload_stabilization.WorkloadStabilization UniformAirflow = uniform_airflow.UniformAirflow NodeResourceConsolidation = ( node_resource_consolidation.NodeResourceConsolidation) NoisyNeighbor = noisy_neighbor.NoisyNeighbor ZoneMigration = zone_migration.ZoneMigration 
HostMaintenance = host_maintenance.HostMaintenance __all__ = ("Actuator", "BaseStrategy", "BasicConsolidation", "OutletTempControl", "DummyStrategy", "DummyWithScorer", "VMWorkloadConsolidation", "WorkloadBalance", "WorkloadStabilization", "UniformAirflow", "NoisyNeighbor", "SavingEnergy", "StorageCapacityBalance", "ZoneMigration", "HostMaintenance", "NodeResourceConsolidation") python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/host_maintenance.py0000664000175000017500000002650413656752270031221 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 chinac.com # # Authors: suzhengwei # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log import six from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class HostMaintenance(base.HostMaintenanceBaseStrategy): """[PoC]Host Maintenance *Description* It is a migration strategy for one compute node maintenance, without having the user's application been interruptted. If given one backup node, the strategy will firstly migrate all instances from the maintenance node to the backup node. If the backup node is not provided, it will migrate all instances, relying on nova-scheduler. *Requirements* * You must have at least 2 physical compute nodes to run this strategy. 
*Limitations* - This is a proof of concept that is not meant to be used in production - It migrates all instances from one host to other hosts. It's better to execute such strategy when load is not heavy, and use this algorithm with `ONESHOT` audit. - It assumes that cold and live migrations are possible. """ INSTANCE_MIGRATION = "migrate" CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" REASON_FOR_DISABLE = 'watcher_disabled' def __init__(self, config, osc=None): super(HostMaintenance, self).__init__(config, osc) @classmethod def get_name(cls): return "host_maintenance" @classmethod def get_display_name(cls): return _("Host Maintenance Strategy") @classmethod def get_translatable_display_name(cls): return "Host Maintenance Strategy" @classmethod def get_schema(cls): return { "properties": { "maintenance_node": { "description": "The name of the compute node which " "need maintenance", "type": "string", }, "backup_node": { "description": "The name of the compute node which " "will backup the maintenance node.", "type": "string", }, }, "required": ["maintenance_node"], } def get_disabled_compute_nodes_with_reason(self, reason=None): return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status == element.ServiceState.DISABLED.value and cn.disabled_reason == reason} def get_disabled_compute_nodes(self): return self.get_disabled_compute_nodes_with_reason( self.REASON_FOR_DISABLE) def get_instance_state_str(self, instance): """Get instance state in string format""" if isinstance(instance.state, six.string_types): return instance.state elif isinstance(instance.state, element.InstanceState): return instance.state.value else: LOG.error('Unexpected instance state type, ' 'state=%(state)s, state_type=%(st)s.', dict(state=instance.state, st=type(instance.state))) raise exception.WatcherException def get_node_status_str(self, node): """Get node status in string format""" if 
isinstance(node.status, six.string_types): return node.status elif isinstance(node.status, element.ServiceState): return node.status.value else: LOG.error('Unexpected node status type, ' 'status=%(status)s, status_type=%(st)s.', dict(status=node.status, st=type(node.status))) raise exception.WatcherException def get_node_capacity(self, node): """Collect cpu, ram and disk capacity of a node. :param node: node object :return: dict(cpu(cores), ram(MB), disk(B)) """ return dict(cpu=node.vcpu_capacity, ram=node.memory_mb_capacity, disk=node.disk_gb_capacity) def host_fits(self, source_node, destination_node): """check host fits return True if VMs could intensively migrate from source_node to destination_node. """ source_node_used = self.compute_model.get_node_used_resources( source_node) destination_node_free = self.compute_model.get_node_free_resources( destination_node) metrics = ['vcpu', 'memory'] for m in metrics: if source_node_used[m] > destination_node_free[m]: return False return True def add_action_enable_compute_node(self, node): """Add an action for node enabler into the solution.""" params = {'state': element.ServiceState.ENABLED.value, 'resource_name': node.hostname} self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=params) def add_action_maintain_compute_node(self, node): """Add an action for node maintenance into the solution.""" params = {'state': element.ServiceState.DISABLED.value, 'disabled_reason': self.REASON_FOR_MAINTAINING, 'resource_name': node.hostname} self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=params) def enable_compute_node_if_disabled(self, node): node_status_str = self.get_node_status_str(node) if node_status_str != element.ServiceState.ENABLED.value: self.add_action_enable_compute_node(node) def instance_migration(self, instance, src_node, des_node=None): """Add an action for instance migration into the solution. 
:param instance: instance object :param src_node: node object :param des_node: node object. if None, the instance will be migrated relying on nova-scheduler :return: None """ instance_state_str = self.get_instance_state_str(instance) if instance_state_str == element.InstanceState.ACTIVE.value: migration_type = 'live' else: migration_type = 'cold' params = {'migration_type': migration_type, 'source_node': src_node.uuid, 'resource_name': instance.name} if des_node: params['destination_node'] = des_node.uuid self.solution.add_action(action_type=self.INSTANCE_MIGRATION, resource_id=instance.uuid, input_parameters=params) def host_migration(self, source_node, destination_node): """host migration Migrate all instances from source_node to destination_node. Active instances use "live-migrate", and other instances use "cold-migrate" """ instances = self.compute_model.get_node_instances(source_node) for instance in instances: self.instance_migration(instance, source_node, destination_node) def safe_maintain(self, maintenance_node, backup_node=None): """safe maintain one compute node Migrate all instances of the maintenance_node intensively to the backup host. If the user didn't give the backup host, it will select one unused node to backup the maintaining node. It calculate the resource both of the backup node and maintaining node to evaluate the migrations from maintaining node to backup node. If all instances of the maintaining node can migrated to the backup node, it will set the maintaining node in 'watcher_maintaining' status, and add the migrations to solution. """ # If the user gives a backup node with required capacity, then migrates # all instances from the maintaining node to the backup node. 
if backup_node: if self.host_fits(maintenance_node, backup_node): self.enable_compute_node_if_disabled(backup_node) self.add_action_maintain_compute_node(maintenance_node) self.host_migration(maintenance_node, backup_node) return True # If the user didn't give the backup host, select one unused # node with required capacity, then migrates all instances # from maintaining node to it. nodes = sorted( self.get_disabled_compute_nodes().values(), key=lambda x: self.get_node_capacity(x)['cpu']) if maintenance_node in nodes: nodes.remove(maintenance_node) for node in nodes: if self.host_fits(maintenance_node, node): self.enable_compute_node_if_disabled(node) self.add_action_maintain_compute_node(maintenance_node) self.host_migration(maintenance_node, node) return True return False def try_maintain(self, maintenance_node): """try to maintain one compute node It firstly set the maintenance_node in 'watcher_maintaining' status. Then try to migrate all instances of the maintenance node, rely on nova-scheduler. 
""" self.add_action_maintain_compute_node(maintenance_node) instances = self.compute_model.get_node_instances(maintenance_node) for instance in instances: self.instance_migration(instance, maintenance_node) def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): LOG.info(_('Executing Host Maintenance Migration Strategy')) maintenance_node = self.input_parameters.get('maintenance_node') backup_node = self.input_parameters.get('backup_node') # if no VMs in the maintenance_node, just maintain the compute node src_node = self.compute_model.get_node_by_name(maintenance_node) if len(self.compute_model.get_node_instances(src_node)) == 0: if (src_node.disabled_reason != self.REASON_FOR_MAINTAINING): self.add_action_maintain_compute_node(src_node) return if backup_node: des_node = self.compute_model.get_node_by_name(backup_node) else: des_node = None if not self.safe_maintain(src_node, des_node): self.try_maintain(src_node) def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ LOG.debug(self.solution.actions) LOG.debug(self.compute_model.to_string()) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/noisy_neighbor.py0000664000175000017500000002330213656752270030711 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from oslo_config import cfg from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) CONF = cfg.CONF class NoisyNeighbor(base.NoisyNeighborBaseStrategy): """Noisy Neighbor strategy using live migration *Description* This strategy can identify and migrate a Noisy Neighbor - a low priority VM that negatively affects performance of a high priority VM in terms of IPC by over utilizing Last Level Cache. *Requirements* To enable LLC metric, latest Intel server with CMT support is required. *Limitations* This is a proof of concept that is not meant to be used in production *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/noisy_neighbor_strategy.html """ MIGRATION = "migrate" DATASOURCE_METRICS = ['instance_l3_cache_usage'] DEFAULT_WATCHER_PRIORITY = 5 def __init__(self, config, osc=None): super(NoisyNeighbor, self).__init__(config, osc) self.meter_name = 'instance_l3_cache_usage' @classmethod def get_name(cls): return "noisy_neighbor" @classmethod def get_display_name(cls): return _("Noisy Neighbor") @classmethod def get_translatable_display_name(cls): return "Noisy Neighbor" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "cache_threshold": { "description": "Performance drop in L3_cache threshold " "for migration", "type": "number", "default": 35.0 }, "period": { "description": "Aggregate time period of " "ceilometer and gnocchi", "type": "number", "default": 100.0 }, }, } def get_current_and_previous_cache(self, instance): try: curr_cache = self.datasource_backend.get_instance_l3_cache_usage( instance, self.meter_name, self.period, 'mean', granularity=300) previous_cache = 2 * ( self.datasource_backend.get_instance_l3_cache_usage( instance, self.meter_name, 2 * self.period, 'mean', granularity=300)) - curr_cache except Exception as exc: LOG.exception(exc) return None, None return curr_cache, 
previous_cache def find_priority_instance(self, instance): current_cache, previous_cache = \ self.get_current_and_previous_cache(instance) if None in (current_cache, previous_cache): LOG.warning("Datasource unable to pick L3 Cache " "values. Skipping the instance") return None if (current_cache < (1 - (self.cache_threshold / 100.0)) * previous_cache): return instance else: return None def find_noisy_instance(self, instance): noisy_current_cache, noisy_previous_cache = \ self.get_current_and_previous_cache(instance) if None in (noisy_current_cache, noisy_previous_cache): LOG.warning("Datasource unable to pick " "L3 Cache. Skipping the instance") return None if (noisy_current_cache > (1 + (self.cache_threshold / 100.0)) * noisy_previous_cache): return instance else: return None def group_hosts(self): nodes = self.compute_model.get_all_compute_nodes() hosts_need_release = {} hosts_target = [] for node in nodes.values(): instances_of_node = self.compute_model.get_node_instances(node) node_instance_count = len(instances_of_node) # Flag that tells us whether to skip the node or not. If True, # the node is skipped. Will be true if we find a noisy instance or # when potential priority instance will be same as potential noisy # instance loop_break_flag = False if node_instance_count > 1: instance_priority_list = [] for instance in instances_of_node: instance_priority_list.append(instance) # If there is no metadata regarding watcher-priority, it takes # DEFAULT_WATCHER_PRIORITY as priority. 
instance_priority_list.sort(key=lambda a: ( a.get('metadata').get('watcher-priority'), self.DEFAULT_WATCHER_PRIORITY)) instance_priority_list_reverse = list(instance_priority_list) instance_priority_list_reverse.reverse() for potential_priority_instance in instance_priority_list: priority_instance = self.find_priority_instance( potential_priority_instance) if (priority_instance is not None): for potential_noisy_instance in ( instance_priority_list_reverse): if(potential_noisy_instance == potential_priority_instance): loop_break_flag = True break noisy_instance = self.find_noisy_instance( potential_noisy_instance) if noisy_instance is not None: hosts_need_release[node.uuid] = { 'priority_vm': potential_priority_instance, 'noisy_vm': potential_noisy_instance} LOG.debug("Priority VM found: %s", potential_priority_instance.uuid) LOG.debug("Noisy VM found: %s", potential_noisy_instance.uuid) loop_break_flag = True break # No need to check other instances in the node if loop_break_flag is True: break if node.uuid not in hosts_need_release: hosts_target.append(node) return hosts_need_release, hosts_target def filter_dest_servers(self, hosts, instance_to_migrate): required_cores = instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_memory = instance_to_migrate.memory dest_servers = [] for host in hosts: free_res = self.compute_model.get_node_free_resources(host) if (free_res['vcpu'] >= required_cores and free_res['disk'] >= required_disk and free_res['memory'] >= required_memory): dest_servers.append(host) return dest_servers def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): self.cache_threshold = self.input_parameters.cache_threshold self.period = self.input_parameters.period hosts_need_release, hosts_target = self.group_hosts() if len(hosts_need_release) == 0: LOG.debug("No hosts require optimization") return if len(hosts_target) == 0: LOG.debug("No hosts available to migrate") return mig_source_node_name = 
max(hosts_need_release.keys(), key=lambda a: hosts_need_release[a]['priority_vm']) instance_to_migrate = hosts_need_release[mig_source_node_name][ 'noisy_vm'] if instance_to_migrate is None: return dest_servers = self.filter_dest_servers(hosts_target, instance_to_migrate) if len(dest_servers) == 0: LOG.info("No proper target host could be found") return # Destination node will be the first available node in the list. mig_destination_node = dest_servers[0] mig_source_node = self.compute_model.get_node_by_uuid( mig_source_node_name) if self.compute_model.migrate_instance(instance_to_migrate, mig_source_node, mig_destination_node): parameters = {'migration_type': 'live', 'source_node': mig_source_node.uuid, 'destination_node': mig_destination_node.uuid, 'resource_name': instance_to_migrate.name} self.solution.add_action(action_type=self.MIGRATION, resource_id=instance_to_migrate.uuid, input_parameters=parameters) def post_execute(self): self.solution.model = self.compute_model LOG.debug(self.compute_model.to_string()) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/dummy_with_scorer.py0000664000175000017500000001417213656752270031443 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import random from oslo_log import log from oslo_serialization import jsonutils from oslo_utils import units from watcher._i18n import _ from watcher.decision_engine.scoring import scoring_factory from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class DummyWithScorer(base.DummyBaseStrategy): """A dummy strategy using dummy scoring engines. This is a dummy strategy demonstrating how to work with scoring engines. One scoring engine is predicting the workload type of a machine based on the telemetry data, the other one is simply calculating the average value for given elements in a list. Results are then passed to the NOP action. The strategy is presenting the whole workflow: - Get a reference to a scoring engine - Prepare input data (features) for score calculation - Perform score calculation - Use scorer's metadata for results interpretation """ DEFAULT_NAME = "dummy_with_scorer" DEFAULT_DESCRIPTION = "Dummy Strategy with Scorer" NOP = "nop" SLEEP = "sleep" def __init__(self, config, osc=None): """Constructor: the signature should be identical within the subclasses :param config: Configuration related to this plugin :type config: :py:class:`~.Struct` :param osc: An OpenStackClients instance :type osc: :py:class:`~.OpenStackClients` instance """ super(DummyWithScorer, self).__init__(config, osc) # Setup Scoring Engines self._workload_scorer = (scoring_factory .get_scoring_engine('dummy_scorer')) self._avg_scorer = (scoring_factory .get_scoring_engine('dummy_avg_scorer')) # Get metainfo from Workload Scorer for result interpretation metainfo = jsonutils.loads(self._workload_scorer.get_metainfo()) self._workloads = {index: workload for index, workload in enumerate( metainfo['workloads'])} def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): # Simple "hello world" from strategy param1 = self.input_parameters.param1 param2 = self.input_parameters.param2 LOG.debug('DummyWithScorer params: param1=%(p1)f, 
param2=%(p2)s', {'p1': param1, 'p2': param2}) parameters = {'message': 'Hello from Dummy Strategy with Scorer!'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) # Demonstrate workload scorer features = self._generate_random_telemetry() result_str = self._workload_scorer.calculate_score(features) LOG.debug('Workload Scorer result: %s', result_str) # Parse the result using workloads from scorer's metainfo result = self._workloads[jsonutils.loads(result_str)[0]] LOG.debug('Detected Workload: %s', result) parameters = {'message': 'Detected Workload: %s' % result} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) # Demonstrate AVG scorer features = jsonutils.dumps(random.sample(range(1000), 20)) result_str = self._avg_scorer.calculate_score(features) LOG.debug('AVG Scorer result: %s', result_str) result = jsonutils.loads(result_str)[0] LOG.debug('AVG Scorer result (parsed): %d', result) parameters = {'message': 'AVG Scorer result: %s' % result} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) # Sleep action self.solution.add_action(action_type=self.SLEEP, input_parameters={'duration': 5.0}) def post_execute(self): pass @classmethod def get_name(cls): return 'dummy_with_scorer' @classmethod def get_display_name(cls): return _('Dummy Strategy using sample Scoring Engines') @classmethod def get_translatable_display_name(cls): return 'Dummy Strategy using sample Scoring Engines' @classmethod def get_schema(cls): # Mandatory default setting for each element return { 'properties': { 'param1': { 'description': 'number parameter example', 'type': 'number', 'default': 3.2, 'minimum': 1.0, 'maximum': 10.2, }, 'param2': { 'description': 'string parameter example', 'type': "string", 'default': "hello" }, }, } def _generate_random_telemetry(self): processor_time = random.randint(0, 100) mem_total_bytes = 4*units.Gi mem_avail_bytes = random.randint(1*units.Gi, 4*units.Gi) mem_page_reads = 
random.randint(0, 2000) mem_page_writes = random.randint(0, 2000) disk_read_bytes = random.randint(0*units.Mi, 200*units.Mi) disk_write_bytes = random.randint(0*units.Mi, 200*units.Mi) net_bytes_received = random.randint(0*units.Mi, 20*units.Mi) net_bytes_sent = random.randint(0*units.Mi, 10*units.Mi) return jsonutils.dumps([ processor_time, mem_total_bytes, mem_avail_bytes, mem_page_reads, mem_page_writes, disk_read_bytes, disk_write_bytes, net_bytes_received, net_bytes_sent]) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/zone_migration.py0000664000175000017500000010065513656752270030726 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from dateutil.parser import parse import six from oslo_log import log from cinderclient.v2.volumes import Volume from novaclient.v2.servers import Server from watcher._i18n import _ from watcher.common import cinder_helper from watcher.common import nova_helper from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) INSTANCE = "instance" VOLUME = "volume" ACTIVE = "active" PAUSED = 'paused' STOPPED = "stopped" status_ACTIVE = 'ACTIVE' status_PAUSED = 'PAUSED' status_SHUTOFF = 'SHUTOFF' AVAILABLE = "available" IN_USE = "in-use" class ZoneMigration(base.ZoneMigrationBaseStrategy): """Zone migration using instance and volume migration This is zone migration strategy to migrate many instances and volumes efficiently with minimum downtime for hardware maintenance. """ def __init__(self, config, osc=None): super(ZoneMigration, self).__init__(config, osc) self._nova = None self._cinder = None self.live_count = 0 self.planned_live_count = 0 self.cold_count = 0 self.planned_cold_count = 0 self.volume_count = 0 self.planned_volume_count = 0 self.volume_update_count = 0 self.planned_volume_update_count = 0 @classmethod def get_name(cls): return "zone_migration" @classmethod def get_display_name(cls): return _("Zone migration") @classmethod def get_translatable_display_name(cls): return "Zone migration" @classmethod def get_schema(cls): return { "properties": { "compute_nodes": { "type": "array", "items": { "type": "object", "properties": { "src_node": { "description": "Compute node from which" " instances migrate", "type": "string" }, "dst_node": { "description": "Compute node to which " "instances migrate", "type": "string" } }, "required": ["src_node"], "additionalProperties": False } }, "storage_pools": { "type": "array", "items": { "type": "object", "properties": { "src_pool": { "description": "Storage pool from which" " volumes migrate", "type": "string" }, "dst_pool": { "description": 
"Storage pool to which" " volumes migrate", "type": "string" }, "src_type": { "description": "Volume type from which" " volumes migrate", "type": "string" }, "dst_type": { "description": "Volume type to which" " volumes migrate", "type": "string" } }, "required": ["src_pool", "src_type", "dst_type"], "additionalProperties": False } }, "parallel_total": { "description": "The number of actions to be run in" " parallel in total", "type": "integer", "minimum": 0, "default": 6 }, "parallel_per_node": { "description": "The number of actions to be run in" " parallel per compute node", "type": "integer", "minimum": 0, "default": 2 }, "parallel_per_pool": { "description": "The number of actions to be run in" " parallel per storage host", "type": "integer", "minimum": 0, "default": 2 }, "priority": { "description": "List prioritizes instances and volumes", "type": "object", "properties": { "project": { "type": "array", "items": {"type": "string"} }, "compute_node": { "type": "array", "items": {"type": "string"} }, "storage_pool": { "type": "array", "items": {"type": "string"} }, "compute": { "enum": ["vcpu_num", "mem_size", "disk_size", "created_at"] }, "storage": { "enum": ["size", "created_at"] } }, "additionalProperties": False }, "with_attached_volume": { "description": "instance migrates just after attached" " volumes or not", "type": "boolean", "default": False }, }, "additionalProperties": False } @property def migrate_compute_nodes(self): """Get compute nodes from input_parameters :returns: compute nodes e.g. [{"src_node": "w012", "dst_node": "w022"}, {"src_node": "w013", "dst_node": "w023"}] """ return self.input_parameters.get('compute_nodes') @property def migrate_storage_pools(self): """Get storage pools from input_parameters :returns: storage pools e.g. 
[ {"src_pool": "src1@back1#pool1", "dst_pool": "dst1@back1#pool1", "src_type": "src1_type", "dst_type": "dst1_type"}, {"src_pool": "src1@back2#pool1", "dst_pool": "dst1@back2#pool1", "src_type": "src1_type", "dst_type": "dst1_type"} ] """ return self.input_parameters.get('storage_pools') @property def parallel_total(self): return self.input_parameters.get('parallel_total') @property def parallel_per_node(self): return self.input_parameters.get('parallel_per_node') @property def parallel_per_pool(self): return self.input_parameters.get('parallel_per_pool') @property def priority(self): """Get priority from input_parameters :returns: priority map e.g. { "project": ["pj1"], "compute_node": ["compute1", "compute2"], "compute": ["vcpu_num"], "storage_pool": ["pool1", "pool2"], "storage": ["size", "created_at"] } """ return self.input_parameters.get('priority') @property def with_attached_volume(self): return self.input_parameters.get('with_attached_volume') @property def nova(self): if self._nova is None: self._nova = nova_helper.NovaHelper(osc=self.osc) return self._nova @property def cinder(self): if self._cinder is None: self._cinder = cinder_helper.CinderHelper(osc=self.osc) return self._cinder def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def get_available_storage_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] return {uuid: cn for uuid, cn in self.storage_model.get_all_storage_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def pre_execute(self): self._pre_execute() LOG.debug(self.storage_model.to_string()) def do_execute(self, audit=None): """Strategy execution phase """ filtered_targets = 
self.filtered_targets() self.set_migration_count(filtered_targets) total_limit = self.parallel_total per_node_limit = self.parallel_per_node per_pool_limit = self.parallel_per_pool action_counter = ActionCounter(total_limit, per_pool_limit, per_node_limit) for k, targets in six.iteritems(filtered_targets): if k == VOLUME: self.volumes_migration(targets, action_counter) elif k == INSTANCE: if self.volume_count == 0 and self.volume_update_count == 0: # if with_attached_volume is true, # instance having attached volumes already migrated, # migrate instances which does not have attached volumes if self.with_attached_volume: targets = self.instances_no_attached(targets) self.instances_migration(targets, action_counter) else: self.instances_migration(targets, action_counter) LOG.debug("action total: %s, pools: %s, nodes %s ", action_counter.total_count, action_counter.per_pool_count, action_counter.per_node_count) def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.solution.set_efficacy_indicators( live_migrate_instance_count=self.live_count, planned_live_migrate_instance_count=self.planned_live_count, cold_migrate_instance_count=self.cold_count, planned_cold_migrate_instance_count=self.planned_cold_count, volume_migrate_count=self.volume_count, planned_volume_migrate_count=self.planned_volume_count, volume_update_count=self.volume_update_count, planned_volume_update_count=self.planned_volume_update_count ) def set_migration_count(self, targets): """Set migration count :param targets: dict of instance object and volume object list keys of dict are instance and volume """ for instance in targets.get('instance', []): if self.is_live(instance): self.live_count += 1 elif self.is_cold(instance): self.cold_count += 1 for volume in targets.get('volume', []): if self.is_available(volume): self.volume_count += 1 elif self.is_in_use(volume): self.volume_update_count += 1 def is_live(self, instance): status = getattr(instance, 
'status') state = getattr(instance, 'OS-EXT-STS:vm_state') return (status == status_ACTIVE and state == ACTIVE ) or (status == status_PAUSED and state == PAUSED) def is_cold(self, instance): status = getattr(instance, 'status') state = getattr(instance, 'OS-EXT-STS:vm_state') return status == status_SHUTOFF and state == STOPPED def is_available(self, volume): return getattr(volume, 'status') == AVAILABLE def is_in_use(self, volume): return getattr(volume, 'status') == IN_USE def instances_no_attached(instances): return [i for i in instances if not getattr(i, "os-extended-volumes:volumes_attached")] def get_host_by_pool(self, pool): """Get host name from pool name Utility method to get host name from pool name which is formatted as host@backend#pool. :param pool: pool name :returns: host name """ return pool.split('@')[0] def get_dst_node(self, src_node): """Get destination node from self.migration_compute_nodes :param src_node: compute node name :returns: destination node name """ for node in self.migrate_compute_nodes: if node.get("src_node") == src_node: return node.get("dst_node") def get_dst_pool_and_type(self, src_pool, src_type): """Get destination pool and type from self.migration_storage_pools :param src_pool: storage pool name :param src_type: storage volume type :returns: set of storage pool name and volume type name """ for pool in self.migrate_storage_pools: if pool.get("src_pool") == src_pool: return (pool.get("dst_pool", None), pool.get("dst_type")) def volumes_migration(self, volumes, action_counter): for volume in volumes: if action_counter.is_total_max(): LOG.debug('total reached limit') break pool = getattr(volume, 'os-vol-host-attr:host') if action_counter.is_pool_max(pool): LOG.debug("%s has objects to be migrated, but it has" " reached the limit of parallelization.", pool) continue src_type = volume.volume_type dst_pool, dst_type = self.get_dst_pool_and_type(pool, src_type) LOG.debug(src_type) LOG.debug("%s %s", dst_pool, dst_type) if 
self.is_available(volume): if src_type == dst_type: self._volume_migrate(volume, dst_pool) else: self._volume_retype(volume, dst_type) elif self.is_in_use(volume): self._volume_update(volume, dst_type) # if with_attached_volume is True, migrate attaching instances if self.with_attached_volume: instances = [self.nova.find_instance(dic.get('server_id')) for dic in volume.attachments] self.instances_migration(instances, action_counter) action_counter.add_pool(pool) def instances_migration(self, instances, action_counter): for instance in instances: src_node = getattr(instance, 'OS-EXT-SRV-ATTR:host') if action_counter.is_total_max(): LOG.debug('total reached limit') break if action_counter.is_node_max(src_node): LOG.debug("%s has objects to be migrated, but it has" " reached the limit of parallelization.", src_node) continue dst_node = self.get_dst_node(src_node) if self.is_live(instance): self._live_migration(instance, src_node, dst_node) elif self.is_cold(instance): self._cold_migration(instance, src_node, dst_node) action_counter.add_node(src_node) def _live_migration(self, instance, src_node, dst_node): parameters = {"migration_type": "live", "destination_node": dst_node, "source_node": src_node, "resource_name": instance.name} self.solution.add_action( action_type="migrate", resource_id=instance.id, input_parameters=parameters) self.planned_live_count += 1 def _cold_migration(self, instance, src_node, dst_node): parameters = {"migration_type": "cold", "destination_node": dst_node, "source_node": src_node, "resource_name": instance.name} self.solution.add_action( action_type="migrate", resource_id=instance.id, input_parameters=parameters) self.planned_cold_count += 1 def _volume_update(self, volume, dst_type): parameters = {"migration_type": "swap", "destination_type": dst_type, "resource_name": volume.name} self.solution.add_action( action_type="volume_migrate", resource_id=volume.id, input_parameters=parameters) self.planned_volume_update_count += 1 def 
_volume_migrate(self, volume, dst_pool): parameters = {"migration_type": "migrate", "destination_node": dst_pool, "resource_name": volume.name} self.solution.add_action( action_type="volume_migrate", resource_id=volume.id, input_parameters=parameters) self.planned_volume_count += 1 def _volume_retype(self, volume, dst_type): parameters = {"migration_type": "retype", "destination_type": dst_type, "resource_name": volume.name} self.solution.add_action( action_type="volume_migrate", resource_id=volume.id, input_parameters=parameters) self.planned_volume_count += 1 def get_src_node_list(self): """Get src nodes from migrate_compute_nodes :returns: src node name list """ if not self.migrate_compute_nodes: return None return [v for dic in self.migrate_compute_nodes for k, v in dic.items() if k == "src_node"] def get_src_pool_list(self): """Get src pools from migrate_storage_pools :returns: src pool name list """ return [v for dic in self.migrate_storage_pools for k, v in dic.items() if k == "src_pool"] def get_instances(self): """Get migrate target instances :returns: instance list on src nodes and compute scope """ src_node_list = self.get_src_node_list() if not src_node_list: return None return [i for i in self.nova.get_instance_list() if getattr(i, 'OS-EXT-SRV-ATTR:host') in src_node_list and self.compute_model.get_instance_by_uuid(i.id)] def get_volumes(self): """Get migrate target volumes :returns: volume list on src pools and storage scope """ src_pool_list = self.get_src_pool_list() return [i for i in self.cinder.get_volume_list() if getattr(i, 'os-vol-host-attr:host') in src_pool_list and self.storage_model.get_volume_by_uuid(i.id)] def filtered_targets(self): """Filter targets prioritize instances and volumes based on priorities from input parameters. 
:returns: prioritized targets """ result = {} if self.migrate_compute_nodes: result["instance"] = self.get_instances() if self.migrate_storage_pools: result["volume"] = self.get_volumes() if not self.priority: return result filter_actions = self.get_priority_filter_list() LOG.debug(filter_actions) # apply all filters set in input prameter for action in list(reversed(filter_actions)): LOG.debug(action) result = action.apply_filter(result) return result def get_priority_filter_list(self): """Get priority filters :returns: list of filter object with arguments in self.priority """ filter_list = [] priority_filter_map = self.get_priority_filter_map() for k, v in six.iteritems(self.priority): if k in priority_filter_map: filter_list.append(priority_filter_map[k](v)) return filter_list def get_priority_filter_map(self): """Get priority filter map :returns: filter map key is the key in priority input parameters. value is filter class for prioritizing. """ return { "project": ProjectSortFilter, "compute_node": ComputeHostSortFilter, "storage_pool": StorageHostSortFilter, "compute": ComputeSpecSortFilter, "storage": StorageSpecSortFilter, } class ActionCounter(object): """Manage the number of actions in parallel""" def __init__(self, total_limit=6, per_pool_limit=2, per_node_limit=2): """Initialize dict of host and the number of action :param total_limit: total number of actions :param per_pool_limit: the number of migrate actions per storage pool :param per_node_limit: the number of migrate actions per compute node """ self.total_limit = total_limit self.per_pool_limit = per_pool_limit self.per_node_limit = per_node_limit self.per_pool_count = {} self.per_node_count = {} self.total_count = 0 def add_pool(self, pool): """Increment the number of actions on host and total count :param pool: storage pool :returns: True if incremented, False otherwise """ if pool not in self.per_pool_count: self.per_pool_count[pool] = 0 if not self.is_total_max() and not self.is_pool_max(pool): 
self.per_pool_count[pool] += 1 self.total_count += 1 LOG.debug("total: %s, per_pool: %s", self.total_count, self.per_pool_count) return True return False def add_node(self, node): """Add the number of actions on node :param host: compute node :returns: True if action can be added, False otherwise """ if node not in self.per_node_count: self.per_node_count[node] = 0 if not self.is_total_max() and not self.is_node_max(node): self.per_node_count[node] += 1 self.total_count += 1 LOG.debug("total: %s, per_node: %s", self.total_count, self.per_node_count) return True return False def is_total_max(self): """Check if total count reached limit :returns: True if total count reached limit, False otherwise """ return self.total_count >= self.total_limit def is_pool_max(self, pool): """Check if per pool count reached limit :returns: True if count reached limit, False otherwise """ if pool not in self.per_pool_count: self.per_pool_count[pool] = 0 LOG.debug("the number of parallel per pool %s is %s ", pool, self.per_pool_count[pool]) LOG.debug("per pool limit is %s", self.per_pool_limit) return self.per_pool_count[pool] >= self.per_pool_limit def is_node_max(self, node): """Check if per node count reached limit :returns: True if count reached limit, False otherwise """ if node not in self.per_node_count: self.per_node_count[node] = 0 return self.per_node_count[node] >= self.per_node_limit class BaseFilter(object): """Base class for Filter""" apply_targets = ('ALL',) def __init__(self, values=[], **kwargs): """initialization :param values: priority value """ if not isinstance(values, list): values = [values] self.condition = values def apply_filter(self, targets): """apply filter to targets :param targets: dict of instance object and volume object list keys of dict are instance and volume """ if not targets: return {} for cond in list(reversed(self.condition)): for k, v in six.iteritems(targets): if not self.is_allowed(k): continue LOG.debug("filter:%s with the key: %s", cond, k) 
targets[k] = self.exec_filter(v, cond) LOG.debug(targets) return targets def is_allowed(self, key): return (key in self.apply_targets) or ('ALL' in self.apply_targets) def exec_filter(self, items, sort_key): """This is implemented by sub class""" return items class SortMovingToFrontFilter(BaseFilter): """This is to move to front if a condition is True""" def exec_filter(self, items, sort_key): return self.sort_moving_to_front(items, sort_key, self.compare_func) def sort_moving_to_front(self, items, sort_key=None, compare_func=None): if not compare_func or not sort_key: return items for item in list(reversed(items)): if compare_func(item, sort_key): items.remove(item) items.insert(0, item) return items def compare_func(self, item, sort_key): return True class ProjectSortFilter(SortMovingToFrontFilter): """ComputeHostSortFilter""" apply_targets = ('instance', 'volume') def __init__(self, values=[], **kwargs): super(ProjectSortFilter, self).__init__(values, **kwargs) def compare_func(self, item, sort_key): """Compare project id of item with sort_key :param item: instance object or volume object :param sort_key: project id :returns: true: project id of item equals sort_key false: otherwise """ project_id = self.get_project_id(item) LOG.debug("project_id: %s, sort_key: %s", project_id, sort_key) return project_id == sort_key def get_project_id(self, item): """get project id of item :param item: instance object or volume object :returns: project id """ if isinstance(item, Volume): return getattr(item, 'os-vol-tenant-attr:tenant_id') elif isinstance(item, Server): return item.tenant_id class ComputeHostSortFilter(SortMovingToFrontFilter): """ComputeHostSortFilter""" apply_targets = ('instance',) def __init__(self, values=[], **kwargs): super(ComputeHostSortFilter, self).__init__(values, **kwargs) def compare_func(self, item, sort_key): """Compare compute name of item with sort_key :param item: instance object :param sort_key: compute host name :returns: true: compute name 
on which intance is equals sort_key false: otherwise """ host = self.get_host(item) LOG.debug("host: %s, sort_key: %s", host, sort_key) return host == sort_key def get_host(self, item): """get hostname on which item is :param item: instance object :returns: hostname on which item is """ return getattr(item, 'OS-EXT-SRV-ATTR:host') class StorageHostSortFilter(SortMovingToFrontFilter): """StoragehostSortFilter""" apply_targets = ('volume',) def compare_func(self, item, sort_key): """Compare pool name of item with sort_key :param item: volume object :param sort_key: storage pool name :returns: true: pool name on which intance is equals sort_key false: otherwise """ host = self.get_host(item) LOG.debug("host: %s, sort_key: %s", host, sort_key) return host == sort_key def get_host(self, item): return getattr(item, 'os-vol-host-attr:host') class ComputeSpecSortFilter(BaseFilter): """ComputeSpecSortFilter""" apply_targets = ('instance',) accept_keys = ['vcpu_num', 'mem_size', 'disk_size', 'created_at'] def __init__(self, values=[], **kwargs): super(ComputeSpecSortFilter, self).__init__(values, **kwargs) self._nova = None @property def nova(self): if self._nova is None: self._nova = nova_helper.NovaHelper() return self._nova def exec_filter(self, items, sort_key): result = items if sort_key not in self.accept_keys: LOG.warning("Invalid key is specified: %s", sort_key) else: result = self.get_sorted_items(items, sort_key) return result def get_sorted_items(self, items, sort_key): """Sort items by sort_key :param items: instances :param sort_key: sort_key :returns: items sorted by sort_key """ result = items flavors = self.nova.get_flavor_list() if sort_key == 'mem_size': result = sorted(items, key=lambda x: float(self.get_mem_size(x, flavors)), reverse=True) elif sort_key == 'vcpu_num': result = sorted(items, key=lambda x: float(self.get_vcpu_num(x, flavors)), reverse=True) elif sort_key == 'disk_size': result = sorted(items, key=lambda x: float( self.get_disk_size(x, 
flavors)), reverse=True) elif sort_key == 'created_at': result = sorted(items, key=lambda x: parse(getattr(x, sort_key)), reverse=False) return result def get_mem_size(self, item, flavors): """Get memory size of item :param item: instance :param flavors: flavors :returns: memory size of item """ LOG.debug("item: %s, flavors: %s", item, flavors) for flavor in flavors: LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor) if item.flavor.get('id') == flavor.id: LOG.debug("flavor.ram: %s", flavor.ram) return flavor.ram def get_vcpu_num(self, item, flavors): """Get vcpu number of item :param item: instance :param flavors: flavors :returns: vcpu number of item """ LOG.debug("item: %s, flavors: %s", item, flavors) for flavor in flavors: LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor) if item.flavor.get('id') == flavor.id: LOG.debug("flavor.vcpus: %s", flavor.vcpus) return flavor.vcpus def get_disk_size(self, item, flavors): """Get disk size of item :param item: instance :param flavors: flavors :returns: disk size of item """ LOG.debug("item: %s, flavors: %s", item, flavors) for flavor in flavors: LOG.debug("item.flavor: %s, flavor: %s", item.flavor, flavor) if item.flavor.get('id') == flavor.id: LOG.debug("flavor.disk: %s", flavor.disk) return flavor.disk class StorageSpecSortFilter(BaseFilter): """StorageSpecSortFilter""" apply_targets = ('volume',) accept_keys = ['size', 'created_at'] def exec_filter(self, items, sort_key): result = items if sort_key not in self.accept_keys: LOG.warning("Invalid key is specified: %s", sort_key) return result if sort_key == 'created_at': result = sorted(items, key=lambda x: parse(getattr(x, sort_key)), reverse=False) else: result = sorted(items, key=lambda x: float(getattr(x, sort_key)), reverse=True) LOG.debug(result) return result python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/actuation.py0000664000175000017500000000570113656752270027665 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # 
Copyright (c) 2017 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base class Actuator(base.UnclassifiedStrategy): """Actuator Actuator that simply executes the actions given as parameter This strategy allows anyone to create an action plan with a predefined set of actions. This strategy can be used for 2 different purposes: - Test actions - Use this strategy based on an event trigger to perform some explicit task """ @classmethod def get_name(cls): return "actuator" @classmethod def get_display_name(cls): return _("Actuator") @classmethod def get_translatable_display_name(cls): return "Actuator" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "$schema": "http://json-schema.org/draft-04/schema#", "type": "object", "properties": { "actions": { "type": "array", "items": { "type": "object", "properties": { "action_type": { "type": "string" }, "resource_id": { "type": "string" }, "input_parameters": { "type": "object", "properties": {}, "additionalProperties": True } }, "required": [ "action_type", "input_parameters" ], "additionalProperties": True, } } }, "required": [ "actions" ] } @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] @property def actions(self): return self.input_parameters.get('actions', []) def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): for 
action in self.actions: self.solution.add_action(**action) def post_execute(self): pass python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/dummy_with_resize.py0000664000175000017500000000702113656752270031442 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class DummyWithResize(base.DummyBaseStrategy): """Dummy strategy used for integration testing via Tempest *Description* This strategy does not provide any useful optimization. Its only purpose is to be used by Tempest tests. *Requirements* *Limitations* Do not use in production. 
*Spec URL* """ NOP = "nop" SLEEP = "sleep" def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): para1 = self.input_parameters.para1 para2 = self.input_parameters.para2 LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", {'p1': para1, 'p2': para2}) parameters = {'message': 'hello World'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) parameters = {'message': 'Welcome'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) self.solution.add_action(action_type=self.SLEEP, input_parameters={'duration': 5.0}) self.solution.add_action( action_type='migrate', resource_id='b199db0c-1408-4d52-b5a5-5ca14de0ff36', input_parameters={ 'source_node': 'compute2', 'destination_node': 'compute3', 'migration_type': 'live'}) self.solution.add_action( action_type='migrate', resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', input_parameters={ 'source_node': 'compute2', 'destination_node': 'compute3', 'migration_type': 'live'} ) self.solution.add_action( action_type='resize', resource_id='8db1b3c1-7938-4c34-8c03-6de14b874f8f', input_parameters={'flavor': 'x2'} ) def post_execute(self): pass @classmethod def get_name(cls): return "dummy_with_resize" @classmethod def get_display_name(cls): return _("Dummy strategy with resize") @classmethod def get_translatable_display_name(cls): return "Dummy strategy with resize" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "para1": { "description": "number parameter example", "type": "number", "default": 3.2, "minimum": 1.0, "maximum": 10.2, }, "para2": { "description": "string parameter example", "type": "string", "default": "hello" }, }, } python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/dummy_strategy.py0000664000175000017500000000545213656752270030756 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed 
under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class DummyStrategy(base.DummyBaseStrategy): """Dummy strategy used for integration testing via Tempest *Description* This strategy does not provide any useful optimization. Its only purpose is to be used by Tempest tests. *Requirements* *Limitations* Do not use in production. *Spec URL* """ NOP = "nop" SLEEP = "sleep" def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): para1 = self.input_parameters.para1 para2 = self.input_parameters.para2 LOG.debug("Executing Dummy strategy with para1=%(p1)f, para2=%(p2)s", {'p1': para1, 'p2': para2}) parameters = {'message': 'hello World'} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) parameters = {'message': para2} self.solution.add_action(action_type=self.NOP, input_parameters=parameters) self.solution.add_action(action_type=self.SLEEP, input_parameters={'duration': para1}) def post_execute(self): pass @classmethod def get_name(cls): return "dummy" @classmethod def get_display_name(cls): return _("Dummy strategy") @classmethod def get_translatable_display_name(cls): return "Dummy strategy" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "para1": { "description": "number parameter example", "type": "number", "default": 3.2, "minimum": 1.0, "maximum": 
10.2, }, "para2": { "description": "string parameter example", "type": "string", "default": "hello" }, }, } python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/uniform_airflow.py0000664000175000017500000003211613656752270031100 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class UniformAirflow(base.BaseStrategy): """[PoC]Uniform Airflow using live migration *Description* It is a migration strategy based on the airflow of physical servers. It generates solutions to move VM whenever a server's airflow is higher than the specified threshold. *Requirements* * Hardware: compute node with NodeManager 3.0 support * Software: Ceilometer component ceilometer-agent-compute running in each compute node, and Ceilometer API can report such telemetry "airflow, system power, inlet temperature" successfully. * You must have at least 2 physical compute nodes to run this strategy *Limitations* - This is a proof of concept that is not meant to be used in production. - We cannot forecast how many servers should be migrated. This is the reason why we only plan a single virtual machine migration at a time. So it's better to use this algorithm with `CONTINUOUS` audits. 
- It assumes that live migrations are possible. """ # choose 300 seconds as the default duration of meter aggregation PERIOD = 300 DATASOURCE_METRICS = ['host_airflow', 'host_inlet_temp', 'host_power'] def __init__(self, config, osc=None): """Using live migration :param config: A mapping containing the configuration of this strategy :type config: dict :param osc: an OpenStackClients object """ super(UniformAirflow, self).__init__(config, osc) # The migration plan will be triggered when the airflow reaches # threshold self._period = self.PERIOD @classmethod def get_name(cls): return "uniform_airflow" @classmethod def get_display_name(cls): return _("Uniform airflow migration strategy") @classmethod def get_translatable_display_name(cls): return "Uniform airflow migration strategy" @classmethod def get_goal_name(cls): return "airflow_optimization" @property def granularity(self): return self.input_parameters.get('granularity', 300) @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "threshold_airflow": { "description": ("airflow threshold for migration, Unit is " "0.1CFM"), "type": "number", "default": 400.0 }, "threshold_inlet_t": { "description": ("inlet temperature threshold for " "migration decision"), "type": "number", "default": 28.0 }, "threshold_power": { "description": ("system power threshold for migration " "decision"), "type": "number", "default": 350.0 }, "period": { "description": "aggregate time period of ceilometer", "type": "number", "default": 300 }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "default": 300 }, }, } def get_available_compute_nodes(self): default_node_scope = [element.ServiceState.ENABLED.value] return {uuid: cn for uuid, cn in self.compute_model.get_all_compute_nodes().items() if cn.state == element.ServiceState.ONLINE.value and cn.status in default_node_scope} def calculate_used_resource(self, 
node): """Compute the used vcpus, memory and disk based on instance flavors""" used_res = self.compute_model.get_node_used_resources(node) return used_res['vcpu'], used_res['memory'], used_res['disk'] def choose_instance_to_migrate(self, hosts): """Pick up an active instance to migrate from provided hosts :param hosts: the array of dict which contains node object """ instances_tobe_migrate = [] for nodemap in hosts: source_node = nodemap['node'] source_instances = self.compute_model.get_node_instances( source_node) if source_instances: inlet_temp = self.datasource_backend.statistic_aggregation( resource=source_node, resource_type='instance', meter_name='host_inlet_temp', period=self._period, granularity=self.granularity) power = self.datasource_backend.statistic_aggregation( resource=source_node, resource_type='instance', meter_name='host_power', period=self._period, granularity=self.granularity) if (power < self.threshold_power and inlet_temp < self.threshold_inlet_t): # hardware issue, migrate all instances from this node for instance in source_instances: instances_tobe_migrate.append(instance) return source_node, instances_tobe_migrate else: # migrate the first active instance for instance in source_instances: # NOTE: skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue if (instance.state != element.InstanceState.ACTIVE.value): LOG.info( "Instance not active, skipped: %s", instance.uuid) continue instances_tobe_migrate.append(instance) return source_node, instances_tobe_migrate else: LOG.info("Instance not found on node: %s", source_node.uuid) def filter_destination_hosts(self, hosts, instances_to_migrate): """Find instance and host with sufficient available resources""" # large instances go first instances_to_migrate = sorted( instances_to_migrate, reverse=True, key=lambda x: (x.vcpus)) # find hosts for instances destination_hosts = [] for instance_to_migrate in 
instances_to_migrate: required_cores = instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_mem = instance_to_migrate.memory dest_migrate_info = {} for nodemap in hosts: host = nodemap['node'] if 'cores_used' not in nodemap: # calculate the available resources nodemap['cores_used'], nodemap['mem_used'],\ nodemap['disk_used'] = self.calculate_used_resource( host) cores_available = (host.vcpus - nodemap['cores_used']) disk_available = (host.disk - nodemap['disk_used']) mem_available = ( host.memory - nodemap['mem_used']) if (cores_available >= required_cores and disk_available >= required_disk and mem_available >= required_mem): dest_migrate_info['instance'] = instance_to_migrate dest_migrate_info['node'] = host nodemap['cores_used'] += required_cores nodemap['mem_used'] += required_mem nodemap['disk_used'] += required_disk destination_hosts.append(dest_migrate_info) break # check if all instances have target hosts if len(destination_hosts) != len(instances_to_migrate): LOG.warning("Not all target hosts could be found; it might " "be because there is not enough resource") return None return destination_hosts def group_hosts_by_airflow(self): """Group hosts based on airflow meters""" nodes = self.get_available_compute_nodes() overload_hosts = [] nonoverload_hosts = [] for node_id in nodes: airflow = None node = self.compute_model.get_node_by_uuid( node_id) airflow = self.datasource_backend.statistic_aggregation( resource=node, resource_type='compute_node', meter_name='host_airflow', period=self._period, granularity=self.granularity) # some hosts may not have airflow meter, remove from target if airflow is None: LOG.warning("%s: no airflow data", node.uuid) continue LOG.debug("%(resource)s: airflow %(airflow)f", {'resource': node, 'airflow': airflow}) nodemap = {'node': node, 'airflow': airflow} if airflow >= self.threshold_airflow: # mark the node to release resources overload_hosts.append(nodemap) else: nonoverload_hosts.append(nodemap) return 
overload_hosts, nonoverload_hosts def pre_execute(self): self._pre_execute() self.meter_name_airflow = 'host_airflow' self.meter_name_inlet_t = 'host_inlet_temp' self.meter_name_power = 'host_power' self.threshold_airflow = self.input_parameters.threshold_airflow self.threshold_inlet_t = self.input_parameters.threshold_inlet_t self.threshold_power = self.input_parameters.threshold_power self._period = self.input_parameters.period def do_execute(self, audit=None): source_nodes, target_nodes = self.group_hosts_by_airflow() if not source_nodes: LOG.debug("No hosts require optimization") return self.solution if not target_nodes: LOG.warning("No hosts currently have airflow under %s, " "therefore there are no possible target " "hosts for any migration", self.threshold_airflow) return self.solution # migrate the instance from server with largest airflow first source_nodes = sorted(source_nodes, reverse=True, key=lambda x: (x["airflow"])) instances_to_migrate = self.choose_instance_to_migrate(source_nodes) if not instances_to_migrate: return self.solution source_node, instances_src = instances_to_migrate # sort host with airflow target_nodes = sorted(target_nodes, key=lambda x: (x["airflow"])) # find the hosts that have enough resource # for the instance to be migrated destination_hosts = self.filter_destination_hosts( target_nodes, instances_src) if not destination_hosts: LOG.warning("No target host could be found; it might " "be because there is not enough resources") return self.solution # generate solution to migrate the instance to the dest server, for info in destination_hosts: instance = info['instance'] destination_node = info['node'] if self.compute_model.migrate_instance( instance, source_node, destination_node): self.add_action_migrate( instance, 'live', source_node, destination_node) def post_execute(self): self.solution.model = self.compute_model # TODO(v-francoise): Add the indicators to the solution LOG.debug(self.compute_model.to_string()) 
python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/workload_stabilization.py0000664000175000017500000006050613656752270032460 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica LLC # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import copy import itertools import math import random import oslo_cache from oslo_config import cfg from oslo_log import log import oslo_utils from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) CONF = cfg.CONF def _set_memoize(conf): oslo_cache.configure(conf) region = oslo_cache.create_region() configured_region = oslo_cache.configure_cache_region(conf, region) return oslo_cache.core.get_memoization_decorator(conf, configured_region, 'cache') class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy): """Workload Stabilization control using live migration This is workload stabilization strategy based on standard deviation algorithm. The goal is to determine if there is an overload in a cluster and respond to it by migrating VMs to stabilize the cluster. This strategy has been tested in a small (32 nodes) cluster. It assumes that live migrations are possible in your cluster. 
""" MEMOIZE = _set_memoize(CONF) DATASOURCE_METRICS = ['host_cpu_usage', 'instance_cpu_usage', 'instance_ram_usage', 'host_ram_usage'] def __init__(self, config, osc=None): """Workload Stabilization control using live migration :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(WorkloadStabilization, self).__init__(config, osc) self.weights = None self.metrics = None self.thresholds = None self.host_choice = None self.instance_metrics = None self.retry_count = None self.periods = None self.aggregation_method = None self.sd_before_audit = 0 self.sd_after_audit = 0 self.instance_migrations_count = 0 self.instances_count = 0 @classmethod def get_name(cls): return "workload_stabilization" @classmethod def get_display_name(cls): return _("Workload stabilization") @classmethod def get_translatable_display_name(cls): return "Workload stabilization" @property def granularity(self): return self.input_parameters.get('granularity', 300) @classmethod def get_schema(cls): return { "properties": { "metrics": { "description": "Metrics used as rates of cluster loads.", "type": "array", "items": { "type": "string", "enum": ["instance_cpu_usage", "instance_ram_usage"] }, "default": ["instance_cpu_usage"] }, "thresholds": { "description": "Dict where key is a metric and value " "is a trigger value.", "type": "object", "properties": { "instance_cpu_usage": { "type": "number", "minimum": 0, "maximum": 1 }, "instance_ram_usage": { "type": "number", "minimum": 0, "maximum": 1 } }, "default": {"instance_cpu_usage": 0.1, "instance_ram_usage": 0.1} }, "weights": { "description": "These weights used to calculate " "common standard deviation. 
Name of weight" " contains meter name and _weight suffix.", "type": "object", "properties": { "instance_cpu_usage_weight": { "type": "number", "minimum": 0, "maximum": 1 }, "instance_ram_usage_weight": { "type": "number", "minimum": 0, "maximum": 1 } }, "default": {"instance_cpu_usage_weight": 1.0, "instance_ram_usage_weight": 1.0} }, "instance_metrics": { "description": "Mapping to get hardware statistics using" " instance metrics", "type": "object", "default": {"instance_cpu_usage": "host_cpu_usage", "instance_ram_usage": "host_ram_usage"} }, "host_choice": { "description": "Method of host's choice. There are cycle," " retry and fullsearch methods. " "Cycle will iterate hosts in cycle. " "Retry will get some hosts random " "(count defined in retry_count option). " "Fullsearch will return each host " "from list.", "type": "string", "default": "retry" }, "retry_count": { "description": "Count of random returned hosts", "type": "number", "minimum": 1, "default": 1 }, "periods": { "description": "These periods are used to get statistic " "aggregation for instance and host " "metrics. The period is simply a repeating" " interval of time into which the samples" " are grouped for aggregation. Watcher " "uses only the last period of all received" " ones.", "type": "object", "properties": { "instance": { "type": "integer", "minimum": 0 }, "compute_node": { "type": "integer", "minimum": 0 }, "node": { "type": "integer", # node is deprecated "minimum": 0, "default": 0 }, }, "default": { "instance": 720, "compute_node": 600, # node is deprecated "node": 0, } }, "aggregation_method": { "description": "Function used to aggregate multiple " "measures into an aggregate. 
For example, " "the min aggregation method will aggregate " "the values of different measures to the " "minimum value of all the measures in the " "time range.", "type": "object", "properties": { "instance": { "type": "string", "default": 'mean' }, "compute_node": { "type": "string", "default": 'mean' }, # node is deprecated "node": { "type": "string", "default": '' }, }, "default": { "instance": 'mean', "compute_node": 'mean', # node is deprecated "node": '', } }, "granularity": { "description": "The time between two measures in an " "aggregated timeseries of a metric.", "type": "number", "minimum": 0, "default": 300 }, } } def transform_instance_cpu(self, instance_load, host_vcpus): """Transform instance cpu utilization to overall host cpu utilization. :param instance_load: dict that contains instance uuid and utilization info. :param host_vcpus: int :return: float value """ return (instance_load['instance_cpu_usage'] * (instance_load['vcpus'] / float(host_vcpus))) @MEMOIZE def get_instance_load(self, instance): """Gathering instance load through ceilometer/gnocchi statistic. :param instance: instance for which statistic is gathered. 
:return: dict """ LOG.debug('Getting load for %s', instance.uuid) instance_load = {'uuid': instance.uuid, 'vcpus': instance.vcpus} for meter in self.metrics: avg_meter = self.datasource_backend.statistic_aggregation( instance, 'instance', meter, self.periods['instance'], self.aggregation_method['instance'], self.granularity) if avg_meter is None: LOG.warning( "No values returned by %(resource_id)s " "for %(metric_name)s", dict( resource_id=instance.uuid, metric_name=meter)) return if meter == 'instance_cpu_usage': avg_meter /= float(100) LOG.debug('Load of %(metric)s for %(instance)s is %(value)s', {'metric': meter, 'instance': instance.uuid, 'value': avg_meter}) instance_load[meter] = avg_meter return instance_load def normalize_hosts_load(self, hosts): normalized_hosts = copy.deepcopy(hosts) for host in normalized_hosts: if 'instance_ram_usage' in normalized_hosts[host]: node = self.compute_model.get_node_by_uuid(host) normalized_hosts[host]['instance_ram_usage'] \ /= float(node.memory) return normalized_hosts def get_available_nodes(self): nodes = self.compute_model.get_all_compute_nodes().items() return {node_uuid: node for node_uuid, node in nodes if node.state == element.ServiceState.ONLINE.value and node.status == element.ServiceState.ENABLED.value} def get_hosts_load(self): """Get load of every available host by gathering instances load""" hosts_load = {} for node_id, node in self.get_available_nodes().items(): hosts_load[node_id] = {} hosts_load[node_id]['vcpus'] = node.vcpus LOG.debug('Getting load for %s', node_id) for metric in self.metrics: avg_meter = None meter_name = self.instance_metrics[metric] avg_meter = self.datasource_backend.statistic_aggregation( node, 'compute_node', self.instance_metrics[metric], self.periods['compute_node'], self.aggregation_method['compute_node'], self.granularity) if avg_meter is None: LOG.warning('No values returned by node %s for %s', node_id, meter_name) del hosts_load[node_id] break else: if meter_name == 
'host_ram_usage': avg_meter /= oslo_utils.units.Ki if meter_name == 'host_cpu_usage': avg_meter /= 100 LOG.debug('Load of %(metric)s for %(node)s is %(value)s', {'metric': metric, 'node': node_id, 'value': avg_meter}) hosts_load[node_id][metric] = avg_meter return hosts_load def get_sd(self, hosts, meter_name): """Get standard deviation among hosts by specified meter""" mean = 0 variation = 0 num_hosts = len(hosts) if num_hosts == 0: return 0 for host_id in hosts: mean += hosts[host_id][meter_name] mean /= num_hosts for host_id in hosts: variation += (hosts[host_id][meter_name] - mean) ** 2 variation /= num_hosts sd = math.sqrt(variation) return sd def calculate_weighted_sd(self, sd_case): """Calculate common standard deviation among meters on host""" weighted_sd = 0 for metric, value in zip(self.metrics, sd_case): try: weighted_sd += value * float(self.weights[metric + '_weight']) except KeyError as exc: LOG.exception(exc) raise exception.WatcherException( _("Incorrect mapping: could not find associated weight" " for %s in weight dict.") % metric) return weighted_sd def calculate_migration_case(self, hosts, instance, src_node, dst_node): """Calculate migration case Return list of standard deviation values, that appearing in case of migration of instance from source host to destination host :param hosts: hosts with their workload :param instance: the virtual machine :param src_node: the source node :param dst_node: the destination node :return: list of standard deviation values """ migration_case = [] new_hosts = copy.deepcopy(hosts) instance_load = self.get_instance_load(instance) if not instance_load: return s_host_vcpus = new_hosts[src_node.uuid]['vcpus'] d_host_vcpus = new_hosts[dst_node.uuid]['vcpus'] for metric in self.metrics: if metric == 'instance_cpu_usage': new_hosts[src_node.uuid][metric] -= ( self.transform_instance_cpu(instance_load, s_host_vcpus)) new_hosts[dst_node.uuid][metric] += ( self.transform_instance_cpu(instance_load, d_host_vcpus)) else: 
new_hosts[src_node.uuid][metric] -= instance_load[metric] new_hosts[dst_node.uuid][metric] += instance_load[metric] normalized_hosts = self.normalize_hosts_load(new_hosts) for metric in self.metrics: migration_case.append(self.get_sd(normalized_hosts, metric)) migration_case.append(new_hosts) return migration_case def get_current_weighted_sd(self, hosts_load): """Calculate current weighted sd""" current_sd = [] normalized_load = self.normalize_hosts_load(hosts_load) for metric in self.metrics: metric_sd = self.get_sd(normalized_load, metric) current_sd.append(metric_sd) current_sd.append(hosts_load) return self.calculate_weighted_sd(current_sd[:-1]) def simulate_migrations(self, hosts): """Make sorted list of pairs instance:dst_host""" def yield_nodes(nodes): if self.host_choice == 'cycle': for i in itertools.cycle(nodes): yield [i] if self.host_choice == 'retry': while True: yield random.sample(nodes, self.retry_count) if self.host_choice == 'fullsearch': while True: yield nodes instance_host_map = [] nodes = sorted(list(self.get_available_nodes())) current_weighted_sd = self.get_current_weighted_sd(hosts) for src_host in nodes: src_node = self.compute_model.get_node_by_uuid(src_host) c_nodes = copy.copy(nodes) c_nodes.remove(src_host) node_list = yield_nodes(c_nodes) for instance in self.compute_model.get_node_instances(src_node): # NOTE: skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue if instance.state not in [element.InstanceState.ACTIVE.value, element.InstanceState.PAUSED.value]: continue min_sd_case = {'value': current_weighted_sd} for dst_host in next(node_list): dst_node = self.compute_model.get_node_by_uuid(dst_host) sd_case = self.calculate_migration_case( hosts, instance, src_node, dst_node) if sd_case is None: break weighted_sd = self.calculate_weighted_sd(sd_case[:-1]) if weighted_sd < min_sd_case['value']: min_sd_case = { 'host': dst_node.uuid, 
'value': weighted_sd, 's_host': src_node.uuid, 'instance': instance.uuid} instance_host_map.append(min_sd_case) if sd_case is None: continue return sorted(instance_host_map, key=lambda x: x['value']) def check_threshold(self): """Check if cluster is needed in balancing""" hosts_load = self.get_hosts_load() normalized_load = self.normalize_hosts_load(hosts_load) for metric in self.metrics: metric_sd = self.get_sd(normalized_load, metric) LOG.info("Standard deviation for %(metric)s is %(sd)s.", {'metric': metric, 'sd': metric_sd}) if metric_sd > float(self.thresholds[metric]): LOG.info("Standard deviation of %(metric)s exceeds" " appropriate threshold %(threshold)s by %(sd)s.", {'metric': metric, 'threshold': float(self.thresholds[metric]), 'sd': metric_sd}) LOG.info("Launching workload optimization...") self.sd_before_audit = metric_sd return self.simulate_migrations(hosts_load) def create_migration_instance(self, mig_instance, mig_source_node, mig_destination_node): """Create migration VM""" if self.compute_model.migrate_instance( mig_instance, mig_source_node, mig_destination_node): self.add_action_migrate(mig_instance, 'live', mig_source_node, mig_destination_node) self.instance_migrations_count += 1 def migrate(self, instance_uuid, src_host, dst_host): mig_instance = self.compute_model.get_instance_by_uuid(instance_uuid) mig_source_node = self.compute_model.get_node_by_uuid( src_host) mig_destination_node = self.compute_model.get_node_by_uuid( dst_host) self.create_migration_instance(mig_instance, mig_source_node, mig_destination_node) def fill_solution(self): self.solution.model = self.compute_model return self.solution def pre_execute(self): self._pre_execute() self.weights = self.input_parameters.weights self.metrics = self.input_parameters.metrics self.thresholds = self.input_parameters.thresholds self.host_choice = self.input_parameters.host_choice self.instance_metrics = self.input_parameters.instance_metrics self.retry_count = 
self.input_parameters.retry_count self.periods = self.input_parameters.periods self.aggregation_method = self.input_parameters.aggregation_method # backwards compatibility for node parameter with aggregate. if self.aggregation_method['node']: LOG.warning('Parameter node has been renamed to compute_node and ' 'will be removed in next release.') self.aggregation_method['compute_node'] = \ self.aggregation_method['node'] # backwards compatibility for node parameter with period. if self.periods['node'] != 0: LOG.warning('Parameter node has been renamed to compute_node and ' 'will be removed in next release.') self.periods['compute_node'] = self.periods['node'] def do_execute(self, audit=None): migration = self.check_threshold() if migration: hosts_load = self.get_hosts_load() min_sd = 1 balanced = False for instance_host in migration: instance = self.compute_model.get_instance_by_uuid( instance_host['instance']) src_node = self.compute_model.get_node_by_uuid( instance_host['s_host']) dst_node = self.compute_model.get_node_by_uuid( instance_host['host']) if instance.disk > dst_node.disk: continue instance_load = self.calculate_migration_case( hosts_load, instance, src_node, dst_node) weighted_sd = self.calculate_weighted_sd(instance_load[:-1]) if weighted_sd < min_sd: min_sd = weighted_sd hosts_load = instance_load[-1] LOG.info("Migration of %(instance_uuid)s from %(s_host)s " "to %(host)s reduces standard deviation to " "%(min_sd)s.", {'instance_uuid': instance_host['instance'], 's_host': instance_host['s_host'], 'host': instance_host['host'], 'min_sd': min_sd}) self.migrate(instance_host['instance'], instance_host['s_host'], instance_host['host']) self.sd_after_audit = min_sd for metric, value in zip(self.metrics, instance_load[:-1]): if value < float(self.thresholds[metric]): LOG.info("At least one of metrics' values fell " "below the threshold values. 
" "Workload Stabilization has successfully " "completed optimization process.") balanced = True break if balanced: break def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.fill_solution() self.solution.set_efficacy_indicators( instance_migrations_count=self.instance_migrations_count, standard_deviation_before_audit=self.sd_before_audit, standard_deviation_after_audit=self.sd_after_audit, instances_count=len(self.compute_model.get_all_instances()), ) LOG.debug(self.compute_model.to_string()) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/storage_capacity_balance.py0000664000175000017500000003646513656752270032677 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher._i18n import _ from watcher.common import cinder_helper from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class StorageCapacityBalance(base.WorkloadStabilizationBaseStrategy): """Storage capacity balance using cinder volume migration *Description* This strategy migrates volumes based on the workload of the cinder pools. It makes decision to migrate a volume whenever a pool's used utilization % is higher than the specified threshold. The volume to be moved should make the pool close to average workload of all cinder pools. 
*Requirements* * You must have at least 2 cinder volume pools to run this strategy. *Limitations* * Volume migration depends on the storage device. It may take a long time. *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/queens/implemented/storage-capacity-balance.html """ def __init__(self, config, osc=None): """VolumeMigrate using cinder volume migration :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(StorageCapacityBalance, self).__init__(config, osc) self._cinder = None self.volume_threshold = 80.0 self.pool_type_cache = dict() self.source_pools = [] self.dest_pools = [] @property def cinder(self): if not self._cinder: self._cinder = cinder_helper.CinderHelper(osc=self.osc) return self._cinder @classmethod def get_name(cls): return "storage_capacity_balance" @classmethod def get_display_name(cls): return _("Storage Capacity Balance Strategy") @classmethod def get_translatable_display_name(cls): return "Storage Capacity Balance Strategy" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "volume_threshold": { "description": "volume threshold for capacity balance", "type": "number", "default": 80.0 }, }, } @classmethod def get_config_opts(cls): return super(StorageCapacityBalance, cls).get_config_opts() + [ cfg.ListOpt( "ex_pools", help="exclude pools", default=['local_vstorage']), ] def get_pools(self, cinder): """Get all volume pools excepting ex_pools. :param cinder: cinder client :return: volume pools """ ex_pools = self.config.ex_pools pools = cinder.get_storage_pool_list() filtered_pools = [p for p in pools if p.pool_name not in ex_pools] return filtered_pools def get_volumes(self, cinder): """Get all volumes with status in available or in-use and no snapshot. 
:param cinder: cinder client :return: all volumes """ all_volumes = cinder.get_volume_list() valid_status = ['in-use', 'available'] volume_snapshots = cinder.get_volume_snapshots_list() snapshot_volume_ids = [] for snapshot in volume_snapshots: snapshot_volume_ids.append(snapshot.volume_id) nosnap_volumes = list(filter(lambda v: v.id not in snapshot_volume_ids, all_volumes)) LOG.info("volumes in snap: %s", snapshot_volume_ids) status_volumes = list( filter(lambda v: v.status in valid_status, nosnap_volumes)) valid_volumes = [v for v in status_volumes if getattr(v, 'migration_status') == 'success' or getattr(v, 'migration_status') is None] LOG.info("valid volumes: %s", valid_volumes) return valid_volumes def group_pools(self, pools, threshold): """group volume pools by threshold. :param pools: all volume pools :param threshold: volume threshold :return: under and over threshold pools """ under_pools = list( filter(lambda p: float(p.total_capacity_gb) - float(p.free_capacity_gb) < float(p.total_capacity_gb) * threshold, pools)) over_pools = list( filter(lambda p: float(p.total_capacity_gb) - float(p.free_capacity_gb) >= float(p.total_capacity_gb) * threshold, pools)) return over_pools, under_pools def get_volume_type_by_name(self, cinder, backendname): # return list of pool type if backendname in self.pool_type_cache.keys(): return self.pool_type_cache.get(backendname) volume_type_list = cinder.get_volume_type_list() volume_type = list(filter( lambda volume_type: volume_type.extra_specs.get( 'volume_backend_name') == backendname, volume_type_list)) if volume_type: self.pool_type_cache[backendname] = volume_type return self.pool_type_cache.get(backendname) else: return [] def migrate_fit(self, volume, threshold): target_pool_name = None if volume.volume_type: LOG.info("volume %s type %s", volume.id, volume.volume_type) return target_pool_name self.dest_pools.sort( key=lambda p: float(p.free_capacity_gb) / float(p.total_capacity_gb)) for pool in 
reversed(self.dest_pools): total_cap = float(pool.total_capacity_gb) allocated = float(pool.allocated_capacity_gb) ratio = pool.max_over_subscription_ratio if total_cap * ratio < allocated + float(volume.size): LOG.info("pool %s allocated over", pool.name) continue free_cap = float(pool.free_capacity_gb) - float(volume.size) if free_cap > (1 - threshold) * total_cap: target_pool_name = pool.name index = self.dest_pools.index(pool) setattr(self.dest_pools[index], 'free_capacity_gb', str(free_cap)) LOG.info("volume: get pool %s for vol %s", target_pool_name, volume.name) break return target_pool_name def check_pool_type(self, volume, dest_pool): target_type = None src_extra_specs = {} # check type feature if not volume.volume_type: return target_type volume_type_list = self.cinder.get_volume_type_list() volume_type = list(filter( lambda volume_type: volume_type.name == volume.volume_type, volume_type_list)) if volume_type: src_extra_specs = volume_type[0].extra_specs src_extra_specs.pop('volume_backend_name', None) backendname = getattr(dest_pool, 'volume_backend_name') dst_pool_type = self.get_volume_type_by_name(self.cinder, backendname) for src_key in src_extra_specs.keys(): dst_pool_type = [pt for pt in dst_pool_type if pt.extra_specs.get(src_key) == src_extra_specs.get(src_key)] if dst_pool_type: if volume.volume_type: if dst_pool_type[0].name != volume.volume_type: target_type = dst_pool_type[0].name else: target_type = dst_pool_type[0].name return target_type def retype_fit(self, volume, threshold): target_type = None self.dest_pools.sort( key=lambda p: float(p.free_capacity_gb) / float(p.total_capacity_gb)) for pool in reversed(self.dest_pools): backendname = getattr(pool, 'volume_backend_name') pool_type = self.get_volume_type_by_name(self.cinder, backendname) LOG.info("volume: pool %s, type %s", pool.name, pool_type) if pool_type is None: continue total_cap = float(pool.total_capacity_gb) allocated = float(pool.allocated_capacity_gb) ratio = 
pool.max_over_subscription_ratio if total_cap * ratio < allocated + float(volume.size): LOG.info("pool %s allocated over", pool.name) continue free_cap = float(pool.free_capacity_gb) - float(volume.size) if free_cap > (1 - threshold) * total_cap: target_type = self.check_pool_type(volume, pool) if target_type is None: continue index = self.dest_pools.index(pool) setattr(self.dest_pools[index], 'free_capacity_gb', str(free_cap)) LOG.info("volume: get type %s for vol %s", target_type, volume.name) break return target_type def get_actions(self, pool, volumes, threshold): """get volume, pool key-value action return: retype, migrate dict """ retype_dicts = dict() migrate_dicts = dict() total_cap = float(pool.total_capacity_gb) used_cap = float(pool.total_capacity_gb) - float(pool.free_capacity_gb) seek_flag = True volumes_in_pool = list( filter(lambda v: getattr(v, 'os-vol-host-attr:host') == pool.name, volumes)) LOG.info("volumes in pool: %s", str(volumes_in_pool)) if not volumes_in_pool: return retype_dicts, migrate_dicts ava_volumes = list(filter(lambda v: v.status == 'available', volumes_in_pool)) ava_volumes.sort(key=lambda v: float(v.size)) LOG.info("available volumes in pool: %s ", str(ava_volumes)) for vol in ava_volumes: vol_flag = False migrate_pool = self.migrate_fit(vol, threshold) if migrate_pool: migrate_dicts[vol.id] = migrate_pool vol_flag = True else: target_type = self.retype_fit(vol, threshold) if target_type: retype_dicts[vol.id] = target_type vol_flag = True if vol_flag: used_cap -= float(vol.size) if used_cap < threshold * total_cap: seek_flag = False break if seek_flag: noboot_volumes = list( filter(lambda v: v.bootable.lower() == 'false' and v.status == 'in-use', volumes_in_pool)) noboot_volumes.sort(key=lambda v: float(v.size)) LOG.info("noboot volumes: %s ", str(noboot_volumes)) for vol in noboot_volumes: vol_flag = False migrate_pool = self.migrate_fit(vol, threshold) if migrate_pool: migrate_dicts[vol.id] = migrate_pool vol_flag = True else: 
target_type = self.retype_fit(vol, threshold) if target_type: retype_dicts[vol.id] = target_type vol_flag = True if vol_flag: used_cap -= float(vol.size) if used_cap < threshold * total_cap: seek_flag = False break if seek_flag: boot_volumes = list( filter(lambda v: v.bootable.lower() == 'true' and v.status == 'in-use', volumes_in_pool) ) boot_volumes.sort(key=lambda v: float(v.size)) LOG.info("boot volumes: %s ", str(boot_volumes)) for vol in boot_volumes: vol_flag = False migrate_pool = self.migrate_fit(vol, threshold) if migrate_pool: migrate_dicts[vol.id] = migrate_pool vol_flag = True else: target_type = self.retype_fit(vol, threshold) if target_type: retype_dicts[vol.id] = target_type vol_flag = True if vol_flag: used_cap -= float(vol.size) if used_cap < threshold * total_cap: seek_flag = False break return retype_dicts, migrate_dicts def pre_execute(self): LOG.info("Initializing " + self.get_display_name() + " Strategy") self.volume_threshold = self.input_parameters.volume_threshold def do_execute(self, audit=None): """Strategy execution phase This phase is where you should put the main logic of your strategy. 
""" all_pools = self.get_pools(self.cinder) all_volumes = self.get_volumes(self.cinder) threshold = float(self.volume_threshold) / 100 self.source_pools, self.dest_pools = self.group_pools( all_pools, threshold) LOG.info(" source pools: %s dest pools:%s", self.source_pools, self.dest_pools) if not self.source_pools: LOG.info("No pools require optimization") return if not self.dest_pools: LOG.info("No enough pools for optimization") return for source_pool in self.source_pools: retype_actions, migrate_actions = self.get_actions( source_pool, all_volumes, threshold) for vol_id, pool_type in retype_actions.items(): vol = [v for v in all_volumes if v.id == vol_id] parameters = {'migration_type': 'retype', 'destination_type': pool_type, 'resource_name': vol[0].name} self.solution.add_action(action_type='volume_migrate', resource_id=vol_id, input_parameters=parameters) for vol_id, pool_name in migrate_actions.items(): vol = [v for v in all_volumes if v.id == vol_id] parameters = {'migration_type': 'migrate', 'destination_node': pool_name, 'resource_name': vol[0].name} self.solution.add_action(action_type='volume_migrate', resource_id=vol_id, input_parameters=parameters) def post_execute(self): """Post-execution phase """ self.solution.set_efficacy_indicators( instance_migrations_count=0, instances_count=0, ) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/outlet_temp_control.py0000664000175000017500000002540213656752270031777 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 Intel Corp # # Authors: Junjie-Huang # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
*Good Thermal Strategy*

Towards to software defined infrastructure, the power and thermal
intelligences is being adopted to optimize workload, which can help
improve efficiency, reduce power, as well as to improve datacenter PUE
and lower down operation cost in data center.
Outlet (Exhaust Air) Temperature is one of the important thermal
telemetries to measure thermal/workload status of server.

This strategy makes decisions to migrate workloads to the hosts with
good thermal condition (lowest outlet temperature) when the outlet
temperature of source hosts reach a configurable threshold.
"""

from oslo_log import log

from watcher._i18n import _
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base

LOG = log.getLogger(__name__)


class OutletTempControl(base.ThermalOptimizationBaseStrategy):
    """[PoC] Outlet temperature control using live migration

    *Description*

    It is a migration strategy based on the outlet temperature of compute
    hosts. It generates solutions to move a workload whenever a server's
    outlet temperature is higher than the specified threshold.

    *Requirements*

    * Hardware: All computer hosts should support IPMI and PTAS technology
    * Software: Ceilometer component ceilometer-agent-ipmi running
      in each compute host, and Ceilometer API can report such telemetry
      ``hardware.ipmi.node.outlet_temperature`` successfully.
    * You must have at least 2 physical compute hosts to run this strategy.

    *Limitations*

    - This is a proof of concept that is not meant to be used in production
    - We cannot forecast how many servers should be migrated. This is the
      reason why we only plan a single virtual machine migration at a time.
      So it's better to use this algorithm with `CONTINUOUS` audits.
    - It assume that live migrations are possible

    *Spec URL*

    https://github.com/openstack/watcher-specs/blob/master/specs/mitaka/implemented/outlet-temperature-based-strategy.rst
    """

    # The meter to report outlet temperature in ceilometer
    # Action type used when generating the migration solution.
    MIGRATION = "migrate"
    # Metric this strategy requires from the configured datasource backend.
    DATASOURCE_METRICS = ['host_outlet_temp']

    def __init__(self, config, osc=None):
        """Outlet temperature control using live migration

        :param config: A mapping containing the configuration of this strategy
        :type config: dict
        :param osc: an OpenStackClients object, defaults to None
        :type osc: :py:class:`~.OpenStackClients` instance, optional
        """
        super(OutletTempControl, self).__init__(config, osc)

    @classmethod
    def get_name(cls):
        return "outlet_temperature"

    @classmethod
    def get_display_name(cls):
        return _("Outlet temperature based strategy")

    @classmethod
    def get_translatable_display_name(cls):
        return "Outlet temperature based strategy"

    @property
    def period(self):
        # Statistic aggregation window in seconds (audit input parameter).
        return self.input_parameters.get('period', 30)

    @classmethod
    def get_schema(cls):
        # Mandatory default setting for each element
        # NOTE: "threshold" is a JSON number, so callers may pass floats.
        return {
            "properties": {
                "threshold": {
                    "description": "temperature threshold for migration",
                    "type": "number",
                    "default": 35.0
                },
                "period": {
                    "description": "The time interval in seconds for "
                                   "getting statistic aggregation",
                    "type": "number",
                    "default": 30
                },
                "granularity": {
                    "description": "The time between two measures in an "
                                   "aggregated timeseries of a metric.",
                    "type": "number",
                    "default": 300
                },
            },
        }

    @property
    def granularity(self):
        # Spacing (seconds) between datasource measures.
        return self.input_parameters.get('granularity', 300)

    def get_available_compute_nodes(self):
        # NOTE(review): unlike the consolidation strategies in this tree,
        # only ENABLED nodes are in scope here — DISABLED nodes are never
        # considered as sources or targets.
        default_node_scope = [element.ServiceState.ENABLED.value]
        return {uuid: cn for uuid, cn in
                self.compute_model.get_all_compute_nodes().items()
                if cn.state == element.ServiceState.ONLINE.value and
                cn.status in default_node_scope}

    def group_hosts_by_outlet_temp(self):
        """Group hosts based on outlet temp meters

        Splits available nodes into two lists based on self.threshold:
        nodes at or above the threshold (need to release load) and the
        remaining nodes (potential migration targets). Nodes without
        outlet-temperature data are skipped entirely.
        """
        nodes = self.get_available_compute_nodes()
        hosts_need_release = []
        hosts_target = []
        metric_name = 'host_outlet_temp'
        for node in nodes.values():
            outlet_temp = None

            outlet_temp = self.datasource_backend.statistic_aggregation(
                resource=node,
                resource_type='compute_node',
                meter_name=metric_name,
                period=self.period,
                granularity=self.granularity,
            )
            # some hosts may not have outlet temp meters, remove from target
            if outlet_temp is None:
                LOG.warning("%s: no outlet temp data", node.uuid)
                continue

            LOG.debug("%(resource)s: outlet temperature %(temp)f",
                      {'resource': node.uuid, 'temp': outlet_temp})
            instance_data = {'compute_node': node, 'outlet_temp': outlet_temp}
            if outlet_temp >= self.threshold:
                # mark the node to release resources
                hosts_need_release.append(instance_data)
            else:
                hosts_target.append(instance_data)
        return hosts_need_release, hosts_target

    def choose_instance_to_migrate(self, hosts):
        """Pick up an active instance to migrate from provided hosts"""
        for instance_data in hosts:
            mig_source_node = instance_data['compute_node']
            instances_of_src = self.compute_model.get_node_instances(
                mig_source_node)
            for instance in instances_of_src:
                try:
                    # NOTE: skip exclude instance when migrating
                    if instance.watcher_exclude:
                        LOG.debug("Instance is excluded by scope, "
                                  "skipped: %s", instance.uuid)
                        continue
                    # select the first active instance to migrate
                    if (instance.state !=
                            element.InstanceState.ACTIVE.value):
                        LOG.info("Instance not active, skipped: %s",
                                 instance.uuid)
                        continue
                    return mig_source_node, instance
                except exception.InstanceNotFound as e:
                    LOG.exception(e)
                    LOG.info("Instance not found")

        # No migratable instance on any hot host.
        return None

    def filter_dest_servers(self, hosts, instance_to_migrate):
        """Only return hosts with sufficient available resources"""
        # NOTE(review): the right-hand side of this assignment continues in
        # the next chunk of this dump.
        required_cores =
instance_to_migrate.vcpus required_disk = instance_to_migrate.disk required_memory = instance_to_migrate.memory # filter nodes without enough resource dest_servers = [] for instance_data in hosts: host = instance_data['compute_node'] # available free_res = self.compute_model.get_node_free_resources(host) if (free_res['vcpu'] >= required_cores and free_res['disk'] >= required_disk and free_res['memory'] >= required_memory): dest_servers.append(instance_data) return dest_servers def pre_execute(self): self._pre_execute() # the migration plan will be triggered when the outlet temperature # reaches threshold self.threshold = self.input_parameters.threshold LOG.info("Outlet temperature strategy threshold=%d", self.threshold) def do_execute(self, audit=None): hosts_need_release, hosts_target = self.group_hosts_by_outlet_temp() if len(hosts_need_release) == 0: # TODO(zhenzanz): return something right if there's no hot servers LOG.debug("No hosts require optimization") return self.solution if len(hosts_target) == 0: LOG.warning("No hosts under outlet temp threshold found") return self.solution # choose the server with highest outlet t hosts_need_release = sorted(hosts_need_release, reverse=True, key=lambda x: (x["outlet_temp"])) instance_to_migrate = self.choose_instance_to_migrate( hosts_need_release) # calculate the instance's cpu cores,memory,disk needs if instance_to_migrate is None: return self.solution mig_source_node, instance_src = instance_to_migrate dest_servers = self.filter_dest_servers(hosts_target, instance_src) # sort the filtered result by outlet temp # pick up the lowest one as dest server if len(dest_servers) == 0: # TODO(zhenzanz): maybe to warn that there's no resource # for instance. 
LOG.info("No proper target host could be found") return self.solution dest_servers = sorted(dest_servers, key=lambda x: (x["outlet_temp"])) # always use the host with lowerest outlet temperature mig_destination_node = dest_servers[0]['compute_node'] # generate solution to migrate the instance to the dest server, if self.compute_model.migrate_instance( instance_src, mig_source_node, mig_destination_node): parameters = {'migration_type': 'live', 'source_node': mig_source_node.uuid, 'destination_node': mig_destination_node.uuid, 'resource_name': instance_src.name} self.solution.add_action(action_type=self.MIGRATION, resource_id=instance_src.uuid, input_parameters=parameters) def post_execute(self): self.solution.model = self.compute_model # TODO(v-francoise): Add the indicators to the solution LOG.debug(self.compute_model.to_string()) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/basic_consolidation.py0000664000175000017500000004401713656752270031707 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
#

from oslo_config import cfg
from oslo_log import log

from watcher._i18n import _
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base

LOG = log.getLogger(__name__)


class BasicConsolidation(base.ServerConsolidationBaseStrategy):
    """Good server consolidation strategy

    Basic offline consolidation using live migration

    Consolidation of VMs is essential to achieve energy optimization in
    cloud environments such as OpenStack. As VMs are spinned up and/or
    moved over time, it becomes necessary to migrate VMs among servers
    to lower the costs. However, migration of VMs introduces runtime
    overheads and consumes extra energy, thus a good server consolidation
    strategy should carefully plan for migration in order to both minimize
    energy consumption and comply to the various SLAs.

    This algorithm not only minimizes the overall number of used servers,
    but also minimizes the number of migrations.

    It has been developed only for tests. You must have at least 2 physical
    compute nodes to run it, so you can easily run it on DevStack. It
    assumes that live migration is possible on your OpenStack cluster.
    """

    # Metrics this strategy requires from the configured datasource backend.
    DATASOURCE_METRICS = ['host_cpu_usage', 'instance_cpu_usage']

    # Action type used to enable/disable a nova-compute service.
    CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"

    def __init__(self, config, osc=None):
        """Basic offline Consolidation using live migration

        :param config: A mapping containing the configuration of this
                       strategy
        :type config: :py:class:`~.Struct` instance
        :param osc: :py:class:`~.OpenStackClients` instance
        """
        super(BasicConsolidation, self).__init__(config, osc)

        # set default value for the number of enabled compute nodes
        self.number_of_enabled_nodes = 0
        # set default value for the number of released nodes
        self.number_of_released_nodes = 0
        # set default value for the number of migrations
        self.number_of_migrations = 0

        # set default value for the efficacy
        self.efficacy = 100

        # TODO(jed): improve threshold overbooking?
        self.threshold_mem = 1
        self.threshold_disk = 1
        self.threshold_cores = 1

    @classmethod
    def get_name(cls):
        return "basic"

    @property
    def migration_attempts(self):
        # 0 means "unlimited attempts" (see get_schema below).
        return self.input_parameters.get('migration_attempts', 0)

    @property
    def period(self):
        return self.input_parameters.get('period', 7200)

    @property
    def granularity(self):
        return self.input_parameters.get('granularity', 300)

    @property
    def aggregation_method(self):
        # 'node' is the deprecated alias of 'compute_node'; see pre_execute.
        return self.input_parameters.get(
            'aggregation_method',
            {
                "instance": 'mean',
                "compute_node": 'mean',
                "node": ''
            }
        )

    @classmethod
    def get_display_name(cls):
        return _("Basic offline consolidation")

    @classmethod
    def get_translatable_display_name(cls):
        return "Basic offline consolidation"

    @classmethod
    def get_schema(cls):
        # Mandatory default setting for each element
        return {
            "properties": {
                "migration_attempts": {
                    "description": "Maximum number of combinations to be "
                                   "tried by the strategy while searching "
                                   "for potential candidates. To remove the "
                                   "limit, set it to 0 (by default)",
                    "type": "number",
                    "default": 0
                },
                "period": {
                    "description": "The time interval in seconds for "
                                   "getting statistic aggregation",
                    "type": "number",
                    "default": 7200
                },
                "granularity": {
                    "description": "The time between two measures in an "
                                   "aggregated timeseries of a metric.",
                    "type": "number",
                    "default": 300
                },
                "aggregation_method": {
                    "description": "Function used to aggregate multiple "
                                   "measures into an aggregate. For example, "
                                   "the min aggregation method will aggregate "
                                   "the values of different measures to the "
                                   "minimum value of all the measures in the "
                                   "time range.",
                    "type": "object",
                    "properties": {
                        "instance": {
                            "type": "string",
                            "default": 'mean'
                        },
                        "compute_node": {
                            "type": "string",
                            "default": 'mean'
                        },
                        "node": {
                            "type": "string",
                            # node is deprecated
                            "default": ''
                        },
                    },
                    "default": {
                        "instance": 'mean',
                        "compute_node": 'mean',
                        # node is deprecated
                        "node": '',
                    }
                },
            },
        }

    @classmethod
    def get_config_opts(cls):
        return super(BasicConsolidation, cls).get_config_opts() + [
            cfg.BoolOpt(
                'check_optimize_metadata',
                help='Check optimize metadata field in instance before'
                     ' migration',
                default=False),
        ]

    def get_available_compute_nodes(self):
        # Both ENABLED and DISABLED nodes are in scope, as long as the
        # nova-compute service itself is ONLINE.
        default_node_scope = [element.ServiceState.ENABLED.value,
                              element.ServiceState.DISABLED.value]
        return {uuid: cn for uuid, cn in
                self.compute_model.get_all_compute_nodes().items()
                if cn.state == element.ServiceState.ONLINE.value and
                cn.status in default_node_scope}

    def check_migration(self, source_node, destination_node,
                        instance_to_migrate):
        """Check if the migration is possible

        :param source_node: the current node of the virtual machine
        :param destination_node: the destination of the virtual machine
        :param instance_to_migrate: the instance / virtual machine
        :return: True if there is enough place otherwise false
        """
        if source_node == destination_node:
            return False

        LOG.debug('Migrate instance %s from %s to %s',
                  instance_to_migrate, source_node, destination_node)

        used_resources = self.compute_model.get_node_used_resources(
            destination_node)

        # capacity requested by the compute node
        total_cores = used_resources['vcpu'] + instance_to_migrate.vcpus
        total_disk = used_resources['disk'] + instance_to_migrate.disk
        total_mem = used_resources['memory'] + instance_to_migrate.memory

        return self.check_threshold(destination_node, total_cores, total_disk,
                                    total_mem)

    def check_threshold(self, destination_node, total_cores, total_disk,
                        total_mem):
        """Check threshold

        Check the threshold value defined by the ratio of
        aggregated CPU capacity of VMs on one node to CPU capacity
        of this node must not exceed the threshold value.

        :param destination_node: the destination of the virtual machine
        :param total_cores: total cores of the virtual machine
        :param total_disk: total disk size used by the virtual machine
        :param total_mem: total memory used by the virtual machine
        :return: True if the threshold is not exceed
        """
        cpu_capacity = destination_node.vcpu_capacity
        disk_capacity = destination_node.disk_gb_capacity
        memory_capacity = destination_node.memory_mb_capacity

        # thresholds default to 1 (no overbooking); see __init__.
        return (cpu_capacity >= total_cores * self.threshold_cores and
                disk_capacity >= total_disk * self.threshold_disk and
                memory_capacity >= total_mem * self.threshold_mem)

    def calculate_weight(self, compute_resource, total_cores_used,
                         total_disk_used, total_memory_used):
        """Calculate weight of every resource

        Returns the mean of the cpu/disk/memory utilization ratios of the
        given resource (node or instance), each in [0, 1].

        :param compute_resource: node or instance with vcpus/disk/memory
        :param total_cores_used: vcpus in use on the resource
        :param total_disk_used: disk in use on the resource
        :param total_memory_used: memory in use on the resource
        :return: averaged utilization score
        """
        cpu_capacity = compute_resource.vcpus
        disk_capacity = compute_resource.disk
        memory_capacity = compute_resource.memory

        score_cores = (1 - (float(cpu_capacity) - float(total_cores_used)) /
                       float(cpu_capacity))

        # It's possible that disk_capacity is 0, e.g., m1.nano.disk = 0
        if disk_capacity == 0:
            score_disk = 0
        else:
            score_disk = (1 - (float(disk_capacity) - float(total_disk_used)) /
                          float(disk_capacity))

        score_memory = (
            1 - (float(memory_capacity) - float(total_memory_used)) /
            float(memory_capacity))
        # TODO(jed): take in account weight
        return (score_cores + score_disk + score_memory) / 3

    def get_compute_node_cpu_usage(self, compute_node):
        return self.datasource_backend.get_host_cpu_usage(
            compute_node, self.period, self.aggregation_method['compute_node'],
            self.granularity)

    def get_instance_cpu_usage(self, instance):
        return self.datasource_backend.get_instance_cpu_usage(
            instance, self.period, self.aggregation_method['instance'],
            self.granularity)

    def calculate_score_node(self, node):
        """Calculate the score that represent the utilization level

        :param node: :py:class:`~.ComputeNode` instance
        :return: Score for the given compute node
        :rtype: float
        """
        host_avg_cpu_util = self.get_compute_node_cpu_usage(node)

        if host_avg_cpu_util is None:
            resource_id = "%s_%s" % (node.uuid, node.hostname)
            LOG.error(
                "No values returned by %(resource_id)s "
                "for %(metric_name)s", dict(
                    resource_id=resource_id,
                    metric_name='host_cpu_usage'))
            # Treat missing data as a fully-loaded host (pessimistic).
            host_avg_cpu_util = 100

        total_cores_used = node.vcpus * (host_avg_cpu_util / 100.0)

        # Only CPU contributes to the score here (disk/memory passed as 0).
        return self.calculate_weight(node, total_cores_used, 0, 0)

    def calculate_score_instance(self, instance):
        """Calculate Score of virtual machine

        :param instance: the virtual machine
        :return: score
        """
        instance_cpu_utilization = self.get_instance_cpu_usage(instance)
        if instance_cpu_utilization is None:
            LOG.error(
                "No values returned by %(resource_id)s "
                "for %(metric_name)s", dict(
                    resource_id=instance.uuid,
                    metric_name='instance_cpu_usage'))
            # Treat missing data as a fully-loaded instance (pessimistic).
            instance_cpu_utilization = 100

        total_cores_used = instance.vcpus * (instance_cpu_utilization / 100.0)

        return self.calculate_weight(instance, total_cores_used, 0, 0)

    def add_action_disable_node(self, node):
        # Emit a change_nova_service_state action disabling the given node.
        parameters = {'state': element.ServiceState.DISABLED.value,
                      'disabled_reason': self.REASON_FOR_DISABLE,
                      'resource_name': node.hostname}
        self.solution.add_action(action_type=self.CHANGE_NOVA_SERVICE_STATE,
                                 resource_id=node.uuid,
                                 input_parameters=parameters)

    def compute_score_of_nodes(self):
        """Calculate score of nodes based on load by VMs"""
        score = []
        for node in self.get_available_compute_nodes().values():
            if node.status == element.ServiceState.ENABLED.value:
                self.number_of_enabled_nodes += 1
            instances = self.compute_model.get_node_instances(node)
            # Empty nodes are not scored (nothing to migrate off them).
            if len(instances) > 0:
                result = self.calculate_score_node(node)
                score.append((node.uuid, result))

        return score

    def node_and_instance_score(self, sorted_scores):
        """Get List of VMs from node"""
        # sorted_scores is sorted descending, so the last entry is the
        # least-loaded node: that is the one we try to empty.
        node_to_release = sorted_scores[len(sorted_scores) - 1][0]
        instances = self.compute_model.get_node_instances(
            self.compute_model.get_node_by_uuid(node_to_release))

        instances_to_migrate = self.filter_instances_by_audit_tag(instances)

        instance_score = []
        for instance in instances_to_migrate:
            # Only ACTIVE instances are candidates for live migration.
            if instance.state == element.InstanceState.ACTIVE.value:
                instance_score.append(
                    (instance, self.calculate_score_instance(instance)))

        return node_to_release, instance_score

    def create_migration_instance(self, mig_instance, mig_source_node,
                                  mig_destination_node):
        """Create migration VM"""
        if self.compute_model.migrate_instance(
                mig_instance, mig_source_node, mig_destination_node):
            self.add_action_migrate(mig_instance, 'live', mig_source_node,
                                    mig_destination_node)

        # If the source is now empty, plan to disable it as well.
        if len(self.compute_model.get_node_instances(mig_source_node)) == 0:
            self.add_action_disable_node(mig_source_node)
            self.number_of_released_nodes += 1

    def calculate_num_migrations(self, sorted_instances, node_to_release,
                                 sorted_score):
        number_migrations = 0
        for mig_instance, __ in sorted_instances:
            # skip exclude instance when migrating
            if mig_instance.watcher_exclude:
                LOG.debug("Instance is excluded by scope, "
                          "skipped: %s", mig_instance.uuid)
                continue
            # Try destinations in decreasing score order (best fit first).
            for node_uuid, __ in sorted_score:
                mig_source_node = self.compute_model.get_node_by_uuid(
                    node_to_release)
                mig_destination_node = self.compute_model.get_node_by_uuid(
                    node_uuid)

                result = self.check_migration(
                    mig_source_node, mig_destination_node, mig_instance)
                if result:
                    self.create_migration_instance(
                        mig_instance, mig_source_node, mig_destination_node)
                    number_migrations += 1
                    break
        return number_migrations

    def unsuccessful_migration_actualization(self, number_migrations,
                                             unsuccessful_migration):
        # Reset the failure counter on progress, otherwise increment it.
        if number_migrations > 0:
            self.number_of_migrations += number_migrations
            return 0
        else:
            return unsuccessful_migration + 1

    def pre_execute(self):
        self._pre_execute()
        # backwards compatibility for node parameter.
        if self.aggregation_method['node'] != '':
            LOG.warning('Parameter node has been renamed to compute_node and '
                        'will be removed in next release.')
            self.aggregation_method['compute_node'] = \
                self.aggregation_method['node']

    def do_execute(self, audit=None):
        unsuccessful_migration = 0

        scores = self.compute_score_of_nodes()
        # Sort compute nodes by Score decreasing
        sorted_scores = sorted(scores, reverse=True, key=lambda x: (x[1]))
        LOG.debug("Compute node(s) BFD %s", sorted_scores)
        # Get Node to be released
        if len(scores) == 0:
            LOG.warning(
                "The workloads of the compute nodes"
                " of the cluster is zero")
            return

        # migration_attempts == 0 means unlimited retries.
        while sorted_scores and (
                not self.migration_attempts or
                self.migration_attempts >= unsuccessful_migration):
            node_to_release, instance_score = self.node_and_instance_score(
                sorted_scores)

            # Sort instances by Score
            sorted_instances = sorted(
                instance_score, reverse=True, key=lambda x: (x[1]))
            # BFD: Best Fit Decrease
            LOG.debug("Instance(s) BFD %s", sorted_instances)

            migrations = self.calculate_num_migrations(
                sorted_instances, node_to_release, sorted_scores)

            unsuccessful_migration = self.unsuccessful_migration_actualization(
                migrations, unsuccessful_migration)

            if not migrations:
                # We don't have any possible migrations to perform on this node
                # so we discard the node so we can try to migrate instances
                # from the next one in the list
                sorted_scores.pop()

        infos = {
            "compute_nodes_count": self.number_of_enabled_nodes,
            "released_compute_nodes_count": self.number_of_released_nodes,
            "instance_migrations_count": self.number_of_migrations,
            "efficacy": self.efficacy
        }
        LOG.debug(infos)

    def post_execute(self):
        self.solution.set_efficacy_indicators(
            compute_nodes_count=self.number_of_enabled_nodes,
            released_compute_nodes_count=self.number_of_released_nodes,
            instance_migrations_count=self.number_of_migrations,
        )
        LOG.debug(self.compute_model.to_string())
python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/vm_workload_consolidation.py0000664000175000017500000005522013656752270033150 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*-
#
# Authors: Vojtech CIMA
#          Bruno GRAZIOLI
#          Sean MURPHY
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from oslo_log import log
import six

from watcher._i18n import _
from watcher.common import exception
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base

LOG = log.getLogger(__name__)


class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
    """VM Workload Consolidation Strategy

    A load consolidation strategy based on heuristic first-fit
    algorithm which focuses on measured CPU utilization and tries to
    minimize hosts which have too much or too little load respecting
    resource capacity constraints.

    This strategy produces a solution resulting in more efficient
    utilization of cluster resources using following four phases:

    * Offload phase - handling over-utilized resources
    * Consolidation phase - handling under-utilized resources
    * Solution optimization - reducing number of migrations
    * Disability of unused compute nodes

    A capacity coefficients (cc) might be used to adjust optimization
    thresholds. Different resources may require different coefficient
    values as well as setting up different coefficient values in both
    phases may lead to more efficient consolidation in the end.
    If the cc equals 1 the full resource capacity may be used, cc
    values lower than 1 will lead to resource under utilization and
    values higher than 1 will lead to resource overbooking.
    e.g. If targeted utilization is 80 percent of a compute node
    capacity, the coefficient in the consolidation phase will be 0.8,
    but may any lower value in the offloading phase. The lower it gets
    the cluster will appear more released (distributed) for the
    following consolidation phase.

    As this strategy leverages VM live migration to move the load
    from one compute node to another, this feature needs to be set up
    correctly on all compute nodes within the cluster.
    This strategy assumes it is possible to live migrate any VM from
    an active compute node to any other active compute node.
    """

    # Aggregation function applied to datasource timeseries.
    AGGREGATE = 'mean'

    # Metrics this strategy requires from the configured datasource backend.
    DATASOURCE_METRICS = ['instance_ram_allocated', 'instance_cpu_usage',
                          'instance_ram_usage', 'instance_root_disk_size']

    MIGRATION = "migrate"
    CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state"

    def __init__(self, config, osc=None):
        super(VMWorkloadConsolidation, self).__init__(config, osc)
        self.number_of_migrations = 0
        self.number_of_released_nodes = 0
        # Per-instance utilization cache keyed by instance uuid; avoids
        # re-querying the datasource for the same instance.
        self.datasource_instance_data_cache = dict()

    @classmethod
    def get_name(cls):
        return "vm_workload_consolidation"

    @classmethod
    def get_display_name(cls):
        return _("VM Workload Consolidation Strategy")

    @classmethod
    def get_translatable_display_name(cls):
        return "VM Workload Consolidation Strategy"

    @property
    def period(self):
        return self.input_parameters.get('period', 3600)

    @property
    def granularity(self):
        return self.input_parameters.get('granularity', 300)

    @classmethod
    def get_schema(cls):
        # Mandatory default setting for each element
        return {
            "properties": {
                "period": {
                    "description": "The time interval in seconds for "
                                   "getting statistic aggregation",
                    "type": "number",
                    "default": 3600
                },
                "granularity": {
                    "description": "The time between two measures in an "
                                   "aggregated timeseries of a metric.",
                    "type": "number",
                    "default": 300
                },
            }
        }

    def get_available_compute_nodes(self):
        # ENABLED and DISABLED nodes are both in scope while ONLINE.
        default_node_scope = [element.ServiceState.ENABLED.value,
                              element.ServiceState.DISABLED.value]
        nodes = self.compute_model.get_all_compute_nodes().items()
        return {uuid: cn for uuid, cn in nodes
                if cn.state == element.ServiceState.ONLINE.value and
                cn.status in default_node_scope}

    def get_instance_state_str(self, instance):
        """Get instance state in string format.

        :param instance:
        """
        if isinstance(instance.state, six.string_types):
            return instance.state
        elif isinstance(instance.state, element.InstanceState):
            return instance.state.value
        else:
            LOG.error('Unexpected instance state type, '
                      'state=%(state)s, state_type=%(st)s.',
                      dict(state=instance.state,
                           st=type(instance.state)))
            raise exception.WatcherException

    def get_node_status_str(self, node):
        """Get node status in string format.

        :param node:
        """
        if isinstance(node.status, six.string_types):
            return node.status
        elif isinstance(node.status, element.ServiceState):
            return node.status.value
        else:
            LOG.error('Unexpected node status type, '
                      'status=%(status)s, status_type=%(st)s.',
                      dict(status=node.status,
                           st=type(node.status)))
            raise exception.WatcherException

    def add_action_enable_compute_node(self, node):
        """Add an action for node enabler into the solution.

        :param node: node object
        :return: None
        """
        params = {'state': element.ServiceState.ENABLED.value,
                  'resource_name': node.hostname}
        self.solution.add_action(
            action_type=self.CHANGE_NOVA_SERVICE_STATE,
            resource_id=node.uuid,
            input_parameters=params)
        # Re-enabling a node undoes one "release"; see add_action_disable_node.
        self.number_of_released_nodes -= 1

    def add_action_disable_node(self, node):
        """Add an action for node disability into the solution.

        :param node: node object
        :return: None
        """
        params = {'state': element.ServiceState.DISABLED.value,
                  'disabled_reason': self.REASON_FOR_DISABLE,
                  'resource_name': node.hostname}
        self.solution.add_action(
            action_type=self.CHANGE_NOVA_SERVICE_STATE,
            resource_id=node.uuid,
            input_parameters=params)
        self.number_of_released_nodes += 1

    def add_migration(self, instance, source_node, destination_node):
        """Add an action for VM migration into the solution.

        :param instance: instance object
        :param source_node: node object
        :param destination_node: node object
        :return: None
        """
        instance_state_str = self.get_instance_state_str(instance)
        if instance_state_str not in (element.InstanceState.ACTIVE.value,
                                      element.InstanceState.PAUSED.value):
            # Watcher currently only supports live VM migration and block live
            # VM migration which both requires migrated VM to be active.
            # When supported, the cold migration may be used as a fallback
            # migration mechanism to move non active VMs.
            LOG.error(
                'Cannot live migrate: instance_uuid=%(instance_uuid)s, '
                'state=%(instance_state)s.', dict(
                    instance_uuid=instance.uuid,
                    instance_state=instance_state_str))
            return

        migration_type = 'live'

        # Here will makes repeated actions to enable the same compute node,
        # when migrating VMs to the destination node which is disabled.
        # Whether should we remove the same actions in the solution???
        destination_node_status_str = self.get_node_status_str(
            destination_node)
        if destination_node_status_str == element.ServiceState.DISABLED.value:
            self.add_action_enable_compute_node(destination_node)

        if self.compute_model.migrate_instance(
                instance, source_node, destination_node):
            self.add_action_migrate(
                instance,
                migration_type,
                source_node,
                destination_node)
            self.number_of_migrations += 1

    def disable_unused_nodes(self):
        """Generate actions for disabling unused nodes.

        :return: None
        """
        for node in self.get_available_compute_nodes().values():
            if (len(self.compute_model.get_node_instances(node)) == 0 and
                    node.status != element.ServiceState.DISABLED.value):
                self.add_action_disable_node(node)

    def get_instance_utilization(self, instance):
        """Collect cpu, ram and disk utilization statistics of a VM.

        :param instance: instance object
        :param aggr: string
        :return: dict(cpu(number of vcpus used), ram(MB used), disk(B used))
        """
        instance_cpu_util = None
        instance_ram_util = None
        instance_disk_util = None

        # Served from cache when this instance was already measured.
        if instance.uuid in self.datasource_instance_data_cache.keys():
            return self.datasource_instance_data_cache.get(instance.uuid)

        instance_cpu_util = self.datasource_backend.get_instance_cpu_usage(
            resource=instance, period=self.period,
            aggregate=self.AGGREGATE, granularity=self.granularity)
        instance_ram_util = self.datasource_backend.get_instance_ram_usage(
            resource=instance, period=self.period,
            aggregate=self.AGGREGATE, granularity=self.granularity)
        if not instance_ram_util:
            # Fall back to allocated RAM when measured usage is unavailable.
            instance_ram_util = (
                self.datasource_backend.get_instance_ram_allocated(
                    resource=instance, period=self.period,
                    aggregate=self.AGGREGATE, granularity=self.granularity))
        instance_disk_util = (
            self.datasource_backend.get_instance_root_disk_size(
                resource=instance, period=self.period,
                aggregate=self.AGGREGATE, granularity=self.granularity))

        if instance_cpu_util:
            total_cpu_utilization = (
                instance.vcpus * (instance_cpu_util / 100.0))
        else:
            # Pessimistic fallback: assume all flavor vcpus are in use.
            total_cpu_utilization = instance.vcpus

        if not instance_ram_util:
            instance_ram_util = instance.memory
            LOG.warning('No values returned by %s for memory.resident, '
                        'use instance flavor ram value', instance.uuid)

        if not instance_disk_util:
            instance_disk_util = instance.disk
            LOG.warning('No values returned by %s for disk.root.size, '
                        'use instance flavor disk value', instance.uuid)

        self.datasource_instance_data_cache[instance.uuid] = dict(
            cpu=total_cpu_utilization, ram=instance_ram_util,
            disk=instance_disk_util)
        return self.datasource_instance_data_cache.get(instance.uuid)

    def get_node_utilization(self, node):
        """Collect cpu, ram and disk utilization statistics of a node.

        :param node: node object
        :param aggr: string
        :return: dict(cpu(number of cores used), ram(MB used), disk(B used))
        """
        node_instances = self.compute_model.get_node_instances(node)
        node_ram_util = 0
        node_disk_util = 0
        node_cpu_util = 0
        # Node utilization is the sum over its instances' utilizations.
        for instance in node_instances:
            instance_util = self.get_instance_utilization(
                instance)
            node_cpu_util += instance_util['cpu']
            node_ram_util += instance_util['ram']
            node_disk_util += instance_util['disk']

        return dict(cpu=node_cpu_util, ram=node_ram_util,
                    disk=node_disk_util)

    def get_node_capacity(self, node):
        """Collect cpu, ram and disk capacity of a node.

        :param node: node object
        :return: dict(cpu(cores), ram(MB), disk(B))
        """
        return dict(cpu=node.vcpu_capacity,
                    ram=node.memory_mb_capacity,
                    disk=node.disk_gb_capacity)

    def get_relative_node_utilization(self, node):
        """Return relative node utilization.

        :param node: node object
        :return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>}
        """
        relative_node_utilization = {}
        util = self.get_node_utilization(node)
        cap = self.get_node_capacity(node)
        for k in util.keys():
            relative_node_utilization[k] = float(util[k]) / float(cap[k])
        return relative_node_utilization

    def get_relative_cluster_utilization(self):
        """Calculate relative cluster utilization (rcu).

        RCU is an average of relative utilizations (rhu) of active nodes.
:return: {'cpu': <0,1>, 'ram': <0,1>, 'disk': <0,1>} """ nodes = self.get_available_compute_nodes().values() rcu = {} counters = {} for node in nodes: node_status_str = self.get_node_status_str(node) if node_status_str == element.ServiceState.ENABLED.value: rhu = self.get_relative_node_utilization(node) for k in rhu.keys(): if k not in rcu: rcu[k] = 0 if k not in counters: counters[k] = 0 rcu[k] += rhu[k] counters[k] += 1 for k in rcu.keys(): rcu[k] /= counters[k] return rcu def is_overloaded(self, node, cc): """Indicate whether a node is overloaded. This considers provided resource capacity coefficients (cc). :param node: node object :param cc: dictionary containing resource capacity coefficients :return: [True, False] """ node_capacity = self.get_node_capacity(node) node_utilization = self.get_node_utilization( node) metrics = ['cpu'] for m in metrics: if node_utilization[m] > node_capacity[m] * cc[m]: return True return False def instance_fits(self, instance, node, cc): """Indicate whether is a node able to accommodate a VM. This considers provided resource capacity coefficients (cc). :param instance: :py:class:`~.element.Instance` :param node: node object :param cc: dictionary containing resource capacity coefficients :return: [True, False] """ node_capacity = self.get_node_capacity(node) node_utilization = self.get_node_utilization(node) instance_utilization = self.get_instance_utilization(instance) metrics = ['cpu', 'ram', 'disk'] for m in metrics: if (instance_utilization[m] + node_utilization[m] > node_capacity[m] * cc[m]): return False return True def optimize_solution(self): """Optimize solution. This is done by eliminating unnecessary or circular set of migrations which can be replaced by a more efficient solution. e.g.: * A->B, B->C => replace migrations A->B, B->C with a single migration A->C as both solution result in VM running on node C which can be achieved with one migration instead of two. 
* A->B, B->A => remove A->B and B->A as they do not result in a new VM placement. """ migrate_actions = ( a for a in self.solution.actions if a[ 'action_type'] == self.MIGRATION) instance_to_be_migrated = ( a['input_parameters']['resource_id'] for a in migrate_actions) instance_uuids = list(set(instance_to_be_migrated)) for instance_uuid in instance_uuids: actions = list( a for a in self.solution.actions if a[ 'input_parameters'][ 'resource_id'] == instance_uuid) if len(actions) > 1: src_name = actions[0]['input_parameters']['source_node'] dst_name = actions[-1]['input_parameters']['destination_node'] for a in actions: self.solution.actions.remove(a) self.number_of_migrations -= 1 src_node = self.compute_model.get_node_by_name(src_name) dst_node = self.compute_model.get_node_by_name(dst_name) instance = self.compute_model.get_instance_by_uuid( instance_uuid) if self.compute_model.migrate_instance( instance, dst_node, src_node): self.add_migration(instance, src_node, dst_node) def offload_phase(self, cc): """Perform offloading phase. This considers provided resource capacity coefficients. Offload phase performing first-fit based bin packing to offload overloaded nodes. This is done in a fashion of moving the least CPU utilized VM first as live migration these generally causes less troubles. This phase results in a cluster with no overloaded nodes. * This phase is be able to enable disabled nodes (if needed and any available) in the case of the resource capacity provided by active nodes is not able to accommodate all the load. As the offload phase is later followed by the consolidation phase, the node enabler in this phase doesn't necessarily results in more enabled nodes in the final solution. 
:param cc: dictionary containing resource capacity coefficients """ sorted_nodes = sorted( self.get_available_compute_nodes().values(), key=lambda x: self.get_node_utilization(x)['cpu']) for node in reversed(sorted_nodes): if self.is_overloaded(node, cc): for instance in sorted( self.compute_model.get_node_instances(node), key=lambda x: self.get_instance_utilization( x)['cpu'] ): # skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue for destination_node in reversed(sorted_nodes): if self.instance_fits( instance, destination_node, cc): self.add_migration(instance, node, destination_node) break if not self.is_overloaded(node, cc): break def consolidation_phase(self, cc): """Perform consolidation phase. This considers provided resource capacity coefficients. Consolidation phase performing first-fit based bin packing. First, nodes with the lowest cpu utilization are consolidated by moving their load to nodes with the highest cpu utilization which can accommodate the load. In this phase the most cpu utilized VMs are prioritized as their load is more difficult to accommodate in the system than less cpu utilized VMs which can be later used to fill smaller CPU capacity gaps. 
:param cc: dictionary containing resource capacity coefficients """ sorted_nodes = sorted( self.get_available_compute_nodes().values(), key=lambda x: self.get_node_utilization(x)['cpu']) asc = 0 for node in sorted_nodes: instances = sorted( self.compute_model.get_node_instances(node), key=lambda x: self.get_instance_utilization(x)['cpu']) for instance in reversed(instances): # skip exclude instance when migrating if instance.watcher_exclude: LOG.debug("Instance is excluded by scope, " "skipped: %s", instance.uuid) continue dsc = len(sorted_nodes) - 1 for destination_node in reversed(sorted_nodes): if asc >= dsc: break if self.instance_fits( instance, destination_node, cc): self.add_migration(instance, node, destination_node) break dsc -= 1 asc += 1 def pre_execute(self): self._pre_execute() def do_execute(self, audit=None): """Execute strategy. This strategy produces a solution resulting in more efficient utilization of cluster resources using following four phases: * Offload phase - handling over-utilized resources * Consolidation phase - handling under-utilized resources * Solution optimization - reducing number of migrations * Disability of unused nodes :param original_model: root_model object """ LOG.info('Executing Smart Strategy') rcu = self.get_relative_cluster_utilization() cc = {'cpu': 1.0, 'ram': 1.0, 'disk': 1.0} # Offloading phase self.offload_phase(cc) # Consolidation phase self.consolidation_phase(cc) # Optimize solution self.optimize_solution() # disable unused nodes self.disable_unused_nodes() rcu_after = self.get_relative_cluster_utilization() info = { "compute_nodes_count": len( self.get_available_compute_nodes()), 'number_of_migrations': self.number_of_migrations, 'number_of_released_nodes': self.number_of_released_nodes, 'relative_cluster_utilization_before': str(rcu), 'relative_cluster_utilization_after': str(rcu_after) } LOG.debug(info) def post_execute(self): self.solution.set_efficacy_indicators( compute_nodes_count=len( 
self.get_available_compute_nodes()), released_compute_nodes_count=self.number_of_released_nodes, instance_migrations_count=self.number_of_migrations, ) LOG.debug(self.compute_model.to_string()) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/base.py0000775000175000017500000004373413656752270026623 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Strategy ` is an algorithm implementation which is able to find a :ref:`Solution ` for a given :ref:`Goal `. There may be several potential strategies which are able to achieve the same :ref:`Goal `. This is why it is possible to configure which specific :ref:`Strategy ` should be used for each :ref:`Goal `. Some strategies may provide better optimization results but may take more time to find an optimal :ref:`Solution `. When a new :ref:`Goal ` is added to the Watcher configuration, at least one default associated :ref:`Strategy ` should be provided as well. :ref:`Some default implementations are provided `, but it is possible to :ref:`develop new implementations ` which are dynamically loaded by Watcher at launch time. 
""" import abc import six from oslo_config import cfg from oslo_log import log from oslo_utils import strutils from watcher.common import clients from watcher.common import context from watcher.common import exception from watcher.common.loader import loadable from watcher.common import utils from watcher.decision_engine.datasources import manager as ds_manager from watcher.decision_engine.loading import default as loading from watcher.decision_engine.model.collector import manager from watcher.decision_engine.solution import default from watcher.decision_engine.strategy.common import level LOG = log.getLogger(__name__) CONF = cfg.CONF class StrategyEndpoint(object): def __init__(self, messaging): self._messaging = messaging def _collect_metrics(self, strategy, datasource): metrics = [] if not datasource: return {'type': 'Metrics', 'state': metrics, 'mandatory': False, 'comment': ''} else: ds_metrics = datasource.list_metrics() if ds_metrics is None: raise exception.DataSourceNotAvailable( datasource=datasource.NAME) else: for metric in strategy.DATASOURCE_METRICS: original_metric_name = datasource.METRIC_MAP.get(metric) if original_metric_name in ds_metrics: metrics.append({original_metric_name: 'available'}) else: metrics.append({original_metric_name: 'not available'}) return {'type': 'Metrics', 'state': metrics, 'mandatory': False, 'comment': ''} def _get_datasource_status(self, strategy, datasource): if not datasource: state = "Datasource is not presented for this strategy" else: state = "%s: %s" % (datasource.NAME, datasource.check_availability()) return {'type': 'Datasource', 'state': state, 'mandatory': True, 'comment': ''} def _get_cdm(self, strategy): models = [] for model in ['compute_model', 'storage_model', 'baremetal_model']: try: getattr(strategy, model) except Exception: models.append({model: 'not available'}) else: models.append({model: 'available'}) return {'type': 'CDM', 'state': models, 'mandatory': True, 'comment': ''} def 
get_strategy_info(self, context, strategy_name): strategy = loading.DefaultStrategyLoader().load(strategy_name) try: is_datasources = getattr(strategy.config, 'datasources', None) if is_datasources: datasource = getattr(strategy, 'datasource_backend') else: datasource = getattr(strategy, strategy.config.datasource) except (AttributeError, IndexError): datasource = [] available_datasource = self._get_datasource_status(strategy, datasource) available_metrics = self._collect_metrics(strategy, datasource) available_cdm = self._get_cdm(strategy) return [available_datasource, available_metrics, available_cdm] @six.add_metaclass(abc.ABCMeta) class BaseStrategy(loadable.Loadable): """A base class for all the strategies A Strategy is an algorithm implementation which is able to find a Solution for a given Goal. """ DATASOURCE_METRICS = [] """Contains all metrics the strategy requires from a datasource to properly execute""" MIGRATION = "migrate" def __init__(self, config, osc=None): """Constructor: the signature should be identical within the subclasses :param config: Configuration related to this plugin :type config: :py:class:`~.Struct` :param osc: An OpenStackClients instance :type osc: :py:class:`~.OpenStackClients` instance """ super(BaseStrategy, self).__init__(config) self.ctx = context.make_context() self._name = self.get_name() self._display_name = self.get_display_name() self._goal = self.get_goal() # default strategy level self._strategy_level = level.StrategyLevel.conservative self._cluster_state_collector = None # the solution given by the strategy self._solution = default.DefaultSolution(goal=self.goal, strategy=self) self._osc = osc self._collector_manager = None self._compute_model = None self._storage_model = None self._baremetal_model = None self._input_parameters = utils.Struct() self._audit_scope = None self._datasource_backend = None self._planner = 'weight' @classmethod @abc.abstractmethod def get_name(cls): """The name of the strategy""" raise 
NotImplementedError() @classmethod @abc.abstractmethod def get_display_name(cls): """The goal display name for the strategy""" raise NotImplementedError() @classmethod @abc.abstractmethod def get_translatable_display_name(cls): """The translatable msgid of the strategy""" # Note(v-francoise): Defined here to be used as the translation key for # other services raise NotImplementedError() @classmethod @abc.abstractmethod def get_goal_name(cls): """The goal name the strategy achieves""" raise NotImplementedError() @classmethod def get_goal(cls): """The goal the strategy achieves""" goal_loader = loading.DefaultGoalLoader() return goal_loader.load(cls.get_goal_name()) @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ datasources_ops = list(ds_manager.DataSourceManager.metric_map.keys()) return [ cfg.ListOpt( "datasources", help="Datasources to use in order to query the needed metrics." " This option overrides the global preference." " options: {0}".format(datasources_ops), item_type=cfg.types.String(choices=datasources_ops), default=None) ] @abc.abstractmethod def pre_execute(self): """Pre-execution phase This can be used to fetch some pre-requisites or data. """ raise NotImplementedError() @abc.abstractmethod def do_execute(self, audit=None): """Strategy execution phase :param audit: An Audit instance :type audit: :py:class:`~.Audit` instance This phase is where you should put the main logic of your strategy. """ raise NotImplementedError() @abc.abstractmethod def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ raise NotImplementedError() def _pre_execute(self): """Base Pre-execution phase This will perform basic pre execution operations most strategies should perform. 
""" LOG.info("Initializing " + self.get_display_name()) if not self.compute_model: raise exception.ClusterStateNotDefined() if self.compute_model.stale: raise exception.ClusterStateStale() LOG.debug(self.compute_model.to_string()) def execute(self, audit=None): """Execute a strategy :param audit: An Audit instance :type audit: :py:class:`~.Audit` instance :return: A computed solution (via a placement algorithm) :rtype: :py:class:`~.BaseSolution` instance """ self.pre_execute() self.do_execute(audit=audit) self.post_execute() self.solution.compute_global_efficacy() return self.solution @property def collector_manager(self): if self._collector_manager is None: self._collector_manager = manager.CollectorManager() return self._collector_manager @property def compute_model(self): """Cluster data model :returns: Cluster data model the strategy is executed on :rtype model: :py:class:`~.ModelRoot` instance """ if self._compute_model is None: collector = self.collector_manager.get_cluster_model_collector( 'compute', osc=self.osc) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=self.audit_scope) self._compute_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not self._compute_model: raise exception.ClusterStateNotDefined() if self._compute_model.stale: raise exception.ClusterStateStale() return self._compute_model @property def storage_model(self): """Cluster data model :returns: Cluster data model the strategy is executed on :rtype model: :py:class:`~.ModelRoot` instance """ if self._storage_model is None: collector = self.collector_manager.get_cluster_model_collector( 'storage', osc=self.osc) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=self.audit_scope) self._storage_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not self._storage_model: raise exception.ClusterStateNotDefined() if self._storage_model.stale: raise exception.ClusterStateStale() 
return self._storage_model @property def baremetal_model(self): """Cluster data model :returns: Cluster data model the strategy is executed on :rtype model: :py:class:`~.ModelRoot` instance """ if self._baremetal_model is None: collector = self.collector_manager.get_cluster_model_collector( 'baremetal', osc=self.osc) audit_scope_handler = collector.get_audit_scope_handler( audit_scope=self.audit_scope) self._baremetal_model = audit_scope_handler.get_scoped_model( collector.get_latest_cluster_data_model()) if not self._baremetal_model: raise exception.ClusterStateNotDefined() if self._baremetal_model.stale: raise exception.ClusterStateStale() return self._baremetal_model @classmethod def get_schema(cls): """Defines a Schema that the input parameters shall comply to :return: A jsonschema format (mandatory default setting) :rtype: dict """ return {} @property def datasource_backend(self): if not self._datasource_backend: # Load the global preferred datasources order but override it # if the strategy has a specific datasources config datasources = CONF.watcher_datasources if self.config.datasources: datasources = self.config self._datasource_backend = ds_manager.DataSourceManager( config=datasources, osc=self.osc ).get_backend(self.DATASOURCE_METRICS) return self._datasource_backend @property def input_parameters(self): return self._input_parameters @input_parameters.setter def input_parameters(self, p): self._input_parameters = p @property def osc(self): if not self._osc: self._osc = clients.OpenStackClients() return self._osc @property def solution(self): return self._solution @solution.setter def solution(self, s): self._solution = s @property def audit_scope(self): return self._audit_scope @audit_scope.setter def audit_scope(self, s): self._audit_scope = s @property def name(self): return self._name @property def display_name(self): return self._display_name @property def goal(self): return self._goal @property def strategy_level(self): return self._strategy_level 
@strategy_level.setter def strategy_level(self, s): self._strategy_level = s @property def state_collector(self): return self._cluster_state_collector @state_collector.setter def state_collector(self, s): self._cluster_state_collector = s @property def planner(self): return self._planner @planner.setter def planner(self, s): self._planner = s def filter_instances_by_audit_tag(self, instances): if not self.config.check_optimize_metadata: return instances instances_to_migrate = [] for instance in instances: optimize = True if instance.metadata: try: optimize = strutils.bool_from_string( instance.metadata.get('optimize')) except ValueError: optimize = False if optimize: instances_to_migrate.append(instance) return instances_to_migrate def add_action_migrate(self, instance, migration_type, source_node, destination_node): parameters = {'migration_type': migration_type, 'source_node': source_node.hostname, 'destination_node': destination_node.hostname, 'resource_name': instance.name} self.solution.add_action(action_type=self.MIGRATION, resource_id=instance.uuid, input_parameters=parameters) @six.add_metaclass(abc.ABCMeta) class DummyBaseStrategy(BaseStrategy): @classmethod def get_goal_name(cls): return "dummy" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] @six.add_metaclass(abc.ABCMeta) class UnclassifiedStrategy(BaseStrategy): """This base class is used to ease the development of new strategies The goal defined within this strategy can be used to simplify the documentation explaining how to implement a new strategy plugin by omitting the need for the strategy developer to define a goal straight away. 
""" @classmethod def get_goal_name(cls): return "unclassified" @six.add_metaclass(abc.ABCMeta) class ServerConsolidationBaseStrategy(BaseStrategy): REASON_FOR_DISABLE = 'watcher_disabled' @classmethod def get_goal_name(cls): return "server_consolidation" @six.add_metaclass(abc.ABCMeta) class ThermalOptimizationBaseStrategy(BaseStrategy): @classmethod def get_goal_name(cls): return "thermal_optimization" @six.add_metaclass(abc.ABCMeta) class WorkloadStabilizationBaseStrategy(BaseStrategy): def __init__(self, *args, **kwargs): super(WorkloadStabilizationBaseStrategy, self ).__init__(*args, **kwargs) self._planner = 'workload_stabilization' @classmethod def get_goal_name(cls): return "workload_balancing" @six.add_metaclass(abc.ABCMeta) class NoisyNeighborBaseStrategy(BaseStrategy): @classmethod def get_goal_name(cls): return "noisy_neighbor" @six.add_metaclass(abc.ABCMeta) class SavingEnergyBaseStrategy(BaseStrategy): @classmethod def get_goal_name(cls): return "saving_energy" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] @six.add_metaclass(abc.ABCMeta) class ZoneMigrationBaseStrategy(BaseStrategy): @classmethod def get_goal_name(cls): return "hardware_maintenance" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] @six.add_metaclass(abc.ABCMeta) class HostMaintenanceBaseStrategy(BaseStrategy): REASON_FOR_MAINTAINING = 'watcher_maintaining' @classmethod def get_goal_name(cls): return "cluster_maintaining" @classmethod def get_config_opts(cls): """Override base class config options as do not use datasource """ return [] python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/node_resource_consolidation.py0000664000175000017500000002603413656752270033461 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use 
this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.model import element from watcher.decision_engine.strategy.strategies import base from watcher import objects LOG = log.getLogger(__name__) class NodeResourceConsolidation(base.ServerConsolidationBaseStrategy): """consolidating resources on nodes using server migration *Description* This strategy checks the resource usages of compute nodes, if the used resources are less than total, it will try to migrate server to consolidate the use of resource. *Requirements* * You must have at least 2 compute nodes to run this strategy. 
* Hardware: compute nodes should use the same physical CPUs/RAMs *Limitations* * This is a proof of concept that is not meant to be used in production * It assume that live migrations are possible *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/train/implemented/node-resource-consolidation.html """ CHANGE_NOVA_SERVICE_STATE = "change_nova_service_state" REASON_FOR_DISABLE = 'Watcher node resource consolidation strategy' def __init__(self, config, osc=None): """node resource consolidation :param config: A mapping containing the configuration of this strategy :type config: :py:class:`~.Struct` instance :param osc: :py:class:`~.OpenStackClients` instance """ super(NodeResourceConsolidation, self).__init__(config, osc) self.host_choice = 'auto' self.audit = None self.compute_nodes_count = 0 self.number_of_released_nodes = 0 self.number_of_migrations = 0 @classmethod def get_name(cls): return "node_resource_consolidation" @classmethod def get_display_name(cls): return _("Node Resource Consolidation strategy") @classmethod def get_translatable_display_name(cls): return "Node Resource Consolidation strategy" @classmethod def get_schema(cls): # Mandatory default setting for each element return { "properties": { "host_choice": { "description": "The way to select the server migration " "destination node. 
The value 'auto' " "means that Nova scheduler selects " "the destination node, and 'specify' " "means the strategy specifies the " "destination.", "type": "string", "default": 'auto' }, }, } def check_resources(self, servers, destination): # check whether a node able to accommodate a VM dest_flag = False if not destination: return dest_flag free_res = self.compute_model.get_node_free_resources(destination) for server in servers: # just vcpu and memory, do not consider disk if free_res['vcpu'] >= server.vcpus and ( free_res['memory'] >= server.memory): free_res['vcpu'] -= server.vcpus free_res['memory'] -= server.memory dest_flag = True servers.remove(server) return dest_flag def select_destination(self, server, source, destinations): dest_node = None if not destinations: return dest_node sorted_nodes = sorted( destinations, key=lambda x: self.compute_model.get_node_free_resources( x)['vcpu']) for dest in sorted_nodes: if self.check_resources([server], dest): if self.compute_model.migrate_instance(server, source, dest): dest_node = dest break return dest_node def add_migrate_actions(self, sources, destinations): if not sources or not destinations: return for node in sources: servers = self.compute_model.get_node_instances(node) sorted_servers = sorted( servers, key=lambda x: x.vcpus, reverse=True) for server in sorted_servers: parameters = {'migration_type': 'live', 'source_node': node.hostname, 'resource_name': server.name} action_flag = False if self.host_choice != 'auto': # specify destination host dest = self.select_destination(server, node, destinations) if dest: parameters['destination_node'] = dest.hostname action_flag = True else: action_flag = True if action_flag: self.number_of_migrations += 1 self.solution.add_action( action_type=self.MIGRATION, resource_id=server.uuid, input_parameters=parameters) def add_change_node_state_actions(self, nodes, status): if status not in (element.ServiceState.DISABLED.value, element.ServiceState.ENABLED.value): raise 
exception.IllegalArgumentException( message=_("The node status is not defined")) changed_nodes = [] for node in nodes: if node.status != status: parameters = {'state': status, 'resource_name': node.hostname} if status == element.ServiceState.DISABLED.value: parameters['disabled_reason'] = self.REASON_FOR_DISABLE self.solution.add_action( action_type=self.CHANGE_NOVA_SERVICE_STATE, resource_id=node.uuid, input_parameters=parameters) node.status = status changed_nodes.append(node) return changed_nodes def get_nodes_migrate_failed(self): # check if migration action ever failed # just for continuous audit nodes_failed = [] if self.audit is None or ( self.audit.audit_type == objects.audit.AuditType.ONESHOT.value): return nodes_failed filters = {'audit_uuid': self.audit.uuid} actions = objects.action.Action.list( self.ctx, filters=filters) for action in actions: if action.state == objects.action.State.FAILED and ( action.action_type == self.MIGRATION): server_uuid = action.input_parameters.get('resource_id') node = self.compute_model.get_node_by_instance_uuid( server_uuid) if node not in nodes_failed: nodes_failed.append(node) return nodes_failed def group_nodes(self, nodes): free_nodes = [] source_nodes = [] dest_nodes = [] nodes_failed = self.get_nodes_migrate_failed() LOG.info("nodes: %s migration failed", nodes_failed) sorted_nodes = sorted( nodes, key=lambda x: self.compute_model.get_node_used_resources( x)['vcpu']) for node in sorted_nodes: if node in dest_nodes: break # If ever migration failed, do not migrate again if node in nodes_failed: # maybe can as the destination node if node.status == element.ServiceState.ENABLED.value: dest_nodes.append(node) continue used_resource = self.compute_model.get_node_used_resources(node) if used_resource['vcpu'] > 0: servers = self.compute_model.get_node_instances(node) for dest in reversed(sorted_nodes): # skip if compute node is disabled if dest.status == element.ServiceState.DISABLED.value: LOG.info("node %s is down", 
dest.hostname) continue if dest in dest_nodes: continue if node == dest: # The last on as destination node dest_nodes.append(dest) break if self.check_resources(servers, dest): dest_nodes.append(dest) if node not in source_nodes: source_nodes.append(node) if not servers: break else: free_nodes.append(node) return free_nodes, source_nodes, dest_nodes def pre_execute(self): self._pre_execute() self.host_choice = self.input_parameters.get('host_choice', 'auto') self.planner = 'node_resource_consolidation' def do_execute(self, audit=None): """Strategy execution phase Executing strategy and creating solution. """ self.audit = audit nodes = list(self.compute_model.get_all_compute_nodes().values()) free_nodes, source_nodes, dest_nodes = self.group_nodes(nodes) self.compute_nodes_count = len(nodes) self.number_of_released_nodes = len(source_nodes) LOG.info("Free nodes: %s", free_nodes) LOG.info("Source nodes: %s", source_nodes) LOG.info("Destination nodes: %s", dest_nodes) if not source_nodes: LOG.info("No compute node needs to be consolidated") return nodes_disabled = [] if self.host_choice == 'auto': # disable compute node to avoid to be select by Nova scheduler nodes_disabled = self.add_change_node_state_actions( free_nodes+source_nodes, element.ServiceState.DISABLED.value) self.add_migrate_actions(source_nodes, dest_nodes) if nodes_disabled: # restore disabled compute node after migration self.add_change_node_state_actions( nodes_disabled, element.ServiceState.ENABLED.value) def post_execute(self): """Post-execution phase """ self.solution.set_efficacy_indicators( compute_nodes_count=self.compute_nodes_count, released_compute_nodes_count=self.number_of_released_nodes, instance_migrations_count=self.number_of_migrations, ) python-watcher-4.0.0/watcher/decision_engine/strategy/strategies/saving_energy.py0000664000175000017500000002103113656752270030530 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corporation # # Authors: licanwei # # 
Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import random from oslo_log import log from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.strategy.strategies import base LOG = log.getLogger(__name__) class SavingEnergy(base.SavingEnergyBaseStrategy): """Saving Energy Strategy *Description* Saving Energy Strategy together with VM Workload Consolidation Strategy can perform the Dynamic Power Management (DPM) functionality, which tries to save power by dynamically consolidating workloads even further during periods of low resource utilization. Virtual machines are migrated onto fewer hosts and the unneeded hosts are powered off. After consolidation, Saving Energy Strategy produces a solution of powering off/on according to the following detailed policy: In this policy, a preset number(min_free_hosts_num) is given by user, and this min_free_hosts_num describes minimum free compute nodes that users expect to have, where "free compute nodes" refers to those nodes unused but still powered on. If the actual number of unused nodes(in power-on state) is larger than the given number, randomly select the redundant nodes and power off them; If the actual number of unused nodes(in poweron state) is smaller than the given number and there are spare unused nodes(in poweroff state), randomly select some nodes(unused,poweroff) and power on them. 
*Requirements* In this policy, in order to calculate the min_free_hosts_num, users must provide two parameters: * One parameter("min_free_hosts_num") is a constant int number. This number should be int type and larger than zero. * The other parameter("free_used_percent") is a percentage number, which describes the quotient of min_free_hosts_num/nodes_with_VMs_num, where nodes_with_VMs_num is the number of nodes with VMs running on it. This parameter is used to calculate a dynamic min_free_hosts_num. The nodes with VMs refer to those nodes with VMs running on it. Then choose the larger one as the final min_free_hosts_num. *Limitations* * at least 2 physical compute hosts *Spec URL* http://specs.openstack.org/openstack/watcher-specs/specs/pike/implemented/energy-saving-strategy.html """ def __init__(self, config, osc=None): super(SavingEnergy, self).__init__(config, osc) self._ironic_client = None self._nova_client = None self.with_vms_node_pool = [] self.free_poweron_node_pool = [] self.free_poweroff_node_pool = [] self.free_used_percent = 0 self.min_free_hosts_num = 1 @property def ironic_client(self): if not self._ironic_client: self._ironic_client = self.osc.ironic() return self._ironic_client @property def nova_client(self): if not self._nova_client: self._nova_client = self.osc.nova() return self._nova_client @classmethod def get_name(cls): return "saving_energy" @classmethod def get_display_name(cls): return _("Saving Energy Strategy") @classmethod def get_translatable_display_name(cls): return "Saving Energy Strategy" @classmethod def get_schema(cls): """return a schema of two input parameters The standby nodes refer to those nodes unused but still poweredon to deal with boom of new instances. 
""" return { "properties": { "free_used_percent": { "description": ("a rational number, which describes the" " quotient of" " min_free_hosts_num/nodes_with_VMs_num" " where nodes_with_VMs_num is the number" " of nodes with VMs"), "type": "number", "default": 10.0 }, "min_free_hosts_num": { "description": ("minimum number of hosts without VMs" " but still powered on"), "type": "number", "default": 1 }, }, } def add_action_poweronoff_node(self, node, state): """Add an action for node disability into the solution. :param node: node :param state: node power state, power on or power off :return: None """ params = {'state': state, 'resource_name': node.hostname} self.solution.add_action( action_type='change_node_power_state', resource_id=node.uuid, input_parameters=params) def get_hosts_pool(self): """Get three pools, with_vms_node_pool, free_poweron_node_pool, free_poweroff_node_pool. """ node_list = self.ironic_client.node.list() for node in node_list: node_info = self.ironic_client.node.get(node.uuid) hypervisor_id = node_info.extra.get('compute_node_id', None) if hypervisor_id is None: LOG.warning(('Cannot find compute_node_id in extra ' 'of ironic node %s'), node.uuid) continue hypervisor_node = self.nova_client.hypervisors.get(hypervisor_id) if hypervisor_node is None: LOG.warning(('Cannot find hypervisor %s'), hypervisor_id) continue node.hostname = hypervisor_node.hypervisor_hostname hypervisor_node = hypervisor_node.to_dict() compute_service = hypervisor_node.get('service', None) host_name = compute_service.get('host') try: self.compute_model.get_node_by_name(host_name) except exception.ComputeNodeNotFound: continue if not (hypervisor_node.get('state') == 'up'): """filter nodes that are not in 'up' state""" continue else: if (hypervisor_node['running_vms'] == 0): if (node_info.power_state == 'power on'): self.free_poweron_node_pool.append(node) elif (node_info.power_state == 'power off'): self.free_poweroff_node_pool.append(node) else: 
self.with_vms_node_pool.append(node) def save_energy(self): need_poweron = max( (len(self.with_vms_node_pool) * self.free_used_percent / 100), ( self.min_free_hosts_num)) len_poweron = len(self.free_poweron_node_pool) len_poweroff = len(self.free_poweroff_node_pool) if len_poweron > need_poweron: for node in random.sample(self.free_poweron_node_pool, (len_poweron - need_poweron)): self.add_action_poweronoff_node(node, 'off') LOG.debug("power off %s", node.uuid) elif len_poweron < need_poweron: diff = need_poweron - len_poweron for node in random.sample(self.free_poweroff_node_pool, min(len_poweroff, diff)): self.add_action_poweronoff_node(node, 'on') LOG.debug("power on %s", node.uuid) def pre_execute(self): self._pre_execute() self.free_used_percent = self.input_parameters.free_used_percent self.min_free_hosts_num = self.input_parameters.min_free_hosts_num def do_execute(self, audit=None): """Strategy execution phase This phase is where you should put the main logic of your strategy. """ self.get_hosts_pool() self.save_energy() def post_execute(self): """Post-execution phase This can be used to compute the global efficacy """ self.solution.model = self.compute_model LOG.debug(self.compute_model.to_string()) python-watcher-4.0.0/watcher/decision_engine/sync.py0000664000175000017500000006135013656752270022640 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import ast import collections from oslo_log import log from watcher.common import context from watcher.decision_engine.loading import default from watcher.decision_engine.scoring import scoring_factory from watcher import objects LOG = log.getLogger(__name__) GoalMapping = collections.namedtuple( 'GoalMapping', ['name', 'display_name', 'efficacy_specification']) StrategyMapping = collections.namedtuple( 'StrategyMapping', ['name', 'goal_name', 'display_name', 'parameters_spec']) ScoringEngineMapping = collections.namedtuple( 'ScoringEngineMapping', ['name', 'description', 'metainfo']) IndicatorSpec = collections.namedtuple( 'IndicatorSpec', ['name', 'description', 'unit', 'schema']) class Syncer(object): """Syncs all available goals and strategies with the Watcher DB""" def __init__(self): self.ctx = context.make_context() self.discovered_map = None self._available_goals = None self._available_goals_map = None self._available_strategies = None self._available_strategies_map = None self._available_scoringengines = None self._available_scoringengines_map = None # This goal mapping maps stale goal IDs to the synced goal self.goal_mapping = dict() # This strategy mapping maps stale strategy IDs to the synced goal self.strategy_mapping = dict() # Maps stale scoring engine IDs to the synced scoring engines self.se_mapping = dict() self.stale_audit_templates_map = {} self.stale_audits_map = {} self.stale_action_plans_map = {} @property def available_goals(self): """Goals loaded from DB""" if self._available_goals is None: self._available_goals = objects.Goal.list(self.ctx) return self._available_goals @property def available_strategies(self): """Strategies loaded from DB""" if self._available_strategies is None: self._available_strategies = objects.Strategy.list(self.ctx) goal_ids = [g.id for g in self.available_goals] stale_strategies = [s for s in self._available_strategies if s.goal_id not in goal_ids] for s in stale_strategies: LOG.info("Can't find Goal id %d of 
strategy %s", s.goal_id, s.name) s.soft_delete() self._available_strategies.remove(s) return self._available_strategies @property def available_scoringengines(self): """Scoring Engines loaded from DB""" if self._available_scoringengines is None: self._available_scoringengines = (objects.ScoringEngine .list(self.ctx)) return self._available_scoringengines @property def available_goals_map(self): """Mapping of goals loaded from DB""" if self._available_goals_map is None: self._available_goals_map = { GoalMapping( name=g.name, display_name=g.display_name, efficacy_specification=tuple( IndicatorSpec(**item) for item in g.efficacy_specification)): g for g in self.available_goals } return self._available_goals_map @property def available_strategies_map(self): if self._available_strategies_map is None: goals_map = {g.id: g.name for g in self.available_goals} self._available_strategies_map = { StrategyMapping( name=s.name, goal_name=goals_map[s.goal_id], display_name=s.display_name, parameters_spec=str(s.parameters_spec)): s for s in self.available_strategies } return self._available_strategies_map @property def available_scoringengines_map(self): if self._available_scoringengines_map is None: self._available_scoringengines_map = { ScoringEngineMapping( name=s.id, description=s.description, metainfo=s.metainfo): s for s in self.available_scoringengines } return self._available_scoringengines_map def sync(self): self.discovered_map = self._discover() goals_map = self.discovered_map["goals"] strategies_map = self.discovered_map["strategies"] scoringengines_map = self.discovered_map["scoringengines"] for goal_name, goal_map in goals_map.items(): if goal_map in self.available_goals_map: LOG.info("Goal %s already exists", goal_name) continue self.goal_mapping.update(self._sync_goal(goal_map)) for strategy_name, strategy_map in strategies_map.items(): if (strategy_map in self.available_strategies_map and strategy_map.goal_name not in [g.name for g in 
self.goal_mapping.values()]): LOG.info("Strategy %s already exists", strategy_name) continue self.strategy_mapping.update(self._sync_strategy(strategy_map)) for se_name, se_map in scoringengines_map.items(): if se_map in self.available_scoringengines_map: LOG.info("Scoring Engine %s already exists", se_name) continue self.se_mapping.update(self._sync_scoringengine(se_map)) self._sync_objects() self._soft_delete_removed_scoringengines() def _sync_goal(self, goal_map): goal_name = goal_map.name goal_mapping = dict() # Goals that are matching by name with the given discovered goal name matching_goals = [g for g in self.available_goals if g.name == goal_name] stale_goals = self._soft_delete_stale_goals(goal_map, matching_goals) if stale_goals or not matching_goals: goal = objects.Goal(self.ctx) goal.name = goal_name goal.display_name = goal_map.display_name goal.efficacy_specification = [ indicator._asdict() for indicator in goal_map.efficacy_specification] goal.create() LOG.info("Goal %s created", goal_name) # Updating the internal states self.available_goals_map[goal] = goal_map # Map the old goal IDs to the new (equivalent) goal for matching_goal in matching_goals: goal_mapping[matching_goal.id] = goal return goal_mapping def _sync_strategy(self, strategy_map): strategy_name = strategy_map.name strategy_display_name = strategy_map.display_name goal_name = strategy_map.goal_name parameters_spec = strategy_map.parameters_spec strategy_mapping = dict() # Strategies that are matching by name with the given # discovered strategy name matching_strategies = [s for s in self.available_strategies if s.name == strategy_name] stale_strategies = self._soft_delete_stale_strategies( strategy_map, matching_strategies) if stale_strategies or not matching_strategies: strategy = objects.Strategy(self.ctx) strategy.name = strategy_name strategy.display_name = strategy_display_name strategy.goal_id = objects.Goal.get_by_name(self.ctx, goal_name).id strategy.parameters_spec = 
parameters_spec strategy.create() LOG.info("Strategy %s created", strategy_name) # Updating the internal states self.available_strategies_map[strategy] = strategy_map # Map the old strategy IDs to the new (equivalent) strategy for matching_strategy in matching_strategies: strategy_mapping[matching_strategy.id] = strategy return strategy_mapping def _sync_scoringengine(self, scoringengine_map): scoringengine_name = scoringengine_map.name se_mapping = dict() # Scoring Engines matching by id with discovered Scoring engine matching_scoringengines = [se for se in self.available_scoringengines if se.name == scoringengine_name] stale_scoringengines = self._soft_delete_stale_scoringengines( scoringengine_map, matching_scoringengines) if stale_scoringengines or not matching_scoringengines: scoringengine = objects.ScoringEngine(self.ctx) scoringengine.name = scoringengine_name scoringengine.description = scoringengine_map.description scoringengine.metainfo = scoringengine_map.metainfo scoringengine.create() LOG.info("Scoring Engine %s created", scoringengine_name) # Updating the internal states self.available_scoringengines_map[scoringengine] = \ scoringengine_map # Map the old scoring engine names to the new (equivalent) SE for matching_scoringengine in matching_scoringengines: se_mapping[matching_scoringengine.name] = scoringengine return se_mapping def _sync_objects(self): # First we find audit templates, audits and action plans that are stale # because their associated goal or strategy has been modified and we # update them in-memory self._find_stale_audit_templates_due_to_goal() self._find_stale_audit_templates_due_to_strategy() self._find_stale_audits_due_to_goal() self._find_stale_audits_due_to_strategy() self._find_stale_action_plans_due_to_strategy() self._find_stale_action_plans_due_to_audit() # Then we handle the case where an audit template, an audit or an # action plan becomes stale because its related goal does not # exist anymore. 
self._soft_delete_removed_goals() # Then we handle the case where an audit template, an audit or an # action plan becomes stale because its related strategy does not # exist anymore. self._soft_delete_removed_strategies() # Finally, we save into the DB the updated stale audit templates # and soft delete stale audits and action plans for stale_audit_template in self.stale_audit_templates_map.values(): stale_audit_template.save() LOG.info("Audit Template '%s' synced", stale_audit_template.name) for stale_audit in self.stale_audits_map.values(): stale_audit.save() LOG.info("Stale audit '%s' synced and cancelled", stale_audit.uuid) for stale_action_plan in self.stale_action_plans_map.values(): stale_action_plan.save() LOG.info("Stale action plan '%s' synced and cancelled", stale_action_plan.uuid) def _find_stale_audit_templates_due_to_goal(self): for goal_id, synced_goal in self.goal_mapping.items(): filters = {"goal_id": goal_id} stale_audit_templates = objects.AuditTemplate.list( self.ctx, filters=filters) # Update the goal ID for the stale audit templates (w/o saving) for audit_template in stale_audit_templates: if audit_template.id not in self.stale_audit_templates_map: audit_template.goal_id = synced_goal.id self.stale_audit_templates_map[audit_template.id] = ( audit_template) else: self.stale_audit_templates_map[ audit_template.id].goal_id = synced_goal.id def _find_stale_audit_templates_due_to_strategy(self): for strategy_id, synced_strategy in self.strategy_mapping.items(): filters = {"strategy_id": strategy_id} stale_audit_templates = objects.AuditTemplate.list( self.ctx, filters=filters) # Update strategy IDs for all stale audit templates (w/o saving) for audit_template in stale_audit_templates: if audit_template.id not in self.stale_audit_templates_map: audit_template.strategy_id = synced_strategy.id self.stale_audit_templates_map[audit_template.id] = ( audit_template) else: self.stale_audit_templates_map[ audit_template.id].strategy_id = synced_strategy.id 
def _find_stale_audits_due_to_goal(self): for goal_id, synced_goal in self.goal_mapping.items(): filters = {"goal_id": goal_id} stale_audits = objects.Audit.list( self.ctx, filters=filters, eager=True) # Update the goal ID for the stale audits (w/o saving) for audit in stale_audits: if audit.id not in self.stale_audits_map: audit.goal_id = synced_goal.id self.stale_audits_map[audit.id] = audit else: self.stale_audits_map[audit.id].goal_id = synced_goal.id def _find_stale_audits_due_to_strategy(self): for strategy_id, synced_strategy in self.strategy_mapping.items(): filters = {"strategy_id": strategy_id} stale_audits = objects.Audit.list( self.ctx, filters=filters, eager=True) # Update strategy IDs for all stale audits (w/o saving) for audit in stale_audits: if audit.id not in self.stale_audits_map: audit.strategy_id = synced_strategy.id audit.state = objects.audit.State.CANCELLED self.stale_audits_map[audit.id] = audit else: self.stale_audits_map[ audit.id].strategy_id = synced_strategy.id self.stale_audits_map[ audit.id].state = objects.audit.State.CANCELLED def _find_stale_action_plans_due_to_strategy(self): for strategy_id, synced_strategy in self.strategy_mapping.items(): filters = {"strategy_id": strategy_id} stale_action_plans = objects.ActionPlan.list( self.ctx, filters=filters, eager=True) # Update strategy IDs for all stale action plans (w/o saving) for action_plan in stale_action_plans: if action_plan.id not in self.stale_action_plans_map: action_plan.strategy_id = synced_strategy.id action_plan.state = objects.action_plan.State.CANCELLED self.stale_action_plans_map[action_plan.id] = action_plan else: self.stale_action_plans_map[ action_plan.id].strategy_id = synced_strategy.id self.stale_action_plans_map[ action_plan.id].state = ( objects.action_plan.State.CANCELLED) def _find_stale_action_plans_due_to_audit(self): for audit_id, synced_audit in self.stale_audits_map.items(): filters = {"audit_id": audit_id} stale_action_plans = objects.ActionPlan.list( 
self.ctx, filters=filters, eager=True) # Update audit IDs for all stale action plans (w/o saving) for action_plan in stale_action_plans: if action_plan.id not in self.stale_action_plans_map: action_plan.audit_id = synced_audit.id action_plan.state = objects.action_plan.State.CANCELLED self.stale_action_plans_map[action_plan.id] = action_plan else: self.stale_action_plans_map[ action_plan.id].audit_id = synced_audit.id self.stale_action_plans_map[ action_plan.id].state = ( objects.action_plan.State.CANCELLED) def _soft_delete_removed_goals(self): removed_goals = [ g for g in self.available_goals if g.name not in self.discovered_map['goals']] for removed_goal in removed_goals: removed_goal.soft_delete() filters = {"goal_id": removed_goal.id} invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) for at in invalid_ats: LOG.warning( "Audit Template '%(audit_template)s' references a " "goal that does not exist", audit_template=at.uuid) stale_audits = objects.Audit.list( self.ctx, filters=filters, eager=True) for audit in stale_audits: LOG.warning( "Audit '%(audit)s' references a " "goal that does not exist", audit=audit.uuid) if audit.id not in self.stale_audits_map: audit.state = objects.audit.State.CANCELLED self.stale_audits_map[audit.id] = audit else: self.stale_audits_map[ audit.id].state = objects.audit.State.CANCELLED def _soft_delete_removed_strategies(self): removed_strategies = [ s for s in self.available_strategies if s.name not in self.discovered_map['strategies']] for removed_strategy in removed_strategies: removed_strategy.soft_delete() filters = {"strategy_id": removed_strategy.id} invalid_ats = objects.AuditTemplate.list(self.ctx, filters=filters) for at in invalid_ats: LOG.info( "Audit Template '%(audit_template)s' references a " "strategy that does not exist", audit_template=at.uuid) # In this case we can reset the strategy ID to None # so the audit template can still achieve the same goal # but with a different strategy if at.id not in 
self.stale_audit_templates_map: at.strategy_id = None self.stale_audit_templates_map[at.id] = at else: self.stale_audit_templates_map[at.id].strategy_id = None stale_audits = objects.Audit.list( self.ctx, filters=filters, eager=True) for audit in stale_audits: LOG.warning( "Audit '%(audit)s' references a " "strategy that does not exist", audit=audit.uuid) if audit.id not in self.stale_audits_map: audit.state = objects.audit.State.CANCELLED self.stale_audits_map[audit.id] = audit else: self.stale_audits_map[ audit.id].state = objects.audit.State.CANCELLED stale_action_plans = objects.ActionPlan.list( self.ctx, filters=filters, eager=True) for action_plan in stale_action_plans: LOG.warning( "Action Plan '%(action_plan)s' references a " "strategy that does not exist", action_plan=action_plan.uuid) if action_plan.id not in self.stale_action_plans_map: action_plan.state = objects.action_plan.State.CANCELLED self.stale_action_plans_map[action_plan.id] = action_plan else: self.stale_action_plans_map[ action_plan.id].state = ( objects.action_plan.State.CANCELLED) def _soft_delete_removed_scoringengines(self): removed_se = [ se for se in self.available_scoringengines if se.name not in self.discovered_map['scoringengines']] for se in removed_se: LOG.info("Scoring Engine %s removed", se.name) se.soft_delete() def _discover(self): strategies_map = {} goals_map = {} scoringengines_map = {} discovered_map = { "goals": goals_map, "strategies": strategies_map, "scoringengines": scoringengines_map} goal_loader = default.DefaultGoalLoader() implemented_goals = goal_loader.list_available() strategy_loader = default.DefaultStrategyLoader() implemented_strategies = strategy_loader.list_available() for goal_cls in implemented_goals.values(): goals_map[goal_cls.get_name()] = GoalMapping( name=goal_cls.get_name(), display_name=goal_cls.get_translatable_display_name(), efficacy_specification=tuple( IndicatorSpec(**indicator.to_dict()) for indicator in goal_cls.get_efficacy_specification( 
).get_indicators_specifications())) for strategy_cls in implemented_strategies.values(): strategies_map[strategy_cls.get_name()] = StrategyMapping( name=strategy_cls.get_name(), goal_name=strategy_cls.get_goal_name(), display_name=strategy_cls.get_translatable_display_name(), parameters_spec=str(strategy_cls.get_schema())) for se in scoring_factory.get_scoring_engine_list(): scoringengines_map[se.get_name()] = ScoringEngineMapping( name=se.get_name(), description=se.get_description(), metainfo=se.get_metainfo()) return discovered_map def _soft_delete_stale_goals(self, goal_map, matching_goals): """Soft delete the stale goals :param goal_map: discovered goal map :type goal_map: :py:class:`~.GoalMapping` instance :param matching_goals: list of DB goals matching the goal_map :type matching_goals: list of :py:class:`~.objects.Goal` instances :returns: A list of soft deleted DB goals (subset of matching goals) :rtype: list of :py:class:`~.objects.Goal` instances """ goal_display_name = goal_map.display_name goal_name = goal_map.name goal_efficacy_spec = goal_map.efficacy_specification stale_goals = [] for matching_goal in matching_goals: if (matching_goal.efficacy_specification == goal_efficacy_spec and matching_goal.display_name == goal_display_name): LOG.info("Goal %s unchanged", goal_name) else: LOG.info("Goal %s modified", goal_name) matching_goal.soft_delete() stale_goals.append(matching_goal) return stale_goals def _soft_delete_stale_strategies(self, strategy_map, matching_strategies): strategy_name = strategy_map.name strategy_display_name = strategy_map.display_name parameters_spec = strategy_map.parameters_spec stale_strategies = [] for matching_strategy in matching_strategies: if (matching_strategy.display_name == strategy_display_name and matching_strategy.goal_id not in self.goal_mapping and matching_strategy.parameters_spec == ast.literal_eval(parameters_spec)): LOG.info("Strategy %s unchanged", strategy_name) else: LOG.info("Strategy %s modified", 
strategy_name) matching_strategy.soft_delete() stale_strategies.append(matching_strategy) return stale_strategies def _soft_delete_stale_scoringengines( self, scoringengine_map, matching_scoringengines): se_name = scoringengine_map.name se_description = scoringengine_map.description se_metainfo = scoringengine_map.metainfo stale_scoringengines = [] for matching_scoringengine in matching_scoringengines: if (matching_scoringengine.description == se_description and matching_scoringengine.metainfo == se_metainfo): LOG.info("Scoring Engine %s unchanged", se_name) else: LOG.info("Scoring Engine %s modified", se_name) matching_scoringengine.soft_delete() stale_scoringengines.append(matching_scoringengine) return stale_scoringengines python-watcher-4.0.0/watcher/decision_engine/scheduling.py0000664000175000017500000001037513656752270024012 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime import eventlet from oslo_log import log from watcher.common import context from watcher.common import exception from watcher.common import scheduling from watcher.decision_engine.model.collector import manager from watcher import objects from watcher import conf LOG = log.getLogger(__name__) CONF = conf.CONF class DecisionEngineSchedulingService(scheduling.BackgroundSchedulerService): def __init__(self, gconfig=None, **options): gconfig = None or {} super(DecisionEngineSchedulingService, self).__init__( gconfig, **options) self.collector_manager = manager.CollectorManager() @property def collectors(self): return self.collector_manager.get_collectors() def add_sync_jobs(self): for name, collector in self.collectors.items(): timed_task = self._wrap_collector_sync_with_timeout( collector, name) self.add_job(timed_task, trigger='interval', seconds=collector.config.period, next_run_time=datetime.datetime.now()) def _as_timed_sync_func(self, sync_func, name, timeout): def _timed_sync(): with eventlet.Timeout( timeout, exception=exception.ClusterDataModelCollectionError(cdm=name) ): sync_func() return _timed_sync def _wrap_collector_sync_with_timeout(self, collector, name): """Add an execution timeout constraint on a function""" timeout = collector.config.period def _sync(): try: timed_sync = self._as_timed_sync_func( collector.synchronize, name, timeout) timed_sync() except Exception as exc: LOG.exception(exc) collector.set_cluster_data_model_as_stale() return _sync def add_checkstate_job(self): # 30 minutes interval interval = CONF.watcher_decision_engine.check_periodic_interval ap_manager = objects.action_plan.StateManager() if CONF.watcher_decision_engine.action_plan_expiry != 0: self.add_job(ap_manager.check_expired, 'interval', args=[context.make_context()], seconds=interval, next_run_time=datetime.datetime.now()) def cancel_ongoing_audits(self): audit_filters = { 'audit_type': objects.audit.AuditType.ONESHOT.value, 'state': 
objects.audit.State.ONGOING, 'hostname': CONF.host } local_context = context.make_context() ongoing_audits = objects.Audit.list( local_context, filters=audit_filters) for audit in ongoing_audits: audit.state = objects.audit.State.CANCELLED audit.save() LOG.info("Audit %(uuid)s has been cancelled because it was in " "%(state)s state when Decision Engine had been stopped " "on %(hostname)s host.", {'uuid': audit.uuid, 'state': objects.audit.State.ONGOING, 'hostname': audit.hostname}) def start(self): """Start service.""" self.add_sync_jobs() self.add_checkstate_job() self.cancel_ongoing_audits() super(DecisionEngineSchedulingService, self).start() def stop(self): """Stop service.""" self.shutdown() def wait(self): """Wait for service to complete.""" def reset(self): """Reset service. Called in case service running in daemon mode receives SIGHUP. """ python-watcher-4.0.0/watcher/decision_engine/datasources/0000775000175000017500000000000013656752352023623 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/datasources/__init__.py0000664000175000017500000000000013656752270025721 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/datasources/gnocchi.py0000664000175000017500000001615713656752270025620 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from datetime import datetime from datetime import timedelta from oslo_config import cfg from oslo_log import log from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import base CONF = cfg.CONF LOG = log.getLogger(__name__) class GnocchiHelper(base.DataSourceBase): NAME = 'gnocchi' METRIC_MAP = dict(host_cpu_usage='compute.node.cpu.percent', host_ram_usage='hardware.memory.used', host_outlet_temp='hardware.ipmi.node.outlet_temperature', host_inlet_temp='hardware.ipmi.node.temperature', host_airflow='hardware.ipmi.node.airflow', host_power='hardware.ipmi.node.power', instance_cpu_usage='cpu_util', instance_ram_usage='memory.resident', instance_ram_allocated='memory', instance_l3_cache_usage='cpu_l3_cache', instance_root_disk_size='disk.root.size', ) def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.gnocchi = self.osc.gnocchi() def check_availability(self): status = self.query_retry(self.gnocchi.status.get) if status: return 'available' else: return 'not available' def list_metrics(self): """List the user's meters.""" response = self.query_retry(f=self.gnocchi.metric.list) if not response: return set() else: return set([metric['name'] for metric in response]) def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): stop_time = datetime.utcnow() start_time = stop_time - timedelta(seconds=(int(period))) meter = self.METRIC_MAP.get(meter_name) if meter is None: raise exception.MetricNotAvailable(metric=meter_name) if aggregate == 'count': aggregate = 'mean' LOG.warning('aggregate type count not supported by gnocchi,' ' replaced with mean.') resource_id = resource.uuid if resource_type == 'compute_node': resource_id = "%s_%s" % (resource.hostname, resource.hostname) kwargs = dict(query={"=": {"original_resource_id": resource_id}}, limit=1) resources = 
self.query_retry( f=self.gnocchi.resource.search, **kwargs) if not resources: LOG.warning("The {0} resource {1} could not be " "found".format(self.NAME, resource_id)) return resource_id = resources[0]['id'] raw_kwargs = dict( metric=meter, start=start_time, stop=stop_time, resource_id=resource_id, granularity=granularity, aggregation=aggregate, ) kwargs = {k: v for k, v in raw_kwargs.items() if k and v} statistics = self.query_retry( f=self.gnocchi.metric.get_measures, **kwargs) return_value = None if statistics: # return value of latest measure # measure has structure [time, granularity, value] return_value = statistics[-1][2] if meter_name == 'host_airflow': # Airflow from hardware.ipmi.node.airflow is reported as # 1/10 th of actual CFM return_value *= 10 return return_value def get_host_cpu_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_cpu_usage', period, aggregate, granularity) def get_host_ram_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_ram_usage', period, aggregate, granularity) def get_host_outlet_temp(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_outlet_temp', period, aggregate, granularity) def get_host_inlet_temp(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_inlet_temp', period, aggregate, granularity) def get_host_airflow(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_airflow', period, aggregate, granularity) def get_host_power(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'compute_node', 'host_power', period, aggregate, granularity) def get_instance_cpu_usage(self, resource, period, aggregate, granularity=300): return 
self.statistic_aggregation( resource, 'instance', 'instance_cpu_usage', period, aggregate, granularity) def get_instance_ram_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_ram_usage', period, aggregate, granularity) def get_instance_ram_allocated(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_ram_allocated', period, aggregate, granularity) def get_instance_l3_cache_usage(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_l3_cache_usage', period, aggregate, granularity) def get_instance_root_disk_size(self, resource, period, aggregate, granularity=300): return self.statistic_aggregation( resource, 'instance', 'instance_root_disk_size', period, aggregate, granularity) python-watcher-4.0.0/watcher/decision_engine/datasources/ceilometer.py0000664000175000017500000002434413656752270026333 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import datetime from oslo_log import log from oslo_utils import timeutils from watcher._i18n import _ from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import base LOG = log.getLogger(__name__) try: from ceilometerclient import exc HAS_CEILCLIENT = True except ImportError: HAS_CEILCLIENT = False class CeilometerHelper(base.DataSourceBase): NAME = 'ceilometer' METRIC_MAP = dict(host_cpu_usage='compute.node.cpu.percent', host_ram_usage='hardware.memory.used', host_outlet_temp='hardware.ipmi.node.outlet_temperature', host_inlet_temp='hardware.ipmi.node.temperature', host_airflow='hardware.ipmi.node.airflow', host_power='hardware.ipmi.node.power', instance_cpu_usage='cpu_util', instance_ram_usage='memory.resident', instance_ram_allocated='memory', instance_l3_cache_usage='cpu_l3_cache', instance_root_disk_size='disk.root.size', ) def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.ceilometer = self.osc.ceilometer() LOG.warning("Ceilometer API is deprecated and Ceilometer Datasource " "module is no longer maintained. 
We recommend to use " "Gnocchi instead.") @staticmethod def format_query(user_id, tenant_id, resource_id, user_ids, tenant_ids, resource_ids): query = [] def query_append(query, _id, _ids, field): if _id: _ids = [_id] for x_id in _ids: query.append({"field": field, "op": "eq", "value": x_id}) query_append(query, user_id, (user_ids or []), "user_id") query_append(query, tenant_id, (tenant_ids or []), "project_id") query_append(query, resource_id, (resource_ids or []), "resource_id") return query def _timestamps(self, start_time, end_time): def _format_timestamp(_time): if _time: if isinstance(_time, datetime.datetime): return _time.isoformat() return _time return None start_timestamp = _format_timestamp(start_time) end_timestamp = _format_timestamp(end_time) if ((start_timestamp is not None) and (end_timestamp is not None) and (timeutils.parse_isotime(start_timestamp) > timeutils.parse_isotime(end_timestamp))): raise exception.Invalid( _("Invalid query: %(start_time)s > %(end_time)s") % dict( start_time=start_timestamp, end_time=end_timestamp)) return start_timestamp, end_timestamp def build_query(self, user_id=None, tenant_id=None, resource_id=None, user_ids=None, tenant_ids=None, resource_ids=None, start_time=None, end_time=None): """Returns query built from given parameters. This query can be then used for querying resources, meters and statistics. 
:param user_id: user_id, has a priority over list of ids :param tenant_id: tenant_id, has a priority over list of ids :param resource_id: resource_id, has a priority over list of ids :param user_ids: list of user_ids :param tenant_ids: list of tenant_ids :param resource_ids: list of resource_ids :param start_time: datetime from which measurements should be collected :param end_time: datetime until which measurements should be collected """ query = self.format_query(user_id, tenant_id, resource_id, user_ids, tenant_ids, resource_ids) start_timestamp, end_timestamp = self._timestamps(start_time, end_time) if start_timestamp: query.append({"field": "timestamp", "op": "ge", "value": start_timestamp}) if end_timestamp: query.append({"field": "timestamp", "op": "le", "value": end_timestamp}) return query def query_retry_reset(self, exception_instance): if isinstance(exception_instance, exc.HTTPUnauthorized): self.osc.reset_clients() self.ceilometer = self.osc.ceilometer() def list_metrics(self): """List the user's meters.""" meters = self.query_retry(f=self.ceilometer.meters.list) if not meters: return set() else: return meters def check_availability(self): status = self.query_retry(self.ceilometer.resources.list) if status: return 'available' else: return 'not available' def query_sample(self, meter_name, query, limit=1): return self.query_retry(f=self.ceilometer.samples.list, meter_name=meter_name, limit=limit, q=query) def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, granularity=300, aggregate='mean'): end_time = datetime.datetime.utcnow() start_time = end_time - datetime.timedelta(seconds=int(period)) meter = self.METRIC_MAP.get(meter_name) if meter is None: raise exception.MetricNotAvailable(metric=meter_name) if aggregate == 'mean': aggregate = 'avg' elif aggregate == 'count': aggregate = 'avg' LOG.warning('aggregate type count not supported by ceilometer,' ' replaced with mean.') resource_id = resource.uuid if 
resource_type == 'compute_node': resource_id = "%s_%s" % (resource.hostname, resource.hostname) query = self.build_query( resource_id=resource_id, start_time=start_time, end_time=end_time) statistic = self.query_retry(f=self.ceilometer.statistics.list, meter_name=meter, q=query, period=period, aggregates=[ {'func': aggregate}]) item_value = None if statistic: item_value = statistic[-1]._info.get('aggregate').get(aggregate) if meter_name == 'host_airflow': # Airflow from hardware.ipmi.node.airflow is reported as # 1/10 th of actual CFM item_value *= 10 return item_value def get_host_cpu_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_cpu_usage', period, aggregate, granularity) def get_host_ram_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_ram_usage', period, aggregate, granularity) def get_host_outlet_temp(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_outlet_temp', period, aggregate, granularity) def get_host_inlet_temp(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_inlet_temp', period, aggregate, granularity) def get_host_airflow(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_airflow', period, aggregate, granularity) def get_host_power(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_power', period, aggregate, granularity) def get_instance_cpu_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_cpu_usage', period, aggregate, granularity) def get_instance_ram_usage(self, resource, period, aggregate, granularity=None): return 
self.statistic_aggregation( resource, 'instance', 'instance_ram_usage', period, aggregate, granularity) def get_instance_ram_allocated(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_ram_allocated', period, aggregate, granularity) def get_instance_l3_cache_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_l3_cache_usage', period, aggregate, granularity) def get_instance_root_disk_size(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_root_disk_size', period, aggregate, granularity) python-watcher-4.0.0/watcher/decision_engine/datasources/grafana_translator/0000775000175000017500000000000013656752352027473 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/datasources/grafana_translator/__init__.py0000664000175000017500000000000013656752270031571 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/datasources/grafana_translator/base.py0000664000175000017500000001071213656752270030757 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import abc from watcher._i18n import _ from watcher.common import exception from watcher.decision_engine.datasources import base class BaseGrafanaTranslator(object): """Grafana translator baseclass to use with grafana for different databases Specific databasses that are proxied through grafana require some alterations depending on the database. """ """ data { metric: name of the metric as found in DataSourceBase.METRIC_MAP, db: database specified for this metric in grafana_client config options, attribute: the piece of information that will be selected from the resource object to build the query. query: the unformatted query from the configuration for this metric, resource: the object from the OpenStackClient resource_type: the type of the resource ['compute_node','instance', 'bare_metal', 'storage'], period: the period of time to collect metrics for in seconds, aggregate: the aggregation can be any from ['mean', 'max', 'min', 'count'], granularity: interval between datapoints in seconds (optional), } """ """Every grafana translator should have a uniquely identifying name""" NAME = '' RESOURCE_TYPES = base.DataSourceBase.RESOURCE_TYPES AGGREGATES = base.DataSourceBase.AGGREGATES def __init__(self, data): self._data = data self._validate_data() def _validate_data(self): """iterate through the supplied data and verify attributes""" optionals = ['granularity'] reference_data = { 'metric': None, 'db': None, 'attribute': None, 'query': None, 'resource': None, 'resource_type': None, 'period': None, 'aggregate': None, 'granularity': None } reference_data.update(self._data) for key, value in reference_data.items(): if value is None and key not in optionals: raise exception.InvalidParameter( message=(_("The value %(value)s for parameter " "%(parameter)s is invalid") % {'value': None, 'parameter': key} ) ) if reference_data['resource_type'] not in self.RESOURCE_TYPES: raise exception.InvalidParameter(parameter='resource_type', parameter_type='RESOURCE_TYPES') if 
reference_data['aggregate'] not in self.AGGREGATES: raise exception.InvalidParameter(parameter='aggregate', parameter_type='AGGREGATES') @staticmethod def _extract_attribute(resource, attribute): """Retrieve the desired attribute from the resource :param resource: The resource object to extract the attribute from. :param attribute: The name of the attribute to subtract as string. :return: The extracted attribute or None """ try: return getattr(resource, attribute) except AttributeError: raise @staticmethod def _query_format(query, aggregate, resource, period, granularity, translator_specific): return query.format(aggregate, resource, period, granularity, translator_specific) @abc.abstractmethod def build_params(self): """Build the set of parameters to send with the request""" raise NotImplementedError() @abc.abstractmethod def extract_result(self, raw_results): """Extrapolate the metric from the raw results of the request""" raise NotImplementedError() python-watcher-4.0.0/watcher/decision_engine/datasources/grafana_translator/influxdb.py0000664000175000017500000000625013656752270031662 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_log import log from oslo_serialization import jsonutils from watcher.common import exception from watcher.decision_engine.datasources.grafana_translator.base import \ BaseGrafanaTranslator CONF = cfg.CONF LOG = log.getLogger(__name__) class InfluxDBGrafanaTranslator(BaseGrafanaTranslator): """Grafana translator to communicate with InfluxDB database""" NAME = 'influxdb' def __init__(self, data): super(InfluxDBGrafanaTranslator, self).__init__(data) def build_params(self): """""" data = self._data retention_period = None available_periods = CONF.grafana_translators.retention_periods.items() for key, value in sorted(available_periods, key=lambda x: x[1]): if int(data['period']) < int(value): retention_period = key break if retention_period is None: retention_period = max(available_periods)[0] LOG.warning("Longest retention period is to short for desired" " period") try: resource = self._extract_attribute( data['resource'], data['attribute']) except AttributeError: LOG.error("Resource: {0} does not contain attribute {1}".format( data['resource'], data['attribute'])) raise # Granularity is optional if it is None the minimal value for InfluxDB # will be 1 granularity = \ data['granularity'] if data['granularity'] is not None else 1 return {'db': data['db'], 'epoch': 'ms', 'q': self._query_format( data['query'], data['aggregate'], resource, data['period'], granularity, retention_period)} def extract_result(self, raw_results): """""" try: # For result structure see: # https://docs.openstack.org/watcher/latest/datasources/grafana.html#InfluxDB result = jsonutils.loads(raw_results) result = result['results'][0]['series'][0] index_aggregate = result['columns'].index(self._data['aggregate']) return result['values'][0][index_aggregate] except KeyError: LOG.error("Could not extract {0} for the resource: {1}".format( self._data['metric'], self._data['resource'])) raise exception.NoSuchMetricForHost( metric=self._data['metric'], 
host=self._data['resource']) python-watcher-4.0.0/watcher/decision_engine/datasources/manager.py0000664000175000017500000001306013656752270025606 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import yaml from collections import OrderedDict from oslo_config import cfg from oslo_log import log from watcher.common import exception from watcher.decision_engine.datasources import ceilometer as ceil from watcher.decision_engine.datasources import gnocchi as gnoc from watcher.decision_engine.datasources import grafana as graf from watcher.decision_engine.datasources import monasca as mon LOG = log.getLogger(__name__) class DataSourceManager(object): metric_map = OrderedDict([ (gnoc.GnocchiHelper.NAME, gnoc.GnocchiHelper.METRIC_MAP), (ceil.CeilometerHelper.NAME, ceil.CeilometerHelper.METRIC_MAP), (mon.MonascaHelper.NAME, mon.MonascaHelper.METRIC_MAP), (graf.GrafanaHelper.NAME, graf.GrafanaHelper.METRIC_MAP), ]) """Dictionary with all possible datasources, dictionary order is the default order for attempting to use datasources """ def __init__(self, config=None, osc=None): self.osc = osc self.config = config self._ceilometer = None self._monasca = None self._gnocchi = None self._grafana = None # Dynamically update grafana metric map, only available at runtime # The metric map can still be overridden by a yaml config file self.metric_map[graf.GrafanaHelper.NAME] = self.grafana.METRIC_MAP 
metric_map_path = cfg.CONF.watcher_decision_engine.metric_map_path metrics_from_file = self.load_metric_map(metric_map_path) for ds, mp in self.metric_map.items(): try: self.metric_map[ds].update(metrics_from_file.get(ds, {})) except KeyError: msgargs = (ds, self.metric_map.keys()) LOG.warning('Invalid Datasource: %s. Allowed: %s ', *msgargs) self.datasources = self.config.datasources @property def ceilometer(self): if self._ceilometer is None: self.ceilometer = ceil.CeilometerHelper(osc=self.osc) return self._ceilometer @ceilometer.setter def ceilometer(self, ceilometer): self._ceilometer = ceilometer @property def monasca(self): if self._monasca is None: self._monasca = mon.MonascaHelper(osc=self.osc) return self._monasca @monasca.setter def monasca(self, monasca): self._monasca = monasca @property def gnocchi(self): if self._gnocchi is None: self._gnocchi = gnoc.GnocchiHelper(osc=self.osc) return self._gnocchi @gnocchi.setter def gnocchi(self, gnocchi): self._gnocchi = gnocchi @property def grafana(self): if self._grafana is None: self._grafana = graf.GrafanaHelper(osc=self.osc) return self._grafana @grafana.setter def grafana(self, grafana): self._grafana = grafana def get_backend(self, metrics): """Determine the datasource to use from the configuration Iterates over the configured datasources in order to find the first which can support all specified metrics. Upon a missing metric the next datasource is attempted. 
""" if not self.datasources or len(self.datasources) == 0: raise exception.NoDatasourceAvailable if not metrics or len(metrics) == 0: LOG.critical("Can not retrieve datasource without specifying " "list of required metrics.") raise exception.InvalidParameter(parameter='metrics', parameter_type='none empty list') for datasource in self.datasources: no_metric = False for metric in metrics: if (metric not in self.metric_map[datasource] or self.metric_map[datasource].get(metric) is None): no_metric = True LOG.warning("Datasource: {0} could not be used due to " "metric: {1}".format(datasource, metric)) break if not no_metric: # Try to use a specific datasource but attempt additional # datasources upon exceptions (if config has more datasources) try: ds = getattr(self, datasource) ds.METRIC_MAP.update(self.metric_map[ds.NAME]) return ds except Exception: pass raise exception.MetricNotAvailable(metric=metric) def load_metric_map(self, file_path): """Load metrics from the metric_map_path""" if file_path and os.path.exists(file_path): with open(file_path, 'r') as f: try: ret = yaml.safe_load(f.read()) # return {} if the file is empty return ret if ret else {} except yaml.YAMLError as e: LOG.warning('Could not load %s: %s', file_path, e) return {} else: return {} python-watcher-4.0.0/watcher/decision_engine/datasources/monasca.py0000664000175000017500000001402113656752270025613 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. import datetime from monascaclient import exc from watcher.common import clients from watcher.common import exception from watcher.decision_engine.datasources import base class MonascaHelper(base.DataSourceBase): NAME = 'monasca' METRIC_MAP = dict(host_cpu_usage='cpu.percent', host_ram_usage=None, host_outlet_temp=None, host_inlet_temp=None, host_airflow=None, host_power=None, instance_cpu_usage='vm.cpu.utilization_perc', instance_ram_usage=None, instance_ram_allocated=None, instance_l3_cache_usage=None, instance_root_disk_size=None, ) def __init__(self, osc=None): """:param osc: an OpenStackClients instance""" self.osc = osc if osc else clients.OpenStackClients() self.monasca = self.osc.monasca() def _format_time_params(self, start_time, end_time, period): """Format time-related params to the correct Monasca format :param start_time: Start datetime from which metrics will be used :param end_time: End datetime from which metrics will be used :param period: interval in seconds (int) :return: start ISO time, end ISO time, period """ if not period: period = int(datetime.timedelta(hours=3).total_seconds()) if not start_time: start_time = ( datetime.datetime.utcnow() - datetime.timedelta(seconds=period)) start_timestamp = None if not start_time else start_time.isoformat() end_timestamp = None if not end_time else end_time.isoformat() return start_timestamp, end_timestamp, period def query_retry_reset(self, exception_instance): if isinstance(exception_instance, exc.Unauthorized): self.osc.reset_clients() self.monasca = self.osc.monasca() def check_availability(self): result = self.query_retry(self.monasca.metrics.list) if result: return 'available' else: return 'not available' def list_metrics(self): # TODO(alexchadin): this method should be implemented in accordance to # monasca API. 
pass def statistic_aggregation(self, resource=None, resource_type=None, meter_name=None, period=300, aggregate='mean', granularity=300): stop_time = datetime.datetime.utcnow() start_time = stop_time - datetime.timedelta(seconds=(int(period))) meter = self.METRIC_MAP.get(meter_name) if meter is None: raise exception.MetricNotAvailable(metric=meter_name) if aggregate == 'mean': aggregate = 'avg' raw_kwargs = dict( name=meter, start_time=start_time.isoformat(), end_time=stop_time.isoformat(), dimensions={'hostname': resource.uuid}, period=period, statistics=aggregate, group_by='*', ) kwargs = {k: v for k, v in raw_kwargs.items() if k and v} statistics = self.query_retry( f=self.monasca.metrics.list_statistics, **kwargs) cpu_usage = None for stat in statistics: avg_col_idx = stat['columns'].index(aggregate) values = [r[avg_col_idx] for r in stat['statistics']] value = float(sum(values)) / len(values) cpu_usage = value return cpu_usage def get_host_cpu_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'compute_node', 'host_cpu_usage', period, aggregate, granularity) def get_host_ram_usage(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_outlet_temp(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_inlet_temp(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_airflow(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_host_power(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_instance_cpu_usage(self, resource, period, aggregate, granularity=None): return self.statistic_aggregation( resource, 'instance', 'instance_cpu_usage', period, aggregate, granularity) def get_instance_ram_usage(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_instance_ram_allocated(self, resource, period, 
aggregate, granularity=None): raise NotImplementedError def get_instance_l3_cache_usage(self, resource, period, aggregate, granularity=None): raise NotImplementedError def get_instance_root_disk_size(self, resource, period, aggregate, granularity=None): raise NotImplementedError python-watcher-4.0.0/watcher/decision_engine/datasources/grafana.py0000664000175000017500000002313713656752270025601 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg
from oslo_log import log
import six.moves.urllib.parse as urlparse

from watcher.common import clients
from watcher.common import exception
from watcher.decision_engine.datasources import base
from watcher.decision_engine.datasources.grafana_translator import influxdb

import requests

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class GrafanaHelper(base.DataSourceBase):
    """Datasource helper retrieving metrics through a Grafana proxy.

    Requests are sent to the configured Grafana endpoint and converted to
    Watcher metric values by per-database translators (currently InfluxDB).
    """

    NAME = 'grafana'

    # METRIC_MAP is only populated at runtime by _build_metric_map.
    # A per-instance copy is made in __init__ so that instances never
    # mutate this shared class-level dict.
    METRIC_MAP = dict()

    # All available translators
    TRANSLATOR_LIST = [
        influxdb.InfluxDBGrafanaTranslator.NAME
    ]

    def __init__(self, osc=None):
        """:param osc: an OpenStackClients instance"""
        self.osc = osc if osc else clients.OpenStackClients()
        self.nova = self.osc.nova()
        self.configured = False
        self._base_url = None
        self._headers = None
        # Shadow the class attribute with an instance-level dict so that
        # _build_metric_map does not leak config from one instance into
        # every other GrafanaHelper (mutable class attribute bug).
        self.METRIC_MAP = dict()
        self._setup()

    def _setup(self):
        """Configure grafana helper to perform requests"""
        token = CONF.grafana_client.token
        base_url = CONF.grafana_client.base_url

        if not token:
            LOG.critical("GrafanaHelper authentication token not configured")
            return
        self._headers = {"Authorization": "Bearer " + token,
                         "Content-Type": "Application/json"}

        if not base_url:
            LOG.critical("GrafanaHelper url not properly configured, "
                         "check base_url")
            return
        self._base_url = base_url

        # Very basic url parsing
        parse = urlparse.urlparse(self._base_url)
        if parse.scheme == '' or parse.netloc == '' or parse.path == '':
            LOG.critical("GrafanaHelper url not properly configured, "
                         "check base_url and project_id")
            return

        self._build_metric_map()

        if len(self.METRIC_MAP) == 0:
            LOG.critical("GrafanaHelper not configured for any metrics")

        self.configured = True

    def _build_metric_map(self):
        """Builds the metric map by reading config information"""
        for key, value in CONF.grafana_client.database_map.items():
            try:
                project = CONF.grafana_client.project_id_map[key]
                attribute = CONF.grafana_client.attribute_map[key]
                translator = CONF.grafana_client.translator_map[key]
                query = CONF.grafana_client.query_map[key]
                # Only register the metric when every required piece of
                # configuration is present and the translator is known.
                if project is not None and \
                        value is not None and \
                        translator in self.TRANSLATOR_LIST and \
                        query is not None:
                    self.METRIC_MAP[key] = {
                        'db': value,
                        'project': project,
                        'attribute': attribute,
                        'translator': translator,
                        'query': query
                    }
            except KeyError as e:
                # A partially configured metric is skipped, not fatal.
                LOG.error(e)

    def _build_translator_schema(self, metric, db, attribute, query, resource,
                                 resource_type, period, aggregate,
                                 granularity):
        """Create dictionary to pass to grafana proxy translators"""
        return {'metric': metric, 'db': db, 'attribute': attribute,
                'query': query, 'resource': resource,
                'resource_type': resource_type, 'period': period,
                'aggregate': aggregate, 'granularity': granularity}

    def _get_translator(self, name, data):
        """Use the names of translators to get the translator for the metric

        :raises: InvalidParameter when the translator name is unknown
        """
        if name == influxdb.InfluxDBGrafanaTranslator.NAME:
            return influxdb.InfluxDBGrafanaTranslator(data)
        else:
            raise exception.InvalidParameter(
                parameter='name', parameter_type='grafana translator')

    def _request(self, params, project_id):
        """Make the request to the endpoint to retrieve data

        If the request fails, determines what error to raise.

        :return: the response on HTTP 200; None on HTTP 400 (invalid query)
        :raises: DataSourceNotAvailable when not configured or on HTTP 401
        """
        if self.configured is False:
            raise exception.DataSourceNotAvailable(self.NAME)

        resp = requests.get(self._base_url + str(project_id) + '/query',
                            params=params, headers=self._headers)
        if resp.status_code == 200:
            return resp
        elif resp.status_code == 400:
            LOG.error("Query for metric is invalid")
        elif resp.status_code == 401:
            LOG.error("Authorization token is invalid")
        raise exception.DataSourceNotAvailable(self.NAME)

    def statistic_aggregation(self, resource=None, resource_type=None,
                              meter_name=None, period=300, aggregate='mean',
                              granularity=300):
        """Get the value for the specific metric based on specified parameters

        :return: the translated result, or None when the datasource could
            not be reached after the configured retries
        :raises: MetricNotAvailable when meter_name is not configured
        """
        # Idiomatic membership check instead of probing with try/except.
        if meter_name not in self.METRIC_MAP:
            LOG.error("Metric: {0} does not appear in the current Grafana "
                      "metric map".format(meter_name))
            raise exception.MetricNotAvailable(metric=meter_name)

        entry = self.METRIC_MAP[meter_name]
        db = entry['db']
        project = entry['project']
        attribute = entry['attribute']
        translator_name = entry['translator']
        query = entry['query']

        data = self._build_translator_schema(
            meter_name, db, attribute, query, resource, resource_type,
            period, aggregate, granularity)

        translator = self._get_translator(translator_name, data)

        params = translator.build_params()

        raw_kwargs = dict(
            params=params,
            project_id=project,
        )
        kwargs = {k: v for k, v in raw_kwargs.items() if k and v}

        resp = self.query_retry(self._request, **kwargs)
        if not resp:
            LOG.warning("Datasource {0} is not available.".format(self.NAME))
            return

        result = translator.extract_result(resp.content)

        return result

    def get_host_cpu_usage(self, resource, period=300,
                           aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'compute_node', 'host_cpu_usage', period, aggregate,
            granularity)

    def get_host_ram_usage(self, resource, period=300,
                           aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'compute_node', 'host_ram_usage', period, aggregate,
            granularity)

    def get_host_outlet_temp(self, resource, period=300,
                             aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'compute_node', 'host_outlet_temp', period, aggregate,
            granularity)

    def get_host_inlet_temp(self, resource, period=300,
                            aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'compute_node', 'host_inlet_temp', period, aggregate,
            granularity)

    def get_host_airflow(self, resource, period=300,
                         aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'compute_node', 'host_airflow', period, aggregate,
            granularity)

    def get_host_power(self, resource, period=300,
                       aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'compute_node', 'host_power', period, aggregate,
            granularity)

    def get_instance_cpu_usage(self, resource, period=300,
                               aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'instance', 'instance_cpu_usage', period, aggregate,
            granularity)

    def get_instance_ram_usage(self, resource, period=300,
                               aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'instance', 'instance_ram_usage', period, aggregate,
            granularity)

    def get_instance_ram_allocated(self, resource, period=300,
                                   aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'instance', 'instance_ram_allocated', period, aggregate,
            granularity)

    def get_instance_l3_cache_usage(self, resource, period=300,
                                    aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'instance', 'instance_l3_cache_usage', period,
            aggregate, granularity)

    def get_instance_root_disk_size(self, resource, period=300,
                                    aggregate="mean", granularity=None):
        return self.statistic_aggregation(
            resource, 'instance', 'instance_root_disk_size', period,
            aggregate, granularity)
import abc
import time

from oslo_config import cfg
from oslo_log import log

CONF = cfg.CONF
LOG = log.getLogger(__name__)


class DataSourceBase(object):
    """Base Class for datasources in Watcher

    This base class defines the abstract methods that datasources should
    implement and contains details on the values expected for parameters as
    well as what the values for return types should be.
    """

    """Possible options for the parameters named aggregate"""
    AGGREGATES = ['mean', 'min', 'max', 'count']

    """Possible options for the parameters named resource_type"""
    RESOURCE_TYPES = ['compute_node', 'instance', 'bare_metal', 'storage']

    """Each datasource should have a uniquely identifying name"""
    NAME = ''

    """Possible metrics a datasource can support and their internal name"""
    METRIC_MAP = dict(host_cpu_usage=None,
                      host_ram_usage=None,
                      host_outlet_temp=None,
                      host_inlet_temp=None,
                      host_airflow=None,
                      host_power=None,
                      instance_cpu_usage=None,
                      instance_ram_usage=None,
                      instance_ram_allocated=None,
                      instance_l3_cache_usage=None,
                      instance_root_disk_size=None,
                      )

    def query_retry(self, f, *args, **kwargs):
        """Attempts to retrieve metrics from the external service

        Attempts to access data from the external service and handles
        exceptions upon exception the retrieval should be retried in
        accordance to the value of query_max_retries

        :param f: The method that performs the actual querying for metrics
        :param args: Positional arguments supplied to the method
        :param kwargs: Keyword arguments supplied to the method
        :return: The value as retrieved from the external service, or
                 None when every retry raised an exception (the failure
                 is logged but not re-raised)
        """
        num_retries = CONF.watcher_datasources.query_max_retries
        timeout = CONF.watcher_datasources.query_timeout
        for i in range(num_retries):
            try:
                return f(*args, **kwargs)
            except Exception as e:
                LOG.exception(e)
                # give the datasource implementation a chance to reset
                # its client state (re-auth, reconnect, ...) before retry
                self.query_retry_reset(e)
                LOG.warning("Retry {0} of {1} while retrieving metrics retry "
                            "in {2} seconds".format(i+1, num_retries,
                                                    timeout))
                time.sleep(timeout)

    @abc.abstractmethod
    def query_retry_reset(self, exception_instance):
        """Abstract method to perform reset operations upon request failure"""
        pass

    @abc.abstractmethod
    def list_metrics(self):
        """Returns the supported metrics that the datasource can retrieve

        :return: List of supported metrics containing keys from METRIC_MAP
        """
        pass

    @abc.abstractmethod
    def check_availability(self):
        """Tries to contact the datasource to see if it is available

        :return: True or False with true meaning the datasource is available
        """
        pass

    @abc.abstractmethod
    def statistic_aggregation(self, resource=None, resource_type=None,
                              meter_name=None, period=300, aggregate='mean',
                              granularity=300):
        """Retrieves and converts metrics based on the specified parameters

        :param resource: Resource object as defined in watcher models such as
                         ComputeNode and Instance
        :param resource_type: Indicates which type of object is supplied
                              to the resource parameter
        :param meter_name: The desired metric to retrieve as key from
                           METRIC_MAP
        :param period: Time span to collect metrics from in seconds
        :param granularity: Interval between samples in measurements in
                            seconds
        :param aggregate: Aggregation method to extract value from set of
                          samples
        :return: The gathered value for the metric the type of value depends
                 on the meter_name
        """
        pass

    @abc.abstractmethod
    def get_host_cpu_usage(self, resource, period, aggregate,
                           granularity=None):
        """Get the cpu usage for a host such as a compute_node

        :return: cpu usage as float ranging between 0 and 100 representing
                 the total cpu usage as percentage
        """
        pass

    @abc.abstractmethod
    def get_host_ram_usage(self, resource, period, aggregate,
                           granularity=None):
        """Get the ram usage for a host such as a compute_node

        :return: ram usage as float in megabytes
        """
        pass

    @abc.abstractmethod
    def get_host_outlet_temp(self, resource, period, aggregate,
                             granularity=None):
        """Get the outlet temperature for a host such as compute_node

        :return: outlet temperature as float in degrees celsius
        """
        pass

    @abc.abstractmethod
    def get_host_inlet_temp(self, resource, period, aggregate,
                            granularity=None):
        """Get the inlet temperature for a host such as compute_node

        :return: inlet temperature as float in degrees celsius
        """
        pass

    @abc.abstractmethod
    def get_host_airflow(self, resource, period, aggregate,
                         granularity=None):
        """Get the airflow for a host such as compute_node

        :return: airflow as float in cfm
        """
        pass

    @abc.abstractmethod
    def get_host_power(self, resource, period, aggregate,
                       granularity=None):
        """Get the power for a host such as compute_node

        :return: power as float in watts
        """
        pass

    @abc.abstractmethod
    def get_instance_cpu_usage(self, resource, period, aggregate,
                               granularity=None):
        """Get the cpu usage for an instance

        :return: cpu usage as float ranging between 0 and 100 representing
                 the total cpu usage as percentage
        """
        pass

    @abc.abstractmethod
    def get_instance_ram_usage(self, resource, period, aggregate,
                               granularity=None):
        """Get the ram usage for an instance

        :return: ram usage as float in megabytes
        """
        pass

    @abc.abstractmethod
    def get_instance_ram_allocated(self, resource, period, aggregate,
                                   granularity=None):
        """Get the ram allocated for an instance

        :return: total ram allocated as float in megabytes
        """
        pass

    @abc.abstractmethod
    def get_instance_l3_cache_usage(self, resource, period, aggregate,
                                    granularity=None):
        """Get the l3 cache usage for an instance

        :return: l3 cache usage as integer in bytes
        """
        pass

    @abc.abstractmethod
    def get_instance_root_disk_size(self, resource, period, aggregate,
                                    granularity=None):
        """Get the size of the root disk for an instance

        :return: root disk size as float in gigabytes
        """
        pass
get_instance_root_disk_size(self, resource, period, aggregate, granularity=None): """Get the size of the root disk for an instance :return: root disk size as float in gigabytes """ pass python-watcher-4.0.0/watcher/decision_engine/audit/0000775000175000017500000000000013656752352022414 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/audit/__init__.py0000664000175000017500000000000013656752270024512 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/audit/event.py0000664000175000017500000000203113656752270024102 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.decision_engine.audit import base from watcher import objects class EventAuditHandler(base.AuditHandler): def post_execute(self, audit, solution, request_context): super(EventAuditHandler, self).post_execute(audit, solution, request_context) # change state of the audit to SUCCEEDED self.update_audit_state(audit, objects.audit.State.SUCCEEDED) python-watcher-4.0.0/watcher/decision_engine/audit/continuous.py0000664000175000017500000002242213656752270025175 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica LTD # Copyright (c) 2016 Intel Corp # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime

from dateutil import tz

from croniter import croniter

from watcher.common import context
from watcher.common import scheduling
from watcher.common import utils
from watcher import conf
from watcher.db.sqlalchemy import api as sq_api
from watcher.db.sqlalchemy import job_store
from watcher.decision_engine.audit import base
from watcher import objects

CONF = conf.CONF


class ContinuousAuditHandler(base.AuditHandler):
    """Audit handler for CONTINUOUS audits.

    Periodically (re)schedules continuous audits with APScheduler, using a
    DB-backed job store so jobs survive service restarts. Audits may use a
    plain interval in seconds or a cron-like expression.
    """

    def __init__(self):
        super(ContinuousAuditHandler, self).__init__()
        # scheduler for executing audits
        self._audit_scheduler = None
        # scheduler for a periodic task to launch audit
        self._period_scheduler = None
        # admin context that can also see soft-deleted audits, needed when
        # checking whether a scheduled job references a removed audit
        self.context_show_deleted = context.RequestContext(is_admin=True,
                                                           show_deleted=True)

    @property
    def scheduler(self):
        # lazily built; jobs persist in the Watcher DB job store
        if self._audit_scheduler is None:
            self._audit_scheduler = scheduling.BackgroundSchedulerService(
                jobstores={
                    'default': job_store.WatcherJobStore(
                        engine=sq_api.get_engine()),
                }
            )
        return self._audit_scheduler

    @property
    def period_scheduler(self):
        # lazily built; in-memory scheduler for launch_audits_periodically
        if self._period_scheduler is None:
            self._period_scheduler = scheduling.BackgroundSchedulerService()
        return self._period_scheduler

    def _is_audit_inactive(self, audit):
        """Return True (and drop its job) if the audit should not run.

        An audit is inactive when its state machine says so, when it is
        owned by another decision-engine host, or when its timeframe has
        expired.
        """
        audit = objects.Audit.get_by_uuid(
            self.context_show_deleted, audit.uuid, eager=True)
        if (objects.audit.AuditStateTransitionManager().is_inactive(audit) or
                (audit.hostname != CONF.host) or
                (self.check_audit_expired(audit))):
            # if audit isn't in active states, audit's job must be removed to
            # prevent using of inactive audit in future.
            jobs = [job for job in self.scheduler.get_jobs()
                    if job.name == 'execute_audit' and
                    job.args[0].uuid == audit.uuid]
            if jobs:
                jobs[0].remove()
            return True
        return False

    def do_execute(self, audit, request_context):
        """Run the strategy, then cancel superseded RECOMMENDED plans."""
        solution = super(ContinuousAuditHandler, self)\
            .do_execute(audit, request_context)

        if audit.audit_type == objects.audit.AuditType.CONTINUOUS.value:
            # each run of a continuous audit obsoletes previous
            # recommendations that were never launched
            a_plan_filters = {'audit_uuid': audit.uuid,
                              'state': objects.action_plan.State.RECOMMENDED}
            action_plans = objects.ActionPlan.list(
                request_context, filters=a_plan_filters, eager=True)
            for plan in action_plans:
                plan.state = objects.action_plan.State.CANCELLED
                plan.save()
        return solution

    @staticmethod
    def _next_cron_time(audit):
        # returns None when the interval is not a cron expression
        if utils.is_cron_like(audit.interval):
            return croniter(audit.interval, datetime.datetime.utcnow()
                            ).get_next(datetime.datetime)

    @classmethod
    def execute_audit(cls, audit, request_context):
        # cls() resolves to the Singleton instance (see service.Singleton
        # metaclass on the base handler hierarchy)
        self = cls()
        if not self._is_audit_inactive(audit):
            try:
                self.execute(audit, request_context)
            except Exception:
                raise
            finally:
                # always advance next_run_time, even on failure, so a
                # broken audit does not re-fire immediately
                if utils.is_int_like(audit.interval):
                    audit.next_run_time = (
                        datetime.datetime.utcnow() +
                        datetime.timedelta(seconds=int(audit.interval)))
                else:
                    audit.next_run_time = self._next_cron_time(audit)
                audit.save()

    def _add_job(self, trigger, audit, audit_context, **trigger_args):
        """Register an execute_audit job with the audit scheduler."""
        time_var = 'next_run_time' if trigger_args.get(
            'next_run_time') else 'run_date'
        # We should convert UTC time to local time without tzinfo
        trigger_args[time_var] = trigger_args[time_var].replace(
            tzinfo=tz.tzutc()).astimezone(tz.tzlocal()).replace(tzinfo=None)
        self.scheduler.add_job(self.execute_audit, trigger,
                               args=[audit, audit_context],
                               name='execute_audit',
                               **trigger_args)

    def check_audit_expired(self, audit):
        """True when the audit is outside its [start_time, end_time] window.

        An audit past its end_time is additionally marked SUCCEEDED.
        """
        current = datetime.datetime.utcnow()
        # Note: if audit still didn't get into the timeframe,
        # skip it
        if audit.start_time and audit.start_time > current:
            return True
        if audit.end_time and audit.end_time < current:
            if audit.state != objects.audit.State.SUCCEEDED:
                audit.state = objects.audit.State.SUCCEEDED
                audit.save()
            return True
        return False

    def launch_audits_periodically(self):
        """Periodic task: claim new audits and (re)schedule their jobs."""
        # if audit scheduler stop, restart it
        if not self.scheduler.running:
            self.scheduler.start()
        audit_context = context.RequestContext(is_admin=True)
        audit_filters = {
            'audit_type': objects.audit.AuditType.CONTINUOUS.value,
            'state__in': (objects.audit.State.PENDING,
                          objects.audit.State.ONGOING),
        }
        audit_filters['hostname'] = None
        unscheduled_audits = objects.Audit.list(
            audit_context, filters=audit_filters, eager=True)
        for audit in unscheduled_audits:
            # If continuous audit doesn't have a hostname yet,
            # Watcher will set current CONF.host value.
            # TODO(alexchadin): Add scheduling of new continuous audits.
            audit.hostname = CONF.host
            audit.save()
        scheduler_job_args = [
            (job.args[0].uuid, job) for job in self.scheduler.get_jobs()
            if job.name == 'execute_audit']
        scheduler_jobs = dict(scheduler_job_args)
        # if audit isn't in active states, audit's job should be removed
        jobs_to_remove = []
        for job in scheduler_jobs.values():
            if self._is_audit_inactive(job.args[0]):
                jobs_to_remove.append(job.args[0].uuid)
        for audit_uuid in jobs_to_remove:
            scheduler_jobs.pop(audit_uuid)
        audit_filters['hostname'] = CONF.host
        audits = objects.Audit.list(
            audit_context, filters=audit_filters, eager=True)
        for audit in audits:
            if self.check_audit_expired(audit):
                continue
            existing_job = scheduler_jobs.get(audit.uuid, None)
            # if audit is not presented in scheduled audits yet,
            # just add a new audit job.
            # if audit is already in the job queue, and interval has changed,
            # we need to remove the old job and add a new one.
            if (existing_job is None) or (
                existing_job and
                    audit.interval != existing_job.args[0].interval):
                if existing_job:
                    self.scheduler.remove_job(existing_job.id)
                # if interval is provided with seconds
                if utils.is_int_like(audit.interval):
                    # if audit has already been provided and we need
                    # to restore it after shutdown
                    if audit.next_run_time is not None:
                        old_run_time = audit.next_run_time
                        current = datetime.datetime.utcnow()
                        if old_run_time < current:
                            # align the restored run time onto the next
                            # interval boundary after 'current'
                            delta = datetime.timedelta(
                                seconds=(int(audit.interval) - (
                                    current - old_run_time).seconds %
                                    int(audit.interval)))
                            audit.next_run_time = current + delta
                        next_run_time = audit.next_run_time
                    # if audit is new one
                    else:
                        next_run_time = datetime.datetime.utcnow()
                    self._add_job('interval', audit, audit_context,
                                  seconds=int(audit.interval),
                                  next_run_time=next_run_time)
                else:
                    audit.next_run_time = self._next_cron_time(audit)
                    self._add_job('date', audit, audit_context,
                                  run_date=audit.next_run_time)
                audit.hostname = CONF.host
                audit.save()

    def start(self):
        """Start both schedulers; audits are polled on a fixed interval."""
        self.period_scheduler.add_job(
            self.launch_audits_periodically, 'interval',
            seconds=CONF.watcher_decision_engine.continuous_audit_interval,
            next_run_time=datetime.datetime.now())
        self.period_scheduler.start()
        # audit scheduler start
        self.scheduler.start()
# See the License for the specific language governing permissions and # limitations under the License. # import abc import six from oslo_config import cfg from oslo_log import log from watcher.applier import rpcapi from watcher.common import exception from watcher.common import service from watcher.decision_engine.loading import default as loader from watcher.decision_engine.strategy.context import default as default_context from watcher import notifications from watcher import objects from watcher.objects import fields CONF = cfg.CONF LOG = log.getLogger(__name__) @six.add_metaclass(abc.ABCMeta) @six.add_metaclass(service.Singleton) class BaseAuditHandler(object): @abc.abstractmethod def execute(self, audit, request_context): raise NotImplementedError() @abc.abstractmethod def pre_execute(self, audit, request_context): raise NotImplementedError() @abc.abstractmethod def do_execute(self, audit, request_context): raise NotImplementedError() @abc.abstractmethod def post_execute(self, audit, solution, request_context): raise NotImplementedError() @six.add_metaclass(abc.ABCMeta) class AuditHandler(BaseAuditHandler): def __init__(self): super(AuditHandler, self).__init__() self._strategy_context = default_context.DefaultStrategyContext() self._planner_loader = loader.DefaultPlannerLoader() self.applier_client = rpcapi.ApplierAPI() def get_planner(self, solution): # because AuditHandler is a singletone we need to avoid race condition. 
# thus we need to load planner every time planner_name = solution.strategy.planner LOG.debug("Loading %s", planner_name) planner = self._planner_loader.load(name=planner_name) return planner @property def strategy_context(self): return self._strategy_context def do_execute(self, audit, request_context): # execute the strategy solution = self.strategy_context.execute_strategy( audit, request_context) return solution def do_schedule(self, request_context, audit, solution): try: notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.PLANNER, phase=fields.NotificationPhase.START) planner = self.get_planner(solution) action_plan = planner.schedule(request_context, audit.id, solution) notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.PLANNER, phase=fields.NotificationPhase.END) return action_plan except Exception: notifications.audit.send_action_notification( request_context, audit, action=fields.NotificationAction.PLANNER, priority=fields.NotificationPriority.ERROR, phase=fields.NotificationPhase.ERROR) raise @staticmethod def update_audit_state(audit, state): if audit.state != state: LOG.debug("Update audit state: %s", state) audit.state = state audit.save() @staticmethod def check_ongoing_action_plans(request_context): a_plan_filters = {'state': objects.action_plan.State.ONGOING} ongoing_action_plans = objects.ActionPlan.list( request_context, filters=a_plan_filters) if ongoing_action_plans: raise exception.ActionPlanIsOngoing( action_plan=ongoing_action_plans[0].uuid) def pre_execute(self, audit, request_context): LOG.debug("Trigger audit %s", audit.uuid) # If audit.force is true, audit will be executed # despite of ongoing actionplan if not audit.force: self.check_ongoing_action_plans(request_context) # Write hostname that will execute this audit. 
audit.hostname = CONF.host # change state of the audit to ONGOING self.update_audit_state(audit, objects.audit.State.ONGOING) def post_execute(self, audit, solution, request_context): action_plan = self.do_schedule(request_context, audit, solution) if audit.auto_trigger: self.applier_client.launch_action_plan(request_context, action_plan.uuid) def execute(self, audit, request_context): try: self.pre_execute(audit, request_context) solution = self.do_execute(audit, request_context) self.post_execute(audit, solution, request_context) except exception.ActionPlanIsOngoing as e: LOG.warning(e) if audit.audit_type == objects.audit.AuditType.ONESHOT.value: self.update_audit_state(audit, objects.audit.State.CANCELLED) except Exception as e: LOG.exception(e) self.update_audit_state(audit, objects.audit.State.FAILED) python-watcher-4.0.0/watcher/decision_engine/audit/oneshot.py0000664000175000017500000000202613656752270024444 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.decision_engine.audit import base from watcher import objects class OneShotAuditHandler(base.AuditHandler): def post_execute(self, audit, solution, request_context): super(OneShotAuditHandler, self).post_execute(audit, solution, request_context) # change state of the audit to SUCCEEDED self.update_audit_state(audit, objects.audit.State.SUCCEEDED) python-watcher-4.0.0/watcher/decision_engine/threading.py0000664000175000017500000000736713656752270023641 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import copy import futurist from futurist import waiters import six from oslo_config import cfg from oslo_log import log from oslo_service import service CONF = cfg.CONF LOG = log.getLogger(__name__) @six.add_metaclass(service.Singleton) class DecisionEngineThreadPool(object): """Singleton threadpool to submit general tasks to""" def __init__(self): self.amount_workers = CONF.watcher_decision_engine.max_general_workers self._threadpool = futurist.GreenThreadPoolExecutor( max_workers=self.amount_workers) def submit(self, fn, *args, **kwargs): """Will submit the job to the underlying threadpool :param fn: function to execute in another thread :param args: arguments for the function :param kwargs: amount of arguments for the function :return: future to monitor progress of execution :rtype: :py:class"`futurist.GreenFuture` """ return self._threadpool.submit(fn, *args, **kwargs) @staticmethod def do_while_futures(futures, fn, *args, **kwargs): """Do while to execute a function upon completion from a collection Will execute the specified function with its arguments when one of the futures from the passed collection finishes. Additionally, the future is passed as first argument to the function. Does not modify the passed collection of futures. :param futures: list, set or dictionary of futures :type futures: list :py:class:`futurist.GreenFuture` :param fn: function to execute upon the future finishing exection :param args: arguments for the function :param kwargs: amount of arguments for the function """ # shallow copy the collection to not modify it outside of this method. # shallow copy must be used because the type of collection needs to be # determined at runtime (can be both list, set and dict). 
futures = copy.copy(futures) DecisionEngineThreadPool.do_while_futures_modify( futures, fn, *args, **kwargs) @staticmethod def do_while_futures_modify(futures, fn, *args, **kwargs): """Do while to execute a function upon completion from a collection Will execute the specified function with its arguments when one of the futures from the passed collection finishes. Additionally, the future is passed as first argument to the function. Modifies the collection by removing completed elements, :param futures: list, set or dictionary of futures :type futures: list :py:class:`futurist.GreenFuture` :param fn: function to execute upon the future finishing exection :param args: arguments for the function :param kwargs: amount of arguments for the function """ waits = waiters.wait_for_any(futures) while len(waits[0]) > 0 or len(waits[1]) > 0: for future in waiters.wait_for_any(futures)[0]: fn(future, *args, **kwargs) futures.remove(future) waits = waiters.wait_for_any(futures) python-watcher-4.0.0/watcher/decision_engine/rpcapi.py0000664000175000017500000000435613656752270023145 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from watcher.common import exception from watcher.common import service from watcher.common import service_manager from watcher.common import utils from watcher import conf CONF = conf.CONF class DecisionEngineAPI(service.Service): def __init__(self): super(DecisionEngineAPI, self).__init__(DecisionEngineAPIManager) def trigger_audit(self, context, audit_uuid=None): if not utils.is_uuid_like(audit_uuid): raise exception.InvalidUuidOrName(name=audit_uuid) self.conductor_client.cast( context, 'trigger_audit', audit_uuid=audit_uuid) def get_strategy_info(self, context, strategy_name): return self.conductor_client.call( context, 'get_strategy_info', strategy_name=strategy_name) def get_data_model_info(self, context, data_model_type, audit): return self.conductor_client.call( context, 'get_data_model_info', data_model_type=data_model_type, audit=audit) class DecisionEngineAPIManager(service_manager.ServiceManager): @property def service_name(self): return None @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_decision_engine.publisher_id @property def conductor_topic(self): return CONF.watcher_decision_engine.conductor_topic @property def notification_topics(self): return [] @property def conductor_endpoints(self): return [] @property def notification_endpoints(self): return [] python-watcher-4.0.0/watcher/decision_engine/loading/0000775000175000017500000000000013656752352022723 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/loading/__init__.py0000664000175000017500000000000013656752270025021 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/decision_engine/loading/default.py0000664000175000017500000000366213656752270024727 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # Vincent FRANCOISE # Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in 
compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import unicode_literals from watcher.common.loader import default class DefaultStrategyLoader(default.DefaultLoader): def __init__(self): super(DefaultStrategyLoader, self).__init__( namespace='watcher_strategies') class DefaultGoalLoader(default.DefaultLoader): def __init__(self): super(DefaultGoalLoader, self).__init__( namespace='watcher_goals') class DefaultPlannerLoader(default.DefaultLoader): def __init__(self): super(DefaultPlannerLoader, self).__init__( namespace='watcher_planners') class ClusterDataModelCollectorLoader(default.DefaultLoader): def __init__(self): super(ClusterDataModelCollectorLoader, self).__init__( namespace='watcher_cluster_data_model_collectors') class DefaultScoringLoader(default.DefaultLoader): def __init__(self): super(DefaultScoringLoader, self).__init__( namespace='watcher_scoring_engines') class DefaultScoringContainerLoader(default.DefaultLoader): def __init__(self): super(DefaultScoringContainerLoader, self).__init__( namespace='watcher_scoring_engine_containers') python-watcher-4.0.0/watcher/applier/0000775000175000017500000000000013656752352017620 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/workflow_engine/0000775000175000017500000000000013656752352023017 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/workflow_engine/__init__.py0000664000175000017500000000000013656752270025115 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/workflow_engine/default.py0000664000175000017500000001642113656752270025020 0ustar 
zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_concurrency import processutils from oslo_config import cfg from oslo_log import log from taskflow import engines from taskflow import exceptions as tf_exception from taskflow.patterns import graph_flow as gf from taskflow import task as flow_task from watcher.applier.workflow_engine import base from watcher.common import exception from watcher import objects LOG = log.getLogger(__name__) class DefaultWorkFlowEngine(base.BaseWorkFlowEngine): """Taskflow as a workflow engine for Watcher Full documentation on taskflow at https://docs.openstack.org/taskflow/latest """ def decider(self, history): # decider – A callback function that will be expected to # decide at runtime whether v should be allowed to execute # (or whether the execution of v should be ignored, # and therefore not executed). It is expected to take as single # keyword argument history which will be the execution results of # all u decidable links that have v as a target. It is expected # to return a single boolean # (True to allow v execution or False to not). 
LOG.info("decider history: %s", history) if history and self.execution_rule == 'ANY': return not list(history.values())[0] else: return True @classmethod def get_config_opts(cls): return [ cfg.IntOpt( 'max_workers', default=processutils.get_worker_count(), min=1, required=True, help='Number of workers for taskflow engine ' 'to execute actions.'), cfg.DictOpt( 'action_execution_rule', default={}, help='The execution rule for linked actions,' 'the key is strategy name and ' 'value ALWAYS means all actions will be executed,' 'value ANY means if previous action executes ' 'success, the next action will be ignored.' 'None means ALWAYS.') ] def get_execution_rule(self, actions): if actions: actionplan_object = objects.ActionPlan.get_by_id( self.context, actions[0].action_plan_id) strategy_object = objects.Strategy.get_by_id( self.context, actionplan_object.strategy_id) return self.config.action_execution_rule.get( strategy_object.name) def execute(self, actions): try: # NOTE(jed) We want to have a strong separation of concern # between the Watcher planner and the Watcher Applier in order # to us the possibility to support several workflow engine. # We want to provide the 'taskflow' engine by # default although we still want to leave the possibility for # the users to change it. # The current implementation uses graph with linked actions. 
# todo(jed) add oslo conf for retry and name self.execution_rule = self.get_execution_rule(actions) flow = gf.Flow("watcher_flow") actions_uuid = {} for a in actions: task = TaskFlowActionContainer(a, self) flow.add(task) actions_uuid[a.uuid] = task for a in actions: for parent_id in a.parents: flow.link(actions_uuid[parent_id], actions_uuid[a.uuid], decider=self.decider) e = engines.load( flow, executor='greenthreaded', engine='parallel', max_workers=self.config.max_workers) e.run() return flow except exception.ActionPlanCancelled: raise except tf_exception.WrappedFailure as e: if e.check("watcher.common.exception.ActionPlanCancelled"): raise exception.ActionPlanCancelled else: raise exception.WorkflowExecutionException(error=e) except Exception as e: raise exception.WorkflowExecutionException(error=e) class TaskFlowActionContainer(base.BaseTaskFlowActionContainer): def __init__(self, db_action, engine): name = "action_type:{0} uuid:{1}".format(db_action.action_type, db_action.uuid) super(TaskFlowActionContainer, self).__init__(name, db_action, engine) def do_pre_execute(self): db_action = self.engine.notify(self._db_action, objects.action.State.ONGOING) LOG.debug("Pre-condition action: %s", self.name) self.action.pre_condition() return db_action def do_execute(self, *args, **kwargs): LOG.debug("Running action: %s", self.name) # NOTE:Some actions(such as migrate) will return None when exception # Only when True is returned, the action state is set to SUCCEEDED result = self.action.execute() if result is True: return self.engine.notify(self._db_action, objects.action.State.SUCCEEDED) else: self.engine.notify(self._db_action, objects.action.State.FAILED) raise exception.ActionExecutionFailure( action_id=self._db_action.uuid) def do_post_execute(self): LOG.debug("Post-condition action: %s", self.name) self.action.post_condition() def do_revert(self, *args, **kwargs): LOG.warning("Revert action: %s", self.name) try: # TODO(jed): do we need to update the states in case 
of failure? self.action.revert() except Exception as e: LOG.exception(e) LOG.critical("Oops! We need a disaster recover plan.") def do_abort(self, *args, **kwargs): LOG.warning("Aborting action: %s", self.name) try: result = self.action.abort() if result: # Aborted the action. return self.engine.notify(self._db_action, objects.action.State.CANCELLED) else: return self.engine.notify(self._db_action, objects.action.State.SUCCEEDED) except Exception as e: LOG.exception(e) return self.engine.notify(self._db_action, objects.action.State.FAILED) class TaskFlowNop(flow_task.Task): """This class is used in case of the workflow have only one Action. We need at least two atoms to create a link. """ def execute(self): pass python-watcher-4.0.0/watcher/applier/workflow_engine/base.py0000664000175000017500000002755113656752270024314 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc import six import time import eventlet from oslo_log import log from taskflow import task as flow_task from watcher.applier.actions import factory from watcher.common import clients from watcher.common import exception from watcher.common.loader import loadable from watcher import notifications from watcher import objects from watcher.objects import fields LOG = log.getLogger(__name__) CANCEL_STATE = [objects.action_plan.State.CANCELLING, objects.action_plan.State.CANCELLED] @six.add_metaclass(abc.ABCMeta) class BaseWorkFlowEngine(loadable.Loadable): def __init__(self, config, context=None, applier_manager=None): """Constructor :param config: A mapping containing the configuration of this workflow engine :type config: dict :param osc: an OpenStackClients object, defaults to None :type osc: :py:class:`~.OpenStackClients` instance, optional """ super(BaseWorkFlowEngine, self).__init__(config) self._context = context self._applier_manager = applier_manager self._action_factory = factory.ActionFactory() self._osc = None self._is_notified = False self.execution_rule = None @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @property def context(self): return self._context @property def osc(self): if not self._osc: self._osc = clients.OpenStackClients() return self._osc @property def applier_manager(self): return self._applier_manager @property def action_factory(self): return self._action_factory def notify(self, action, state): db_action = objects.Action.get_by_uuid(self.context, action.uuid, eager=True) db_action.state = state db_action.save() return db_action def notify_cancel_start(self, action_plan_uuid): action_plan = objects.ActionPlan.get_by_uuid(self.context, action_plan_uuid, eager=True) if not self._is_notified: self._is_notified = True 
notifications.action_plan.send_cancel_notification( self._context, action_plan, action=fields.NotificationAction.CANCEL, phase=fields.NotificationPhase.START) @abc.abstractmethod def execute(self, actions): raise NotImplementedError() class BaseTaskFlowActionContainer(flow_task.Task): def __init__(self, name, db_action, engine, **kwargs): super(BaseTaskFlowActionContainer, self).__init__(name=name) self._db_action = db_action self._engine = engine self.loaded_action = None @property def engine(self): return self._engine @property def action(self): if self.loaded_action is None: action = self.engine.action_factory.make_action( self._db_action, osc=self._engine.osc) self.loaded_action = action return self.loaded_action @abc.abstractmethod def do_pre_execute(self): raise NotImplementedError() @abc.abstractmethod def do_execute(self, *args, **kwargs): raise NotImplementedError() @abc.abstractmethod def do_post_execute(self): raise NotImplementedError() @abc.abstractmethod def do_revert(self): raise NotImplementedError() @abc.abstractmethod def do_abort(self, *args, **kwargs): raise NotImplementedError() # NOTE(alexchadin): taskflow does 3 method calls (pre_execute, execute, # post_execute) independently. We want to support notifications in base # class, so child's methods should be named with `do_` prefix and wrapped. def pre_execute(self): try: # NOTE(adisky): check the state of action plan before starting # next action, if action plan is cancelled raise the exceptions # so that taskflow does not schedule further actions. 
action_plan = objects.ActionPlan.get_by_id( self.engine.context, self._db_action.action_plan_id) if action_plan.state in CANCEL_STATE: raise exception.ActionPlanCancelled(uuid=action_plan.uuid) db_action = self.do_pre_execute() notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.START) except exception.ActionPlanCancelled as e: LOG.exception(e) self.engine.notify_cancel_start(action_plan.uuid) raise except Exception as e: LOG.exception(e) db_action = self.engine.notify(self._db_action, objects.action.State.FAILED) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) def execute(self, *args, **kwargs): def _do_execute_action(*args, **kwargs): try: db_action = self.do_execute(*args, **kwargs) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.END) except Exception as e: LOG.exception(e) LOG.error('The workflow engine has failed ' 'to execute the action: %s', self.name) db_action = self.engine.notify(self._db_action, objects.action.State.FAILED) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) raise # NOTE: spawn a new thread for action execution, so that if action plan # is cancelled workflow engine will not wait to finish action execution et = eventlet.spawn(_do_execute_action, *args, **kwargs) # NOTE: check for the state of action plan periodically,so that if # action is finished or action plan is cancelled we can exit from here. 
result = False while True: action_object = objects.Action.get_by_uuid( self.engine.context, self._db_action.uuid, eager=True) action_plan_object = objects.ActionPlan.get_by_id( self.engine.context, action_object.action_plan_id) if action_object.state == objects.action.State.SUCCEEDED: result = True if (action_object.state in [objects.action.State.SUCCEEDED, objects.action.State.FAILED] or action_plan_object.state in CANCEL_STATE): break time.sleep(1) try: # NOTE: kill the action execution thread, if action plan is # cancelled for all other cases wait for the result from action # execution thread. # Not all actions support abort operations, kill only those action # which support abort operations abort = self.action.check_abort() if (action_plan_object.state in CANCEL_STATE and abort): et.kill() et.wait() return result # NOTE: catch the greenlet exit exception due to thread kill, # taskflow will call revert for the action, # we will redirect it to abort. except eventlet.greenlet.GreenletExit: self.engine.notify_cancel_start(action_plan_object.uuid) raise exception.ActionPlanCancelled(uuid=action_plan_object.uuid) except Exception as e: LOG.exception(e) # return False instead of raising an exception return False def post_execute(self): try: self.do_post_execute() except Exception as e: LOG.exception(e) db_action = self.engine.notify(self._db_action, objects.action.State.FAILED) notifications.action.send_execution_notification( self.engine.context, db_action, fields.NotificationAction.EXECUTION, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) def revert(self, *args, **kwargs): action_plan = objects.ActionPlan.get_by_id( self.engine.context, self._db_action.action_plan_id, eager=True) # NOTE: check if revert cause by cancel action plan or # some other exception occurred during action plan execution # if due to some other exception keep the flow intact. 
if action_plan.state not in CANCEL_STATE: self.do_revert() return action_object = objects.Action.get_by_uuid( self.engine.context, self._db_action.uuid, eager=True) try: if action_object.state == objects.action.State.ONGOING: action_object.state = objects.action.State.CANCELLING action_object.save() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.START) action_object = self.abort() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.END) if action_object.state == objects.action.State.PENDING: notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.START) action_object.state = objects.action.State.CANCELLED action_object.save() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.END) except Exception as e: LOG.exception(e) action_object.state = objects.action.State.FAILED action_object.save() notifications.action.send_cancel_notification( self.engine.context, action_object, fields.NotificationAction.CANCEL, fields.NotificationPhase.ERROR, priority=fields.NotificationPriority.ERROR) def abort(self, *args, **kwargs): return self.do_abort(*args, **kwargs) python-watcher-4.0.0/watcher/applier/__init__.py0000664000175000017500000000000013656752270021716 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/actions/0000775000175000017500000000000013656752352021260 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/actions/__init__.py0000664000175000017500000000000013656752270023356 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/actions/resize.py0000664000175000017500000000650113656752270023134 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 
Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher.applier.actions import base from watcher.common import nova_helper LOG = log.getLogger(__name__) class Resize(base.BaseAction): """Resizes a server with specified flavor. This action will allow you to resize a server to another flavor. The action schema is:: schema = Schema({ 'resource_id': str, # should be a UUID 'flavor': str, # should be either ID or Name of Flavor }) The `resource_id` is the UUID of the server to resize. The `flavor` is the ID or Name of Flavor (Nova accepts either ID or Name of Flavor to resize() function). 
""" # input parameters constants FLAVOR = 'flavor' @property def schema(self): return { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', 'minlength': 1, 'pattern': ('^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-' '([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-' '([a-fA-F0-9]){12}$') }, 'flavor': { 'type': 'string', 'minlength': 1, }, }, 'required': ['resource_id', 'flavor'], 'additionalProperties': False, } @property def instance_uuid(self): return self.resource_id @property def flavor(self): return self.input_parameters.get(self.FLAVOR) def resize(self): nova = nova_helper.NovaHelper(osc=self.osc) LOG.debug("Resize instance %s to %s flavor", self.instance_uuid, self.flavor) instance = nova.find_instance(self.instance_uuid) result = None if instance: try: result = nova.resize_instance( instance_id=self.instance_uuid, flavor=self.flavor) except Exception as exc: LOG.exception(exc) LOG.critical( "Unexpected error occurred. Resizing failed for " "instance %s.", self.instance_uuid) return result def execute(self): return self.resize() def revert(self): return self.migrate(destination=self.source_node) def pre_condition(self): # TODO(jed): check if the instance exists / check if the instance is on # the source_node pass def post_condition(self): # TODO(jed): check extra parameters (network response, etc.) pass def get_description(self): """Description of the action""" return "Resize a server with specified flavor." python-watcher-4.0.0/watcher/applier/actions/nop.py0000664000175000017500000000352513656752270022432 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log from watcher.applier.actions import base LOG = log.getLogger(__name__) class Nop(base.BaseAction): """logs a message The action schema is:: schema = Schema({ 'message': str, }) The `message` is the actual message that will be logged. """ MESSAGE = 'message' @property def schema(self): return { 'type': 'object', 'properties': { 'message': { 'type': ['string', 'null'] } }, 'required': ['message'], 'additionalProperties': False, } @property def message(self): return self.input_parameters.get(self.MESSAGE) def execute(self): LOG.debug("Executing action NOP message: %s ", self.message) return True def revert(self): LOG.debug("Revert action NOP") return True def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return "Logging a NOP message" def abort(self): LOG.debug("Abort action NOP") return True python-watcher-4.0.0/watcher/applier/actions/change_nova_service_state.py0000664000175000017500000001060413656752270027022 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. # from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import exception from watcher.common import nova_helper from watcher.decision_engine.model import element class ChangeNovaServiceState(base.BaseAction): """Disables or enables the nova-compute service, deployed on a host By using this action, you will be able to update the state of a nova-compute service. A disabled nova-compute service can not be selected by the nova scheduler for future deployment of server. The action schema is:: schema = Schema({ 'resource_id': str, 'state': str, 'disabled_reason': str, }) The `resource_id` references a nova-compute service name (list of available nova-compute services is returned by this command: ``nova service-list --binary nova-compute``). The `state` value should either be `ONLINE` or `OFFLINE`. The `disabled_reason` references the reason why Watcher disables this nova-compute service. The value should be with `watcher_` prefix, such as `watcher_disabled`, `watcher_maintaining`. 
""" STATE = 'state' REASON = 'disabled_reason' RESOURCE_NAME = 'resource_name' @property def schema(self): return { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', "minlength": 1 }, 'resource_name': { 'type': 'string', "minlength": 1 }, 'state': { 'type': 'string', 'enum': [element.ServiceState.ONLINE.value, element.ServiceState.OFFLINE.value, element.ServiceState.ENABLED.value, element.ServiceState.DISABLED.value] }, 'disabled_reason': { 'type': 'string', "minlength": 1 } }, 'required': ['resource_id', 'state'], 'additionalProperties': False, } @property def host(self): return self.input_parameters.get(self.RESOURCE_NAME) @property def state(self): return self.input_parameters.get(self.STATE) @property def reason(self): return self.input_parameters.get(self.REASON) def execute(self): target_state = None if self.state == element.ServiceState.DISABLED.value: target_state = False elif self.state == element.ServiceState.ENABLED.value: target_state = True return self._nova_manage_service(target_state) def revert(self): target_state = None if self.state == element.ServiceState.DISABLED.value: target_state = True elif self.state == element.ServiceState.ENABLED.value: target_state = False return self._nova_manage_service(target_state) def _nova_manage_service(self, state): if state is None: raise exception.IllegalArgumentException( message=_("The target state is not defined")) nova = nova_helper.NovaHelper(osc=self.osc) if state is True: return nova.enable_service_nova_compute(self.host) else: return nova.disable_service_nova_compute(self.host, self.reason) def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return ("Disables or enables the nova-compute service." 
"A disabled nova-compute service can not be selected " "by the nova for future deployment of new server.") python-watcher-4.0.0/watcher/applier/actions/sleep.py0000664000175000017500000000375013656752270022746 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import time from oslo_log import log from watcher.applier.actions import base LOG = log.getLogger(__name__) class Sleep(base.BaseAction): """Makes the executor of the action plan wait for a given duration The action schema is:: schema = Schema({ 'duration': float, }) The `duration` is expressed in seconds. """ DURATION = 'duration' @property def schema(self): return { 'type': 'object', 'properties': { 'duration': { 'type': 'number', 'minimum': 0 }, }, 'required': ['duration'], 'additionalProperties': False, } @property def duration(self): return int(self.input_parameters.get(self.DURATION)) def execute(self): LOG.debug("Starting action sleep with duration: %s ", self.duration) time.sleep(self.duration) return True def revert(self): LOG.debug("Revert action sleep") return True def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return "Wait for a given interval in seconds." 
def abort(self): LOG.debug("Abort action sleep") return True python-watcher-4.0.0/watcher/applier/actions/factory.py0000664000175000017500000000311113656752270023274 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from __future__ import unicode_literals from oslo_log import log from watcher.applier.loading import default LOG = log.getLogger(__name__) class ActionFactory(object): def __init__(self): self.action_loader = default.DefaultActionLoader() def make_action(self, object_action, osc=None): LOG.debug("Creating instance of %s", object_action.action_type) loaded_action = self.action_loader.load(name=object_action.action_type, osc=osc) loaded_action.input_parameters = object_action.input_parameters LOG.debug("Checking the input parameters") # NOTE(jed) if we change the schema of an action and we try to reload # an older version of the Action, the validation can fail. # We need to add the versioning of an Action or a migration tool. # We can also create an new Action which extends the previous one. 
loaded_action.validate_parameters() return loaded_action python-watcher-4.0.0/watcher/applier/actions/change_node_power_state.py0000664000175000017500000000771713656752270026513 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Authors: Li Canwei # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import enum import time from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import exception class NodeState(enum.Enum): POWERON = 'on' POWEROFF = 'off' class ChangeNodePowerState(base.BaseAction): """Compute node power on/off By using this action, you will be able to on/off the power of a compute node. The action schema is:: schema = Schema({ 'resource_id': str, 'state': str, }) The `resource_id` references a ironic node id (list of available ironic node is returned by this command: ``ironic node-list``). The `state` value should either be `on` or `off`. 
""" STATE = 'state' @property def schema(self): return { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', "minlength": 1 }, 'state': { 'type': 'string', 'enum': [NodeState.POWERON.value, NodeState.POWEROFF.value] } }, 'required': ['resource_id', 'state'], 'additionalProperties': False, } @property def node_uuid(self): return self.resource_id @property def state(self): return self.input_parameters.get(self.STATE) def execute(self): target_state = self.state return self._node_manage_power(target_state) def revert(self): if self.state == NodeState.POWERON.value: target_state = NodeState.POWEROFF.value elif self.state == NodeState.POWEROFF.value: target_state = NodeState.POWERON.value return self._node_manage_power(target_state) def _node_manage_power(self, state, retry=60): if state is None: raise exception.IllegalArgumentException( message=_("The target state is not defined")) ironic_client = self.osc.ironic() nova_client = self.osc.nova() current_state = ironic_client.node.get(self.node_uuid).power_state # power state: 'power on' or 'power off', if current node state # is the same as state, just return True if state in current_state: return True if state == NodeState.POWEROFF.value: node_info = ironic_client.node.get(self.node_uuid).to_dict() compute_node_id = node_info['extra']['compute_node_id'] compute_node = nova_client.hypervisors.get(compute_node_id) compute_node = compute_node.to_dict() if (compute_node['running_vms'] == 0): ironic_client.node.set_power_state( self.node_uuid, state) else: ironic_client.node.set_power_state(self.node_uuid, state) ironic_node = ironic_client.node.get(self.node_uuid) while ironic_node.power_state == current_state and retry: time.sleep(10) retry -= 1 ironic_node = ironic_client.node.get(self.node_uuid) if retry > 0: return True else: return False def pre_condition(self): pass def post_condition(self): pass def get_description(self): """Description of the action""" return ("Compute node power on/off through 
ironic.") python-watcher-4.0.0/watcher/applier/actions/volume_migration.py0000664000175000017500000002051313656752270025212 0ustar zuulzuul00000000000000# Copyright 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import jsonschema from oslo_log import log from cinderclient import client as cinder_client from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import cinder_helper from watcher.common import exception from watcher.common import keystone_helper from watcher.common import nova_helper from watcher.common import utils from watcher import conf CONF = conf.CONF LOG = log.getLogger(__name__) class VolumeMigrate(base.BaseAction): """Migrates a volume to destination node or type By using this action, you will be able to migrate cinder volume. Migration type 'swap' can only be used for migrating attached volume. Migration type 'migrate' can be used for migrating detached volume to the pool of same volume type. Migration type 'retype' can be used for changing volume type of detached volume. The action schema is:: schema = Schema({ 'resource_id': str, # should be a UUID 'migration_type': str, # choices -> "swap", "migrate","retype" 'destination_node': str, 'destination_type': str, }) The `resource_id` is the UUID of cinder volume to migrate. The `destination_node` is the destination block storage pool name. 
(list of available pools are returned by this command: ``cinder get-pools``) which is mandatory for migrating detached volume to the one with same volume type. The `destination_type` is the destination block storage type name. (list of available types are returned by this command: ``cinder type-list``) which is mandatory for migrating detached volume or swapping attached volume to the one with different volume type. """ MIGRATION_TYPE = 'migration_type' SWAP = 'swap' RETYPE = 'retype' MIGRATE = 'migrate' DESTINATION_NODE = "destination_node" DESTINATION_TYPE = "destination_type" def __init__(self, config, osc=None): super(VolumeMigrate, self).__init__(config) self.temp_username = utils.random_string(10) self.temp_password = utils.random_string(10) self.cinder_util = cinder_helper.CinderHelper(osc=self.osc) self.nova_util = nova_helper.NovaHelper(osc=self.osc) @property def schema(self): return { 'type': 'object', 'properties': { 'resource_id': { 'type': 'string', "minlength": 1, "pattern": ("^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-" "([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-" "([a-fA-F0-9]){12}$") }, 'resource_name': { 'type': 'string', "minlength": 1 }, 'migration_type': { 'type': 'string', "enum": ["swap", "retype", "migrate"] }, 'destination_node': { "anyof": [ {'type': 'string', "minLength": 1}, {'type': 'None'} ] }, 'destination_type': { "anyof": [ {'type': 'string', "minLength": 1}, {'type': 'None'} ] } }, 'required': ['resource_id', 'migration_type'], 'additionalProperties': False, } def validate_parameters(self): jsonschema.validate(self.input_parameters, self.schema) return True @property def volume_id(self): return self.input_parameters.get(self.RESOURCE_ID) @property def migration_type(self): return self.input_parameters.get(self.MIGRATION_TYPE) @property def destination_node(self): return self.input_parameters.get(self.DESTINATION_NODE) @property def destination_type(self): return self.input_parameters.get(self.DESTINATION_TYPE) def _can_swap(self, volume): 
"""Judge volume can be swapped""" if not volume.attachments: return False instance_id = volume.attachments[0]['server_id'] instance_status = self.nova_util.find_instance(instance_id).status if (volume.status == 'in-use' and instance_status in ('ACTIVE', 'PAUSED', 'RESIZED')): return True return False def _create_user(self, volume, user): """Create user with volume attribute and user information""" keystone_util = keystone_helper.KeystoneHelper(osc=self.osc) project_id = getattr(volume, 'os-vol-tenant-attr:tenant_id') user['project'] = project_id user['domain'] = keystone_util.get_project(project_id).domain_id user['roles'] = ['admin'] return keystone_util.create_user(user) def _get_cinder_client(self, session): """Get cinder client by session""" return cinder_client.Client( CONF.cinder_client.api_version, session=session, endpoint_type=CONF.cinder_client.endpoint_type) def _swap_volume(self, volume, dest_type): """Swap volume to dest_type Limitation note: only for compute libvirt driver """ if not dest_type: raise exception.Invalid( message=(_("destination type is required when " "migration type is swap"))) if not self._can_swap(volume): raise exception.Invalid( message=(_("Invalid state for swapping volume"))) user_info = { 'name': self.temp_username, 'password': self.temp_password} user = self._create_user(volume, user_info) keystone_util = keystone_helper.KeystoneHelper(osc=self.osc) try: session = keystone_util.create_session( user.id, self.temp_password) temp_cinder = self._get_cinder_client(session) # swap volume new_volume = self.cinder_util.create_volume( temp_cinder, volume, dest_type) self.nova_util.swap_volume(volume, new_volume) # delete old volume self.cinder_util.delete_volume(volume) finally: keystone_util.delete_user(user) return True def _migrate(self, volume_id, dest_node, dest_type): try: volume = self.cinder_util.get_volume(volume_id) if self.migration_type == self.SWAP: if dest_node: LOG.warning("dest_node is ignored") return 
self._swap_volume(volume, dest_type) elif self.migration_type == self.RETYPE: return self.cinder_util.retype(volume, dest_type) elif self.migration_type == self.MIGRATE: return self.cinder_util.migrate(volume, dest_node) else: raise exception.Invalid( message=(_("Migration of type '%(migration_type)s' is not " "supported.") % {'migration_type': self.migration_type})) except exception.Invalid as ei: LOG.exception(ei) return False except Exception as e: LOG.critical("Unexpected exception occurred.") LOG.exception(e) return False def execute(self): return self._migrate(self.volume_id, self.destination_node, self.destination_type) def revert(self): LOG.warning("revert not supported") def abort(self): pass def pre_condition(self): pass def post_condition(self): pass def get_description(self): return "Moving a volume to destination_node or destination_type" python-watcher-4.0.0/watcher/applier/actions/migration.py0000664000175000017500000001745513656752270023636 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from oslo_log import log from watcher._i18n import _ from watcher.applier.actions import base from watcher.common import exception from watcher.common import nova_helper LOG = log.getLogger(__name__) class Migrate(base.BaseAction): """Migrates a server to a destination nova-compute host This action will allow you to migrate a server to another compute destination host. Migration type 'live' can only be used for migrating active VMs. Migration type 'cold' can be used for migrating non-active VMs as well active VMs, which will be shut down while migrating. The action schema is:: schema = Schema({ 'resource_id': str, # should be a UUID 'migration_type': str, # choices -> "live", "cold" 'destination_node': str, 'source_node': str, }) The `resource_id` is the UUID of the server to migrate. The `source_node` and `destination_node` parameters are respectively the source and the destination compute hostname (list of available compute hosts is returned by this command: ``nova service-list --binary nova-compute``). .. note:: Nova API version must be 2.56 or above if `destination_node` parameter is given. 
""" # input parameters constants MIGRATION_TYPE = 'migration_type' LIVE_MIGRATION = 'live' COLD_MIGRATION = 'cold' DESTINATION_NODE = 'destination_node' SOURCE_NODE = 'source_node' @property def schema(self): return { 'type': 'object', 'properties': { 'destination_node': { "anyof": [ {'type': 'string', "minLength": 1}, {'type': 'None'} ] }, 'migration_type': { 'type': 'string', "enum": ["live", "cold"] }, 'resource_id': { 'type': 'string', "minlength": 1, "pattern": ("^([a-fA-F0-9]){8}-([a-fA-F0-9]){4}-" "([a-fA-F0-9]){4}-([a-fA-F0-9]){4}-" "([a-fA-F0-9]){12}$") }, 'resource_name': { 'type': 'string', "minlength": 1 }, 'source_node': { 'type': 'string', "minLength": 1 } }, 'required': ['migration_type', 'resource_id', 'source_node'], 'additionalProperties': False, } @property def instance_uuid(self): return self.resource_id @property def migration_type(self): return self.input_parameters.get(self.MIGRATION_TYPE) @property def destination_node(self): return self.input_parameters.get(self.DESTINATION_NODE) @property def source_node(self): return self.input_parameters.get(self.SOURCE_NODE) def _live_migrate_instance(self, nova, destination): result = None try: result = nova.live_migrate_instance(instance_id=self.instance_uuid, dest_hostname=destination) except nova_helper.nvexceptions.ClientException as e: LOG.debug("Nova client exception occurred while live " "migrating instance " "%(instance)s.Exception: %(exception)s", {'instance': self.instance_uuid, 'exception': e}) except Exception as e: LOG.exception(e) LOG.critical("Unexpected error occurred. Migration failed for " "instance %s. Leaving instance on previous " "host.", self.instance_uuid) return result def _cold_migrate_instance(self, nova, destination): result = None try: result = nova.watcher_non_live_migrate_instance( instance_id=self.instance_uuid, dest_hostname=destination) except Exception as exc: LOG.exception(exc) LOG.critical("Unexpected error occurred. Migration failed for " "instance %s. 
Leaving instance on previous " "host.", self.instance_uuid) return result def _abort_cold_migrate(self, nova): # TODO(adisky): currently watcher uses its own version of cold migrate # implement cold migrate using nova dependent on the blueprint # https://blueprints.launchpad.net/nova/+spec/cold-migration-with-target # Abort operation for cold migrate is dependent on blueprint # https://blueprints.launchpad.net/nova/+spec/abort-cold-migration LOG.warning("Abort operation for cold migration is not implemented") def _abort_live_migrate(self, nova, source, destination): return nova.abort_live_migrate(instance_id=self.instance_uuid, source=source, destination=destination) def migrate(self, destination=None): nova = nova_helper.NovaHelper(osc=self.osc) if destination is None: LOG.debug("Migrating instance %s, destination node will be " "determined by nova-scheduler", self.instance_uuid) else: LOG.debug("Migrate instance %s to %s", self.instance_uuid, destination) instance = nova.find_instance(self.instance_uuid) if instance: if self.migration_type == self.LIVE_MIGRATION: return self._live_migrate_instance(nova, destination) elif self.migration_type == self.COLD_MIGRATION: return self._cold_migrate_instance(nova, destination) else: raise exception.Invalid( message=(_("Migration of type '%(migration_type)s' is not " "supported.") % {'migration_type': self.migration_type})) else: raise exception.InstanceNotFound(name=self.instance_uuid) def execute(self): return self.migrate(destination=self.destination_node) def revert(self): LOG.info('Migrate action do not revert!') def abort(self): nova = nova_helper.NovaHelper(osc=self.osc) instance = nova.find_instance(self.instance_uuid) if instance: if self.migration_type == self.COLD_MIGRATION: return self._abort_cold_migrate(nova) elif self.migration_type == self.LIVE_MIGRATION: return self._abort_live_migrate( nova, source=self.source_node, destination=self.destination_node) else: raise 
exception.InstanceNotFound(name=self.instance_uuid) def pre_condition(self): # TODO(jed): check if the instance exists / check if the instance is on # the source_node pass def post_condition(self): # TODO(jed): check extra parameters (network response, etc.) pass def get_description(self): """Description of the action""" return "Moving a VM instance from source_node to destination_node" python-watcher-4.0.0/watcher/applier/actions/base.py0000664000175000017500000001144013656752270022543 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import jsonschema import six from watcher.common import clients from watcher.common.loader import loadable @six.add_metaclass(abc.ABCMeta) class BaseAction(loadable.Loadable): # NOTE(jed): by convention we decided # that the attribute "resource_id" is the unique id of # the resource to which the Action applies to allow us to use it in the # watcher dashboard and will be nested in input_parameters RESOURCE_ID = 'resource_id' # Add action class name to the list, if implementing abort. 
ABORT_TRUE = ['Sleep', 'Nop'] def __init__(self, config, osc=None): """Constructor :param config: A mapping containing the configuration of this action :type config: dict :param osc: an OpenStackClients instance, defaults to None :type osc: :py:class:`~.OpenStackClients` instance, optional """ super(BaseAction, self).__init__(config) self._input_parameters = {} self._osc = osc @property def osc(self): if not self._osc: self._osc = clients.OpenStackClients() return self._osc @property def input_parameters(self): return self._input_parameters @input_parameters.setter def input_parameters(self, p): self._input_parameters = p @property def resource_id(self): return self.input_parameters[self.RESOURCE_ID] @classmethod def get_config_opts(cls): """Defines the configuration options to be associated to this loadable :return: A list of configuration options relative to this Loadable :rtype: list of :class:`oslo_config.cfg.Opt` instances """ return [] @abc.abstractmethod def execute(self): """Executes the main logic of the action This method can be used to perform an action on a given set of input parameters to accomplish some type of operation. This operation may return a boolean value as a result of its execution. If False, this will be considered as an error and will then trigger the reverting of the actions. :returns: A flag indicating whether or not the action succeeded :rtype: bool """ raise NotImplementedError() @abc.abstractmethod def revert(self): """Revert this action This method should rollback the resource to its initial state in the event of a faulty execution. This happens when the action raised an exception during its :py:meth:`~.BaseAction.execute`. """ raise NotImplementedError() @abc.abstractmethod def pre_condition(self): """Hook: called before the execution of an action This method can be used to perform some initializations or to make some more advanced validation on its input parameters. 
So if you wish to block its execution based on this factor, `raise` the related exception. """ raise NotImplementedError() @abc.abstractmethod def post_condition(self): """Hook: called after the execution of an action This function is called regardless of whether an action succeeded or not. So you can use it to perform cleanup operations. """ raise NotImplementedError() @abc.abstractproperty def schema(self): """Defines a Schema that the input parameters shall comply to :returns: A schema declaring the input parameters this action should be provided along with their respective constraints :rtype: :py:class:`jsonschema.Schema` instance """ raise NotImplementedError() def validate_parameters(self): jsonschema.validate(self.input_parameters, self.schema) return True @abc.abstractmethod def get_description(self): """Description of the action""" raise NotImplementedError() def check_abort(self): if self.__class__.__name__ == 'Migrate': if self.migration_type == self.LIVE_MIGRATION: return True else: return False else: return bool(self.__class__.__name__ in self.ABORT_TRUE) python-watcher-4.0.0/watcher/applier/action_plan/0000775000175000017500000000000013656752352022107 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/action_plan/__init__.py0000664000175000017500000000000013656752270024205 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/action_plan/default.py0000664000175000017500000001027613656752270024112 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher.applier.action_plan import base from watcher.applier import default from watcher.common import exception from watcher import notifications from watcher import objects from watcher.objects import fields CONF = cfg.CONF LOG = log.getLogger(__name__) class DefaultActionPlanHandler(base.BaseActionPlanHandler): def __init__(self, context, service, action_plan_uuid): super(DefaultActionPlanHandler, self).__init__() self.ctx = context self.service = service self.action_plan_uuid = action_plan_uuid def execute(self): try: action_plan = objects.ActionPlan.get_by_uuid( self.ctx, self.action_plan_uuid, eager=True) if action_plan.state == objects.action_plan.State.CANCELLED: self._update_action_from_pending_to_cancelled() return action_plan.hostname = CONF.host action_plan.state = objects.action_plan.State.ONGOING action_plan.save() notifications.action_plan.send_action_notification( self.ctx, action_plan, action=fields.NotificationAction.EXECUTION, phase=fields.NotificationPhase.START) applier = default.DefaultApplier(self.ctx, self.service) applier.execute(self.action_plan_uuid) action_plan.state = objects.action_plan.State.SUCCEEDED action_plan.save() notifications.action_plan.send_action_notification( self.ctx, action_plan, action=fields.NotificationAction.EXECUTION, phase=fields.NotificationPhase.END) except exception.ActionPlanCancelled as e: LOG.exception(e) action_plan.state = objects.action_plan.State.CANCELLED self._update_action_from_pending_to_cancelled() action_plan.save() 
notifications.action_plan.send_cancel_notification( self.ctx, action_plan, action=fields.NotificationAction.CANCEL, phase=fields.NotificationPhase.END) except Exception as e: LOG.exception(e) action_plan = objects.ActionPlan.get_by_uuid( self.ctx, self.action_plan_uuid, eager=True) if action_plan.state == objects.action_plan.State.CANCELLING: action_plan.state = objects.action_plan.State.FAILED action_plan.save() notifications.action_plan.send_cancel_notification( self.ctx, action_plan, action=fields.NotificationAction.CANCEL, priority=fields.NotificationPriority.ERROR, phase=fields.NotificationPhase.ERROR) else: action_plan.state = objects.action_plan.State.FAILED action_plan.save() notifications.action_plan.send_action_notification( self.ctx, action_plan, action=fields.NotificationAction.EXECUTION, priority=fields.NotificationPriority.ERROR, phase=fields.NotificationPhase.ERROR) def _update_action_from_pending_to_cancelled(self): filters = {'action_plan_uuid': self.action_plan_uuid, 'state': objects.action.State.PENDING} actions = objects.Action.list(self.ctx, filters=filters, eager=True) if actions: for a in actions: a.state = objects.action.State.CANCELLED a.save() python-watcher-4.0.0/watcher/applier/action_plan/base.py0000664000175000017500000000151013656752270023367 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import abc import six @six.add_metaclass(abc.ABCMeta) class BaseActionPlanHandler(object): @abc.abstractmethod def execute(self): raise NotImplementedError() python-watcher-4.0.0/watcher/applier/messaging/0000775000175000017500000000000013656752352021575 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/messaging/__init__.py0000664000175000017500000000000013656752270023673 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/messaging/trigger.py0000664000175000017500000000330013656752270023605 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# import futurist from oslo_config import cfg from oslo_log import log from watcher.applier.action_plan import default LOG = log.getLogger(__name__) CONF = cfg.CONF class TriggerActionPlan(object): def __init__(self, applier_manager): self.applier_manager = applier_manager workers = CONF.watcher_applier.workers self.executor = futurist.GreenThreadPoolExecutor(max_workers=workers) def do_launch_action_plan(self, context, action_plan_uuid): try: cmd = default.DefaultActionPlanHandler(context, self.applier_manager, action_plan_uuid) cmd.execute() except Exception as e: LOG.exception(e) def launch_action_plan(self, context, action_plan_uuid): LOG.debug("Trigger ActionPlan %s", action_plan_uuid) # submit self.executor.submit(self.do_launch_action_plan, context, action_plan_uuid) return action_plan_uuid python-watcher-4.0.0/watcher/applier/manager.py0000664000175000017500000000264613656752270021613 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from watcher.applier.messaging import trigger from watcher.common import service_manager from watcher import conf CONF = conf.CONF class ApplierManager(service_manager.ServiceManager): @property def service_name(self): return 'watcher-applier' @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_applier.publisher_id @property def conductor_topic(self): return CONF.watcher_applier.conductor_topic @property def notification_topics(self): return [] @property def conductor_endpoints(self): return [trigger.TriggerActionPlan] @property def notification_endpoints(self): return [] python-watcher-4.0.0/watcher/applier/sync.py0000664000175000017500000000571413656752270021154 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
# from oslo_config import cfg from oslo_log import log from watcher.applier.loading import default from watcher.common import context from watcher.common import exception from watcher import objects CONF = cfg.CONF LOG = log.getLogger(__name__) class Syncer(object): """Syncs all available actions with the Watcher DB""" def sync(self): ctx = context.make_context() action_loader = default.DefaultActionLoader() available_actions = action_loader.list_available() for action_type in available_actions.keys(): load_action = action_loader.load(action_type) load_description = load_action.get_description() try: action_desc = objects.ActionDescription.get_by_type( ctx, action_type) if action_desc.description != load_description: action_desc.description = load_description action_desc.save() except exception.ActionDescriptionNotFound: obj_action_desc = objects.ActionDescription(ctx) obj_action_desc.action_type = action_type obj_action_desc.description = load_description obj_action_desc.create() self._cancel_ongoing_actionplans(ctx) def _cancel_ongoing_actionplans(self, context): actions_plans = objects.ActionPlan.list( context, filters={'state': objects.action_plan.State.ONGOING, 'hostname': CONF.host}, eager=True) for ap in actions_plans: ap.state = objects.action_plan.State.CANCELLED ap.save() filters = {'action_plan_uuid': ap.uuid, 'state__in': (objects.action.State.PENDING, objects.action.State.ONGOING)} actions = objects.Action.list(context, filters=filters, eager=True) for a in actions: a.state = objects.action.State.CANCELLED a.save() LOG.info("Action Plan %(uuid)s along with appropriate Actions " "has been cancelled because it was in %(state)s state " "when Applier had been stopped on %(hostname)s host.", {'uuid': ap.uuid, 'state': objects.action_plan.State.ONGOING, 'hostname': ap.hostname}) python-watcher-4.0.0/watcher/applier/default.py0000775000175000017500000000405513656752270021624 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # 
# Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_config import cfg from oslo_log import log from watcher.applier import base from watcher.applier.loading import default from watcher import objects LOG = log.getLogger(__name__) CONF = cfg.CONF class DefaultApplier(base.BaseApplier): def __init__(self, context, applier_manager): super(DefaultApplier, self).__init__() self._applier_manager = applier_manager self._loader = default.DefaultWorkFlowEngineLoader() self._engine = None self._context = context @property def context(self): return self._context @property def applier_manager(self): return self._applier_manager @property def engine(self): if self._engine is None: selected_workflow_engine = CONF.watcher_applier.workflow_engine LOG.debug("Loading workflow engine %s ", selected_workflow_engine) self._engine = self._loader.load( name=selected_workflow_engine, context=self.context, applier_manager=self.applier_manager) return self._engine def execute(self, action_plan_uuid): LOG.debug("Executing action plan %s ", action_plan_uuid) filters = {'action_plan_uuid': action_plan_uuid} actions = objects.Action.list(self.context, filters=filters, eager=True) return self.engine.execute(actions) python-watcher-4.0.0/watcher/applier/base.py0000664000175000017500000000210513656752270021101 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 
(the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # """ This component is in charge of executing the :ref:`Action Plan ` built by the :ref:`Watcher Decision Engine `. See: :doc:`../architecture` for more details on this component. """ import abc import six @six.add_metaclass(abc.ABCMeta) class BaseApplier(object): @abc.abstractmethod def execute(self, action_plan_uuid): raise NotImplementedError() python-watcher-4.0.0/watcher/applier/rpcapi.py0000664000175000017500000000354613656752270021457 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Jean-Emile DARTOIS # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.common import exception from watcher.common import service from watcher.common import service_manager from watcher.common import utils from watcher import conf CONF = conf.CONF class ApplierAPI(service.Service): def __init__(self): super(ApplierAPI, self).__init__(ApplierAPIManager) def launch_action_plan(self, context, action_plan_uuid=None): if not utils.is_uuid_like(action_plan_uuid): raise exception.InvalidUuidOrName(name=action_plan_uuid) self.conductor_client.cast( context, 'launch_action_plan', action_plan_uuid=action_plan_uuid) class ApplierAPIManager(service_manager.ServiceManager): @property def service_name(self): return None @property def api_version(self): return '1.0' @property def publisher_id(self): return CONF.watcher_applier.publisher_id @property def conductor_topic(self): return CONF.watcher_applier.conductor_topic @property def notification_topics(self): return [] @property def conductor_endpoints(self): return [] @property def notification_endpoints(self): return [] python-watcher-4.0.0/watcher/applier/loading/0000775000175000017500000000000013656752352021235 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/loading/__init__.py0000664000175000017500000000000013656752270023333 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/applier/loading/default.py0000664000175000017500000000173413656752270023237 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import unicode_literals from watcher.common.loader import default class DefaultWorkFlowEngineLoader(default.DefaultLoader): def __init__(self): super(DefaultWorkFlowEngineLoader, self).__init__( namespace='watcher_workflow_engines') class DefaultActionLoader(default.DefaultLoader): def __init__(self): super(DefaultActionLoader, self).__init__( namespace='watcher_actions') python-watcher-4.0.0/watcher/db/0000775000175000017500000000000013656752352016551 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/db/__init__.py0000664000175000017500000000000013656752270020647 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/db/api.py0000664000175000017500000007707013656752270017706 0ustar zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Base classes for storage engines """ import abc from oslo_config import cfg from oslo_db import api as db_api import six _BACKEND_MAPPING = {'sqlalchemy': 'watcher.db.sqlalchemy.api'} IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING, lazy=True) def get_instance(): """Return a DB API instance.""" return IMPL @six.add_metaclass(abc.ABCMeta) class BaseConnection(object): """Base class for storage system connections.""" @abc.abstractmethod def get_goal_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching goals. 
Return a list of the specified columns for all goals that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of goals to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_goal(self, values): """Create a new goal. :param values: A dict containing several items used to identify and track the goal. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'DUMMY', 'display_name': 'Dummy', } :returns: A goal :raises: :py:class:`~.GoalAlreadyExists` """ @abc.abstractmethod def get_goal_by_id(self, context, goal_id, eager=False): """Return a goal given its ID. :param context: The security context :param goal_id: The ID of a goal :param eager: If True, also loads One-to-X data (Default: False) :returns: A goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def get_goal_by_uuid(self, context, goal_uuid, eager=False): """Return a goal given its UUID. :param context: The security context :param goal_uuid: The UUID of a goal :param eager: If True, also loads One-to-X data (Default: False) :returns: A goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def get_goal_by_name(self, context, goal_name, eager=False): """Return a goal given its name. :param context: The security context :param goal_name: The name of a goal :param eager: If True, also loads One-to-X data (Default: False) :returns: A goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def destroy_goal(self, goal_uuid): """Destroy a goal. 
:param goal_uuid: The UUID of a goal :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def update_goal(self, goal_uuid, values): """Update properties of a goal. :param goal_uuid: The UUID of a goal :param values: A dict containing several items used to identify and track the goal. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'DUMMY', 'display_name': 'Dummy', } :returns: A goal :raises: :py:class:`~.GoalNotFound` :raises: :py:class:`~.Invalid` """ def soft_delete_goal(self, goal_id): """Soft delete a goal. :param goal_id: The id or uuid of a goal. :raises: :py:class:`~.GoalNotFound` """ @abc.abstractmethod def get_strategy_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=True): """Get specific columns for matching strategies. Return a list of the specified columns for all strategies that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of strategies to return. :param marker: The last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: Direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_strategy(self, values): """Create a new strategy. :param values: A dict containing items used to identify and track the strategy. For example: :: { 'id': 1, 'uuid': utils.generate_uuid(), 'name': 'my_strategy', 'display_name': 'My strategy', 'goal_uuid': utils.generate_uuid(), } :returns: A strategy :raises: :py:class:`~.StrategyAlreadyExists` """ @abc.abstractmethod def get_strategy_by_id(self, context, strategy_id, eager=False): """Return a strategy given its ID. 
:param context: The security context :param strategy_id: The ID of a strategy :param eager: If True, also loads One-to-X data (Default: False) :returns: A strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def get_strategy_by_uuid(self, context, strategy_uuid, eager=False): """Return a strategy given its UUID. :param context: The security context :param strategy_uuid: The UUID of a strategy :param eager: If True, also loads One-to-X data (Default: False) :returns: A strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def get_strategy_by_name(self, context, strategy_name, eager=False): """Return a strategy given its name. :param context: The security context :param strategy_name: The name of a strategy :param eager: If True, also loads One-to-X data (Default: False) :returns: A strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def destroy_strategy(self, strategy_uuid): """Destroy a strategy. :param strategy_uuid: The UUID of a strategy :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def update_strategy(self, strategy_uuid, values): """Update properties of a strategy. :param strategy_uuid: The UUID of a strategy :returns: A strategy :raises: :py:class:`~.StrategyNotFound` :raises: :py:class:`~.Invalid` """ def soft_delete_strategy(self, strategy_id): """Soft delete a strategy. :param strategy_id: The id or uuid of a strategy. :raises: :py:class:`~.StrategyNotFound` """ @abc.abstractmethod def get_audit_template_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching audit templates. Return a list of the specified columns for all audit templates that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of audit templates to return. :param marker: the last item of the previous page; we return the next result set. 
:param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_audit_template(self, values): """Create a new audit template. :param values: A dict containing several items used to identify and track the audit template. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'example', 'description': 'free text description' 'goal': 'DUMMY' } :returns: An audit template. :raises: :py:class:`~.AuditTemplateAlreadyExists` """ @abc.abstractmethod def get_audit_template_by_id(self, context, audit_template_id, eager=False): """Return an audit template. :param context: The security context :param audit_template_id: The id of an audit template. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def get_audit_template_by_uuid(self, context, audit_template_uuid, eager=False): """Return an audit template. :param context: The security context :param audit_template_uuid: The uuid of an audit template. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ def get_audit_template_by_name(self, context, audit_template_name, eager=False): """Return an audit template. :param context: The security context :param audit_template_name: The name of an audit template. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def destroy_audit_template(self, audit_template_id): """Destroy an audit template. :param audit_template_id: The id or uuid of an audit template. 
:raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def update_audit_template(self, audit_template_id, values): """Update properties of an audit template. :param audit_template_id: The id or uuid of an audit template. :returns: An audit template. :raises: :py:class:`~.AuditTemplateNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def soft_delete_audit_template(self, audit_template_id): """Soft delete an audit template. :param audit_template_id: The id or uuid of an audit template. :raises: :py:class:`~.AuditTemplateNotFound` """ @abc.abstractmethod def get_audit_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching audits. Return a list of the specified columns for all audits that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of audits to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_audit(self, values): """Create a new audit. :param values: A dict containing several items used to identify and track the audit, and several dicts which are passed into the Drivers when managing this audit. For example: :: { 'uuid': utils.generate_uuid(), 'type': 'ONESHOT', } :returns: An audit. :raises: :py:class:`~.AuditAlreadyExists` """ @abc.abstractmethod def get_audit_by_id(self, context, audit_id, eager=False): """Return an audit. :param context: The security context :param audit_id: The id of an audit. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit. 
:raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def get_audit_by_uuid(self, context, audit_uuid, eager=False): """Return an audit. :param context: The security context :param audit_uuid: The uuid of an audit. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit. :raises: :py:class:`~.AuditNotFound` """ def get_audit_by_name(self, context, audit_name, eager=False): """Return an audit. :param context: The security context :param audit_name: The name of an audit. :param eager: If True, also loads One-to-X data (Default: False) :returns: An audit. :raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def destroy_audit(self, audit_id): """Destroy an audit and all associated action plans. :param audit_id: The id or uuid of an audit. :raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def update_audit(self, audit_id, values): """Update properties of an audit. :param audit_id: The id or uuid of an audit. :returns: An audit. :raises: :py:class:`~.AuditNotFound` :raises: :py:class:`~.Invalid` """ def soft_delete_audit(self, audit_id): """Soft delete an audit and all associated action plans. :param audit_id: The id or uuid of an audit. :raises: :py:class:`~.AuditNotFound` """ @abc.abstractmethod def get_action_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching actions. Return a list of the specified columns for all actions that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of actions to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. 
""" @abc.abstractmethod def create_action(self, values): """Create a new action. :param values: A dict containing several items used to identify and track the action, and several dicts which are passed into the Drivers when managing this action. For example: :: { 'uuid': utils.generate_uuid(), 'name': 'example', 'description': 'free text description' 'aggregate': 'nova aggregate name or uuid' } :returns: A action. :raises: :py:class:`~.ActionAlreadyExists` """ @abc.abstractmethod def get_action_by_id(self, context, action_id, eager=False): """Return a action. :param context: The security context :param action_id: The id of a action. :param eager: If True, also loads One-to-X data (Default: False) :returns: A action. :raises: :py:class:`~.ActionNotFound` """ @abc.abstractmethod def get_action_by_uuid(self, context, action_uuid, eager=False): """Return a action. :param context: The security context :param action_uuid: The uuid of a action. :param eager: If True, also loads One-to-X data (Default: False) :returns: A action. :raises: :py:class:`~.ActionNotFound` """ @abc.abstractmethod def destroy_action(self, action_id): """Destroy a action and all associated interfaces. :param action_id: The id or uuid of a action. :raises: :py:class:`~.ActionNotFound` :raises: :py:class:`~.ActionReferenced` """ @abc.abstractmethod def update_action(self, action_id, values): """Update properties of a action. :param action_id: The id or uuid of a action. :returns: A action. :raises: :py:class:`~.ActionNotFound` :raises: :py:class:`~.ActionReferenced` :raises: :py:class:`~.Invalid` """ def soft_delete_action(self, action_id): """Soft delete an action. :param action_id: The id or uuid of an action. :raises: :py:class:`~.ActionNotFound` """ @abc.abstractmethod def get_action_plan_list( self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching action plans. 
Return a list of the specified columns for all action plans that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of audits to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_action_plan(self, values): """Create a new action plan. :param values: A dict containing several items used to identify and track the action plan. :returns: An action plan. :raises: :py:class:`~.ActionPlanAlreadyExists` """ @abc.abstractmethod def get_action_plan_by_id(self, context, action_plan_id, eager=False): """Return an action plan. :param context: The security context :param action_plan_id: The id of an action plan. :param eager: If True, also loads One-to-X data (Default: False) :returns: An action plan. :raises: :py:class:`~.ActionPlanNotFound` """ @abc.abstractmethod def get_action_plan_by_uuid(self, context, action_plan__uuid, eager=False): """Return a action plan. :param context: The security context :param action_plan__uuid: The uuid of an action plan. :param eager: If True, also loads One-to-X data (Default: False) :returns: An action plan. :raises: :py:class:`~.ActionPlanNotFound` """ @abc.abstractmethod def destroy_action_plan(self, action_plan_id): """Destroy an action plan and all associated interfaces. :param action_plan_id: The id or uuid of a action plan. :raises: :py:class:`~.ActionPlanNotFound` :raises: :py:class:`~.ActionPlanReferenced` """ @abc.abstractmethod def update_action_plan(self, action_plan_id, values): """Update properties of an action plan. :param action_plan_id: The id or uuid of an action plan. :returns: An action plan. 
:raises: :py:class:`~.ActionPlanNotFound` :raises: :py:class:`~.ActionPlanReferenced` :raises: :py:class:`~.Invalid` """ def soft_delete_action_plan(self, action_plan_id): """Soft delete an action plan. :param action_plan_id: The id or uuid of an action plan. :raises: :py:class:`~.ActionPlanNotFound` """ @abc.abstractmethod def get_efficacy_indicator_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching efficacy indicators. Return a list of the specified columns for all efficacy indicators that match the specified filters. :param context: The security context :param columns: List of column names to return. Defaults to 'id' column when columns == None. :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of efficacy indicators to return. :param marker: The last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: Direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_efficacy_indicator(self, values): """Create a new efficacy indicator. :param values: A dict containing items used to identify and track the efficacy indicator. For example: :: { 'id': 1, 'uuid': utils.generate_uuid(), 'name': 'my_efficacy_indicator', 'display_name': 'My efficacy indicator', 'goal_uuid': utils.generate_uuid(), } :returns: An efficacy_indicator :raises: :py:class:`~.EfficacyIndicatorAlreadyExists` """ @abc.abstractmethod def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, eager=False): """Return an efficacy indicator given its ID. 
:param context: The security context :param efficacy_indicator_id: The ID of an efficacy indicator :param eager: If True, also loads One-to-X data (Default: False) :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, eager=False): """Return an efficacy indicator given its UUID. :param context: The security context :param efficacy_indicator_uuid: The UUID of an efficacy indicator :param eager: If True, also loads One-to-X data (Default: False) :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, eager=False): """Return an efficacy indicator given its name. :param context: The security context :param efficacy_indicator_name: The name of an efficacy indicator :param eager: If True, also loads One-to-X data (Default: False) :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def destroy_efficacy_indicator(self, efficacy_indicator_uuid): """Destroy an efficacy indicator. :param efficacy_indicator_uuid: The UUID of an efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` """ @abc.abstractmethod def update_efficacy_indicator(self, efficacy_indicator_id, values): """Update properties of an efficacy indicator. :param efficacy_indicator_id: The ID of an efficacy indicator :returns: An efficacy indicator :raises: :py:class:`~.EfficacyIndicatorNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def get_scoring_engine_list( self, context, columns=None, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching scoring engines. Return a list of the specified columns for all scoring engines that match the specified filters. 
:param context: The security context :param columns: List of column names to return. Defaults to 'id' column when columns == None. :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of scoring engines to return. :param marker: the last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_scoring_engine(self, values): """Create a new scoring engine. :param values: A dict containing several items used to identify and track the scoring engine. :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineAlreadyExists` """ @abc.abstractmethod def get_scoring_engine_by_id(self, context, scoring_engine_id, eager=False): """Return a scoring engine by its id. :param context: The security context :param scoring_engine_id: The id of a scoring engine. :param eager: If True, also loads One-to-X data (Default: False) :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, eager=False): """Return a scoring engine by its uuid. :param context: The security context :param scoring_engine_uuid: The uuid of a scoring engine. :param eager: If True, also loads One-to-X data (Default: False) :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def get_scoring_engine_by_name(self, context, scoring_engine_name, eager=False): """Return a scoring engine by its name. :param context: The security context :param scoring_engine_name: The name of a scoring engine. :param eager: If True, also loads One-to-X data (Default: False) :returns: A scoring engine. 
:raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def destroy_scoring_engine(self, scoring_engine_id): """Destroy a scoring engine. :param scoring_engine_id: The id of a scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` """ @abc.abstractmethod def update_scoring_engine(self, scoring_engine_id, values): """Update properties of a scoring engine. :param scoring_engine_id: The id of a scoring engine. :returns: A scoring engine. :raises: :py:class:`~.ScoringEngineNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def get_service_list(self, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Get specific columns for matching services. Return a list of the specified columns for all services that match the specified filters. :param context: The security context :param filters: Filters to apply. Defaults to None. :param limit: Maximum number of services to return. :param marker: The last item of the previous page; we return the next result set. :param sort_key: Attribute by which results should be sorted. :param sort_dir: Direction in which results should be sorted. (asc, desc) :param eager: If True, also loads One-to-X data (Default: False) :returns: A list of tuples of the specified columns. """ @abc.abstractmethod def create_service(self, values): """Create a new service. :param values: A dict containing items used to identify and track the service. For example: :: { 'id': 1, 'name': 'watcher-api', 'status': 'ACTIVE', 'host': 'controller' } :returns: A service :raises: :py:class:`~.ServiceAlreadyExists` """ @abc.abstractmethod def get_service_by_id(self, context, service_id, eager=False): """Return a service given its ID. 
:param context: The security context :param service_id: The ID of a service :param eager: If True, also loads One-to-X data (Default: False) :returns: A service :raises: :py:class:`~.ServiceNotFound` """ @abc.abstractmethod def get_service_by_name(self, context, service_name, eager=False): """Return a service given its name. :param context: The security context :param service_name: The name of a service :param eager: If True, also loads One-to-X data (Default: False) :returns: A service :raises: :py:class:`~.ServiceNotFound` """ @abc.abstractmethod def destroy_service(self, service_id): """Destroy a service. :param service_id: The ID of a service :raises: :py:class:`~.ServiceNotFound` """ @abc.abstractmethod def update_service(self, service_id, values): """Update properties of a service. :param service_id: The ID of a service :returns: A service :raises: :py:class:`~.ServiceyNotFound` :raises: :py:class:`~.Invalid` """ @abc.abstractmethod def soft_delete_service(self, service_id): """Soft delete a service. :param service_id: The id of a service. :returns: A service. :raises: :py:class:`~.ServiceNotFound` """ python-watcher-4.0.0/watcher/db/migration.py0000664000175000017500000000314413656752270021115 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Database setup and migration commands.""" from oslo_config import cfg from stevedore import driver _IMPL = None def get_backend(): global _IMPL if not _IMPL: cfg.CONF.import_opt('backend', 'oslo_db.options', group='database') _IMPL = driver.DriverManager("watcher.database.migration_backend", cfg.CONF.database.backend).driver return _IMPL def upgrade(version=None): """Migrate the database to `version` or the most recent version.""" return get_backend().upgrade(version) def downgrade(version=None): return get_backend().downgrade(version) def version(): return get_backend().version() def stamp(version): return get_backend().stamp(version) def revision(message, autogenerate): return get_backend().revision(message, autogenerate) def create_schema(): return get_backend().create_schema() python-watcher-4.0.0/watcher/db/purge.py0000664000175000017500000004103513656752270020247 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
#
from __future__ import print_function

import collections
import datetime
import itertools
import sys

from oslo_log import log
from oslo_utils import strutils
import prettytable as ptable
from six.moves import input

from watcher._i18n import _
from watcher._i18n import lazy_translation_enabled
from watcher.common import context
from watcher.common import exception
from watcher.common import utils
from watcher import objects

LOG = log.getLogger(__name__)


class WatcherObjectsMap(object):
    """Wrapper to deal with watcher objects per type

    This wrapper object contains a list of watcher objects per type.
    Its main use is to simplify the merge of watcher objects by
    avoiding duplicates, but also for representing the relationships
    between these objects.
    """

    # This is for generating the .pot translations
    # NOTE(review): insertion order also encodes the parent->child hierarchy
    # (goal -> strategy -> audit template -> audit -> action plan -> action).
    keymap = collections.OrderedDict([
        ("goals", _("Goals")),
        ("strategies", _("Strategies")),
        ("audit_templates", _("Audit Templates")),
        ("audits", _("Audits")),
        ("action_plans", _("Action Plans")),
        ("actions", _("Actions")),
    ])

    def __init__(self):
        # One empty list attribute per object category (e.g. self.goals).
        for attr_name in self.keys():
            setattr(self, attr_name, [])

    def values(self):
        """Generator over the per-category object lists, in keymap order."""
        return (getattr(self, key) for key in self.keys())

    @classmethod
    def keys(cls):
        return cls.keymap.keys()

    def __iter__(self):
        # Iterate over every contained object, category by category.
        return itertools.chain(*self.values())

    def __add__(self, other):
        new_map = self.__class__()

        # Merge the 2 items dicts into a new object (and avoid dupes)
        for attr_name, initials, others in zip(self.keys(), self.values(),
                                               other.values()):
            # Creates a copy
            merged = initials[:]
            initials_ids = [item.id for item in initials]
            non_dupes = [item for item in others
                         if item.id not in initials_ids]
            merged += non_dupes

            setattr(new_map, attr_name, merged)

        return new_map

    def __str__(self):
        out = ""
        for key, vals in zip(self.keys(), self.values()):
            ids = [val.id for val in vals]
            out += "%(key)s: %(val)s" % (dict(key=key, val=ids))
            out += "\n"
        return out

    def __len__(self):
        return sum(len(getattr(self, key)) for key in self.keys())

    def get_count_table(self):
        """Render a one-row PrettyTable with per-category and total counts."""
        headers = list(self.keymap.values())
        headers.append(_("Total"))  # We also add a total count
        translated_headers = [
            h.translate() if lazy_translation_enabled() else h
            for h in headers
        ]
        counters = [len(cat_vals) for cat_vals in self.values()] + [len(self)]
        table = ptable.PrettyTable(field_names=translated_headers)
        table.add_row(counters)
        return table.get_string()


class PurgeCommand(object):
    """Purges the DB by removing soft deleted entries

    The workflow for this purge is the following:

    # Find soft deleted objects which are expired
    # Find orphan objects
    # Find their related objects whether they are expired or not
    # Merge them together
    # If it does not exceed the limit, destroy them all
    """

    # Admin-style context that can see soft-deleted rows.
    ctx = context.make_context(show_deleted=True)

    def __init__(self, age_in_days=None, max_number=None,
                 uuid=None, exclude_orphans=False, dry_run=None):
        self.age_in_days = age_in_days
        self.max_number = max_number
        self.uuid = uuid
        self.exclude_orphans = exclude_orphans
        self.dry_run = dry_run

        self._delete_up_to_max = None
        self._objects_map = WatcherObjectsMap()

    def get_expiry_date(self):
        """Return the deleted_at cutoff date, or None when no age was given."""
        if not self.age_in_days:
            return None
        today = datetime.datetime.today()
        expiry_date = today - datetime.timedelta(days=self.age_in_days)
        return expiry_date

    @classmethod
    def get_goal_uuid(cls, uuid_or_name):
        """Resolve a goal UUID or name to the UUID of a soft-deleted goal.

        :raises: GoalNotFound if the goal does not exist
        :raises: NotSoftDeletedStateError if the goal is not soft deleted
        """
        if uuid_or_name is None:
            return

        query_func = None
        if not utils.is_uuid_like(uuid_or_name):
            query_func = objects.Goal.get_by_name
        else:
            query_func = objects.Goal.get_by_uuid

        try:
            goal = query_func(cls.ctx, uuid_or_name)
        except Exception as exc:
            LOG.exception(exc)
            raise exception.GoalNotFound(goal=uuid_or_name)

        if not goal.deleted_at:
            raise exception.NotSoftDeletedStateError(
                name=_('Goal'), id=uuid_or_name)

        return goal.uuid

    def _find_goals(self, filters=None):
        return objects.Goal.list(self.ctx, filters=filters)

    def _find_strategies(self, filters=None):
        return objects.Strategy.list(self.ctx, filters=filters)

    def _find_audit_templates(self, filters=None):
        return objects.AuditTemplate.list(self.ctx, filters=filters)

    def _find_audits(self, filters=None):
        return objects.Audit.list(self.ctx, filters=filters)

    def _find_action_plans(self, filters=None):
        return objects.ActionPlan.list(self.ctx, filters=filters)

    def _find_actions(self, filters=None):
        return objects.Action.list(self.ctx, filters=filters)

    def _find_orphans(self):
        """Find non-deleted objects whose parent objects are missing."""
        orphans = WatcherObjectsMap()

        filters = dict(deleted=False)
        goals = objects.Goal.list(self.ctx, filters=filters)
        strategies = objects.Strategy.list(self.ctx, filters=filters)
        audit_templates = objects.AuditTemplate.list(self.ctx, filters=filters)
        audits = objects.Audit.list(self.ctx, filters=filters)
        action_plans = objects.ActionPlan.list(self.ctx, filters=filters)
        actions = objects.Action.list(self.ctx, filters=filters)

        goal_ids = set(g.id for g in goals)
        orphans.strategies = [
            strategy for strategy in strategies
            if strategy.goal_id not in goal_ids]

        strategy_ids = [s.id for s in (s for s in strategies
                                       if s not in orphans.strategies)]
        orphans.audit_templates = [
            audit_template for audit_template in audit_templates
            if audit_template.goal_id not in goal_ids or
            (audit_template.strategy_id and
             audit_template.strategy_id not in strategy_ids)]

        orphans.audits = [
            audit for audit in audits
            if audit.goal_id not in goal_ids or
            (audit.strategy_id and
             audit.strategy_id not in strategy_ids)]

        # Objects with orphan parents are themselves orphans
        audit_ids = [audit.id for audit in audits
                     if audit not in orphans.audits]
        orphans.action_plans = [
            ap for ap in action_plans
            if ap.audit_id not in audit_ids or
            ap.strategy_id not in strategy_ids]

        # Objects with orphan parents are themselves orphans
        action_plan_ids = [ap.id for ap in action_plans
                           if ap not in orphans.action_plans]
        orphans.actions = [
            action for action in actions
            if action.action_plan_id not in action_plan_ids]

        LOG.debug("Orphans found:\n%s", orphans)
        LOG.info("Orphans found:\n%s", orphans.get_count_table())

        return orphans

    def _find_soft_deleted_objects(self):
        """Find soft-deleted objects (optionally expired / UUID-scoped)."""
        to_be_deleted = WatcherObjectsMap()
        expiry_date = self.get_expiry_date()
        filters = dict(deleted=True)

        if self.uuid:
            filters["uuid"] = self.uuid
        if expiry_date:
            filters.update(dict(deleted_at__lt=expiry_date))

        to_be_deleted.goals.extend(self._find_goals(filters))
        to_be_deleted.strategies.extend(self._find_strategies(filters))
        to_be_deleted.audit_templates.extend(
            self._find_audit_templates(filters))
        to_be_deleted.audits.extend(self._find_audits(filters))
        to_be_deleted.action_plans.extend(
            self._find_action_plans(filters))
        to_be_deleted.actions.extend(self._find_actions(filters))

        soft_deleted_objs = self._find_related_objects(
            to_be_deleted, base_filters=dict(deleted=True))

        LOG.debug("Soft deleted objects:\n%s", soft_deleted_objs)

        return soft_deleted_objs

    def _find_related_objects(self, objects_map, base_filters=None):
        """Expand an objects map with the children of its contained objects."""
        base_filters = base_filters or {}

        for goal in objects_map.goals:
            filters = {}
            filters.update(base_filters)
            filters.update(dict(goal_id=goal.id))
            related_objs = WatcherObjectsMap()
            related_objs.strategies = self._find_strategies(filters)
            related_objs.audit_templates = self._find_audit_templates(filters)
            related_objs.audits = self._find_audits(filters)
            objects_map += related_objs

        for strategy in objects_map.strategies:
            filters = {}
            filters.update(base_filters)
            filters.update(dict(strategy_id=strategy.id))
            related_objs = WatcherObjectsMap()
            related_objs.audit_templates = self._find_audit_templates(filters)
            related_objs.audits = self._find_audits(filters)
            objects_map += related_objs

        for audit in objects_map.audits:
            filters = {}
            filters.update(base_filters)
            filters.update(dict(audit_id=audit.id))
            related_objs = WatcherObjectsMap()
            related_objs.action_plans = self._find_action_plans(filters)
            objects_map += related_objs

        for action_plan in objects_map.action_plans:
            filters = {}
            filters.update(base_filters)
            filters.update(dict(action_plan_id=action_plan.id))
            related_objs = WatcherObjectsMap()
            related_objs.actions = self._find_actions(filters)
            objects_map += related_objs

        return objects_map

    def confirmation_prompt(self):
        """Ask the operator to confirm the deletion; return their answer."""
        print(self._objects_map.get_count_table())
        raw_val = input(
            _("There are %(count)d objects set for deletion. "
              "Continue? [y/N]") % dict(count=len(self._objects_map)))

        return strutils.bool_from_string(raw_val)

    def delete_up_to_max_prompt(self, objects_map):
        """Ask whether to proceed with a partial purge capped at max_number."""
        print(objects_map.get_count_table())
        print(_("The number of objects (%(num)s) to delete from the database "
                "exceeds the maximum number of objects (%(max_number)s) "
                "specified.") % dict(max_number=self.max_number,
                                     num=len(objects_map)))
        raw_val = input(
            _("Do you want to delete objects up to the specified maximum "
              "number? [y/N]"))

        self._delete_up_to_max = strutils.bool_from_string(raw_val)

        return self._delete_up_to_max

    def _aggregate_objects(self):
        """Objects aggregated on a 'per goal' basis"""
        # todo: aggregate orphans as well
        aggregate = []
        for goal in self._objects_map.goals:
            related_objs = WatcherObjectsMap()

            # goals
            related_objs.goals = [goal]

            # strategies
            goal_ids = [goal.id]
            related_objs.strategies = [
                strategy for strategy in self._objects_map.strategies
                if strategy.goal_id in goal_ids
            ]

            # audit templates
            strategy_ids = [
                strategy.id for strategy in related_objs.strategies]
            related_objs.audit_templates = [
                at for at in self._objects_map.audit_templates
                if at.goal_id in goal_ids or
                (at.strategy_id and at.strategy_id in strategy_ids)
            ]

            # audits
            related_objs.audits = [
                audit for audit in self._objects_map.audits
                if audit.goal_id in goal_ids
            ]

            # action plans
            audit_ids = [audit.id for audit in related_objs.audits]
            related_objs.action_plans = [
                action_plan
                for action_plan in self._objects_map.action_plans
                if action_plan.audit_id in audit_ids
            ]

            # actions
            action_plan_ids = [
                action_plan.id
                for action_plan in related_objs.action_plans
            ]
            related_objs.actions = [
                action for action in self._objects_map.actions
                if action.action_plan_id in action_plan_ids
            ]
            aggregate.append(related_objs)

        return aggregate

    def _get_objects_up_to_limit(self):
        """Take whole per-goal aggregates while staying under max_number."""
        aggregated_objects = self._aggregate_objects()
        to_be_deleted_subset = WatcherObjectsMap()

        for aggregate in aggregated_objects:
            if len(aggregate) + len(to_be_deleted_subset) <= self.max_number:
                to_be_deleted_subset += aggregate
            else:
                break

        LOG.debug(to_be_deleted_subset)
        return to_be_deleted_subset

    def find_objects_to_delete(self):
        """Finds all the objects to be purged

        :returns: A mapping with all the Watcher objects to purged
        :rtype: :py:class:`~.WatcherObjectsMap` instance
        """
        to_be_deleted = self._find_soft_deleted_objects()

        if not self.exclude_orphans:
            to_be_deleted += self._find_orphans()

        LOG.debug("Objects to be deleted:\n%s", to_be_deleted)

        return to_be_deleted

    def do_delete(self):
        LOG.info("Deleting...")
        # Reversed to avoid errors with foreign keys
        for entry in reversed(list(self._objects_map)):
            entry.destroy()

    def execute(self):
        """Run the purge workflow (find, cap, confirm, delete/report)."""
        LOG.info("Starting purge command")
        self._objects_map = self.find_objects_to_delete()

        if (self.max_number is not None and
                len(self._objects_map) > self.max_number):
            if self.delete_up_to_max_prompt(self._objects_map):
                self._objects_map = self._get_objects_up_to_limit()
            else:
                return

        _orphans_note = (_(" (orphans excluded)") if self.exclude_orphans
                         else _(" (may include orphans)"))
        if not self.dry_run and self.confirmation_prompt():
            self.do_delete()
            print(_("Purge results summary%s:") % _orphans_note)
            LOG.info("Purge results summary%s:", _orphans_note)
        else:
            LOG.debug(self._objects_map)
            print(_("Here below is a table containing the objects "
                    "that can be purged%s:") % _orphans_note)

        LOG.info("\n%s", self._objects_map.get_count_table())
        print(self._objects_map.get_count_table())

        LOG.info("Purge process completed")


def purge(age_in_days, max_number, goal, exclude_orphans, dry_run):
    """Removes soft deleted objects from the database

    :param age_in_days: Number of days since deletion (from today)
        to exclude from the purge. If None, everything will be purged.
    :type age_in_days: int
    :param max_number: Max number of objects expected to be deleted.
                  Prevents the deletion if exceeded. No limit if set to None.
    :type max_number: int
    :param goal: UUID or name of the goal to purge.
    :type goal: str
    :param exclude_orphans: Flag to indicate whether or not you want to
                            exclude orphans from deletion (default: False).
    :type exclude_orphans: bool
    :param dry_run: Flag to indicate whether or not you want to perform
                    a dry run (no deletion).
    :type dry_run: bool
    """
    try:
        if max_number and max_number < 0:
            raise exception.NegativeLimitError

        LOG.info("[options] age_in_days = %s", age_in_days)
        LOG.info("[options] max_number = %s", max_number)
        LOG.info("[options] goal = %s", goal)
        LOG.info("[options] exclude_orphans = %s", exclude_orphans)
        LOG.info("[options] dry_run = %s", dry_run)

        uuid = PurgeCommand.get_goal_uuid(goal)

        cmd = PurgeCommand(age_in_days, max_number, uuid,
                           exclude_orphans, dry_run)

        cmd.execute()

    except Exception as exc:
        LOG.exception(exc)
        print(exc)
        sys.exit(1)
python-watcher-4.0.0/watcher/db/sqlalchemy/0000775000175000017500000000000013656752352020713 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/db/sqlalchemy/__init__.py0000664000175000017500000000000013656752270023011 0ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/db/sqlalchemy/api.py0000664000175000017500000012611313656752270022041 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""SQLAlchemy storage backend."""

import collections
import datetime
import operator

from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import session as db_session
from oslo_db.sqlalchemy import utils as db_utils
from oslo_utils import timeutils
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import exc
from sqlalchemy.orm import joinedload

from watcher._i18n import _
from watcher.common import exception
from watcher.common import utils
from watcher.db import api
from watcher.db.sqlalchemy import models
from watcher import objects

CONF = cfg.CONF

# Lazily-initialized oslo.db EngineFacade (see _create_facade_lazily()).
_FACADE = None


def _create_facade_lazily():
    global _FACADE
    if _FACADE is None:
        _FACADE = db_session.EngineFacade.from_config(CONF)
    return _FACADE


def get_engine():
    facade = _create_facade_lazily()
    return facade.get_engine()


def get_session(**kwargs):
    facade = _create_facade_lazily()
    return facade.get_session(**kwargs)


def get_backend():
    """The backend is this module itself."""
    return Connection()


def model_query(model, *args, **kwargs):
    """Query helper for simpler session usage.

    :param session: if present, the session to use
    """
    session = kwargs.get('session') or get_session()
    query = session.query(model, *args)
    return query


def add_identity_filter(query, value):
    """Adds an identity filter to a query.

    Filters results by ID, if supplied value is a valid integer.
    Otherwise attempts to filter results by UUID.

    :param query: Initial query to add filter to.
    :param value: Value for filtering results by.
    :return: Modified query.
    """
    if utils.is_int_like(value):
        return query.filter_by(id=value)
    elif utils.is_uuid_like(value):
        return query.filter_by(uuid=value)
    else:
        raise exception.InvalidIdentity(identity=value)


def _paginate_query(model, limit=None, marker=None, sort_key=None,
                    sort_dir=None, query=None):
    # 'id' is always appended as a tiebreaker to make pagination stable.
    if not query:
        query = model_query(model)
    sort_keys = ['id']
    if sort_key and sort_key not in sort_keys:
        sort_keys.insert(0, sort_key)
    query = db_utils.paginate_query(query, model, limit, sort_keys,
                                    marker=marker, sort_dir=sort_dir)
    return query.all()


class JoinMap(utils.Struct):
    """Mapping for the Join-based queries"""


# (join_fieldname, join_model) pair describing a joined-table filter.
NaturalJoinFilter = collections.namedtuple(
    'NaturalJoinFilter', ['join_fieldname', 'join_model'])


class Connection(api.BaseConnection):
    """SqlAlchemy connection."""

    # Maps the "__<op>" filter suffix to the SQLAlchemy comparison callable.
    valid_operators = {
        "": operator.eq,
        "eq": operator.eq,
        "neq": operator.ne,
        "gt": operator.gt,
        "gte": operator.ge,
        "lt": operator.lt,
        "lte": operator.le,
        "in": lambda field, choices: field.in_(choices),
        "notin": lambda field, choices: field.notin_(choices),
    }

    def __init__(self):
        super(Connection, self).__init__()

    def __add_simple_filter(self, query, model, fieldname, value, operator_):
        field = getattr(model, fieldname)

        # Datetime columns accept ISO8601 strings; parse them first.
        if (fieldname != 'deleted' and value and
                field.type.python_type is datetime.datetime):
            if not isinstance(value, datetime.datetime):
                value = timeutils.parse_isotime(value)

        return query.filter(self.valid_operators[operator_](field, value))

    def __add_join_filter(self, query, model, fieldname, value, operator_):
        query = query.join(model)
        return self.__add_simple_filter(query, model, fieldname,
                                        value, operator_)

    def __decompose_filter(self, raw_fieldname):
        """Decompose a filter name into its 2 subparts

        A filter can take 2 forms:

        - "<fieldname>", which is a syntactic sugar for "<fieldname>__eq"
        - "<fieldname>__<operator>", where <operator> is the comparison
          operator to be used.

        Available operators are:

        - eq
        - neq
        - gt
        - gte
        - lt
        - lte
        - in
        - notin
        """
        separator = '__'
        fieldname, separator, operator_ = raw_fieldname.partition(separator)

        if operator_ and operator_ not in self.valid_operators:
            raise exception.InvalidOperator(
                operator=operator_, valid_operators=self.valid_operators)

        return fieldname, operator_

    def _add_filters(self, query, model, filters=None, plain_fields=None,
                     join_fieldmap=None):
        """Generic way to add filters to a Watcher model

        Each filter key provided by the `filters` parameter will be decomposed
        into 2 pieces: the field name and the comparison operator

        - "": By default, the "eq" is applied if no operator is provided
        - "eq", which stands for "equal" : e.g. {"state__eq": "PENDING"}
          will result in the "WHERE state = 'PENDING'" clause.
        - "neq", which stands for "not equal" : e.g. {"state__neq": "PENDING"}
          will result in the "WHERE state != 'PENDING'" clause.
        - "gt", which stands for "greater than" : e.g.
          {"created_at__gt": "2016-06-06T10:33:22.063176"} will result in the
          "WHERE created_at > '2016-06-06T10:33:22.063176'" clause.
        - "gte", which stands for "greater than or equal to" : e.g.
          {"created_at__gte": "2016-06-06T10:33:22.063176"} will result in the
          "WHERE created_at >= '2016-06-06T10:33:22.063176'" clause.
        - "lt", which stands for "less than" : e.g.
          {"created_at__lt": "2016-06-06T10:33:22.063176"} will result in the
          "WHERE created_at < '2016-06-06T10:33:22.063176'" clause.
        - "lte", which stands for "less than or equal to" : e.g.
          {"created_at__lte": "2016-06-06T10:33:22.063176"} will result in the
          "WHERE created_at <= '2016-06-06T10:33:22.063176'" clause.
        - "in": e.g. {"state__in": ('SUCCEEDED', 'FAILED')} will result in the
          "WHERE state IN ('SUCCEEDED', 'FAILED')" clause.

        :param query: a :py:class:`sqlalchemy.orm.query.Query` instance
        :param model: the model class the filters should relate to
        :param filters: dict with the following structure {"fieldname": value}
        :param plain_fields: the field names that can be filtered directly
            on the model
        :param join_fieldmap: mapping of filter name to (join field name,
            join model) for filters resolved through a JOIN
        """
        soft_delete_mixin_fields = ['deleted', 'deleted_at']
        timestamp_mixin_fields = ['created_at', 'updated_at']
        filters = filters or {}

        # Special case for 'deleted' because it is a non-boolean flag
        if 'deleted' in filters:
            deleted_filter = filters.pop('deleted')
            op = 'eq' if not bool(deleted_filter) else 'neq'
            filters['deleted__%s' % op] = 0

        plain_fields = tuple(
            (list(plain_fields) or []) + soft_delete_mixin_fields +
            timestamp_mixin_fields)
        join_fieldmap = join_fieldmap or {}

        for raw_fieldname, value in filters.items():
            fieldname, operator_ = self.__decompose_filter(raw_fieldname)
            if fieldname in plain_fields:
                query = self.__add_simple_filter(
                    query, model, fieldname, value, operator_)
            elif fieldname in join_fieldmap:
                join_field, join_model = join_fieldmap[fieldname]
                query = self.__add_join_filter(
                    query, join_model, join_field, value, operator_)

        return query

    @staticmethod
    def _get_relationships(model):
        return inspect(model).relationships

    @staticmethod
    def _set_eager_options(model, query):
        relationships = inspect(model).relationships
        for relationship in relationships:
            if not relationship.uselist:
                # We have a One-to-X relationship
                query = query.options(joinedload(relationship.key))
        return query

    def _create(self, model, values):
        obj = model()
        # Relationship attributes cannot be set directly on the row.
        cleaned_values = {k: v for k, v in values.items()
                          if k not in self._get_relationships(model)}
        obj.update(cleaned_values)
        obj.save()
        return obj

    def _get(self, context, model, fieldname, value, eager):
        query = model_query(model)
        if eager:
            query = self._set_eager_options(model, query)

        query = query.filter(getattr(model, fieldname) == value)
        if not context.show_deleted:
            query = query.filter(model.deleted_at.is_(None))

        try:
            obj = query.one()
        except exc.NoResultFound:
            raise exception.ResourceNotFound(name=model.__name__, id=value)

        return obj

    @staticmethod
    def _update(model, id_, values):
        session = get_session()
        with session.begin():
            query = model_query(model, session=session)
            query = add_identity_filter(query, id_)
            try:
                ref = query.with_lockmode('update').one()
            except exc.NoResultFound:
                raise exception.ResourceNotFound(name=model.__name__, id=id_)

            ref.update(values)
        return ref

    @staticmethod
    def _soft_delete(model, id_):
        session = get_session()
        with session.begin():
            query = model_query(model, session=session)
            query = add_identity_filter(query, id_)
            try:
                row = query.one()
            except exc.NoResultFound:
                raise exception.ResourceNotFound(name=model.__name__, id=id_)

            row.soft_delete(session)

        return row

    @staticmethod
    def _destroy(model, id_):
        session = get_session()
        with session.begin():
            query = model_query(model, session=session)
            query = add_identity_filter(query, id_)

            try:
                query.one()
            except exc.NoResultFound:
                raise exception.ResourceNotFound(name=model.__name__, id=id_)

            query.delete()

    def _get_model_list(self, model, add_filters_func, context, filters=None,
                        limit=None, marker=None, sort_key=None,
                        sort_dir=None, eager=False):
        query = model_query(model)
        if eager:
            query = self._set_eager_options(model, query)
        query = add_filters_func(query, filters)
        if not context.show_deleted:
            query = query.filter(model.deleted_at.is_(None))

        return _paginate_query(model, limit, marker, sort_key, sort_dir, query)

    # NOTE(erakli): _add_..._filters methods should be refactored to have same
    # content. join_fieldmap should be filled with JoinMap instead of dict

    def _add_goals_filters(self, query, filters):
        if filters is None:
            filters = {}

        plain_fields = ['uuid', 'name', 'display_name']

        return self._add_filters(
            query=query, model=models.Goal, filters=filters,
            plain_fields=plain_fields)

    def _add_strategies_filters(self, query, filters):
        plain_fields = ['uuid', 'name', 'display_name', 'goal_id']
        join_fieldmap = JoinMap(
            goal_uuid=NaturalJoinFilter(
                join_fieldname="uuid", join_model=models.Goal),
            goal_name=NaturalJoinFilter(
                join_fieldname="name", join_model=models.Goal))

        return self._add_filters(
            query=query, model=models.Strategy, filters=filters,
            plain_fields=plain_fields, join_fieldmap=join_fieldmap)

    def _add_audit_templates_filters(self, query, filters):
        if filters is None:
            filters = {}

        plain_fields = ['uuid', 'name', 'goal_id', 'strategy_id']
        join_fieldmap = JoinMap(
            goal_uuid=NaturalJoinFilter(
                join_fieldname="uuid", join_model=models.Goal),
            goal_name=NaturalJoinFilter(
                join_fieldname="name", join_model=models.Goal),
            strategy_uuid=NaturalJoinFilter(
                join_fieldname="uuid", join_model=models.Strategy),
            strategy_name=NaturalJoinFilter(
                join_fieldname="name", join_model=models.Strategy),
        )

        return self._add_filters(
            query=query, model=models.AuditTemplate, filters=filters,
            plain_fields=plain_fields, join_fieldmap=join_fieldmap)

    def _add_audits_filters(self, query, filters):
        if filters is None:
            filters = {}

        plain_fields = ['uuid', 'audit_type', 'state', 'goal_id',
                        'strategy_id', 'hostname']
        join_fieldmap = {
            'goal_uuid': ("uuid", models.Goal),
            'goal_name': ("name", models.Goal),
            'strategy_uuid': ("uuid", models.Strategy),
            'strategy_name': ("name", models.Strategy),
        }

        return self._add_filters(
            query=query, model=models.Audit, filters=filters,
            plain_fields=plain_fields, join_fieldmap=join_fieldmap)

    def _add_action_plans_filters(self, query, filters):
        if filters is None:
            filters = {}

        plain_fields = ['uuid', 'state', 'audit_id', 'strategy_id']
        join_fieldmap = JoinMap(
            audit_uuid=NaturalJoinFilter(
                join_fieldname="uuid", join_model=models.Audit),
            strategy_uuid=NaturalJoinFilter(
                join_fieldname="uuid", join_model=models.Strategy),
            strategy_name=NaturalJoinFilter(
                join_fieldname="name", join_model=models.Strategy),
        )

        return self._add_filters(
            query=query, model=models.ActionPlan, filters=filters,
            plain_fields=plain_fields, join_fieldmap=join_fieldmap)

    def _add_actions_filters(self, query, filters):
        if filters is None:
            filters = {}

        plain_fields = ['uuid', 'state', 'action_plan_id']
        join_fieldmap = {
            'action_plan_uuid': ("uuid", models.ActionPlan),
        }

        query = self._add_filters(
            query=query, model=models.Action, filters=filters,
            plain_fields=plain_fields, join_fieldmap=join_fieldmap)

        # 'audit_uuid' is resolved through the parent action plan.
        if 'audit_uuid' in filters:
            stmt = model_query(models.ActionPlan).join(
                models.Audit,
                models.Audit.id == models.ActionPlan.audit_id)\
                .filter_by(uuid=filters['audit_uuid']).subquery()
            query = query.filter_by(action_plan_id=stmt.c.id)

        return query

    def _add_efficacy_indicators_filters(self, query, filters):
        if filters is None:
            filters = {}

        plain_fields = ['uuid', 'name', 'unit', 'schema', 'action_plan_id']
        join_fieldmap = JoinMap(
            action_plan_uuid=NaturalJoinFilter(
                join_fieldname="uuid", join_model=models.ActionPlan),
        )

        return self._add_filters(
            query=query, model=models.EfficacyIndicator, filters=filters,
            plain_fields=plain_fields, join_fieldmap=join_fieldmap)

    def _add_scoring_engine_filters(self, query, filters):
        if filters is None:
            filters = {}

        plain_fields = ['id', 'description']

        return self._add_filters(
            query=query, model=models.ScoringEngine, filters=filters,
            plain_fields=plain_fields)

    def _add_action_descriptions_filters(self, query, filters):
        if not filters:
            filters = {}

        plain_fields = ['id', 'action_type']

        return self._add_filters(
            query=query, model=models.ActionDescription, filters=filters,
            plain_fields=plain_fields)

    def _add_services_filters(self, query, filters):
        if not filters:
            filters = {}

        plain_fields = ['id', 'name', 'host']

        return self._add_filters(
            query=query, model=models.Service, filters=filters,
            plain_fields=plain_fields)

    # ### GOALS ### #

    def get_goal_list(self, *args, **kwargs):
        return self._get_model_list(models.Goal,
                                    self._add_goals_filters,
                                    *args, **kwargs)

    def create_goal(self, values):
        # ensure defaults are present for new goals
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()

        try:
            goal = self._create(models.Goal, values)
        except db_exc.DBDuplicateEntry:
            raise exception.GoalAlreadyExists(uuid=values['uuid'])
        return goal

    def _get_goal(self, context, fieldname, value, eager):
        try:
            return self._get(context, model=models.Goal,
                             fieldname=fieldname, value=value, eager=eager)
        except exception.ResourceNotFound:
            raise exception.GoalNotFound(goal=value)

    def get_goal_by_id(self, context, goal_id, eager=False):
        return self._get_goal(
            context, fieldname="id", value=goal_id, eager=eager)

    def get_goal_by_uuid(self, context, goal_uuid, eager=False):
        return self._get_goal(
            context, fieldname="uuid", value=goal_uuid, eager=eager)

    def get_goal_by_name(self, context, goal_name, eager=False):
        return self._get_goal(
            context, fieldname="name", value=goal_name, eager=eager)

    def destroy_goal(self, goal_id):
        try:
            return self._destroy(models.Goal, goal_id)
        except exception.ResourceNotFound:
            raise exception.GoalNotFound(goal=goal_id)

    def update_goal(self, goal_id, values):
        if 'uuid' in values:
            raise exception.Invalid(
                message=_("Cannot overwrite UUID for an existing Goal."))

        try:
            return self._update(models.Goal, goal_id, values)
        except exception.ResourceNotFound:
            raise exception.GoalNotFound(goal=goal_id)

    def soft_delete_goal(self, goal_id):
        try:
            return self._soft_delete(models.Goal, goal_id)
        except exception.ResourceNotFound:
            raise exception.GoalNotFound(goal=goal_id)

    # ### STRATEGIES ### #

    def get_strategy_list(self, *args, **kwargs):
        return self._get_model_list(models.Strategy,
                                    self._add_strategies_filters,
                                    *args, **kwargs)

    def create_strategy(self, values):
        # ensure defaults are present for new strategies
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()

        try:
            strategy = self._create(models.Strategy, values)
        except db_exc.DBDuplicateEntry:
            raise exception.StrategyAlreadyExists(uuid=values['uuid'])
        return strategy

    def _get_strategy(self, context, fieldname, value, eager):
        try:
            return self._get(context, model=models.Strategy,
                             fieldname=fieldname, value=value, eager=eager)
        except exception.ResourceNotFound:
            raise exception.StrategyNotFound(strategy=value)

    def get_strategy_by_id(self, context, strategy_id, eager=False):
        return self._get_strategy(
            context, fieldname="id", value=strategy_id, eager=eager)

    def get_strategy_by_uuid(self, context, strategy_uuid, eager=False):
        return self._get_strategy(
            context, fieldname="uuid", value=strategy_uuid, eager=eager)

    def get_strategy_by_name(self, context, strategy_name, eager=False):
        return self._get_strategy(
            context, fieldname="name", value=strategy_name, eager=eager)

    def destroy_strategy(self, strategy_id):
        try:
            return self._destroy(models.Strategy, strategy_id)
        except exception.ResourceNotFound:
            raise exception.StrategyNotFound(strategy=strategy_id)

    def update_strategy(self, strategy_id, values):
        if 'uuid' in values:
            raise exception.Invalid(
                message=_("Cannot overwrite UUID for an existing Strategy."))

        try:
            return self._update(models.Strategy, strategy_id, values)
        except exception.ResourceNotFound:
            raise exception.StrategyNotFound(strategy=strategy_id)

    def soft_delete_strategy(self, strategy_id):
        try:
            return self._soft_delete(models.Strategy, strategy_id)
        except exception.ResourceNotFound:
            raise exception.StrategyNotFound(strategy=strategy_id)

    # ### AUDIT TEMPLATES ### #

    def get_audit_template_list(self, *args, **kwargs):
        return self._get_model_list(models.AuditTemplate,
                                    self._add_audit_templates_filters,
                                    *args, **kwargs)

    def create_audit_template(self, values):
        # ensure defaults are present for new audit_templates
        if not values.get('uuid'):
            values['uuid'] = utils.generate_uuid()

        # Audit template names must be unique among live (non-deleted) rows.
        query = model_query(models.AuditTemplate)
        query = query.filter_by(name=values.get('name'),
                                deleted_at=None)
        if len(query.all()) > 0:
            raise exception.AuditTemplateAlreadyExists(
                audit_template=values['name'])

        try:
            audit_template = self._create(models.AuditTemplate, values)
        except db_exc.DBDuplicateEntry:
            raise exception.AuditTemplateAlreadyExists(
                audit_template=values['name'])
        return audit_template

    def _get_audit_template(self, context, fieldname, value, eager):
        try:
            return self._get(context, model=models.AuditTemplate,
                             fieldname=fieldname, value=value, eager=eager)
        except exception.ResourceNotFound:
            raise exception.AuditTemplateNotFound(audit_template=value)

    def get_audit_template_by_id(self, context, audit_template_id,
                                 eager=False):
        return self._get_audit_template(
            context, fieldname="id", value=audit_template_id, eager=eager)

    def get_audit_template_by_uuid(self, context, audit_template_uuid,
                                   eager=False):
        return self._get_audit_template(
            context, fieldname="uuid", value=audit_template_uuid, eager=eager)

    def get_audit_template_by_name(self, context, audit_template_name,
                                   eager=False):
        return self._get_audit_template(
            context, fieldname="name", value=audit_template_name, eager=eager)

    def destroy_audit_template(self, audit_template_id):
        try:
            return self._destroy(models.AuditTemplate, audit_template_id)
        except exception.ResourceNotFound:
            raise exception.AuditTemplateNotFound(
                audit_template=audit_template_id)

    def update_audit_template(self, audit_template_id, values):
        if 'uuid' in values:
            raise exception.Invalid(
                message=_("Cannot overwrite UUID for an existing "
                          "Audit Template."))
        try:
            return self._update(
                models.AuditTemplate, audit_template_id, values)
        except exception.ResourceNotFound:
            raise exception.AuditTemplateNotFound(
                audit_template=audit_template_id)

    def soft_delete_audit_template(self, audit_template_id):
        try:
            return self._soft_delete(models.AuditTemplate, audit_template_id)
        except exception.ResourceNotFound:
            raise exception.AuditTemplateNotFound(
audit_template=audit_template_id) # ### AUDITS ### # def get_audit_list(self, *args, **kwargs): return self._get_model_list(models.Audit, self._add_audits_filters, *args, **kwargs) def create_audit(self, values): # ensure defaults are present for new audits if not values.get('uuid'): values['uuid'] = utils.generate_uuid() query = model_query(models.Audit) query = query.filter_by(name=values.get('name'), deleted_at=None) if len(query.all()) > 0: raise exception.AuditAlreadyExists( audit=values['name']) if values.get('state') is None: values['state'] = objects.audit.State.PENDING if not values.get('auto_trigger'): values['auto_trigger'] = False try: audit = self._create(models.Audit, values) except db_exc.DBDuplicateEntry: raise exception.AuditAlreadyExists(audit=values['uuid']) return audit def _get_audit(self, context, fieldname, value, eager): try: return self._get(context, model=models.Audit, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.AuditNotFound(audit=value) def get_audit_by_id(self, context, audit_id, eager=False): return self._get_audit( context, fieldname="id", value=audit_id, eager=eager) def get_audit_by_uuid(self, context, audit_uuid, eager=False): return self._get_audit( context, fieldname="uuid", value=audit_uuid, eager=eager) def get_audit_by_name(self, context, audit_name, eager=False): return self._get_audit( context, fieldname="name", value=audit_name, eager=eager) def destroy_audit(self, audit_id): def is_audit_referenced(session, audit_id): """Checks whether the audit is referenced by action_plan(s).""" query = model_query(models.ActionPlan, session=session) query = self._add_action_plans_filters( query, {'audit_id': audit_id}) return query.count() != 0 session = get_session() with session.begin(): query = model_query(models.Audit, session=session) query = add_identity_filter(query, audit_id) try: audit_ref = query.one() except exc.NoResultFound: raise exception.AuditNotFound(audit=audit_id) 
if is_audit_referenced(session, audit_ref['id']): raise exception.AuditReferenced(audit=audit_id) query.delete() def update_audit(self, audit_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "Audit.")) try: return self._update(models.Audit, audit_id, values) except exception.ResourceNotFound: raise exception.AuditNotFound(audit=audit_id) def soft_delete_audit(self, audit_id): try: return self._soft_delete(models.Audit, audit_id) except exception.ResourceNotFound: raise exception.AuditNotFound(audit=audit_id) # ### ACTIONS ### # def get_action_list(self, *args, **kwargs): return self._get_model_list(models.Action, self._add_actions_filters, *args, **kwargs) def create_action(self, values): # ensure defaults are present for new actions if not values.get('uuid'): values['uuid'] = utils.generate_uuid() if values.get('state') is None: values['state'] = objects.action.State.PENDING try: action = self._create(models.Action, values) except db_exc.DBDuplicateEntry: raise exception.ActionAlreadyExists(uuid=values['uuid']) return action def _get_action(self, context, fieldname, value, eager): try: return self._get(context, model=models.Action, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ActionNotFound(action=value) def get_action_by_id(self, context, action_id, eager=False): return self._get_action( context, fieldname="id", value=action_id, eager=eager) def get_action_by_uuid(self, context, action_uuid, eager=False): return self._get_action( context, fieldname="uuid", value=action_uuid, eager=eager) def destroy_action(self, action_id): session = get_session() with session.begin(): query = model_query(models.Action, session=session) query = add_identity_filter(query, action_id) count = query.delete() if count != 1: raise exception.ActionNotFound(action_id) def update_action(self, action_id, values): # NOTE(dtantsur): this can lead to very strange errors if 'uuid' 
in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing Action.")) return self._do_update_action(action_id, values) @staticmethod def _do_update_action(action_id, values): session = get_session() with session.begin(): query = model_query(models.Action, session=session) query = add_identity_filter(query, action_id) try: ref = query.with_lockmode('update').one() except exc.NoResultFound: raise exception.ActionNotFound(action=action_id) ref.update(values) return ref def soft_delete_action(self, action_id): try: return self._soft_delete(models.Action, action_id) except exception.ResourceNotFound: raise exception.ActionNotFound(action=action_id) # ### ACTION PLANS ### # def get_action_plan_list(self, *args, **kwargs): return self._get_model_list(models.ActionPlan, self._add_action_plans_filters, *args, **kwargs) def create_action_plan(self, values): # ensure defaults are present for new audits if not values.get('uuid'): values['uuid'] = utils.generate_uuid() try: action_plan = self._create(models.ActionPlan, values) except db_exc.DBDuplicateEntry: raise exception.ActionPlanAlreadyExists(uuid=values['uuid']) return action_plan def _get_action_plan(self, context, fieldname, value, eager): try: return self._get(context, model=models.ActionPlan, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ActionPlanNotFound(action_plan=value) def get_action_plan_by_id(self, context, action_plan_id, eager=False): return self._get_action_plan( context, fieldname="id", value=action_plan_id, eager=eager) def get_action_plan_by_uuid(self, context, action_plan_uuid, eager=False): return self._get_action_plan( context, fieldname="uuid", value=action_plan_uuid, eager=eager) def destroy_action_plan(self, action_plan_id): def is_action_plan_referenced(session, action_plan_id): """Checks whether the action_plan is referenced by action(s).""" query = model_query(models.Action, session=session) query = 
self._add_actions_filters( query, {'action_plan_id': action_plan_id}) return query.count() != 0 session = get_session() with session.begin(): query = model_query(models.ActionPlan, session=session) query = add_identity_filter(query, action_plan_id) try: action_plan_ref = query.one() except exc.NoResultFound: raise exception.ActionPlanNotFound(action_plan=action_plan_id) if is_action_plan_referenced(session, action_plan_ref['id']): raise exception.ActionPlanReferenced( action_plan=action_plan_id) query.delete() def update_action_plan(self, action_plan_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "Action Plan.")) return self._do_update_action_plan(action_plan_id, values) @staticmethod def _do_update_action_plan(action_plan_id, values): session = get_session() with session.begin(): query = model_query(models.ActionPlan, session=session) query = add_identity_filter(query, action_plan_id) try: ref = query.with_lockmode('update').one() except exc.NoResultFound: raise exception.ActionPlanNotFound(action_plan=action_plan_id) ref.update(values) return ref def soft_delete_action_plan(self, action_plan_id): try: return self._soft_delete(models.ActionPlan, action_plan_id) except exception.ResourceNotFound: raise exception.ActionPlanNotFound(action_plan=action_plan_id) # ### EFFICACY INDICATORS ### # def get_efficacy_indicator_list(self, *args, **kwargs): return self._get_model_list(models.EfficacyIndicator, self._add_efficacy_indicators_filters, *args, **kwargs) def create_efficacy_indicator(self, values): # ensure defaults are present for new efficacy indicators if not values.get('uuid'): values['uuid'] = utils.generate_uuid() try: efficacy_indicator = self._create(models.EfficacyIndicator, values) except db_exc.DBDuplicateEntry: raise exception.EfficacyIndicatorAlreadyExists(uuid=values['uuid']) return efficacy_indicator def _get_efficacy_indicator(self, context, fieldname, value, eager): try: return 
self._get(context, model=models.EfficacyIndicator, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound(efficacy_indicator=value) def get_efficacy_indicator_by_id(self, context, efficacy_indicator_id, eager=False): return self._get_efficacy_indicator( context, fieldname="id", value=efficacy_indicator_id, eager=eager) def get_efficacy_indicator_by_uuid(self, context, efficacy_indicator_uuid, eager=False): return self._get_efficacy_indicator( context, fieldname="uuid", value=efficacy_indicator_uuid, eager=eager) def get_efficacy_indicator_by_name(self, context, efficacy_indicator_name, eager=False): return self._get_efficacy_indicator( context, fieldname="name", value=efficacy_indicator_name, eager=eager) def update_efficacy_indicator(self, efficacy_indicator_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "efficacy indicator.")) try: return self._update( models.EfficacyIndicator, efficacy_indicator_id, values) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound( efficacy_indicator=efficacy_indicator_id) def soft_delete_efficacy_indicator(self, efficacy_indicator_id): try: return self._soft_delete( models.EfficacyIndicator, efficacy_indicator_id) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound( efficacy_indicator=efficacy_indicator_id) def destroy_efficacy_indicator(self, efficacy_indicator_id): try: return self._destroy( models.EfficacyIndicator, efficacy_indicator_id) except exception.ResourceNotFound: raise exception.EfficacyIndicatorNotFound( efficacy_indicator=efficacy_indicator_id) # ### SCORING ENGINES ### # def get_scoring_engine_list(self, *args, **kwargs): return self._get_model_list(models.ScoringEngine, self._add_scoring_engine_filters, *args, **kwargs) def create_scoring_engine(self, values): # ensure defaults are present for new scoring engines if not 
values.get('uuid'): values['uuid'] = utils.generate_uuid() try: scoring_engine = self._create(models.ScoringEngine, values) except db_exc.DBDuplicateEntry: raise exception.ScoringEngineAlreadyExists(uuid=values['uuid']) return scoring_engine def _get_scoring_engine(self, context, fieldname, value, eager): try: return self._get(context, model=models.ScoringEngine, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound(scoring_engine=value) def get_scoring_engine_by_id(self, context, scoring_engine_id, eager=False): return self._get_scoring_engine( context, fieldname="id", value=scoring_engine_id, eager=eager) def get_scoring_engine_by_uuid(self, context, scoring_engine_uuid, eager=False): return self._get_scoring_engine( context, fieldname="uuid", value=scoring_engine_uuid, eager=eager) def get_scoring_engine_by_name(self, context, scoring_engine_name, eager=False): return self._get_scoring_engine( context, fieldname="name", value=scoring_engine_name, eager=eager) def destroy_scoring_engine(self, scoring_engine_id): try: return self._destroy(models.ScoringEngine, scoring_engine_id) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound( scoring_engine=scoring_engine_id) def update_scoring_engine(self, scoring_engine_id, values): if 'uuid' in values: raise exception.Invalid( message=_("Cannot overwrite UUID for an existing " "Scoring Engine.")) try: return self._update( models.ScoringEngine, scoring_engine_id, values) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound( scoring_engine=scoring_engine_id) def soft_delete_scoring_engine(self, scoring_engine_id): try: return self._soft_delete( models.ScoringEngine, scoring_engine_id) except exception.ResourceNotFound: raise exception.ScoringEngineNotFound( scoring_engine=scoring_engine_id) # ### SERVICES ### # def get_service_list(self, *args, **kwargs): return self._get_model_list(models.Service, 
self._add_services_filters, *args, **kwargs) def create_service(self, values): try: service = self._create(models.Service, values) except db_exc.DBDuplicateEntry: raise exception.ServiceAlreadyExists(name=values['name'], host=values['host']) return service def _get_service(self, context, fieldname, value, eager): try: return self._get(context, model=models.Service, fieldname=fieldname, value=value, eager=eager) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=value) def get_service_by_id(self, context, service_id, eager=False): return self._get_service( context, fieldname="id", value=service_id, eager=eager) def get_service_by_name(self, context, service_name, eager=False): return self._get_service( context, fieldname="name", value=service_name, eager=eager) def destroy_service(self, service_id): try: return self._destroy(models.Service, service_id) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=service_id) def update_service(self, service_id, values): try: return self._update(models.Service, service_id, values) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=service_id) def soft_delete_service(self, service_id): try: return self._soft_delete(models.Service, service_id) except exception.ResourceNotFound: raise exception.ServiceNotFound(service=service_id) # ### ACTION_DESCRIPTIONS ### # def get_action_description_list(self, *args, **kwargs): return self._get_model_list(models.ActionDescription, self._add_action_descriptions_filters, *args, **kwargs) def create_action_description(self, values): try: action_description = self._create(models.ActionDescription, values) except db_exc.DBDuplicateEntry: raise exception.ActionDescriptionAlreadyExists( action_type=values['action_type']) return action_description def _get_action_description(self, context, fieldname, value, eager): try: return self._get(context, model=models.ActionDescription, fieldname=fieldname, value=value, eager=eager) except 
exception.ResourceNotFound: raise exception.ActionDescriptionNotFound(action_id=value) def get_action_description_by_id(self, context, action_id, eager=False): return self._get_action_description( context, fieldname="id", value=action_id, eager=eager) def get_action_description_by_type(self, context, action_type, eager=False): return self._get_action_description( context, fieldname="action_type", value=action_type, eager=eager) def destroy_action_description(self, action_id): try: return self._destroy(models.ActionDescription, action_id) except exception.ResourceNotFound: raise exception.ActionDescriptionNotFound( action_id=action_id) def update_action_description(self, action_id, values): try: return self._update(models.ActionDescription, action_id, values) except exception.ResourceNotFound: raise exception.ActionDescriptionNotFound( action_id=action_id) def soft_delete_action_description(self, action_id): try: return self._soft_delete(models.ActionDescription, action_id) except exception.ResourceNotFound: raise exception.ActionDescriptionNotFound( action_id=action_id) python-watcher-4.0.0/watcher/db/sqlalchemy/alembic.ini0000664000175000017500000000171713656752270023015 0ustar zuulzuul00000000000000# A generic, single database configuration. 
[alembic] # path to migration scripts script_location = %(here)s/alembic # template used to generate migration files # file_template = %%(rev)s_%%(slug)s # max length of characters to apply to the # "slug" field #truncate_slug_length = 40 # set to 'true' to run the environment during # the 'revision' command, regardless of autogenerate # revision_environment = false #sqlalchemy.url = driver://user:pass@localhost/dbname # Logging configuration [loggers] keys = root,sqlalchemy,alembic [handlers] keys = console [formatters] keys = generic [logger_root] level = WARN handlers = console qualname = [logger_sqlalchemy] level = WARN handlers = qualname = sqlalchemy.engine [logger_alembic] level = INFO handlers = qualname = alembic [handler_console] class = StreamHandler args = (sys.stderr,) level = NOTSET formatter = generic [formatter_generic] format = %(levelname)-5.5s [%(name)s] %(message)s datefmt = %H:%M:%S python-watcher-4.0.0/watcher/db/sqlalchemy/job_store.py0000664000175000017500000001053113656752270023252 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica LTD # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_serialization import jsonutils from apscheduler.jobstores.base import ConflictingIdError from apscheduler.jobstores import sqlalchemy from apscheduler.util import datetime_to_utc_timestamp from apscheduler.util import maybe_ref from watcher.common import context from watcher.common import service from watcher import objects try: import cPickle as pickle except ImportError: # pragma: nocover import pickle from sqlalchemy import Table, MetaData, select, and_ from sqlalchemy.exc import IntegrityError class WatcherJobStore(sqlalchemy.SQLAlchemyJobStore): """Stores jobs in a database table using SQLAlchemy. The table will be created if it doesn't exist in the database. Plugin alias: ``sqlalchemy`` :param str url: connection string :param engine: an SQLAlchemy Engine to use instead of creating a new one based on ``url`` :param str tablename: name of the table to store jobs in :param metadata: a :class:`~sqlalchemy.MetaData` instance to use instead of creating a new one :param int pickle_protocol: pickle protocol level to use (for serialization), defaults to the highest available :param dict tag: tag description """ def __init__(self, url=None, engine=None, tablename='apscheduler_jobs', metadata=None, pickle_protocol=pickle.HIGHEST_PROTOCOL, tag=None): super(WatcherJobStore, self).__init__(url, engine, tablename, metadata, pickle_protocol) metadata = maybe_ref(metadata) or MetaData() self.jobs_t = Table(tablename, metadata, autoload=True, autoload_with=engine) service_ident = service.ServiceHeartbeat.get_service_name() self.tag = tag or {'host': service_ident[0], 'name': service_ident[1]} self.service_id = objects.Service.list(context=context.make_context(), filters=self.tag)[0].id def start(self, scheduler, alias): # There should be called 'start' method of parent of SQLAlchemyJobStore super(self.__class__.__bases__[0], self).start(scheduler, alias) def add_job(self, job): insert = self.jobs_t.insert().values(**{ 'id': job.id, 'next_run_time': 
datetime_to_utc_timestamp(job.next_run_time), 'job_state': pickle.dumps(job.__getstate__(), self.pickle_protocol), 'service_id': self.service_id, 'tag': jsonutils.dumps(self.tag) }) try: self.engine.execute(insert) except IntegrityError: raise ConflictingIdError(job.id) def get_all_jobs(self): jobs = self._get_jobs(self.jobs_t.c.tag == jsonutils.dumps(self.tag)) self._fix_paused_jobs_sorting(jobs) return jobs def _get_jobs(self, *conditions): jobs = [] conditions += (self.jobs_t.c.service_id == self.service_id,) selectable = select( [self.jobs_t.c.id, self.jobs_t.c.job_state, self.jobs_t.c.tag] ).order_by(self.jobs_t.c.next_run_time).where(and_(*conditions)) failed_job_ids = set() for row in self.engine.execute(selectable): try: jobs.append(self._reconstitute_job(row.job_state)) except Exception: self._logger.exception( 'Unable to restore job "%s" -- removing it', row.id) failed_job_ids.add(row.id) # Remove all the jobs we failed to restore if failed_job_ids: delete = self.jobs_t.delete().where( self.jobs_t.c.id.in_(failed_job_ids)) self.engine.execute(delete) return jobs python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/0000775000175000017500000000000013656752352022307 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/0000775000175000017500000000000013656752352024157 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/0f6042416884_add_apscheduler_jobs.py0000664000175000017500000000161713656752270032261 0ustar zuulzuul00000000000000"""Add apscheduler_jobs table to store background jobs Revision ID: 0f6042416884 Revises: 001 Create Date: 2017-03-24 11:21:29.036532 """ from alembic import op import sqlalchemy as sa from watcher.db.sqlalchemy import models # revision identifiers, used by Alembic. 
revision = '0f6042416884' down_revision = '001' def upgrade(): op.create_table( 'apscheduler_jobs', sa.Column('id', sa.Unicode(191, _warn_on_bytestring=False), nullable=False), sa.Column('next_run_time', sa.Float(25), index=True), sa.Column('job_state', sa.LargeBinary, nullable=False), sa.Column('service_id', sa.Integer(), nullable=False), sa.Column('tag', models.JSONEncodedDict(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.ForeignKeyConstraint(['service_id'], ['services.id']) ) def downgrade(): op.drop_table('apscheduler_jobs') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/3cfc94cecf4e_add_name_for_audit.py0000664000175000017500000000064413656752270032445 0ustar zuulzuul00000000000000"""add name for audit Revision ID: 3cfc94cecf4e Revises: d098df6021e2 Create Date: 2017-07-19 15:44:57.661099 """ # revision identifiers, used by Alembic. revision = '3cfc94cecf4e' down_revision = 'd09a5945e4a0' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('audits', sa.Column('name', sa.String(length=63), nullable=True)) def downgrade(): op.drop_column('audits', 'name') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/609bec748f2a_add_force_field.py0000664000175000017500000000063113656752270031505 0ustar zuulzuul00000000000000"""add_force_field Revision ID: 609bec748f2a Revises: 4b16194c56bc Create Date: 2019-05-05 14:06:14.249124 """ # revision identifiers, used by Alembic. 
revision = '609bec748f2a' down_revision = '4b16194c56bc' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('audits', sa.Column('force', sa.Boolean, default=False)) def downgrade(): op.drop_column('audits', 'force') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/001_ocata.py0000664000175000017500000002243413656752270026204 0ustar zuulzuul00000000000000"""ocata release Revision ID: 9894235b4278 Revises: None Create Date: 2017-02-01 09:40:05.065981 """ from alembic import op import oslo_db import sqlalchemy as sa from watcher.db.sqlalchemy import models # revision identifiers, used by Alembic. revision = '001' down_revision = None def upgrade(): op.create_table( 'goals', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=63), nullable=False), sa.Column('display_name', sa.String(length=63), nullable=False), sa.Column('efficacy_specification', models.JSONEncodedList(), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_goals0name'), sa.UniqueConstraint('uuid', name='uniq_goals0uuid') ) op.create_table( 'scoring_engines', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('name', sa.String(length=63), nullable=False), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('metainfo', sa.Text(), nullable=True), 
sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_scoring_engines0name'), sa.UniqueConstraint('uuid', name='uniq_scoring_engines0uuid') ) op.create_table( 'services', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(length=255), nullable=False), sa.Column('host', sa.String(length=255), nullable=False), sa.Column('last_seen_up', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('host', 'name', 'deleted', name='uniq_services0host0name0deleted') ) op.create_table( 'strategies', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=63), nullable=False), sa.Column('display_name', sa.String(length=63), nullable=False), sa.Column('goal_id', sa.Integer(), nullable=False), sa.Column('parameters_spec', models.JSONEncodedDict(), nullable=True), sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), sa.UniqueConstraint('uuid', name='uniq_strategies0uuid') ) op.create_table( 'audit_templates', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), 
nullable=True), sa.Column('name', sa.String(length=63), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('goal_id', sa.Integer(), nullable=False), sa.Column('strategy_id', sa.Integer(), nullable=True), sa.Column('scope', models.JSONEncodedList(), nullable=True), sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'deleted', name='uniq_audit_templates0name'), sa.UniqueConstraint('uuid', name='uniq_audit_templates0uuid') ) op.create_table( 'audits', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('audit_type', sa.String(length=20), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('parameters', models.JSONEncodedDict(), nullable=True), sa.Column('interval', sa.Integer(), nullable=True), sa.Column('goal_id', sa.Integer(), nullable=False), sa.Column('strategy_id', sa.Integer(), nullable=True), sa.Column('scope', models.JSONEncodedList(), nullable=True), sa.Column('auto_trigger', sa.Boolean(), nullable=False), sa.ForeignKeyConstraint(['goal_id'], ['goals.id'], ), sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_audits0uuid') ) op.create_table( 'action_plans', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', 
sa.String(length=36), nullable=True), sa.Column('audit_id', sa.Integer(), nullable=False), sa.Column('strategy_id', sa.Integer(), nullable=False), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('global_efficacy', models.JSONEncodedDict(), nullable=True), sa.ForeignKeyConstraint(['audit_id'], ['audits.id'], ), sa.ForeignKeyConstraint(['strategy_id'], ['strategies.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_action_plans0uuid') ) op.create_table( 'actions', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=False), sa.Column('action_plan_id', sa.Integer(), nullable=False), sa.Column('action_type', sa.String(length=255), nullable=False), sa.Column('input_parameters', models.JSONEncodedDict(), nullable=True), sa.Column('state', sa.String(length=20), nullable=True), sa.Column('parents', models.JSONEncodedList(), nullable=True), sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_actions0uuid') ) op.create_table( 'efficacy_indicators', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('uuid', sa.String(length=36), nullable=True), sa.Column('name', sa.String(length=63), nullable=True), sa.Column('description', sa.String(length=255), nullable=True), sa.Column('unit', sa.String(length=63), nullable=True), sa.Column('value', sa.Numeric(), nullable=True), sa.Column('action_plan_id', sa.Integer(), 
nullable=False), sa.ForeignKeyConstraint(['action_plan_id'], ['action_plans.id'], ), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid') ) def downgrade(): op.drop_table('efficacy_indicators') op.drop_table('actions') op.drop_table('action_plans') op.drop_table('audits') op.drop_table('audit_templates') op.drop_table('strategies') op.drop_table('services') op.drop_table('scoring_engines') op.drop_table('goals') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/a86240e89a29_.py0000664000175000017500000000247613656752270026366 0ustar zuulzuul00000000000000"""Set name for Audit as part of backward compatibility Revision ID: a86240e89a29 Revises: 3cfc94cecf4e Create Date: 2017-12-21 13:00:09.278587 """ # revision identifiers, used by Alembic. revision = 'a86240e89a29' down_revision = '3cfc94cecf4e' from alembic import op from sqlalchemy.orm import sessionmaker from watcher.db.sqlalchemy import models def upgrade(): connection = op.get_bind() session = sessionmaker() s = session(bind=connection) audits = s.query( models.Audit.strategy_id.label('strategy_id'), models.Audit.created_at.label('created_at')).filter( models.Audit.name is None).all() for audit in audits: strategy_name = s.query(models.Strategy).filter_by( id=audit.strategy_id).one().name s.query().filter(models.Audit.name is None).update( {'name': strategy_name + '-' + str(audit.created_at)}) s.commit() def downgrade(): connection = op.get_bind() session = sessionmaker() s = session(bind=connection) audits = s.query( models.Audit.strategy_id.label('strategy_id'), models.Audit.created_at.label('created_at')).filter( models.Audit.name is not None).all() for audit in audits: s.query().filter(models.Audit.name is not None).update( {'name': None}) s.commit() ././@LongLink0000000000000000000000000000015100000000000011212 Lustar 
00000000000000python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/d09a5945e4a0_add_action_description_table.pypython-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/d09a5945e4a0_add_action_description_tabl0000664000175000017500000000206413656752270033413 0ustar zuulzuul00000000000000"""add action description table Revision ID: d09a5945e4a0 Revises: d098df6021e2 Create Date: 2017-07-13 20:33:01.473711 """ from alembic import op import oslo_db import sqlalchemy as sa # revision identifiers, used by Alembic. revision = 'd09a5945e4a0' down_revision = 'd098df6021e2' def upgrade(): op.create_table( 'action_descriptions', sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.Column('deleted_at', sa.DateTime(), nullable=True), sa.Column('deleted', oslo_db.sqlalchemy.types.SoftDeleteInteger(), nullable=True), sa.Column('id', sa.Integer(), nullable=False), sa.Column('action_type', sa.String(length=255), nullable=False), sa.Column('description', sa.String(length=255), nullable=False), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('action_type', name='uniq_action_description0action_type') ) def downgrade(): op.drop_table('action_descriptions') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/52804f2498c4_add_hostname.py0000664000175000017500000000110713656752270030643 0ustar zuulzuul00000000000000"""Add hostname field to both Audit and Action Plan models Revision ID: 52804f2498c4 Revises: a86240e89a29 Create Date: 2018-06-26 13:06:45.530387 """ # revision identifiers, used by Alembic. 
revision = '52804f2498c4' down_revision = 'a86240e89a29' from alembic import op import sqlalchemy as sa def upgrade(): for table in ('audits', 'action_plans'): op.add_column( table, sa.Column('hostname', sa.String(length=255), nullable=True)) def downgrade(): for table in ('audits', 'action_plans'): op.drop_column(table, 'hostname') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/4b16194c56bc_add_start_end_time.py0000664000175000017500000000104313656752270032154 0ustar zuulzuul00000000000000"""add_start_end_time Revision ID: 4b16194c56bc Revises: 52804f2498c4 Create Date: 2018-03-23 00:36:29.031259 """ # revision identifiers, used by Alembic. revision = '4b16194c56bc' down_revision = '52804f2498c4' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('audits', sa.Column('start_time', sa.DateTime(), nullable=True)) op.add_column('audits', sa.Column('end_time', sa.DateTime(), nullable=True)) def downgrade(): op.drop_column('audits', 'start_time') op.drop_column('audits', 'end_time') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/versions/d098df6021e2_cron_support_for_audit.py0000664000175000017500000000124113656752270033135 0ustar zuulzuul00000000000000"""Add cron support for audit table Revision ID: d098df6021e2 Revises: 0f6042416884 Create Date: 2017-06-08 16:21:35.746752 """ from alembic import op import sqlalchemy as sa # revision identifiers, used by Alembic. 
revision = 'd098df6021e2' down_revision = '0f6042416884' def upgrade(): op.alter_column('audits', 'interval', existing_type=sa.String(36), nullable=True) op.add_column('audits', sa.Column('next_run_time', sa.DateTime(), nullable=True)) def downgrade(): op.alter_column('audits', 'interval', existing_type=sa.Integer(), nullable=True) op.drop_column('audits', 'next_run_time') python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/env.py0000664000175000017500000000335113656752270023452 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from logging import config as log_config from alembic import context from watcher.db.sqlalchemy import api as sqla_api from watcher.db.sqlalchemy import models # this is the Alembic Config object, which provides # access to the values within the .ini file in use. config = context.config # Interpret the config file for Python logging. # This line sets up loggers basically. log_config.fileConfig(config.config_file_name) # add your model's MetaData object here # for 'autogenerate' support # from myapp import mymodel target_metadata = models.Base.metadata # other values from the config, defined by the needs of env.py, # can be acquired: # my_important_option = config.get_main_option("my_important_option") # ... etc. def run_migrations_online(): """Run migrations in 'online' mode. In this scenario we need to create an Engine and associate a connection with the context. 
""" engine = sqla_api.get_engine() with engine.connect() as connection: context.configure(connection=connection, target_metadata=target_metadata) with context.begin_transaction(): context.run_migrations() run_migrations_online() python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/README.rst0000664000175000017500000000362613656752270024004 0ustar zuulzuul00000000000000The migrations in the alembic/versions contain the changes needed to migrate from older Watcher releases to newer versions. A migration occurs by executing a script that details the changes needed to upgrade/downgrade the database. The migration scripts are ordered so that multiple scripts can run sequentially to update the database. The scripts are executed by Watcher's migration wrapper which uses the Alembic library to manage the migration. Watcher supports migration from Ocata or later. If you are a deployer or developer and want to migrate from Ocata to later release you must first add version tracking to the database:: $ watcher-db-manage --config-file /path/to/watcher.conf stamp ocata You can upgrade to the latest database version via:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head To check the current database version:: $ watcher-db-manage --config-file /path/to/watcher.conf version To create a script to run the migration offline:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade head --sql To run the offline migration between specific migration versions:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade \ : --sql Upgrade the database incrementally:: $ watcher-db-manage --config-file /path/to/watcher.conf upgrade --revision \ <# of revs> Downgrade the database by a certain number of revisions:: $ watcher-db-manage --config-file /path/to/watcher.conf downgrade --revision \ <# of revs> Create new revision:: $ watcher-db-manage --config-file /path/to/watcher.conf revision \ -m "description of revision" --autogenerate Create a blank file:: $ 
watcher-db-manage --config-file /path/to/watcher.conf revision \ -m "description of revision" Please see https://alembic.readthedocs.org/en/latest/index.html for general documentation python-watcher-4.0.0/watcher/db/sqlalchemy/alembic/script.py.mako0000664000175000017500000000063413656752270025115 0ustar zuulzuul00000000000000"""${message} Revision ID: ${up_revision} Revises: ${down_revision} Create Date: ${create_date} """ # revision identifiers, used by Alembic. revision = ${repr(up_revision)} down_revision = ${repr(down_revision)} from alembic import op import sqlalchemy as sa ${imports if imports else ""} def upgrade(): ${upgrades if upgrades else "pass"} def downgrade(): ${downgrades if downgrades else "pass"} python-watcher-4.0.0/watcher/db/sqlalchemy/migration.py0000664000175000017500000000713413656752270023262 0ustar zuulzuul00000000000000# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import alembic from alembic import config as alembic_config import alembic.migration as alembic_migration from oslo_db import exception as db_exc from watcher._i18n import _ from watcher.db.sqlalchemy import api as sqla_api from watcher.db.sqlalchemy import models def _alembic_config(): path = os.path.join(os.path.dirname(__file__), 'alembic.ini') config = alembic_config.Config(path) return config def version(engine=None): """Current database version. :returns: Database version :rtype: string """ if engine is None: engine = sqla_api.get_engine() with engine.connect() as conn: context = alembic_migration.MigrationContext.configure(conn) return context.get_current_revision() def upgrade(revision, config=None): """Used for upgrading database. :param version: Desired database version :type version: string """ revision = revision or 'head' config = config or _alembic_config() alembic.command.upgrade(config, revision) def create_schema(config=None, engine=None): """Create database schema from models description. Can be used for initial installation instead of upgrade('head'). """ if engine is None: engine = sqla_api.get_engine() # NOTE(viktors): If we will use metadata.create_all() for non empty db # schema, it will only add the new tables, but leave # existing as is. So we should avoid of this situation. if version(engine=engine) is not None: raise db_exc.DBMigrationError( _("Watcher database schema is already under version control; " "use upgrade() instead")) models.Base.metadata.create_all(engine) stamp('head', config=config) def downgrade(revision, config=None): """Used for downgrading database. :param version: Desired database version :type version: string """ revision = revision or 'base' config = config or _alembic_config() return alembic.command.downgrade(config, revision) def stamp(revision, config=None): """Stamps database with provided revision. Don't run any migrations. 
:param revision: Should match one from repository or head - to stamp database with most recent revision :type revision: string """ config = config or _alembic_config() return alembic.command.stamp(config, revision=revision) def revision(message=None, autogenerate=False, config=None): """Creates template for migration. :param message: Text that will be used for migration title :type message: string :param autogenerate: If True - generates diff based on current database state :type autogenerate: bool """ config = config or _alembic_config() return alembic.command.revision(config, message=message, autogenerate=autogenerate) python-watcher-4.0.0/watcher/db/sqlalchemy/models.py0000664000175000017500000002531413656752270022554 0ustar zuulzuul00000000000000# Copyright 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
""" SQLAlchemy models for watcher service """ from oslo_db.sqlalchemy import models from oslo_serialization import jsonutils import six.moves.urllib.parse as urlparse from sqlalchemy import Boolean from sqlalchemy import Column from sqlalchemy import DateTime from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import Float from sqlalchemy import ForeignKey from sqlalchemy import Integer from sqlalchemy import LargeBinary from sqlalchemy import Numeric from sqlalchemy import orm from sqlalchemy import String from sqlalchemy import Text from sqlalchemy.types import TypeDecorator, TEXT from sqlalchemy import UniqueConstraint from watcher import conf CONF = conf.CONF def table_args(): engine_name = urlparse.urlparse(CONF.database.connection).scheme if engine_name == 'mysql': return {'mysql_engine': CONF.database.mysql_engine, 'mysql_charset': "utf8"} return None class JsonEncodedType(TypeDecorator): """Abstract base type serialized as json-encoded string in db.""" type = None impl = TEXT def process_bind_param(self, value, dialect): if value is None: # Save default value according to current type to keep the # interface the consistent. 
value = self.type() elif not isinstance(value, self.type): raise TypeError("%s supposes to store %s objects, but %s given" % (self.__class__.__name__, self.type.__name__, type(value).__name__)) serialized_value = jsonutils.dumps(value) return serialized_value def process_result_value(self, value, dialect): if value is not None: value = jsonutils.loads(value) return value class JSONEncodedDict(JsonEncodedType): """Represents dict serialized as json-encoded string in db.""" type = dict class JSONEncodedList(JsonEncodedType): """Represents list serialized as json-encoded string in db.""" type = list class WatcherBase(models.SoftDeleteMixin, models.TimestampMixin, models.ModelBase): metadata = None def as_dict(self): d = {} for c in self.__table__.columns: d[c.name] = self[c.name] return d def save(self, session=None): import watcher.db.sqlalchemy.api as db_api if session is None: session = db_api.get_session() super(WatcherBase, self).save(session) Base = declarative_base(cls=WatcherBase) class Goal(Base): """Represents a goal.""" __tablename__ = 'goals' __table_args__ = ( UniqueConstraint('uuid', name='uniq_goals0uuid'), UniqueConstraint('name', 'deleted', name='uniq_goals0name'), table_args(), ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63), nullable=False) display_name = Column(String(63), nullable=False) efficacy_specification = Column(JSONEncodedList, nullable=False) class Strategy(Base): """Represents a strategy.""" __tablename__ = 'strategies' __table_args__ = ( UniqueConstraint('uuid', name='uniq_strategies0uuid'), UniqueConstraint('name', 'deleted', name='uniq_strategies0name'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63), nullable=False) display_name = Column(String(63), nullable=False) goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) parameters_spec = Column(JSONEncodedDict, nullable=True) goal = 
orm.relationship(Goal, foreign_keys=goal_id, lazy=None) class AuditTemplate(Base): """Represents an audit template.""" __tablename__ = 'audit_templates' __table_args__ = ( UniqueConstraint('uuid', name='uniq_audit_templates0uuid'), UniqueConstraint('name', 'deleted', name='uniq_audit_templates0name'), table_args() ) id = Column(Integer, primary_key=True) uuid = Column(String(36)) name = Column(String(63), nullable=True) description = Column(String(255), nullable=True) goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) scope = Column(JSONEncodedList) goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) class Audit(Base): """Represents an audit.""" __tablename__ = 'audits' __table_args__ = ( UniqueConstraint('uuid', name='uniq_audits0uuid'), UniqueConstraint('name', 'deleted', name='uniq_audits0name'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63), nullable=True) audit_type = Column(String(20)) state = Column(String(20), nullable=True) parameters = Column(JSONEncodedDict, nullable=True) interval = Column(String(36), nullable=True) goal_id = Column(Integer, ForeignKey('goals.id'), nullable=False) strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=True) scope = Column(JSONEncodedList, nullable=True) auto_trigger = Column(Boolean, nullable=False) next_run_time = Column(DateTime, nullable=True) hostname = Column(String(255), nullable=True) start_time = Column(DateTime, nullable=True) end_time = Column(DateTime, nullable=True) force = Column(Boolean, nullable=False) goal = orm.relationship(Goal, foreign_keys=goal_id, lazy=None) strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) class ActionPlan(Base): """Represents an action plan.""" __tablename__ = 'action_plans' __table_args__ = ( 
UniqueConstraint('uuid', name='uniq_action_plans0uuid'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) audit_id = Column(Integer, ForeignKey('audits.id'), nullable=False) strategy_id = Column(Integer, ForeignKey('strategies.id'), nullable=False) state = Column(String(20), nullable=True) global_efficacy = Column(JSONEncodedList, nullable=True) hostname = Column(String(255), nullable=True) audit = orm.relationship(Audit, foreign_keys=audit_id, lazy=None) strategy = orm.relationship(Strategy, foreign_keys=strategy_id, lazy=None) class Action(Base): """Represents an action.""" __tablename__ = 'actions' __table_args__ = ( UniqueConstraint('uuid', name='uniq_actions0uuid'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36), nullable=False) action_plan_id = Column(Integer, ForeignKey('action_plans.id'), nullable=False) # only for the first version action_type = Column(String(255), nullable=False) input_parameters = Column(JSONEncodedDict, nullable=True) state = Column(String(20), nullable=True) parents = Column(JSONEncodedList, nullable=True) action_plan = orm.relationship( ActionPlan, foreign_keys=action_plan_id, lazy=None) class EfficacyIndicator(Base): """Represents an efficacy indicator.""" __tablename__ = 'efficacy_indicators' __table_args__ = ( UniqueConstraint('uuid', name='uniq_efficacy_indicators0uuid'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36)) name = Column(String(63)) description = Column(String(255), nullable=True) unit = Column(String(63), nullable=True) value = Column(Numeric()) action_plan_id = Column(Integer, ForeignKey('action_plans.id'), nullable=False) action_plan = orm.relationship( ActionPlan, foreign_keys=action_plan_id, lazy=None) class ScoringEngine(Base): """Represents a scoring engine.""" __tablename__ = 'scoring_engines' __table_args__ = ( UniqueConstraint('uuid', 
name='uniq_scoring_engines0uuid'), UniqueConstraint('name', 'deleted', name='uniq_scoring_engines0name'), table_args() ) id = Column(Integer, primary_key=True, autoincrement=True) uuid = Column(String(36), nullable=False) name = Column(String(63), nullable=False) description = Column(String(255), nullable=True) # Metainfo might contain some additional information about the data model. # The format might vary between different models (e.g. be JSON, XML or # even some custom format), the blob type should cover all scenarios. metainfo = Column(Text, nullable=True) class Service(Base): """Represents a service entity""" __tablename__ = 'services' __table_args__ = ( UniqueConstraint('host', 'name', 'deleted', name="uniq_services0host0name0deleted"), table_args() ) id = Column(Integer, primary_key=True) name = Column(String(255), nullable=False) host = Column(String(255), nullable=False) last_seen_up = Column(DateTime, nullable=True) class ActionDescription(Base): """Represents a action description""" __tablename__ = 'action_descriptions' __table_args__ = ( UniqueConstraint('action_type', name="uniq_action_description0action_type"), table_args() ) id = Column(Integer, primary_key=True) action_type = Column(String(255), nullable=False) description = Column(String(255), nullable=False) class APScheulerJob(Base): """Represents apscheduler jobs""" __tablename__ = 'apscheduler_jobs' __table_args__ = ( UniqueConstraint('id', name="uniq_apscheduler_jobs0id"), table_args() ) id = Column(String(191), nullable=False, primary_key=True) next_run_time = Column(Float(25), index=True) job_state = Column(LargeBinary, nullable=False) tag = Column(JSONEncodedDict(), nullable=True) service_id = Column(Integer, ForeignKey('services.id'), nullable=False) service = orm.relationship( Service, foreign_keys=service_id, lazy=None) python-watcher-4.0.0/watcher/objects/0000775000175000017500000000000013656752352017615 5ustar 
zuulzuul00000000000000python-watcher-4.0.0/watcher/objects/__init__.py0000664000175000017500000000314013656752270021723 0ustar zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(comstud): You may scratch your head as you see code that imports # this module and then accesses attributes for objects such as Node, # etc, yet you do not see these attributes in here. Never fear, there is # a little bit of magic. When objects are registered, an attribute is set # on this module automatically, pointing to the newest/latest version of # the object. def register_all(): # NOTE(danms): You must make sure your object gets imported in this # function in order for it to be registered by services that may # need to receive it via RPC. __import__('watcher.objects.goal') __import__('watcher.objects.strategy') __import__('watcher.objects.audit_template') __import__('watcher.objects.audit') __import__('watcher.objects.action_plan') __import__('watcher.objects.action') __import__('watcher.objects.efficacy_indicator') __import__('watcher.objects.scoring_engine') __import__('watcher.objects.service') __import__('watcher.objects.action_description') python-watcher-4.0.0/watcher/objects/efficacy_indicator.py0000664000175000017500000001664413656752270024006 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class EfficacyIndicator(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'action_plan_id': wfields.IntegerField(), 'name': wfields.StringField(), 'description': wfields.StringField(nullable=True), 'unit': wfields.StringField(nullable=True), 'value': wfields.NumericField(), } @base.remotable_classmethod def get(cls, context, efficacy_indicator_id): """Find an efficacy indicator object given its ID or UUID :param efficacy_indicator_id: the ID or UUID of an efficacy indicator. :returns: a :class:`EfficacyIndicator` object. """ if utils.is_int_like(efficacy_indicator_id): return cls.get_by_id(context, efficacy_indicator_id) elif utils.is_uuid_like(efficacy_indicator_id): return cls.get_by_uuid(context, efficacy_indicator_id) else: raise exception.InvalidIdentity(identity=efficacy_indicator_id) @base.remotable_classmethod def get_by_id(cls, context, efficacy_indicator_id): """Find an efficacy indicator given its integer ID :param efficacy_indicator_id: the id of an efficacy indicator. :returns: a :class:`EfficacyIndicator` object. 
""" db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_id( context, efficacy_indicator_id) efficacy_indicator = EfficacyIndicator._from_db_object( cls(context), db_efficacy_indicator) return efficacy_indicator @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find an efficacy indicator given its UUID :param uuid: the uuid of an efficacy indicator. :param context: Security context :returns: a :class:`EfficacyIndicator` object. """ db_efficacy_indicator = cls.dbapi.get_efficacy_indicator_by_uuid( context, uuid) efficacy_indicator = EfficacyIndicator._from_db_object( cls(context), db_efficacy_indicator) return efficacy_indicator @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of EfficacyIndicator objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: Filters to apply. Defaults to None. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`EfficacyIndicator` object. """ db_efficacy_indicators = cls.dbapi.get_efficacy_indicator_list( context, limit=limit, marker=marker, filters=filters, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_efficacy_indicators] @base.remotable def create(self, context=None): """Create a EfficacyIndicator record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ values = self.obj_get_changes() db_efficacy_indicator = self.dbapi.create_efficacy_indicator(values) self._from_db_object(self, db_efficacy_indicator) def destroy(self, context=None): """Delete the EfficacyIndicator from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ self.dbapi.destroy_efficacy_indicator(self.uuid) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this EfficacyIndicator. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ updates = self.obj_get_changes() self.dbapi.update_efficacy_indicator(self.uuid, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None): """Loads updates for this EfficacyIndicator. Loads an efficacy indicator with the same uuid from the database and checks for updated attributes. Updates are applied to the loaded efficacy indicator column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: EfficacyIndicator(context) """ current = self.__class__.get_by_uuid(self._context, uuid=self.uuid) self.obj_refresh(current) @base.remotable def soft_delete(self, context=None): """Soft Delete the efficacy indicator from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) """ self.dbapi.soft_delete_efficacy_indicator(self.uuid) python-watcher-4.0.0/watcher/objects/action_plan.py0000664000175000017500000003220113656752270022453 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Action Plan ` is a flow of :ref:`Actions ` that should be executed in order to satisfy a given :ref:`Goal `. An :ref:`Action Plan ` is generated by Watcher when an :ref:`Audit ` is successful which implies that the :ref:`Strategy ` which was used has found a :ref:`Solution ` to achieve the :ref:`Goal ` of this :ref:`Audit `. In the default implementation of Watcher, an :ref:`Action Plan ` is only composed of successive :ref:`Actions ` (i.e., a Workflow of :ref:`Actions ` belonging to a unique branch). 
However, Watcher provides abstract interfaces for many of its components, allowing other implementations to generate and handle more complex :ref:`Action Plan(s) ` composed of two types of Action Item(s): - simple :ref:`Actions `: atomic tasks, which means it can not be split into smaller tasks or commands from an OpenStack point of view. - composite Actions: which are composed of several simple :ref:`Actions ` ordered in sequential and/or parallel flows. An :ref:`Action Plan ` may be described using standard workflow model description formats such as `Business Process Model and Notation 2.0 (BPMN 2.0) `_ or `Unified Modeling Language (UML) `_. An :ref:`Action Plan ` has a life-cycle and its current state may be one of the following: - **RECOMMENDED** : the :ref:`Action Plan ` is waiting for a validation from the :ref:`Administrator ` - **ONGOING** : the :ref:`Action Plan ` is currently being processed by the :ref:`Watcher Applier ` - **SUCCEEDED** : the :ref:`Action Plan ` has been executed successfully (i.e. all :ref:`Actions ` that it contains have been executed successfully) - **FAILED** : an error occurred while executing the :ref:`Action Plan ` - **DELETED** : the :ref:`Action Plan ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. 
- **CANCELLED** : the :ref:`Action Plan ` was in **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` - **SUPERSEDED** : the :ref:`Action Plan ` was in **RECOMMENDED** state and was superseded by the :ref:`Administrator ` """ import datetime from watcher.common import exception from watcher.common import utils from watcher import conf from watcher.db import api as db_api from watcher import notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields CONF = conf.CONF class State(object): RECOMMENDED = 'RECOMMENDED' PENDING = 'PENDING' ONGOING = 'ONGOING' FAILED = 'FAILED' SUCCEEDED = 'SUCCEEDED' DELETED = 'DELETED' CANCELLED = 'CANCELLED' SUPERSEDED = 'SUPERSEDED' CANCELLING = 'CANCELLING' @base.WatcherObjectRegistry.register class ActionPlan(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'audit' and 'strategy' object field # Version 1.2: audit_id is not nullable anymore # Version 2.0: Removed 'first_action_id' object field # Version 2.1: Changed global_efficacy type # Version 2.2: Added 'hostname' field VERSION = '2.2' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'audit_id': wfields.IntegerField(), 'strategy_id': wfields.IntegerField(), 'state': wfields.StringField(nullable=True), 'global_efficacy': wfields.FlexibleListOfDictField(nullable=True), 'hostname': wfields.StringField(nullable=True), 'audit': wfields.ObjectField('Audit', nullable=True), 'strategy': wfields.ObjectField('Strategy', nullable=True), } object_fields = { 'audit': (objects.Audit, 'audit_id'), 'strategy': (objects.Strategy, 'strategy_id'), } # Proxified field so we can keep the previous value after an update _state = None _old_state = None # NOTE(v-francoise): The way oslo.versionedobjects works is by using a # __new__ that will automatically create the attributes 
referenced in # fields. These attributes are properties that raise an exception if no # value has been assigned, which means that they store the actual field # value in an "_obj_%(field)s" attribute. So because we want to proxify a # value that is already proxified, we have to do what you see below. @property def _obj_state(self): return self._state @property def _obj_old_state(self): return self._old_state @property def old_state(self): return self._old_state @_obj_old_state.setter def _obj_old_state(self, value): self._old_state = value @_obj_state.setter def _obj_state(self, value): if self._old_state is None and self._state is None: self._state = value else: self._old_state, self._state = self._state, value @base.remotable_classmethod def get(cls, context, action_plan_id, eager=False): """Find a action_plan based on its id or uuid and return a Action object. :param action_plan_id: the id *or* uuid of a action_plan. :param eager: Load object fields if True (Default: False) :returns: a :class:`Action` object. """ if utils.is_int_like(action_plan_id): return cls.get_by_id(context, action_plan_id, eager=eager) elif utils.is_uuid_like(action_plan_id): return cls.get_by_uuid(context, action_plan_id, eager=eager) else: raise exception.InvalidIdentity(identity=action_plan_id) @base.remotable_classmethod def get_by_id(cls, context, action_plan_id, eager=False): """Find a action_plan based on its integer id and return a ActionPlan object. :param action_plan_id: the id of a action_plan. :param eager: Load object fields if True (Default: False) :returns: a :class:`ActionPlan` object. """ db_action_plan = cls.dbapi.get_action_plan_by_id( context, action_plan_id, eager=eager) action_plan = cls._from_db_object( cls(context), db_action_plan, eager=eager) return action_plan @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find a action_plan based on uuid and return a :class:`ActionPlan` object. :param uuid: the uuid of a action_plan. 
:param context: Security context :param eager: Load object fields if True (Default: False) :returns: a :class:`ActionPlan` object. """ db_action_plan = cls.dbapi.get_action_plan_by_uuid( context, uuid, eager=eager) action_plan = cls._from_db_object( cls(context), db_action_plan, eager=eager) return action_plan @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None, eager=False): """Return a list of ActionPlan objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: Filters to apply. Defaults to None. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`ActionPlan` object. """ db_action_plans = cls.dbapi.get_action_plan_list(context, limit=limit, marker=marker, filters=filters, sort_key=sort_key, sort_dir=sort_dir, eager=eager) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_action_plans] @base.remotable def create(self): """Create an :class:`ActionPlan` record in the DB. :returns: An :class:`ActionPlan` object. 
""" values = self.obj_get_changes() db_action_plan = self.dbapi.create_action_plan(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_action_plan, eager=True) def _notify(): notifications.action_plan.send_create(self._context, self) _notify() @base.remotable def destroy(self): """Delete the action plan from the DB""" related_efficacy_indicators = objects.EfficacyIndicator.list( context=self._context, filters={"action_plan_uuid": self.uuid}) # Cascade soft_delete of related efficacy indicators for related_efficacy_indicator in related_efficacy_indicators: related_efficacy_indicator.destroy() self.dbapi.destroy_action_plan(self.uuid) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this Action plan. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_action_plan(self.uuid, updates) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) def _notify(): notifications.action_plan.send_update( self._context, self, old_state=self.old_state) _notify() self.obj_reset_changes() @base.remotable def refresh(self, eager=False): """Loads updates for this Action plan. Loads a action_plan with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded action_plan column by column, if there are any updates. 
:param eager: Load object fields if True (Default: False) """ current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the Action plan from the DB""" related_actions = objects.Action.list( context=self._context, filters={"action_plan_uuid": self.uuid}, eager=True) # Cascade soft_delete of related actions for related_action in related_actions: related_action.soft_delete() related_efficacy_indicators = objects.EfficacyIndicator.list( context=self._context, filters={"action_plan_uuid": self.uuid}) # Cascade soft_delete of related efficacy indicators for related_efficacy_indicator in related_efficacy_indicators: related_efficacy_indicator.soft_delete() self.state = State.DELETED self.save() db_obj = self.dbapi.soft_delete_action_plan(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) def _notify(): notifications.action_plan.send_delete(self._context, self) _notify() class StateManager(object): def check_expired(self, context): action_plan_expiry = ( CONF.watcher_decision_engine.action_plan_expiry) date_created = datetime.datetime.utcnow() - datetime.timedelta( hours=action_plan_expiry) filters = {'state__eq': State.RECOMMENDED, 'created_at__lt': date_created} action_plans = objects.ActionPlan.list( context, filters=filters, eager=True) for action_plan in action_plans: action_plan.state = State.SUPERSEDED action_plan.save() python-watcher-4.0.0/watcher/objects/scoring_engine.py0000664000175000017500000002040213656752270023155 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2016 Intel # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ A :ref:`Scoring Engine ` is an instance of a data model, to which a learning data was applied. Because there might be multiple algorithms used to build a particular data model (and therefore a scoring engine), the usage of scoring engine might vary. A metainfo field is supposed to contain any information which might be needed by the user of a given scoring engine. """ from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class ScoringEngine(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'description': wfields.StringField(nullable=True), 'metainfo': wfields.StringField(nullable=True), } @base.remotable_classmethod def get(cls, context, scoring_engine_id): """Find a scoring engine based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_name: the name of a scoring_engine. :returns: a :class:`ScoringEngine` object. 
""" if utils.is_int_like(scoring_engine_id): return cls.get_by_id(context, scoring_engine_id) elif utils.is_uuid_like(scoring_engine_id): return cls.get_by_uuid(context, scoring_engine_id) else: raise exception.InvalidIdentity(identity=scoring_engine_id) @base.remotable_classmethod def get_by_id(cls, context, scoring_engine_id): """Find a scoring engine based on its id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_id: the id of a scoring_engine. :returns: a :class:`ScoringEngine` object. """ db_scoring_engine = cls.dbapi.get_scoring_engine_by_id( context, scoring_engine_id) scoring_engine = ScoringEngine._from_db_object(cls(context), db_scoring_engine) return scoring_engine @base.remotable_classmethod def get_by_uuid(cls, context, scoring_engine_uuid): """Find a scoring engine based on its uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_uuid: the uuid of a scoring_engine. :returns: a :class:`ScoringEngine` object. """ db_scoring_engine = cls.dbapi.get_scoring_engine_by_uuid( context, scoring_engine_uuid) scoring_engine = ScoringEngine._from_db_object(cls(context), db_scoring_engine) return scoring_engine @base.remotable_classmethod def get_by_name(cls, context, scoring_engine_name): """Find a scoring engine based on its name :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param scoring_engine_name: the name of a scoring_engine. :returns: a :class:`ScoringEngine` object. """ db_scoring_engine = cls.dbapi.get_scoring_engine_by_name( context, scoring_engine_name) scoring_engine = ScoringEngine._from_db_object(cls(context), db_scoring_engine) return scoring_engine @base.remotable_classmethod def list(cls, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None): """Return a list of :class:`ScoringEngine` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: ScoringEngine(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`ScoringEngine` objects. """ db_scoring_engines = cls.dbapi.get_scoring_engine_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_scoring_engines] @base.remotable def create(self): """Create a :class:`ScoringEngine` record in the DB.""" values = self.obj_get_changes() db_scoring_engine = self.dbapi.create_scoring_engine(values) self._from_db_object(self, db_scoring_engine) def destroy(self): """Delete the :class:`ScoringEngine` from the DB""" self.dbapi.destroy_scoring_engine(self.id) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this :class:`ScoringEngine`. Updates will be made column by column based on the result of self.what_changed(). 
""" updates = self.obj_get_changes() db_obj = self.dbapi.update_scoring_engine(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() def refresh(self): """Loads updates for this :class:`ScoringEngine`. Loads a scoring_engine with the same id from the database and checks for updated attributes. Updates are applied from the loaded scoring_engine column by column, if there are any updates. """ current = self.get_by_id(self._context, scoring_engine_id=self.id) self.obj_refresh(current) def soft_delete(self): """Soft Delete the :class:`ScoringEngine` from the DB""" db_obj = self.dbapi.soft_delete_scoring_engine(self.id) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) python-watcher-4.0.0/watcher/objects/action.py0000664000175000017500000001544113656752270021450 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields class State(object): PENDING = 'PENDING' ONGOING = 'ONGOING' FAILED = 'FAILED' SUCCEEDED = 'SUCCEEDED' DELETED = 'DELETED' CANCELLED = 'CANCELLED' CANCELLING = 'CANCELLING' @base.WatcherObjectRegistry.register class Action(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'action_plan' object field # Version 2.0: Removed 'next' object field, Added 'parents' object field VERSION = '2.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'action_plan_id': wfields.IntegerField(), 'action_type': wfields.StringField(nullable=True), 'input_parameters': wfields.DictField(nullable=True), 'state': wfields.StringField(nullable=True), 'parents': wfields.ListOfStringsField(nullable=True), 'action_plan': wfields.ObjectField('ActionPlan', nullable=True), } object_fields = { 'action_plan': (objects.ActionPlan, 'action_plan_id'), } @base.remotable_classmethod def get(cls, context, action_id, eager=False): """Find a action based on its id or uuid and return a Action object. :param action_id: the id *or* uuid of a action. :param eager: Load object fields if True (Default: False) :returns: a :class:`Action` object. """ if utils.is_int_like(action_id): return cls.get_by_id(context, action_id, eager=eager) elif utils.is_uuid_like(action_id): return cls.get_by_uuid(context, action_id, eager=eager) else: raise exception.InvalidIdentity(identity=action_id) @base.remotable_classmethod def get_by_id(cls, context, action_id, eager=False): """Find a action based on its integer id and return a Action object. :param action_id: the id of a action. 
:param eager: Load object fields if True (Default: False) :returns: a :class:`Action` object. """ db_action = cls.dbapi.get_action_by_id(context, action_id, eager=eager) action = cls._from_db_object(cls(context), db_action, eager=eager) return action @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find a action based on uuid and return a :class:`Action` object. :param uuid: the uuid of a action. :param context: Security context :param eager: Load object fields if True (Default: False) :returns: a :class:`Action` object. """ db_action = cls.dbapi.get_action_by_uuid(context, uuid, eager=eager) action = cls._from_db_object(cls(context), db_action, eager=eager) return action @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None, eager=False): """Return a list of Action objects. :param context: Security context. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: Filters to apply. Defaults to None. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`Action` object. """ db_actions = cls.dbapi.get_action_list(context, limit=limit, marker=marker, filters=filters, sort_key=sort_key, sort_dir=sort_dir, eager=eager) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_actions] @base.remotable def create(self): """Create an :class:`Action` record in the DB. :returns: An :class:`Action` object. 
""" values = self.obj_get_changes() db_action = self.dbapi.create_action(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_action, eager=True) notifications.action.send_create(self.obj_context, self) def destroy(self): """Delete the Action from the DB""" self.dbapi.destroy_action(self.uuid) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this Action. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_action(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) notifications.action.send_update(self.obj_context, self) self.obj_reset_changes() @base.remotable def refresh(self, eager=False): """Loads updates for this Action. Loads a action with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded action column by column, if there are any updates. :param eager: Load object fields if True (Default: False) """ current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the Audit from the DB""" self.state = State.DELETED self.save() db_obj = self.dbapi.soft_delete_action(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) notifications.action.send_delete(self.obj_context, self) python-watcher-4.0.0/watcher/objects/strategy.py0000664000175000017500000002363413656752270022040 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class Strategy(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added Goal object field VERSION = '1.1' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'goal_id': wfields.IntegerField(), 'parameters_spec': wfields.FlexibleDictField(nullable=True), 'goal': wfields.ObjectField('Goal', nullable=True), } object_fields = {'goal': (objects.Goal, 'goal_id')} @base.remotable_classmethod def get(cls, context, strategy_id, eager=False): """Find a strategy based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param strategy_id: the id *or* uuid of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. 
""" if utils.is_int_like(strategy_id): return cls.get_by_id(context, strategy_id, eager=eager) elif utils.is_uuid_like(strategy_id): return cls.get_by_uuid(context, strategy_id, eager=eager) else: raise exception.InvalidIdentity(identity=strategy_id) @base.remotable_classmethod def get_by_id(cls, context, strategy_id, eager=False): """Find a strategy based on its integer id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param strategy_id: the id of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. """ db_strategy = cls.dbapi.get_strategy_by_id( context, strategy_id, eager=eager) strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) return strategy @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find a strategy based on uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param uuid: the uuid of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. """ db_strategy = cls.dbapi.get_strategy_by_uuid( context, uuid, eager=eager) strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) return strategy @base.remotable_classmethod def get_by_name(cls, context, name, eager=False): """Find a strategy based on name :param context: Security context :param name: the name of a strategy. :param eager: Load object fields if True (Default: False) :returns: A :class:`Strategy` object. 
""" db_strategy = cls.dbapi.get_strategy_by_name( context, name, eager=eager) strategy = cls._from_db_object(cls(context), db_strategy, eager=eager) return strategy @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None, eager=False): """Return a list of :class:`Strategy` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: dict mapping the filter key to a value. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc`". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`Strategy` object. """ db_strategies = cls.dbapi.get_strategy_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_strategies] @base.remotable def create(self, context=None): """Create a :class:`Strategy` record in the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :returns: A :class:`Strategy` object. """ values = self.obj_get_changes() db_strategy = self.dbapi.create_strategy(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_strategy, eager=True) def destroy(self, context=None): """Delete the :class:`Strategy` from the DB. 
:param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) """ self.dbapi.destroy_strategy(self.id) self.obj_reset_changes() @base.remotable def save(self, context=None): """Save updates to this :class:`Strategy`. Updates will be made column by column based on the result of self.what_changed(). :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) """ updates = self.obj_get_changes() self.dbapi.update_strategy(self.id, updates) self.obj_reset_changes() @base.remotable def refresh(self, context=None, eager=False): """Loads updates for this :class:`Strategy`. Loads a strategy with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded strategy column by column, if there are any updates. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) :param eager: Load object fields if True (Default: False) """ current = self.__class__.get_by_id( self._context, strategy_id=self.id, eager=eager) for field in self.fields: if (hasattr(self, base.get_attrname(field)) and self[field] != current[field]): self[field] = current[field] @base.remotable def soft_delete(self, context=None): """Soft Delete the :class:`Strategy` from the DB. :param context: Security context. NOTE: This should only be used internally by the indirection_api. 
Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Strategy(context) """ self.dbapi.soft_delete_strategy(self.id) python-watcher-4.0.0/watcher/objects/goal.py0000664000175000017500000001516513656752270021120 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class Goal(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'display_name': wfields.StringField(), 'efficacy_specification': wfields.FlexibleListOfDictField(), } @base.remotable_classmethod def get(cls, context, goal_id): """Find a goal based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Goal(context) :param goal_id: the id *or* uuid of a goal. :returns: a :class:`Goal` object. 
""" if utils.is_int_like(goal_id): return cls.get_by_id(context, goal_id) elif utils.is_uuid_like(goal_id): return cls.get_by_uuid(context, goal_id) else: raise exception.InvalidIdentity(identity=goal_id) @base.remotable_classmethod def get_by_id(cls, context, goal_id): """Find a goal based on its integer id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Goal(context) :param goal_id: the id *or* uuid of a goal. :returns: a :class:`Goal` object. """ db_goal = cls.dbapi.get_goal_by_id(context, goal_id) goal = cls._from_db_object(cls(context), db_goal) return goal @base.remotable_classmethod def get_by_uuid(cls, context, uuid): """Find a goal based on uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Goal(context) :param uuid: the uuid of a goal. :returns: a :class:`Goal` object. """ db_goal = cls.dbapi.get_goal_by_uuid(context, uuid) goal = cls._from_db_object(cls(context), db_goal) return goal @base.remotable_classmethod def get_by_name(cls, context, name): """Find a goal based on name :param name: the name of a goal. :param context: Security context :returns: a :class:`Goal` object. """ db_goal = cls.dbapi.get_goal_by_name(context, name) goal = cls._from_db_object(cls(context), db_goal) return goal @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of :class:`Goal` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: Goal(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`Goal` object. """ db_goals = cls.dbapi.get_goal_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_goals] @base.remotable def create(self): """Create a :class:`Goal` record in the DB""" values = self.obj_get_changes() db_goal = self.dbapi.create_goal(values) self._from_db_object(self, db_goal) def destroy(self): """Delete the :class:`Goal` from the DB""" self.dbapi.destroy_goal(self.id) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this :class:`Goal`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_goal(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() @base.remotable def refresh(self): """Loads updates for this :class:`Goal`. Loads a goal with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded goal column by column, if there are any updates. """ current = self.get_by_uuid(self._context, uuid=self.uuid) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the :class:`Goal` from the DB""" db_obj = self.dbapi.soft_delete_goal(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) python-watcher-4.0.0/watcher/objects/fields.py0000664000175000017500000001127513656752270021442 0ustar zuulzuul00000000000000# Copyright 2013 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Utility methods for objects""" import ast import six from oslo_serialization import jsonutils from oslo_versionedobjects import fields BaseEnumField = fields.BaseEnumField BooleanField = fields.BooleanField DateTimeField = fields.DateTimeField Enum = fields.Enum FloatField = fields.FloatField IntegerField = fields.IntegerField ListOfStringsField = fields.ListOfStringsField NonNegativeFloatField = fields.NonNegativeFloatField NonNegativeIntegerField = fields.NonNegativeIntegerField ObjectField = fields.ObjectField StringField = fields.StringField UnspecifiedDefault = fields.UnspecifiedDefault class UUIDField(fields.UUIDField): def coerce(self, obj, attr, value): if value is None or value == "": return self._null(obj, attr) else: return self._type.coerce(obj, attr, value) class Numeric(fields.FieldType): @staticmethod def coerce(obj, attr, value): if value is None: return value f_value = float(value) return f_value if not f_value.is_integer() else value class NumericField(fields.AutoTypedField): AUTO_TYPE = Numeric() class DictField(fields.AutoTypedField): AUTO_TYPE = fields.Dict(fields.FieldType()) class ListOfUUIDsField(fields.AutoTypedField): AUTO_TYPE = fields.List(fields.UUID()) class FlexibleDict(fields.FieldType): @staticmethod def coerce(obj, attr, value): if isinstance(value, six.string_types): value = ast.literal_eval(value) return dict(value) class FlexibleDictField(fields.AutoTypedField): AUTO_TYPE = FlexibleDict() # 
TODO(lucasagomes): In our code we've always translated None to {}, # this method makes this field to work like this. But probably won't # be accepted as-is in the oslo_versionedobjects library def _null(self, obj, attr): if self.nullable: return {} super(FlexibleDictField, self)._null(obj, attr) class FlexibleListOfDict(fields.FieldType): @staticmethod def coerce(obj, attr, value): if isinstance(value, six.string_types): value = ast.literal_eval(value) return list(value) class FlexibleListOfDictField(fields.AutoTypedField): AUTO_TYPE = FlexibleListOfDict() # TODO(lucasagomes): In our code we've always translated None to {}, # this method makes this field to work like this. But probably won't # be accepted as-is in the oslo_versionedobjects library def _null(self, obj, attr): if self.nullable: return [] super(FlexibleListOfDictField, self)._null(obj, attr) class Json(fields.FieldType): def coerce(self, obj, attr, value): if isinstance(value, six.string_types): loaded = jsonutils.loads(value) return loaded return value def from_primitive(self, obj, attr, value): return self.coerce(obj, attr, value) def to_primitive(self, obj, attr, value): return jsonutils.dumps(value) class JsonField(fields.AutoTypedField): AUTO_TYPE = Json() # ### Notification fields ### # class BaseWatcherEnum(Enum): ALL = () def __init__(self, **kwargs): super(BaseWatcherEnum, self).__init__(valid_values=self.__class__.ALL) class NotificationPriority(BaseWatcherEnum): DEBUG = 'debug' INFO = 'info' WARNING = 'warning' ERROR = 'error' CRITICAL = 'critical' ALL = (DEBUG, INFO, WARNING, ERROR, CRITICAL) class NotificationPhase(BaseWatcherEnum): START = 'start' END = 'end' ERROR = 'error' ALL = (START, END, ERROR) class NotificationAction(BaseWatcherEnum): CREATE = 'create' UPDATE = 'update' EXCEPTION = 'exception' DELETE = 'delete' STRATEGY = 'strategy' PLANNER = 'planner' EXECUTION = 'execution' CANCEL = 'cancel' ALL = (CREATE, UPDATE, EXCEPTION, DELETE, STRATEGY, PLANNER, EXECUTION, CANCEL) class 
NotificationPriorityField(BaseEnumField): AUTO_TYPE = NotificationPriority() class NotificationPhaseField(BaseEnumField): AUTO_TYPE = NotificationPhase() class NotificationActionField(BaseEnumField): AUTO_TYPE = NotificationAction() python-watcher-4.0.0/watcher/objects/service.py0000664000175000017500000001244713656752270021636 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Servionica # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields class ServiceStatus(object): ACTIVE = 'ACTIVE' FAILED = 'FAILED' @base.WatcherObjectRegistry.register class Service(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'name': wfields.StringField(), 'host': wfields.StringField(), 'last_seen_up': wfields.DateTimeField( tzinfo_aware=False, nullable=True), } @base.remotable_classmethod def get(cls, context, service_id): """Find a service based on its id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: Service(context) :param service_id: the id of a service. :returns: a :class:`Service` object. """ if utils.is_int_like(service_id): db_service = cls.dbapi.get_service_by_id(context, service_id) service = Service._from_db_object(cls(context), db_service) return service else: raise exception.InvalidIdentity(identity=service_id) @base.remotable_classmethod def get_by_name(cls, context, name): """Find a service based on name :param name: the name of a service. :param context: Security context :returns: a :class:`Service` object. """ db_service = cls.dbapi.get_service_by_name(context, name) service = cls._from_db_object(cls(context), db_service) return service @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of :class:`Service` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Service(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`Service` object. """ db_services = cls.dbapi.get_service_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_services] @base.remotable def create(self): """Create a :class:`Service` record in the DB.""" values = self.obj_get_changes() db_service = self.dbapi.create_service(values) self._from_db_object(self, db_service) @base.remotable def save(self): """Save updates to this :class:`Service`. 
Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_service(self.id, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() def refresh(self): """Loads updates for this :class:`Service`. Loads a service with the same id from the database and checks for updated attributes. Updates are applied from the loaded service column by column, if there are any updates. """ current = self.get(self._context, service_id=self.id) for field in self.fields: if (hasattr(self, base.get_attrname(field)) and self[field] != current[field]): self[field] = current[field] def soft_delete(self): """Soft Delete the :class:`Service` from the DB.""" db_obj = self.dbapi.soft_delete_service(self.id) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) python-watcher-4.0.0/watcher/objects/audit.py0000664000175000017500000003403513656752270021301 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ In the Watcher system, an :ref:`Audit ` is a request for optimizing a :ref:`Cluster `. The optimization is done in order to satisfy one :ref:`Goal ` on a given :ref:`Cluster `. For each :ref:`Audit `, the Watcher system generates an :ref:`Action Plan `. 
An :ref:`Audit ` has a life-cycle and its current state may be one of the following: - **PENDING** : a request for an :ref:`Audit ` has been submitted (either manually by the :ref:`Administrator ` or automatically via some event handling mechanism) and is in the queue for being processed by the :ref:`Watcher Decision Engine ` - **ONGOING** : the :ref:`Audit ` is currently being processed by the :ref:`Watcher Decision Engine ` - **SUCCEEDED** : the :ref:`Audit ` has been executed successfully (note that it may not necessarily produce a :ref:`Solution `). - **FAILED** : an error occurred while executing the :ref:`Audit ` - **DELETED** : the :ref:`Audit ` is still stored in the :ref:`Watcher database ` but is not returned any more through the Watcher APIs. - **CANCELLED** : the :ref:`Audit ` was in **PENDING** or **ONGOING** state and was cancelled by the :ref:`Administrator ` - **SUSPENDED** : the :ref:`Audit ` was in **ONGOING** state and was suspended by the :ref:`Administrator ` """ import enum from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import notifications from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields class State(object): ONGOING = 'ONGOING' SUCCEEDED = 'SUCCEEDED' FAILED = 'FAILED' CANCELLED = 'CANCELLED' DELETED = 'DELETED' PENDING = 'PENDING' SUSPENDED = 'SUSPENDED' class AuditType(enum.Enum): ONESHOT = 'ONESHOT' CONTINUOUS = 'CONTINUOUS' EVENT = 'EVENT' @base.WatcherObjectRegistry.register class Audit(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'goal' and 'strategy' object field # Version 1.2: Added 'auto_trigger' boolean field # Version 1.3: Added 'next_run_time' DateTime field, # 'interval' type has been changed from Integer to String # Version 1.4: Added 'name' string field # Version 1.5: Added 'hostname' field # Version 1.6: Added 
'start_time' and 'end_time' DateTime fields # Version 1.7: Added 'force' boolean field VERSION = '1.7' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'audit_type': wfields.StringField(), 'state': wfields.StringField(), 'parameters': wfields.FlexibleDictField(nullable=True), 'interval': wfields.StringField(nullable=True), 'scope': wfields.FlexibleListOfDictField(nullable=True), 'goal_id': wfields.IntegerField(), 'strategy_id': wfields.IntegerField(nullable=True), 'auto_trigger': wfields.BooleanField(), 'next_run_time': wfields.DateTimeField(nullable=True, tzinfo_aware=False), 'hostname': wfields.StringField(nullable=True), 'start_time': wfields.DateTimeField(nullable=True, tzinfo_aware=False), 'end_time': wfields.DateTimeField(nullable=True, tzinfo_aware=False), 'force': wfields.BooleanField(default=False, nullable=False), 'goal': wfields.ObjectField('Goal', nullable=True), 'strategy': wfields.ObjectField('Strategy', nullable=True), } object_fields = { 'goal': (objects.Goal, 'goal_id'), 'strategy': (objects.Strategy, 'strategy_id'), } def __init__(self, *args, **kwargs): if 'force' not in kwargs: kwargs['force'] = False super(Audit, self).__init__(*args, **kwargs) # Proxified field so we can keep the previous value after an update _state = None _old_state = None # NOTE(v-francoise): The way oslo.versionedobjects works is by using a # __new__ that will automatically create the attributes referenced in # fields. These attributes are properties that raise an exception if no # value has been assigned, which means that they store the actual field # value in an "_obj_%(field)s" attribute. So because we want to proxify a # value that is already proxified, we have to do what you see below. 
@property def _obj_state(self): return self._state @property def _obj_old_state(self): return self._old_state @property def old_state(self): return self._old_state @_obj_old_state.setter def _obj_old_state(self, value): self._old_state = value @_obj_state.setter def _obj_state(self, value): if self._old_state is None and self._state is None: self._state = value else: self._old_state, self._state = self._state, value @base.remotable_classmethod def get(cls, context, audit_id, eager=False): """Find a audit based on its id or uuid and return a Audit object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param audit_id: the id *or* uuid of a audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. """ if utils.is_int_like(audit_id): return cls.get_by_id(context, audit_id, eager=eager) elif utils.is_uuid_like(audit_id): return cls.get_by_uuid(context, audit_id, eager=eager) else: raise exception.InvalidIdentity(identity=audit_id) @base.remotable_classmethod def get_by_id(cls, context, audit_id, eager=False): """Find a audit based on its integer id and return a Audit object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param audit_id: the id of a audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. 
""" db_audit = cls.dbapi.get_audit_by_id(context, audit_id, eager=eager) audit = cls._from_db_object(cls(context), db_audit, eager=eager) return audit @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find a audit based on uuid and return a :class:`Audit` object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param uuid: the uuid of a audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. """ db_audit = cls.dbapi.get_audit_by_uuid(context, uuid, eager=eager) audit = cls._from_db_object(cls(context), db_audit, eager=eager) return audit @base.remotable_classmethod def get_by_name(cls, context, name, eager=False): """Find an audit based on name and return a :class:`Audit` object. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: Audit(context) :param name: the name of an audit. :param eager: Load object fields if True (Default: False) :returns: a :class:`Audit` object. """ db_audit = cls.dbapi.get_audit_by_name(context, name, eager=eager) audit = cls._from_db_object(cls(context), db_audit, eager=eager) return audit @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None, eager=False): """Return a list of Audit objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: Audit(context) :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param filters: Filters to apply. Defaults to None. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`Audit` object. """ db_audits = cls.dbapi.get_audit_list(context, limit=limit, marker=marker, filters=filters, sort_key=sort_key, sort_dir=sort_dir, eager=eager) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_audits] @base.remotable def create(self): """Create an :class:`Audit` record in the DB. :returns: An :class:`Audit` object. """ values = self.obj_get_changes() db_audit = self.dbapi.create_audit(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_audit, eager=True) def _notify(): notifications.audit.send_create(self._context, self) _notify() @base.remotable def destroy(self): """Delete the Audit from the DB.""" self.dbapi.destroy_audit(self.uuid) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this Audit. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_audit(self.uuid, updates) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) def _notify(): notifications.audit.send_update( self._context, self, old_state=self.old_state) _notify() self.obj_reset_changes() @base.remotable def refresh(self, eager=False): """Loads updates for this Audit. Loads a audit with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded audit column by column, if there are any updates. 
:param eager: Load object fields if True (Default: False) """ current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the Audit from the DB.""" self.state = State.DELETED self.save() db_obj = self.dbapi.soft_delete_audit(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) def _notify(): notifications.audit.send_delete(self._context, self) _notify() class AuditStateTransitionManager(object): TRANSITIONS = { State.PENDING: [State.ONGOING, State.CANCELLED], State.ONGOING: [State.FAILED, State.SUCCEEDED, State.CANCELLED, State.SUSPENDED], State.FAILED: [State.DELETED], State.SUCCEEDED: [State.DELETED], State.CANCELLED: [State.DELETED], State.SUSPENDED: [State.ONGOING, State.DELETED], } INACTIVE_STATES = (State.CANCELLED, State.DELETED, State.FAILED, State.SUSPENDED) def check_transition(self, initial, new): return new in self.TRANSITIONS.get(initial, []) def is_inactive(self, audit): return audit.state in self.INACTIVE_STATES python-watcher-4.0.0/watcher/objects/action_description.py0000664000175000017500000001264013656752270024051 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class ActionDescription(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version VERSION = '1.0' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'action_type': wfields.StringField(), 'description': wfields.StringField(), } @base.remotable_classmethod def get(cls, context, action_id): """Find a action description based on its id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object :param action_id: the id of a action description. :returns: a :class:`ActionDescription` object. """ if utils.is_int_like(action_id): db_action = cls.dbapi.get_action_description_by_id( context, action_id) action = ActionDescription._from_db_object(cls(context), db_action) return action else: raise exception.InvalidIdentity(identity=action_id) @base.remotable_classmethod def get_by_type(cls, context, action_type): """Find a action description based on action type :param action_type: the action type of a action description. :param context: Security context :returns: a :class:`ActionDescription` object. """ db_action = cls.dbapi.get_action_description_by_type( context, action_type) action = cls._from_db_object(cls(context), db_action) return action @base.remotable_classmethod def list(cls, context, limit=None, marker=None, filters=None, sort_key=None, sort_dir=None): """Return a list of :class:`ActionDescription` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. 
A context should be set when instantiating the object, e.g.: ActionDescription(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :returns: a list of :class:`ActionDescription` object. """ db_actions = cls.dbapi.get_action_description_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir) return [cls._from_db_object(cls(context), obj) for obj in db_actions] @base.remotable def create(self): """Create a :class:`ActionDescription` record in the DB.""" values = self.obj_get_changes() db_action = self.dbapi.create_action_description(values) self._from_db_object(self, db_action) @base.remotable def save(self): """Save updates to this :class:`ActionDescription`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_action_description(self.id, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() def refresh(self): """Loads updates for this :class:`ActionDescription`. Loads a action description with the same id from the database and checks for updated attributes. Updates are applied from the loaded action description column by column, if there are any updates. 
""" current = self.get(self._context, action_id=self.id) for field in self.fields: if (hasattr(self, base.get_attrname(field)) and self[field] != current[field]): self[field] = current[field] def soft_delete(self): """Soft Delete the :class:`ActionDescription` from the DB.""" db_obj = self.dbapi.soft_delete_action_description(self.id) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) python-watcher-4.0.0/watcher/objects/audit_template.py0000664000175000017500000002420613656752270023173 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ An :ref:`Audit ` may be launched several times with the same settings (:ref:`Goal `, thresholds, ...). Therefore it makes sense to save those settings in some sort of Audit preset object, which is known as an :ref:`Audit Template `. An :ref:`Audit Template ` contains at least the :ref:`Goal ` of the :ref:`Audit `. It may also contain some error handling settings indicating whether: - :ref:`Watcher Applier ` stops the entire operation - :ref:`Watcher Applier ` performs a rollback and how many retries should be attempted before failure occurs (also the latter can be complex: for example the scenario in which there are many first-time failures on ultimately successful :ref:`Actions `). 
Moreover, an :ref:`Audit Template ` may contain some settings related to the level of automation for the :ref:`Action Plan ` that will be generated by the :ref:`Audit `. A flag will indicate whether the :ref:`Action Plan ` will be launched automatically or will need a manual confirmation from the :ref:`Administrator `. Last but not least, an :ref:`Audit Template ` may contain a list of extra parameters related to the :ref:`Strategy ` configuration. These parameters can be provided as a list of key-value pairs. """ from watcher.common import exception from watcher.common import utils from watcher.db import api as db_api from watcher import objects from watcher.objects import base from watcher.objects import fields as wfields @base.WatcherObjectRegistry.register class AuditTemplate(base.WatcherPersistentObject, base.WatcherObject, base.WatcherObjectDictCompat): # Version 1.0: Initial version # Version 1.1: Added 'goal' and 'strategy' object field VERSION = '1.1' dbapi = db_api.get_instance() fields = { 'id': wfields.IntegerField(), 'uuid': wfields.UUIDField(), 'name': wfields.StringField(), 'description': wfields.StringField(nullable=True), 'scope': wfields.FlexibleListOfDictField(nullable=True), 'goal_id': wfields.IntegerField(), 'strategy_id': wfields.IntegerField(nullable=True), 'goal': wfields.ObjectField('Goal', nullable=True), 'strategy': wfields.ObjectField('Strategy', nullable=True), } object_fields = { 'goal': (objects.Goal, 'goal_id'), 'strategy': (objects.Strategy, 'strategy_id'), } @base.remotable_classmethod def get(cls, context, audit_template_id, eager=False): """Find an audit template based on its id or uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param audit_template_id: the id *or* uuid of a audit_template. 
:param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. """ if utils.is_int_like(audit_template_id): return cls.get_by_id(context, audit_template_id, eager=eager) elif utils.is_uuid_like(audit_template_id): return cls.get_by_uuid(context, audit_template_id, eager=eager) else: raise exception.InvalidIdentity(identity=audit_template_id) @base.remotable_classmethod def get_by_id(cls, context, audit_template_id, eager=False): """Find an audit template based on its integer id :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param audit_template_id: the id of a audit_template. :param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. """ db_audit_template = cls.dbapi.get_audit_template_by_id( context, audit_template_id, eager=eager) audit_template = cls._from_db_object( cls(context), db_audit_template, eager=eager) return audit_template @base.remotable_classmethod def get_by_uuid(cls, context, uuid, eager=False): """Find an audit template based on uuid :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param uuid: the uuid of a audit_template. :param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. 
""" db_audit_template = cls.dbapi.get_audit_template_by_uuid( context, uuid, eager=eager) audit_template = cls._from_db_object( cls(context), db_audit_template, eager=eager) return audit_template @base.remotable_classmethod def get_by_name(cls, context, name, eager=False): """Find an audit template based on name :param name: the logical name of a audit_template. :param context: Security context :param eager: Load object fields if True (Default: False) :returns: a :class:`AuditTemplate` object. """ db_audit_template = cls.dbapi.get_audit_template_by_name( context, name, eager=eager) audit_template = cls._from_db_object( cls(context), db_audit_template, eager=eager) return audit_template @base.remotable_classmethod def list(cls, context, filters=None, limit=None, marker=None, sort_key=None, sort_dir=None, eager=False): """Return a list of :class:`AuditTemplate` objects. :param context: Security context. NOTE: This should only be used internally by the indirection_api. Unfortunately, RPC requires context as the first argument, even though we don't use it. A context should be set when instantiating the object, e.g.: AuditTemplate(context) :param filters: dict mapping the filter key to a value. :param limit: maximum number of resources to return in a single result. :param marker: pagination marker for large data sets. :param sort_key: column to sort results by. :param sort_dir: direction to sort. "asc" or "desc". :param eager: Load object fields if True (Default: False) :returns: a list of :class:`AuditTemplate` object. """ db_audit_templates = cls.dbapi.get_audit_template_list( context, filters=filters, limit=limit, marker=marker, sort_key=sort_key, sort_dir=sort_dir, eager=eager) return [cls._from_db_object(cls(context), obj, eager=eager) for obj in db_audit_templates] @base.remotable def create(self): """Create a :class:`AuditTemplate` record in the DB :returns: An :class:`AuditTemplate` object. 
""" values = self.obj_get_changes() db_audit_template = self.dbapi.create_audit_template(values) # Note(v-francoise): Always load eagerly upon creation so we can send # notifications containing information about the related relationships self._from_db_object(self, db_audit_template, eager=True) def destroy(self): """Delete the :class:`AuditTemplate` from the DB""" self.dbapi.destroy_audit_template(self.uuid) self.obj_reset_changes() @base.remotable def save(self): """Save updates to this :class:`AuditTemplate`. Updates will be made column by column based on the result of self.what_changed(). """ updates = self.obj_get_changes() db_obj = self.dbapi.update_audit_template(self.uuid, updates) obj = self._from_db_object(self, db_obj, eager=False) self.obj_refresh(obj) self.obj_reset_changes() @base.remotable def refresh(self, eager=False): """Loads updates for this :class:`AuditTemplate`. Loads a audit_template with the same uuid from the database and checks for updated attributes. Updates are applied from the loaded audit_template column by column, if there are any updates. :param eager: Load object fields if True (Default: False) """ current = self.get_by_uuid(self._context, uuid=self.uuid, eager=eager) self.obj_refresh(current) @base.remotable def soft_delete(self): """Soft Delete the :class:`AuditTemplate` from the DB""" db_obj = self.dbapi.soft_delete_audit_template(self.uuid) obj = self._from_db_object( self.__class__(self._context), db_obj, eager=False) self.obj_refresh(obj) python-watcher-4.0.0/watcher/objects/base.py0000664000175000017500000001462213656752270021105 0ustar zuulzuul00000000000000# Copyright 2013 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Watcher common internal object model""" from oslo_utils import versionutils from oslo_versionedobjects import base as ovo_base from oslo_versionedobjects import fields as ovo_fields from watcher import objects remotable_classmethod = ovo_base.remotable_classmethod remotable = ovo_base.remotable def get_attrname(name): """Return the mangled name of the attribute's underlying storage.""" # FIXME(danms): This is just until we use o.vo's class properties # and object base. return '_obj_' + name class WatcherObjectRegistry(ovo_base.VersionedObjectRegistry): notification_classes = [] def registration_hook(self, cls, index): # NOTE(danms): This is called when an object is registered, # and is responsible for maintaining watcher.objects.$OBJECT # as the highest-versioned implementation of a given object. version = versionutils.convert_version_to_tuple(cls.VERSION) if not hasattr(objects, cls.obj_name()): setattr(objects, cls.obj_name(), cls) else: cur_version = versionutils.convert_version_to_tuple( getattr(objects, cls.obj_name()).VERSION) if version >= cur_version: setattr(objects, cls.obj_name(), cls) @classmethod def register_notification(cls, notification_cls): """Register a class as notification. Use only to register concrete notification or payload classes, do not register base classes intended for inheritance only. """ cls.register_if(False)(notification_cls) cls.notification_classes.append(notification_cls) return notification_cls @classmethod def register_notification_objects(cls): """Register previously decorated notification as normal ovos. 
This is not intended for production use but only for testing and document generation purposes. """ for notification_cls in cls.notification_classes: cls.register(notification_cls) class WatcherObject(ovo_base.VersionedObject): """Base class and object factory. This forms the base of all objects that can be remoted or instantiated via RPC. Simply defining a class that inherits from this base class will make it remotely instantiatable. Objects should implement the necessary "get" classmethod routines as well as "save" object methods as appropriate. """ OBJ_SERIAL_NAMESPACE = 'watcher_object' OBJ_PROJECT_NAMESPACE = 'watcher' def as_dict(self): return { k: getattr(self, k) for k in self.fields if self.obj_attr_is_set(k)} class WatcherObjectDictCompat(ovo_base.VersionedObjectDictCompat): pass class WatcherComparableObject(ovo_base.ComparableVersionedObject): pass class WatcherPersistentObject(object): """Mixin class for Persistent objects. This adds the fields that we use in common for all persistent objects. """ fields = { 'created_at': ovo_fields.DateTimeField(nullable=True), 'updated_at': ovo_fields.DateTimeField(nullable=True), 'deleted_at': ovo_fields.DateTimeField(nullable=True), } # Mapping between the object field name and a 2-tuple pair composed of # its object type (e.g. objects.RelatedObject) and the name of the # model field related ID (or UUID) foreign key field. # e.g.: # # fields = { # # [...] # 'related_object_id': fields.IntegerField(), # Foreign key # 'related_object': wfields.ObjectField('RelatedObject'), # } # {'related_object': (objects.RelatedObject, 'related_object_id')} object_fields = {} def obj_refresh(self, loaded_object): """Applies updates for objects that inherit from base.WatcherObject. Checks for updated attributes in an object. Updates are applied from the loaded object column by column in comparison with the current object. 
""" fields = (field for field in self.fields if field not in self.object_fields) for field in fields: if (self.obj_attr_is_set(field) and self[field] != loaded_object[field]): self[field] = loaded_object[field] @staticmethod def _from_db_object(obj, db_object, eager=False): """Converts a database entity to a formal object. :param obj: An object of the class. :param db_object: A DB model of the object :param eager: Enable the loading of object fields (Default: False) :return: The object of the class with the database entity added """ obj_class = type(obj) object_fields = obj_class.object_fields for field in obj.fields: if field not in object_fields: obj[field] = db_object[field] if eager: # Load object fields context = obj._context loadable_fields = ( (obj_field, related_obj_cls, rel_id) for obj_field, (related_obj_cls, rel_id) in object_fields.items() if obj[rel_id] ) for obj_field, related_obj_cls, rel_id in loadable_fields: if getattr(db_object, obj_field, None) and obj[rel_id]: # The object field data was eagerly loaded alongside # the main object data obj[obj_field] = related_obj_cls._from_db_object( related_obj_cls(context), db_object[obj_field]) else: # The object field data wasn't loaded yet obj[obj_field] = related_obj_cls.get(context, obj[rel_id]) obj.obj_reset_changes() return obj class WatcherObjectSerializer(ovo_base.VersionedObjectSerializer): # Base class to use for object hydration OBJ_BASE_CLASS = WatcherObject python-watcher-4.0.0/watcher/cmd/0000775000175000017500000000000013656752352016727 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/cmd/__init__.py0000664000175000017500000000155613656752270021046 0ustar zuulzuul00000000000000# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # NOTE(licanwei): Do eventlet monkey patching here, instead of in # common/service.py. This allows the API service to run without monkey # patching under Apache (which uses its own concurrency model). Mixing # concurrency models can cause undefined behavior and potentially API timeouts. import eventlet eventlet.monkey_patch() python-watcher-4.0.0/watcher/cmd/api.py0000664000175000017500000000330513656752270020052 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for the Watcher API service.""" import sys from oslo_config import cfg from oslo_log import log from watcher.api import scheduling from watcher.common import service from watcher import conf LOG = log.getLogger(__name__) CONF = conf.CONF def main(): service.prepare_service(sys.argv, CONF) host, port = cfg.CONF.api.host, cfg.CONF.api.port protocol = "http" if not CONF.api.enable_ssl_api else "https" # Build and start the WSGI app server = service.WSGIService('watcher-api', CONF.api.enable_ssl_api) if host == '127.0.0.1': LOG.info('serving on 127.0.0.1:%(port)s, ' 'view at %(protocol)s://127.0.0.1:%(port)s', dict(protocol=protocol, port=port)) else: LOG.info('serving on %(protocol)s://%(host)s:%(port)s', dict(protocol=protocol, host=host, port=port)) api_schedule = scheduling.APISchedulingService() api_schedule.start() launcher = service.launch(CONF, server, workers=server.workers) launcher.wait() python-watcher-4.0.0/watcher/cmd/decisionengine.py0000664000175000017500000000313213656752270022262 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for the Decision Engine manager service.""" import os import sys from oslo_log import log from watcher.common import service as watcher_service from watcher import conf from watcher.decision_engine import gmr from watcher.decision_engine import manager from watcher.decision_engine import scheduling from watcher.decision_engine import sync LOG = log.getLogger(__name__) CONF = conf.CONF def main(): watcher_service.prepare_service(sys.argv, CONF) gmr.register_gmr_plugins() LOG.info('Starting Watcher Decision Engine service in PID %s', os.getpid()) syncer = sync.Syncer() syncer.sync() de_service = watcher_service.Service(manager.DecisionEngineManager) bg_scheduler_service = scheduling.DecisionEngineSchedulingService() # Only 1 process launcher = watcher_service.launch(CONF, de_service) launcher.launch_service(bg_scheduler_service) launcher.wait() python-watcher-4.0.0/watcher/cmd/status.py0000664000175000017500000000315513656752270020627 0ustar zuulzuul00000000000000# Copyright (c) 2018 NEC, Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from oslo_upgradecheck import upgradecheck from watcher._i18n import _ from watcher.common import clients from watcher import conf CONF = conf.CONF class Checks(upgradecheck.UpgradeCommands): """Contains upgrade checks Various upgrade checks should be added as separate methods in this class and added to _upgrade_checks tuple. 
""" def _minimum_nova_api_version(self): """Checks the minimum required version of nova_client.api_version""" try: clients.check_min_nova_api_version(CONF.nova_client.api_version) except ValueError as e: return upgradecheck.Result( upgradecheck.Code.FAILURE, str(e)) return upgradecheck.Result(upgradecheck.Code.SUCCESS) _upgrade_checks = ( # Added in Train. (_('Minimum Nova API Version'), _minimum_nova_api_version), ) def main(): return upgradecheck.main( CONF, project='watcher', upgrade_command=Checks()) if __name__ == '__main__': sys.exit(main()) python-watcher-4.0.0/watcher/cmd/dbmanage.py0000664000175000017500000001237113656752270021042 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Run storage database migration. 
""" import sys from oslo_config import cfg from watcher.common import service from watcher import conf from watcher.db import migration from watcher.db import purge CONF = conf.CONF class DBCommand(object): @staticmethod def upgrade(): migration.upgrade(CONF.command.revision) @staticmethod def downgrade(): migration.downgrade(CONF.command.revision) @staticmethod def revision(): migration.revision(CONF.command.message, CONF.command.autogenerate) @staticmethod def stamp(): migration.stamp(CONF.command.revision) @staticmethod def version(): print(migration.version()) @staticmethod def create_schema(): migration.create_schema() @staticmethod def purge(): purge.purge(CONF.command.age_in_days, CONF.command.max_number, CONF.command.goal, CONF.command.exclude_orphans, CONF.command.dry_run) def add_command_parsers(subparsers): parser = subparsers.add_parser( 'upgrade', help="Upgrade the database schema to the latest version. " "Optionally, use --revision to specify an alembic revision " "string to upgrade to.") parser.set_defaults(func=DBCommand.upgrade) parser.add_argument('--revision', nargs='?') parser = subparsers.add_parser( 'downgrade', help="Downgrade the database schema to the oldest revision. " "While optional, one should generally use --revision to " "specify the alembic revision string to downgrade to.") parser.set_defaults(func=DBCommand.downgrade) parser.add_argument('--revision', nargs='?') parser = subparsers.add_parser('stamp') parser.add_argument('revision', nargs='?') parser.set_defaults(func=DBCommand.stamp) parser = subparsers.add_parser( 'revision', help="Create a new alembic revision. 
" "Use --message to set the message string.") parser.add_argument('-m', '--message') parser.add_argument('--autogenerate', action='store_true') parser.set_defaults(func=DBCommand.revision) parser = subparsers.add_parser( 'version', help="Print the current version information and exit.") parser.set_defaults(func=DBCommand.version) parser = subparsers.add_parser( 'create_schema', help="Create the database schema.") parser.set_defaults(func=DBCommand.create_schema) parser = subparsers.add_parser( 'purge', help="Purge the database.") parser.add_argument('-d', '--age-in-days', help="Number of days since deletion (from today) " "to exclude from the purge. If None, everything " "will be purged.", type=int, default=None, nargs='?') parser.add_argument('-n', '--max-number', help="Max number of objects expected to be deleted. " "Prevents the deletion if exceeded. No limit if " "set to None.", type=int, default=None, nargs='?') parser.add_argument('-t', '--goal', help="UUID or name of the goal to purge.", type=str, default=None, nargs='?') parser.add_argument('-e', '--exclude-orphans', action='store_true', help="Flag to indicate whether or not you want to " "exclude orphans from deletion (default: False).", default=False) parser.add_argument('--dry-run', action='store_true', help="Flag to indicate whether or not you want to " "perform a dry run (no deletion).", default=False) parser.set_defaults(func=DBCommand.purge) command_opt = cfg.SubCommandOpt('command', title='Command', help='Available commands', handler=add_command_parsers) def register_sub_command_opts(): cfg.CONF.register_cli_opt(command_opt) def main(): register_sub_command_opts() # this is hack to work with previous usage of watcher-dbsync # pls change it to watcher-dbsync upgrade valid_commands = set([ 'upgrade', 'downgrade', 'revision', 'version', 'stamp', 'create_schema', 'purge', ]) if not set(sys.argv).intersection(valid_commands): sys.argv.append('upgrade') service.prepare_service(sys.argv, CONF) 
CONF.command.func() python-watcher-4.0.0/watcher/cmd/sync.py0000664000175000017500000000204313656752270020253 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright (c) 2016 Intel # # Authors: Tomasz Kaczynski # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Script for the sync tool.""" import sys from oslo_log import log from watcher.common import service from watcher import conf from watcher.decision_engine import sync LOG = log.getLogger(__name__) CONF = conf.CONF def main(): LOG.info('Watcher sync started.') service.prepare_service(sys.argv, CONF) syncer = sync.Syncer() syncer.sync() LOG.info('Watcher sync finished.') python-watcher-4.0.0/watcher/cmd/applier.py0000664000175000017500000000246713656752270020745 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # # Copyright 2013 Hewlett-Packard Development Company, L.P. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Starter script for the Applier service.""" import os import sys from oslo_log import log from watcher.applier import manager from watcher.applier import sync from watcher.common import service as watcher_service from watcher import conf LOG = log.getLogger(__name__) CONF = conf.CONF def main(): watcher_service.prepare_service(sys.argv, CONF) LOG.info('Starting Watcher Applier service in PID %s', os.getpid()) applier_service = watcher_service.Service(manager.ApplierManager) syncer = sync.Syncer() syncer.sync() # Only 1 process launcher = watcher_service.launch(CONF, applier_service) launcher.wait() python-watcher-4.0.0/watcher/locale/0000775000175000017500000000000013656752352017423 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/locale/en_GB/0000775000175000017500000000000013656752352020375 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000013656752352022162 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/locale/en_GB/LC_MESSAGES/watcher.po0000664000175000017500000006145313656752270024167 0ustar zuulzuul00000000000000# Andi Chandler , 2017. #zanata # Andi Chandler , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: watcher VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2020-04-26 01:47+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-11-07 06:14+0000\n" "Last-Translator: Andi Chandler \n" "Language-Team: English (United Kingdom)\n" "Language: en_GB\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid " (may include orphans)" msgstr " (may include orphans)" msgid " (orphans excluded)" msgstr " (orphans excluded)" #, python-format msgid "%(client)s connection failed. Reason: %(reason)s" msgstr "%(client)s connection failed. Reason: %(reason)s" #, python-format msgid "%(field)s can't be updated." 
msgstr "%(field)s can't be updated." #, python-format msgid "%(parameter)s has to be of type %(parameter_type)s" msgstr "%(parameter)s has to be of type %(parameter_type)s" #, python-format msgid "%s is not JSON serializable" msgstr "%s is not JSON serialisable" #, python-format msgid "" "'%(strategy)s' strategy does relate to the '%(goal)s' goal. Possible " "choices: %(choices)s" msgstr "" "'%(strategy)s' strategy does relate to the '%(goal)s' goal. Possible " "choices: %(choices)s" #, python-format msgid "'%s' is a mandatory attribute and can not be removed" msgstr "'%s' is a mandatory attribute and can not be removed" #, python-format msgid "'%s' is an internal attribute and can not be updated" msgstr "'%s' is an internal attribute and can not be updated" msgid "'add' and 'replace' operations needs value" msgstr "'add' and 'replace' operations needs value" msgid "'obj' argument type is not valid" msgstr "'obj' argument type is not valid" #, python-format msgid "'obj' argument type is not valid: %s" msgstr "'obj' argument type is not valid: %s" #, python-format msgid "A goal with UUID %(uuid)s already exists" msgstr "A goal with UUID %(uuid)s already exists" #, python-format msgid "A scoring engine with UUID %(uuid)s already exists" msgstr "A scoring engine with UUID %(uuid)s already exists" #, python-format msgid "A service with name %(name)s is already working on %(host)s." msgstr "A service with name %(name)s is already working on %(host)s." 
#, python-format msgid "A strategy with UUID %(uuid)s already exists" msgstr "A strategy with UUID %(uuid)s already exists" msgid "A valid goal_id or audit_template_id must be provided" msgstr "A valid goal_id or audit_template_id must be provided" #, python-format msgid "Action %(action)s could not be found" msgstr "Action %(action)s could not be found" #, python-format msgid "Action %(action)s was not eagerly loaded" msgstr "Action %(action)s was not eagerly loaded" #, python-format msgid "Action Plan %(action_plan)s is currently running." msgstr "Action Plan %(action_plan)s is currently running." #, python-format msgid "Action Plan %(action_plan)s is referenced by one or multiple actions" msgstr "Action Plan %(action_plan)s is referenced by one or multiple actions" #, python-format msgid "Action Plan with UUID %(uuid)s is cancelled by user" msgstr "Action Plan with UUID %(uuid)s is cancelled by user" msgid "Action Plans" msgstr "Action Plans" #, python-format msgid "Action plan %(action_plan)s is invalid" msgstr "Action plan %(action_plan)s is invalid" #, python-format msgid "Action plan %(action_plan)s is referenced by one or multiple goals" msgstr "Action plan %(action_plan)s is referenced by one or multiple goals" #, python-format msgid "Action plan %(action_plan)s was not eagerly loaded" msgstr "Action plan %(action_plan)s was not eagerly loaded" #, python-format msgid "ActionPlan %(action_plan)s could not be found" msgstr "ActionPlan %(action_plan)s could not be found" msgid "Actions" msgstr "Actions" msgid "Actuator" msgstr "Actuator" #, python-format msgid "Adding a new attribute (%s) to the root of the resource is not allowed" msgstr "" "Adding a new attribute (%s) to the root of the resource is not allowed" msgid "Airflow Optimization" msgstr "Airflow Optimisation" #, python-format msgid "An action description with type %(action_type)s is already exist." msgstr "An action description with type %(action_type)s is already exist." 
#, python-format msgid "An action plan with UUID %(uuid)s already exists" msgstr "An action plan with UUID %(uuid)s already exists" #, python-format msgid "An action with UUID %(uuid)s already exists" msgstr "An action with UUID %(uuid)s already exists" #, python-format msgid "An audit with UUID or name %(audit)s already exists" msgstr "An audit with UUID or name %(audit)s already exists" #, python-format msgid "An audit_template with UUID or name %(audit_template)s already exists" msgstr "An audit_template with UUID or name %(audit_template)s already exists" msgid "An indicator value should be a number" msgstr "An indicator value should be a number" msgid "An unknown exception occurred" msgstr "An unknown exception occurred" msgid "At least one feature is required" msgstr "At least one feature is required" #, python-format msgid "Audit %(audit)s could not be found" msgstr "Audit %(audit)s could not be found" #, python-format msgid "Audit %(audit)s is invalid" msgstr "Audit %(audit)s is invalid" #, python-format msgid "Audit %(audit)s is referenced by one or multiple action plans" msgstr "Audit %(audit)s is referenced by one or multiple action plans" #, python-format msgid "Audit %(audit)s was not eagerly loaded" msgstr "Audit %(audit)s was not eagerly loaded" msgid "Audit Templates" msgstr "Audit Templates" #, python-format msgid "Audit parameter %(parameter)s are not allowed" msgstr "Audit parameter %(parameter)s are not allowed" #, python-format msgid "Audit type %(audit_type)s could not be found" msgstr "Audit type %(audit_type)s could not be found" #, python-format msgid "AuditTemplate %(audit_template)s could not be found" msgstr "AuditTemplate %(audit_template)s could not be found" msgid "Audits" msgstr "Audits" msgid "Basic offline consolidation" msgstr "Basic offline consolidation" msgid "CDMCs" msgstr "CDMCs" msgid "Cannot compile public API routes" msgstr "Cannot compile public API routes" msgid "Cannot create an action directly" msgstr "Cannot create an 
action directly" msgid "Cannot delete an action directly" msgstr "Cannot delete an action directly" msgid "Cannot modify an action directly" msgstr "Cannot modify an action directly" msgid "Cannot overwrite UUID for an existing Action Plan." msgstr "Cannot overwrite UUID for an existing Action Plan." msgid "Cannot overwrite UUID for an existing Action." msgstr "Cannot overwrite UUID for an existing Action." msgid "Cannot overwrite UUID for an existing Audit Template." msgstr "Cannot overwrite UUID for an existing Audit Template." msgid "Cannot overwrite UUID for an existing Audit." msgstr "Cannot overwrite UUID for an existing Audit." msgid "Cannot overwrite UUID for an existing Goal." msgstr "Cannot overwrite UUID for an existing Goal." msgid "Cannot overwrite UUID for an existing Scoring Engine." msgstr "Cannot overwrite UUID for an existing Scoring Engine." msgid "Cannot overwrite UUID for an existing Strategy." msgstr "Cannot overwrite UUID for an existing Strategy." msgid "Cannot overwrite UUID for an existing efficacy indicator." msgstr "Cannot overwrite UUID for an existing efficacy indicator." msgid "Cannot remove 'goal' attribute from an audit template" msgstr "Cannot remove 'goal' attribute from an audit template" msgid "Cluster Maintaining" msgstr "Cluster Maintaining" msgid "Conflict" msgstr "Conflict" #, python-format msgid "" "Could not compute the global efficacy for the '%(goal)s' goal using the " "'%(strategy)s' strategy." msgstr "" "Could not compute the global efficacy for the '%(goal)s' goal using the " "'%(strategy)s' strategy." #, python-format msgid "Could not load any strategy for goal %(goal)s" msgstr "Could not load any strategy for goal %(goal)s" #, python-format msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s" msgstr "Couldn't apply patch '%(patch)s'. Reason: %(reason)s" #, python-format msgid "Couldn't delete when state is '%(state)s'." msgstr "Couldn't delete when state is '%(state)s'." 
#, python-format msgid "Couldn't start when state is '%(state)s'." msgstr "Couldn't start when state is '%(state)s'." #, python-format msgid "Datasource %(datasource)s is not available." msgstr "Datasource %(datasource)s is not available." #, python-format msgid "Datasource %(datasource)s is not supported by strategy %(strategy)s" msgstr "Datasource %(datasource)s is not supported by strategy %(strategy)s" msgid "Do you want to delete objects up to the specified maximum number? [y/N]" msgstr "" "Do you want to delete objects up to the specified maximum number? [y/N]" #, python-format msgid "Domain name seems ambiguous: %s" msgstr "Domain name seems ambiguous: %s" #, python-format msgid "Domain not Found: %s" msgstr "Domain not Found: %s" msgid "Dummy Strategy using sample Scoring Engines" msgstr "Dummy Strategy using sample Scoring Engines" msgid "Dummy goal" msgstr "Dummy goal" msgid "Dummy strategy" msgstr "Dummy strategy" msgid "Dummy strategy with resize" msgstr "Dummy strategy with resize" #, python-format msgid "Efficacy indicator %(efficacy_indicator)s could not be found" msgstr "Efficacy indicator %(efficacy_indicator)s could not be found" #, python-format msgid "Error loading plugin '%(name)s'" msgstr "Error loading plugin '%(name)s'" #, python-format msgid "ErrorDocumentMiddleware received an invalid status %s" msgstr "ErrorDocumentMiddleware received an invalid status %s" msgid "Executing Host Maintenance Migration Strategy" msgstr "Executing Host Maintenance Migration Strategy" #, python-format msgid "Expected a logical name but received %(name)s" msgstr "Expected a logical name but received %(name)s" #, python-format msgid "Expected a logical name or uuid but received %(name)s" msgstr "Expected a logical name or UUID but received %(name)s" #, python-format msgid "Expected a uuid but received %(uuid)s" msgstr "Expected a UUID but received %(uuid)s" #, python-format msgid "Expected a uuid or int but received %(identity)s" msgstr "Expected a UUID or int 
but received %(identity)s" #, python-format msgid "Expected an interval or cron syntax but received %(name)s" msgstr "Expected an interval or cron syntax but received %(name)s" #, python-format msgid "Failed to create volume '%(volume)s. " msgstr "Failed to create volume '%(volume)s. " #, python-format msgid "Failed to delete volume '%(volume)s. " msgstr "Failed to delete volume '%(volume)s. " #, python-format msgid "Filter operator is not valid: %(operator)s not in %(valid_operators)s" msgstr "Filter operator is not valid: %(operator)s not in %(valid_operators)s" msgid "Filtering actions on both audit and action-plan is prohibited" msgstr "Filtering actions on both audit and action-plan is prohibited" msgid "Goal" msgstr "Goal" #, python-format msgid "Goal %(goal)s could not be found" msgstr "Goal %(goal)s could not be found" #, python-format msgid "Goal %(goal)s is invalid" msgstr "Goal %(goal)s is invalid" msgid "Goals" msgstr "Goals" msgid "Hardware Maintenance" msgstr "Hardware Maintenance" #, python-format msgid "Here below is a table containing the objects that can be purged%s:" msgstr "Here below is a table containing the objects that can be purged%s:" msgid "Host Maintenance Strategy" msgstr "Host Maintenance Strategy" msgid "Illegal argument" msgstr "Illegal argument" #, python-format msgid "" "Incorrect mapping: could not find associated weight for %s in weight dict." msgstr "" "Incorrect mapping: could not find associated weight for %s in weight dict." #, python-format msgid "Interval of audit must be specified for %(audit_type)s." msgstr "Interval of audit must be specified for %(audit_type)s." #, python-format msgid "Interval of audit must not be set for %(audit_type)s." msgstr "Interval of audit must not be set for %(audit_type)s." 
#, python-format msgid "Invalid filter: %s" msgstr "Invalid filter: %s" msgid "Invalid number of features, expected 9" msgstr "Invalid number of features, expected 9" #, python-format msgid "Invalid query: %(start_time)s > %(end_time)s" msgstr "Invalid query: %(start_time)s > %(end_time)s" #, python-format msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'" msgstr "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'" #, python-format msgid "Invalid sort key: %s" msgstr "Invalid sort key: %s" msgid "Invalid state for swapping volume" msgstr "Invalid state for swapping volume" #, python-format msgid "Invalid state: %(state)s" msgstr "Invalid state: %(state)s" msgid "JSON list expected in feature argument" msgstr "JSON list expected in feature argument" msgid "Limit must be positive" msgstr "Limit must be positive" msgid "Limit should be positive" msgstr "Limit should be positive" msgid "Maximum time since last check-in for up service." msgstr "Maximum time since last check-in for up service." #, python-format msgid "Migration of type '%(migration_type)s' is not supported." msgstr "Migration of type '%(migration_type)s' is not supported." msgid "" "Name of this node. This can be an opaque identifier. It is not necessarily a " "hostname, FQDN, or IP address. However, the node name must be valid within " "an AMQP key." msgstr "" "Name of this node. This can be an opaque identifier. It is not necessarily a " "hostname, FQDN, or IP address. However, the node name must be valid within " "an AMQP key." #, python-format msgid "No %(metric)s metric for %(host)s found." msgstr "No %(metric)s metric for %(host)s found." #, python-format msgid "No strategy could be found to achieve the '%(goal)s' goal." msgstr "No strategy could be found to achieve the '%(goal)s' goal." 
msgid "Noisy Neighbor" msgstr "Noisy Neighbour" msgid "Not authorized" msgstr "Not authorised" msgid "Not supported" msgstr "Not supported" msgid "Operation not permitted" msgstr "Operation not permitted" msgid "Outlet temperature based strategy" msgstr "Outlet temperature based strategy" #, python-format msgid "" "Payload not populated when trying to send notification \"%(class_name)s\"" msgstr "" "Payload not populated when trying to send notification \"%(class_name)s\"" msgid "Plugins" msgstr "Plugins" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Policy doesn't allow %(action)s to be performed." #, python-format msgid "Project name seems ambiguous: %s" msgstr "Project name seems ambiguous: %s" #, python-format msgid "Project not Found: %s" msgstr "Project not Found: %s" #, python-format msgid "Provided %(action_type)s is not supported yet" msgstr "Provided %(action_type)s is not supported yet" #, python-format msgid "Provided cron is invalid: %(message)s" msgstr "Provided cron is invalid: %(message)s" #, python-format msgid "Purge results summary%s:" msgstr "Purge results summary%s:" msgid "" "Ratio of actual attached volumes migrated to planned attached volumes " "migrate." msgstr "" "Ratio of actual attached volumes migrated to planned attached volumes " "migrate." msgid "" "Ratio of actual cold migrated instances to planned cold migrate instances." msgstr "" "Ratio of actual cold migrated instances to planned cold migrate instances." msgid "" "Ratio of actual detached volumes migrated to planned detached volumes " "migrate." msgstr "" "Ratio of actual detached volumes migrated to planned detached volumes " "migrate." msgid "" "Ratio of actual live migrated instances to planned live migrate instances." msgstr "" "Ratio of actual live migrated instances to planned live migrate instances." 
msgid "Ratio of migrated virtual machines to audited virtual machines" msgstr "Ratio of migrated virtual machines to audited virtual machines" msgid "" "Ratio of released compute nodes divided by the total number of enabled " "compute nodes." msgstr "" "Ratio of released compute nodes divided by the total number of enabled " "compute nodes." msgid "Request not acceptable." msgstr "Request not acceptable." #, python-format msgid "Role name seems ambiguous: %s" msgstr "Role name seems ambiguous: %s" #, python-format msgid "Role not Found: %s" msgstr "Role not Found: %s" msgid "Saving Energy" msgstr "Saving Energy" msgid "Saving Energy Strategy" msgstr "Saving Energy Strategy" #, python-format msgid "Scoring Engine with name=%s not found" msgstr "Scoring Engine with name=%s not found" #, python-format msgid "ScoringEngine %(scoring_engine)s could not be found" msgstr "ScoringEngine %(scoring_engine)s could not be found" msgid "Seconds between running periodic tasks." msgstr "Seconds between running periodic tasks." msgid "Server Consolidation" msgstr "Server Consolidation" msgid "" "Specifies the minimum level for which to send notifications. If not set, no " "notifications will be sent. The default is for this option to be at the " "`INFO` level." msgstr "" "Specifies the minimum level for which to send notifications. If not set, no " "notifications will be sent. The default is for this option to be at the " "`INFO` level." msgid "" "Specify parameters but no predefined strategy for audit, or no parameter " "spec in predefined strategy" msgstr "" "Specify parameters but no predefined strategy for audit, or no parameter " "spec in predefined strategy" #, python-format msgid "Start or End time of audit must not be set for %(audit_type)s." msgstr "Start or End time of audit must not be set for %(audit_type)s." 
#, python-format msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)" msgstr "State transition not allowed: (%(initial_state)s -> %(new_state)s)" msgid "Storage Capacity Balance Strategy" msgstr "Storage Capacity Balance Strategy" msgid "Strategies" msgstr "Strategies" #, python-format msgid "Strategy %(strategy)s could not be found" msgstr "Strategy %(strategy)s could not be found" #, python-format msgid "Strategy %(strategy)s is invalid" msgstr "Strategy %(strategy)s is invalid" #, python-format msgid "The %(name)s %(id)s could not be found" msgstr "The %(name)s %(id)s could not be found" #, python-format msgid "The %(name)s pool %(attribute)s is not integer" msgstr "The %(name)s pool %(attribute)s is not integer" #, python-format msgid "The %(name)s resource %(id)s could not be found" msgstr "The %(name)s resource %(id)s could not be found" #, python-format msgid "The %(name)s resource %(id)s is not soft deleted" msgstr "The %(name)s resource %(id)s is not soft deleted" #, python-format msgid "The action %(action_id)s execution failed." msgstr "The action %(action_id)s execution failed." #, python-format msgid "The action description %(action_id)s cannot be found." msgstr "The action description %(action_id)s cannot be found." 
msgid "The audit template UUID or name specified is invalid" msgstr "The audit template UUID or name specified is invalid" #, python-format msgid "The baremetal resource '%(name)s' could not be found" msgstr "The baremetal resource '%(name)s' could not be found" #, python-format msgid "The cluster data model '%(cdm)s' could not be built" msgstr "The cluster data model '%(cdm)s' could not be built" msgid "The cluster state is not defined" msgstr "The cluster state is not defined" msgid "The cluster state is stale" msgstr "The cluster state is stale" #, python-format msgid "The compute node %(name)s could not be found" msgstr "The compute node %(name)s could not be found" #, python-format msgid "The compute resource '%(name)s' could not be found" msgstr "The compute resource '%(name)s' could not be found" #, python-format msgid "The identifier '%(name)s' is a reserved word" msgstr "The identifier '%(name)s' is a reserved word" #, python-format msgid "" "The indicator '%(name)s' with value '%(value)s' and spec type " "'%(spec_type)s' is invalid." msgstr "" "The indicator '%(name)s' with value '%(value)s' and spec type " "'%(spec_type)s' is invalid." #, python-format msgid "The instance '%(name)s' could not be found" msgstr "The instance '%(name)s' could not be found" #, python-format msgid "The ironic node %(uuid)s could not be found" msgstr "The Ironic node %(uuid)s could not be found" msgid "The number of VM migrations to be performed." msgstr "The number of VM migrations to be performed." msgid "The number of attached volumes actually migrated." msgstr "The number of attached volumes actually migrated." msgid "The number of attached volumes planned to migrate." msgstr "The number of attached volumes planned to migrate." msgid "The number of compute nodes to be released." msgstr "The number of compute nodes to be released." msgid "The number of detached volumes actually migrated." msgstr "The number of detached volumes actually migrated." 
msgid "The number of detached volumes planned to migrate." msgstr "The number of detached volumes planned to migrate." msgid "The number of instances actually cold migrated." msgstr "The number of instances actually cold migrated." msgid "The number of instances actually live migrated." msgstr "The number of instances actually live migrated." msgid "The number of instances planned to cold migrate." msgstr "The number of instances planned to cold migrate." msgid "The number of instances planned to live migrate." msgstr "The number of instances planned to live migrate." #, python-format msgid "" "The number of objects (%(num)s) to delete from the database exceeds the " "maximum number of objects (%(max_number)s) specified." msgstr "" "The number of objects (%(num)s) to delete from the database exceeds the " "maximum number of objects (%(max_number)s) specified." #, python-format msgid "The pool %(name)s could not be found" msgstr "The pool %(name)s could not be found" #, python-format msgid "The service %(service)s cannot be found." msgstr "The service %(service)s cannot be found." #, python-format msgid "The storage node %(name)s could not be found" msgstr "The storage node %(name)s could not be found" #, python-format msgid "The storage resource '%(name)s' could not be found" msgstr "The storage resource '%(name)s' could not be found" msgid "The target state is not defined" msgstr "The target state is not defined" msgid "The total number of audited instances in strategy." msgstr "The total number of audited instances in strategy." msgid "The total number of enabled compute nodes." msgstr "The total number of enabled compute nodes." msgid "The value of original standard deviation." msgstr "The value of original standard deviation." msgid "The value of resulted standard deviation." msgstr "The value of resulted standard deviation." 
#, python-format msgid "The volume '%(name)s' could not be found" msgstr "The volume '%(name)s' could not be found" #, python-format msgid "There are %(count)d objects set for deletion. Continue? [y/N]" msgstr "There are %(count)d objects set for deletion. Continue? [y/N]" msgid "Thermal Optimization" msgstr "Thermal Optimisation" msgid "Total" msgstr "Total" msgid "Unable to parse features: " msgstr "Unable to parse features: " #, python-format msgid "Unable to parse features: %s" msgstr "Unable to parse features: %s" msgid "Unacceptable parameters" msgstr "Unacceptable parameters" msgid "Unclassified" msgstr "Unclassified" #, python-format msgid "Unexpected keystone client error occurred: %s" msgstr "Unexpected Keystone client error occurred: %s" msgid "Uniform airflow migration strategy" msgstr "Uniform airflow migration strategy" #, python-format msgid "User name seems ambiguous: %s" msgstr "User name seems ambiguous: %s" #, python-format msgid "User not Found: %s" msgstr "User not Found: %s" msgid "VM Workload Consolidation Strategy" msgstr "VM Workload Consolidation Strategy" msgid "Volume type must be different for retyping" msgstr "Volume type must be different for retyping" msgid "Volume type must be same for migrating" msgstr "Volume type must be same for migrating" msgid "" "Watcher database schema is already under version control; use upgrade() " "instead" msgstr "" "Watcher database schema is already under version control; use upgrade() " "instead" #, python-format msgid "Workflow execution error: %(error)s" msgstr "Workflow execution error: %(error)s" msgid "Workload Balance Migration Strategy" msgstr "Workload Balance Migration Strategy" msgid "Workload Balancing" msgstr "Workload Balancing" msgid "Workload stabilization" msgstr "Workload stabilisation" #, python-format msgid "Wrong type. Expected '%(type)s', got '%(value)s'" msgstr "Wrong type. 
Expected '%(type)s', got '%(value)s'" #, python-format msgid "" "You shouldn't use any other IDs of %(resource)s if you use wildcard " "character." msgstr "" "You shouldn't use any other IDs of %(resource)s if you use wildcard " "character." msgid "Zone migration" msgstr "Zone migration" msgid "destination type is required when migration type is swap" msgstr "destination type is required when migration type is swap" msgid "host_aggregates can't be included and excluded together" msgstr "host_aggregates can't be included and excluded together" python-watcher-4.0.0/watcher/locale/de/0000775000175000017500000000000013656752352020013 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/locale/de/LC_MESSAGES/0000775000175000017500000000000013656752352021600 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/locale/de/LC_MESSAGES/watcher.po0000664000175000017500000006351713656752270023610 0ustar zuulzuul00000000000000# Frank Kloeker , 2018. #zanata # Andreas Jaeger , 2020. #zanata msgid "" msgstr "" "Project-Id-Version: watcher VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2020-04-26 01:47+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2020-04-25 11:45+0000\n" "Last-Translator: Andreas Jaeger \n" "Language-Team: German\n" "Language: de\n" "X-Generator: Zanata 4.3.3\n" "Plural-Forms: nplurals=2; plural=(n != 1)\n" msgid " (may include orphans)" msgstr "(kann Waisen einschließen)" msgid " (orphans excluded)" msgstr "(Waisen ausgeschlossen)" #, python-format msgid "%(client)s connection failed. Reason: %(reason)s" msgstr "Die Verbindung von %(client)s ist fehlgeschlagen. Grund: %(reason)s" #, python-format msgid "%(field)s can't be updated." msgstr "%(field)s kann nicht aktualisiert werden." 
#, python-format msgid "%(parameter)s has to be of type %(parameter_type)s" msgstr "%(parameter)s muss vom Typ %(parameter_type)s sein" #, python-format msgid "%s is not JSON serializable" msgstr "%s ist nicht JSON serialisierbar" #, python-format msgid "" "'%(strategy)s' strategy does relate to the '%(goal)s' goal. Possible " "choices: %(choices)s" msgstr "" "Die Strategie '%(strategy)s' bezieht sich auf das Ziel'%(goal)s'. Mögliche " "Auswahlmöglichkeiten: %(choices)s" #, python-format msgid "'%s' is a mandatory attribute and can not be removed" msgstr "'%s' ist ein obligatorisches Attribut und kann nicht entfernt werden" #, python-format msgid "'%s' is an internal attribute and can not be updated" msgstr "'%s' ist ein internes Attribut und kann nicht aktualisiert werden" msgid "'add' and 'replace' operations needs value" msgstr "'add' und 'replace' Operationen benötigt Wert" msgid "'obj' argument type is not valid" msgstr "Der Argumenttyp 'obj' ist nicht gültig" #, python-format msgid "'obj' argument type is not valid: %s" msgstr "Der Argumenttyp 'obj' ist nicht gültig: %s" #, python-format msgid "A goal with UUID %(uuid)s already exists" msgstr "Ein Ziel mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "A scoring engine with UUID %(uuid)s already exists" msgstr "Eine Scoring-Engine mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "A service with name %(name)s is already working on %(host)s." msgstr "Ein Dienst mit dem Namen %(name)s arbeitet bereits auf %(host)s." 
#, python-format msgid "A strategy with UUID %(uuid)s already exists" msgstr "Eine Strategie mit UUID %(uuid)s ist bereits vorhanden" msgid "A valid goal_id or audit_template_id must be provided" msgstr "Eine gültige goal_id oder audit_template_id muss angegeben werden" #, python-format msgid "Action %(action)s could not be found" msgstr "Aktion %(action)s konnte nicht gefunden werden" #, python-format msgid "Action %(action)s was not eagerly loaded" msgstr "Aktion %(action)s wurde nicht eifrig geladen" #, python-format msgid "Action Plan %(action_plan)s is currently running." msgstr "Der Aktionsplan %(action_plan)s wird gerade ausgeführt." #, python-format msgid "Action Plan %(action_plan)s is referenced by one or multiple actions" msgstr "" "Der Aktionsplan %(action_plan)s wird durch eine oder mehrere Aktionen " "referenziert" #, python-format msgid "Action Plan with UUID %(uuid)s is cancelled by user" msgstr "Der Aktionsplan mit der UUID %(uuid)s wird vom Benutzer abgebrochen" msgid "Action Plans" msgstr "Aktionspläne" #, python-format msgid "Action plan %(action_plan)s is invalid" msgstr "Der Aktionsplan %(action_plan)s ist ungültig" #, python-format msgid "Action plan %(action_plan)s is referenced by one or multiple goals" msgstr "" "Der Aktionsplan %(action_plan)s wird von einem oder mehreren Zielen " "referenziert" #, python-format msgid "Action plan %(action_plan)s was not eagerly loaded" msgstr "Der Aktionsplan %(action_plan)s wurde nicht eifrig geladen" #, python-format msgid "ActionPlan %(action_plan)s could not be found" msgstr "ActionPlan %(action_plan)s konnte nicht gefunden werden" msgid "Actions" msgstr "Aktionen" msgid "Actuator" msgstr "Betätiger" #, python-format msgid "Adding a new attribute (%s) to the root of the resource is not allowed" msgstr "" "Das Hinzufügen eines neuen Attributs (%s) zum Stamm der Ressource ist nicht " "zulässig" msgid "Airflow Optimization" msgstr "Luftstrom-Optimierung" #, python-format msgid "An action description 
with type %(action_type)s is already exist." msgstr "" "Eine Aktionsbeschreibung vom Typ %(action_type)s ist bereits vorhanden." #, python-format msgid "An action plan with UUID %(uuid)s already exists" msgstr "Ein Aktionsplan mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "An action with UUID %(uuid)s already exists" msgstr "Eine Aktion mit UUID %(uuid)s ist bereits vorhanden" #, python-format msgid "An audit with UUID or name %(audit)s already exists" msgstr "Ein Audit mit UUID oder Name %(audit)s ist bereits vorhanden" #, python-format msgid "An audit_template with UUID or name %(audit_template)s already exists" msgstr "" "Ein Audit_Template mit UUID oder Name %(audit_template)s ist bereits " "vorhanden" msgid "An indicator value should be a number" msgstr "Ein Indikatorwert sollte eine Zahl sein" msgid "An unknown exception occurred" msgstr "Eine unbekannte Ausnahme ist aufgetreten" msgid "At least one feature is required" msgstr "Mindestens eine Funktion ist erforderlich" #, python-format msgid "Audit %(audit)s could not be found" msgstr "Audit %(audit)s konnte nicht gefunden werden" #, python-format msgid "Audit %(audit)s is invalid" msgstr "Audit %(audit)s ist ungültig" #, python-format msgid "Audit %(audit)s is referenced by one or multiple action plans" msgstr "" "Audit %(audit)s wird von einem oder mehreren Aktionsplänen referenziert" #, python-format msgid "Audit %(audit)s was not eagerly loaded" msgstr "Audit %(audit)s wurde nicht eifrig geladen" msgid "Audit Templates" msgstr "Prüfungsvorlagen" #, python-format msgid "Audit parameter %(parameter)s are not allowed" msgstr "Prüfparameter %(parameter)s sind nicht erlaubt" #, python-format msgid "Audit state %(state)s is disallowed." msgstr "Auditstatus %(state)s ist ungültig." #, python-format msgid "Audit type %(audit_type)s could not be found" msgstr "Audit-Typ %(audit_type)s konnte nicht gefunden werden" #, python-format msgid "Audit type %(audit_type)s is disallowed." 
msgstr "Audit-Typ %(audit_type)s nicht erlaubt." #, python-format msgid "AuditTemplate %(audit_template)s could not be found" msgstr "AuditTemplate %(audit_template)s konnte nicht gefunden werden" msgid "Audits" msgstr "Audits" msgid "Basic offline consolidation" msgstr "Grundlegende Offline-Konsolidierung" msgid "CDMCs" msgstr "CDMCs" msgid "Cannot compile public API routes" msgstr "Öffentliche API-Routen können nicht kompiliert werden" msgid "Cannot create an action directly" msgstr "Eine Aktion kann nicht direkt erstellt werden" msgid "Cannot delete an action directly" msgstr "Eine Aktion kann nicht direkt gelöscht werden" msgid "Cannot modify an action directly" msgstr "Eine Aktion kann nicht direkt geändert werden" msgid "Cannot overwrite UUID for an existing Action Plan." msgstr "" "UUID für einen vorhandenen Aktionsplan kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Action." msgstr "UUID kann für eine vorhandene Aktion nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Audit Template." msgstr "UUID für eine vorhandene Auditvorlage kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Audit." msgstr "UUID für ein vorhandenes Audit kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Goal." msgstr "UUID für ein vorhandenes Ziel kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Scoring Engine." msgstr "" "UUID für eine vorhandene Scoring Engine kann nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing Strategy." msgstr "UUID kann für eine vorhandene Strategie nicht überschrieben werden." msgid "Cannot overwrite UUID for an existing efficacy indicator." msgstr "" "UUID kann für einen vorhandenen Wirksamkeitsindikator nicht überschrieben " "werden." 
msgid "Cannot remove 'goal' attribute from an audit template" msgstr "Das Attribut 'goal' kann nicht aus einer Audit-Vorlage entfernt werden" msgid "Conflict" msgstr "Konflikt" #, python-format msgid "" "Could not compute the global efficacy for the '%(goal)s' goal using the " "'%(strategy)s' strategy." msgstr "" "Die globale Wirksamkeit für das Ziel '%(goal)s' konnte nicht mit der " "Strategie '%(strategy)s' berechnet werden." #, python-format msgid "Could not load any strategy for goal %(goal)s" msgstr "Konnte keine Strategie für Ziel %(goal)s laden" #, python-format msgid "Couldn't apply patch '%(patch)s'. Reason: %(reason)s" msgstr "Patch '%(patch)s' konnte nicht angewendet werden. Grund:%(reason)s" #, python-format msgid "Couldn't delete when state is '%(state)s'." msgstr "Konnte nicht gelöscht werden, wenn der Status '%(state)s' ist." #, python-format msgid "Couldn't start when state is '%(state)s'." msgstr "Konnte nicht gestartet werden, wenn der Status '%(state)s' ist." #, python-format msgid "Datasource %(datasource)s is not available." msgstr "Datenquelle %(datasource)s ist nicht verfügbar." #, python-format msgid "Datasource %(datasource)s is not supported by strategy %(strategy)s" msgstr "" "Die Datenquelle %(datasource)s wird von der Strategie %(strategy)s nicht " "unterstützt" msgid "Do you want to delete objects up to the specified maximum number? [y/N]" msgstr "" "Möchten Sie Objekte bis zur angegebenen maximalen Anzahl löschen? 
[J/N]" #, python-format msgid "Domain name seems ambiguous: %s" msgstr "Domänenname scheint mehrdeutig: %s" #, python-format msgid "Domain not Found: %s" msgstr "Domain nicht gefunden: %s" msgid "Dummy Strategy using sample Scoring Engines" msgstr "Dummy-Strategie mit Sample Scoring Engines" msgid "Dummy goal" msgstr "Dummy Ziel" msgid "Dummy strategy" msgstr "Dummy-Strategie" msgid "Dummy strategy with resize" msgstr "Dummy-Strategie mit Größenänderung" #, python-format msgid "Efficacy indicator %(efficacy_indicator)s could not be found" msgstr "" "Der Wirksamkeitsindikator %(efficacy_indicator)s konnte nicht gefunden werden" #, python-format msgid "Error loading plugin '%(name)s'" msgstr "Fehler beim Laden des Plugins '%(name)s'" #, python-format msgid "ErrorDocumentMiddleware received an invalid status %s" msgstr "ErrorDocumentMiddleware hat einen ungültigen Status %s erhalten" #, python-format msgid "Expected a logical name but received %(name)s" msgstr "Erwartete einen logischen Namen, erhielt aber %(name)s" #, python-format msgid "Expected a logical name or uuid but received %(name)s" msgstr "" "Erwartete einen logischen Namen oder eine UUID, erhielt jedoch %(name)s" #, python-format msgid "Expected a uuid but received %(uuid)s" msgstr "Erwartet eine Uuid aber %(uuid)s erhalten" #, python-format msgid "Expected a uuid or int but received %(identity)s" msgstr "Erwartet eine Uuid oder Int aber %(identity)s erhalten" #, python-format msgid "Expected an interval or cron syntax but received %(name)s" msgstr "Erwartete eine Intervall- oder Cron-Syntax, aber erhielt %(name)s" #, python-format msgid "Failed to create volume '%(volume)s. " msgstr "Fehler beim Erstellen des Datenträgers '%(volume)s." #, python-format msgid "Failed to delete volume '%(volume)s. " msgstr "Fehler beim Löschen des Datenträgers '%(volume)s." 
#, python-format msgid "Filter operator is not valid: %(operator)s not in %(valid_operators)s" msgstr "" "Filter Operator ist nicht gültig: %(operator)s nicht in %(valid_operators)s" msgid "Filtering actions on both audit and action-plan is prohibited" msgstr "" "Das Filtern von Aktionen sowohl im Audit- als auch im Aktionsplan ist " "verboten" msgid "Goal" msgstr "Ziel" #, python-format msgid "Goal %(goal)s could not be found" msgstr "Ziel %(goal)s konnte nicht gefunden werden" #, python-format msgid "Goal %(goal)s is invalid" msgstr "Ziel %(goal)s ist ungültig" msgid "Goals" msgstr "Ziele" msgid "Hardware Maintenance" msgstr "Hardware-Wartung" #, python-format msgid "Here below is a table containing the objects that can be purged%s:" msgstr "" "Hier unten ist eine Tabelle mit den Objekten, die gelöscht werden können: %s" msgid "Illegal argument" msgstr "Illegales Argument" #, python-format msgid "" "Incorrect mapping: could not find associated weight for %s in weight dict." msgstr "" "Inkorrektes Mapping: Die zugehörige Gewichtung für %s im Gewicht dict konnte " "nicht gefunden werden." #, python-format msgid "Interval of audit must be specified for %(audit_type)s." msgstr "Das Intervall der Prüfung muss für %(audit_type)s angegeben werden." #, python-format msgid "Interval of audit must not be set for %(audit_type)s." msgstr "" "Das Intervall der Prüfung darf nicht für %(audit_type)s festgelegt werden." #, python-format msgid "Invalid filter: %s" msgstr "Ungültiger Filter: %s" msgid "Invalid number of features, expected 9" msgstr "Ungültige Anzahl der erwarteten Features 9" #, python-format msgid "Invalid query: %(start_time)s > %(end_time)s" msgstr "Ungültige Abfrage: %(start_time)s > %(end_time)s" #, python-format msgid "Invalid sort direction: %s. Acceptable values are 'asc' or 'desc'" msgstr "Ungültige Sortierrichtung: %s. 
Akzeptable Werte sind 'asc' oder 'desc'" #, python-format msgid "Invalid sort key: %s" msgstr "Ungültiger Sortierschlüssel: %s" msgid "Invalid state for swapping volume" msgstr "Ungültiger Status für das Auslagern des Datenträgers" #, python-format msgid "Invalid state: %(state)s" msgstr "Ungültiger Status: %(state)s" msgid "JSON list expected in feature argument" msgstr "JSON-Liste in Feature-Argument erwartet" msgid "Limit must be positive" msgstr "Limit muss positiv sein" msgid "Limit should be positive" msgstr "Limit sollte positiv sein" msgid "Maximum time since last check-in for up service." msgstr "Maximale Zeit seit dem letzten Check-in für den Up-Service." #, python-format msgid "Metric: %(metric)s not available" msgstr "Metrik: %(metric)s nicht verfügbar" #, python-format msgid "Migration of type '%(migration_type)s' is not supported." msgstr "Die Migration vom Typ '%(migration_type)s' wird nicht unterstützt." msgid "Minimum Nova API Version" msgstr "Minimale Nova API Version" #, python-format msgid "No %(metric)s metric for %(host)s found." msgstr "Keine %(metric)s Metrik für %(host)s gefunden." #, python-format msgid "No strategy could be found to achieve the '%(goal)s' goal." msgstr "" "Es konnte keine Strategie gefunden werden, um das Ziel '%(goal)s' zu " "erreichen." msgid "Noisy Neighbor" msgstr "Lauter Nachbar" msgid "Not authorized" msgstr "Nicht berechtigt" msgid "Not supported" msgstr "Nicht unterstützt" msgid "Operation not permitted" msgstr "Operation unzulässig" msgid "Outlet temperature based strategy" msgstr "Auslasstemperatur basierte Strategie" #, python-format msgid "" "Payload not populated when trying to send notification \"%(class_name)s\"" msgstr "" "Payload wurde nicht ausgefüllt, wenn versucht wird, eine Benachrichtigung " "'%(class_name)s' zu senden" msgid "Plugins" msgstr "Plugins" #, python-format msgid "Policy doesn't allow %(action)s to be performed." msgstr "Die Richtlinie lässt nicht zu, dass %(action)s ausgeführt werden." 
#, python-format msgid "Project name seems ambiguous: %s" msgstr "Der Projektname erscheint mehrdeutig: %s" #, python-format msgid "Project not Found: %s" msgstr "Projekt nicht gefunden: %s" #, python-format msgid "Provided cron is invalid: %(message)s" msgstr "Bereitgestellter Cron ist ungültig: %(message)s" #, python-format msgid "Purge results summary%s:" msgstr "Zusammenfassung der Bereinigungsergebnisse %s:" msgid "" "Ratio of actual attached volumes migrated to planned attached volumes " "migrate." msgstr "" "Das Verhältnis der tatsächlich angehängten Datenträger, die zu geplanten " "angehängten Datenträger migriert wurden, wird migriert." msgid "" "Ratio of actual cold migrated instances to planned cold migrate instances." msgstr "" "Verhältnis von tatsächlichen kalt migrierten Instanzen zu geplanten kalten " "migrieren Instanzen." msgid "" "Ratio of actual detached volumes migrated to planned detached volumes " "migrate." msgstr "" "Das Verhältnis der tatsächlich abgetrennten Datenträger, die in geplante, " "getrennte Datenträger migriert wurden, wird migriert." msgid "" "Ratio of actual live migrated instances to planned live migrate instances." msgstr "" "Verhältnis von tatsächlichen migrierten Live-Instanzen zu geplanten Live-" "Migrationsinstanzen" msgid "" "Ratio of released compute nodes divided by the total number of enabled " "compute nodes." msgstr "" "Verhältnis der freigegebenen Compute-Knoten geteilt durch die Gesamtzahl der " "aktivierten Compute-Knoten." msgid "Request not acceptable." msgstr "Anforderung nicht zulässig." 
#, python-format msgid "Role name seems ambiguous: %s" msgstr "Der Rollenname scheint mehrdeutig: %s" #, python-format msgid "Role not Found: %s" msgstr "Rolle nicht gefunden: %s" msgid "Saving Energy" msgstr "Energie sparen" msgid "Saving Energy Strategy" msgstr "Energiestrategie speichern" #, python-format msgid "Scoring Engine with name=%s not found" msgstr "Scoring Engine mit name=%s nicht gefunden" #, python-format msgid "ScoringEngine %(scoring_engine)s could not be found" msgstr "ScoringEngine %(scoring_engine)s konnte nicht gefunden werden" msgid "Seconds between running periodic tasks." msgstr "Sekunden zwischen dem Ausführen periodischer Aufgaben." msgid "Server Consolidation" msgstr "Serverkonsolidierung" msgid "" "Specifies the minimum level for which to send notifications. If not set, no " "notifications will be sent. The default is for this option to be at the " "`INFO` level." msgstr "" "Gibt die Mindeststufe an, für die Benachrichtigungen gesendet werden. Wenn " "nicht festgelegt, werden keine Benachrichtigungen gesendet. Standardmäßig " "ist diese Option auf der INFO-Ebene." 
msgid "" "Specify parameters but no predefined strategy for audit, or no parameter " "spec in predefined strategy" msgstr "" "Geben Sie Parameter, aber keine vordefinierte Strategie für das Audit oder " "keine Parameterspezifikation in der vordefinierten Strategie an" #, python-format msgid "State transition not allowed: (%(initial_state)s -> %(new_state)s)" msgstr "Statusübergang nicht erlaubt: (%(initial_state)s -> %(new_state)s)" msgid "Storage Capacity Balance Strategy" msgstr "Storage Capacity Balance-Strategie" msgid "Strategies" msgstr "Strategien" #, python-format msgid "Strategy %(strategy)s could not be found" msgstr "Strategie %(strategy)s konnte nicht gefunden werden" #, python-format msgid "Strategy %(strategy)s is invalid" msgstr "Strategie %(strategy)s ist ungültig" #, python-format msgid "The %(name)s %(id)s could not be found" msgstr " Die %(name)s %(id)s konnte nicht gefunden werden" #, python-format msgid "The %(name)s resource %(id)s could not be found" msgstr "Die %(name)s Ressource %(id)s konnte nicht gefunden werden" #, python-format msgid "The %(name)s resource %(id)s is not soft deleted" msgstr "Die %(name)s Ressource %(id)s wird nicht weich gelöscht" #, python-format msgid "The action %(action_id)s execution failed." msgstr "Die Ausführung der Aktion %(action_id)s ist fehlgeschlagen." #, python-format msgid "The action description %(action_id)s cannot be found." msgstr "Die Aktionsbeschreibung %(action_id)s konnte nicht gefunden werden." 
msgid "The audit template UUID or name specified is invalid" msgstr "Die UUID oder der Name der Überprüfungsvorlage ist ungültig" #, python-format msgid "The baremetal resource '%(name)s' could not be found" msgstr "Die Barmetal-Ressource '%(name)s' konnte nicht gefunden werden" #, python-format msgid "The cluster data model '%(cdm)s' could not be built" msgstr "Das Clusterdatenmodell '%(cdm)s' konnte nicht erstellt werden" msgid "The cluster state is not defined" msgstr "Der Clusterstatus ist nicht definiert" msgid "The cluster state is stale" msgstr "Der Clusterstatus ist veraltet" #, python-format msgid "The compute node %(name)s could not be found" msgstr "Der Compute-Knoten %(name)s konnte nicht gefunden werden" #, python-format msgid "The compute resource '%(name)s' could not be found" msgstr "Die Rechenressource '%(name)s' konnte nicht gefunden werden" #, python-format msgid "The identifier '%(name)s' is a reserved word" msgstr "Der Bezeichner '%(name)s' ist ein reserviertes Wort" #, python-format msgid "" "The indicator '%(name)s' with value '%(value)s' and spec type " "'%(spec_type)s' is invalid." msgstr "" "Das Kennzeichen '%(name)s' mit dem Wert '%(value)s' und dem " "Spezifikationstyp '%(spec_type)s' ist ungültig." #, python-format msgid "The instance '%(name)s' could not be found" msgstr "Die Instanz '%(name)s' konnte nicht gefunden werden" #, python-format msgid "The ironic node %(uuid)s could not be found" msgstr "Der Ironic Knoten %(uuid)s konnte nicht gefunden werden" msgid "The number of VM migrations to be performed." msgstr "Die Anzahl der VM-Migrationen, die ausgeführt werden sollen." msgid "The number of attached volumes actually migrated." msgstr "Die Anzahl der angehängten Datenträger wurde tatsächlich migriert." msgid "The number of attached volumes planned to migrate." msgstr "Die Anzahl der angehängten Datenträger, die migriert werden sollen." msgid "The number of compute nodes to be released." 
msgstr "Die Anzahl der zu veröffentlichenden Compute-Knoten." msgid "The number of detached volumes actually migrated." msgstr "Die Anzahl der gelösten Datenträger wurde tatsächlich migriert." msgid "The number of detached volumes planned to migrate." msgstr "Die Anzahl der gelöschten Datenträger, die migriert werden sollen." msgid "The number of instances actually cold migrated." msgstr "Die Anzahl der tatsächlich kalten Instanzen wurde migriert." msgid "The number of instances actually live migrated." msgstr "Die Anzahl der tatsächlich migrierten Instanzen." msgid "The number of instances planned to cold migrate." msgstr "Die Anzahl der geplanten Fälle für eine Kaltmigration." msgid "The number of instances planned to live migrate." msgstr "Die Anzahl der geplanten Live-Migrationen." #, python-format msgid "" "The number of objects (%(num)s) to delete from the database exceeds the " "maximum number of objects (%(max_number)s) specified." msgstr "" "Die Anzahl der zu löschenden Objekte (%(num)s) aus der Datenbank " "überschreitet die maximale Anzahl der angegebenen Objekte (%(max_number)s)." #, python-format msgid "The pool %(name)s could not be found" msgstr "Der Pool %(name)skonnte nicht gefunden werden" #, python-format msgid "The service %(service)s cannot be found." msgstr "Der Service %(service)s kann nicht gefunden werden." #, python-format msgid "The storage node %(name)s could not be found" msgstr "Der Speicherknoten %(name)s konnte nicht gefunden werden" #, python-format msgid "The storage resource '%(name)s' could not be found" msgstr "Die Speicherressource '%(name)s' konnte nicht gefunden werden" msgid "The target state is not defined" msgstr "Der Zielzustand ist nicht definiert" msgid "The total number of enabled compute nodes." msgstr "Die Gesamtzahl der aktivierten Compute-Knoten." 
#, python-format msgid "The volume '%(name)s' could not be found" msgstr "Der Datenträger '%(name)s' konnte nicht gefunden werden" #, python-format msgid "There are %(count)d objects set for deletion. Continue? [y/N]" msgstr "Es sind %(count)d Objekte zum Löschen eingestellt. Fortsetzen? [J/N]" msgid "Thermal Optimization" msgstr "Thermische Optimierung" msgid "Total" msgstr "Gesamt" msgid "Unable to parse features: " msgstr "Die Analyse von Features ist nicht möglich:" #, python-format msgid "Unable to parse features: %s" msgstr "Die Funktionen können nicht analysiert werden: %s" msgid "Unacceptable parameters" msgstr "Inakzeptable Parameter" msgid "Unclassified" msgstr "Nicht klassifiziert" #, python-format msgid "Unexpected keystone client error occurred: %s" msgstr "Unerwarteter Keystone Fehler trat auf: %s" msgid "Uniform airflow migration strategy" msgstr "Einheitliche Luftstrommigrationsstrategie" #, python-format msgid "User name seems ambiguous: %s" msgstr "Der Benutzername scheint mehrdeutig zu sein: %s" #, python-format msgid "User not Found: %s" msgstr "Benutzer nicht gefunden: %s" msgid "VM Workload Consolidation Strategy" msgstr "VM-Workload-Konsolidierungsstrategie" msgid "Volume type must be different for retyping" msgstr "Der Volume-Typ muss sich beim erneuten Eintippen unterscheiden" msgid "Volume type must be same for migrating" msgstr "Der Volume-Typ muss für die Migration identisch sein" msgid "" "Watcher database schema is already under version control; use upgrade() " "instead" msgstr "" "Watcher-Datenbankschema ist bereits unter Versionskontrolle; Verwenden Sie " "stattdessen upgrade()" #, python-format msgid "Workflow execution error: %(error)s" msgstr "Workflow-Ausführungsfehler: %(error)s" msgid "Workload Balance Migration Strategy" msgstr "Workload-Balance-Migrationsstrategie" msgid "Workload Balancing" msgstr "Workload-Ausgleich" msgid "Workload stabilization" msgstr "Workload-Stabilisierung" #, python-format msgid "Wrong type. 
Expected '%(type)s', got '%(value)s'" msgstr "Falscher Typ. Erwartete '%(type)s', bekam '%(value)s'" #, python-format msgid "" "You shouldn't use any other IDs of %(resource)s if you use wildcard " "character." msgstr "" "Sie sollten keine anderen IDs von %(resource)s verwenden, wenn Sie " "Platzhalterzeichen verwenden." msgid "Zone migration" msgstr "Zonenmigration" msgid "destination type is required when migration type is swap" msgstr "Zieltyp ist erforderlich, wenn der Migrationstyp Swap ist" msgid "host_aggregates can't be included and excluded together" msgstr "" "host_aggregates können nicht zusammen eingeschlossen und ausgeschlossen " "werden" python-watcher-4.0.0/watcher/_i18n.py0000664000175000017500000000241713656752270017457 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 b<>com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # import oslo_i18n from oslo_i18n import _lazy # The domain is the name of the App which is used to generate the folder # containing the translation files (i.e. 
the .pot file and the various locales) DOMAIN = "watcher" _translators = oslo_i18n.TranslatorFactory(domain=DOMAIN) # The primary translation function using the well-known name "_" _ = _translators.primary # The contextual translation function using the name "_C" _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form def lazy_translation_enabled(): return _lazy.USE_LAZY def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) python-watcher-4.0.0/watcher/conf/0000775000175000017500000000000013656752352017111 5ustar zuulzuul00000000000000python-watcher-4.0.0/watcher/conf/keystone_client.py0000664000175000017500000000250513656752270022663 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 ZTE Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg keystone_client = cfg.OptGroup(name='keystone_client', title='Configuration Options for Keystone') KEYSTONE_CLIENT_OPTS = [ cfg.StrOpt('interface', default='admin', choices=['internal', 'public', 'admin'], help='Type of endpoint to use in keystoneclient.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(keystone_client) conf.register_opts(KEYSTONE_CLIENT_OPTS, group=keystone_client) def list_opts(): return [(keystone_client, KEYSTONE_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/__init__.py0000775000175000017500000000444413656752270021232 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # Copyright (c) 2016 Intel Corp # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from watcher.conf import api from watcher.conf import applier from watcher.conf import ceilometer_client from watcher.conf import cinder_client from watcher.conf import clients_auth from watcher.conf import collector from watcher.conf import datasources from watcher.conf import db from watcher.conf import decision_engine from watcher.conf import exception from watcher.conf import glance_client from watcher.conf import gnocchi_client from watcher.conf import grafana_client from watcher.conf import grafana_translators from watcher.conf import ironic_client from watcher.conf import keystone_client from watcher.conf import monasca_client from watcher.conf import neutron_client from watcher.conf import nova_client from watcher.conf import paths from watcher.conf import placement_client from watcher.conf import planner from watcher.conf import service CONF = cfg.CONF service.register_opts(CONF) api.register_opts(CONF) paths.register_opts(CONF) exception.register_opts(CONF) datasources.register_opts(CONF) db.register_opts(CONF) planner.register_opts(CONF) applier.register_opts(CONF) decision_engine.register_opts(CONF) monasca_client.register_opts(CONF) nova_client.register_opts(CONF) glance_client.register_opts(CONF) gnocchi_client.register_opts(CONF) keystone_client.register_opts(CONF) grafana_client.register_opts(CONF) grafana_translators.register_opts(CONF) cinder_client.register_opts(CONF) ceilometer_client.register_opts(CONF) neutron_client.register_opts(CONF) clients_auth.register_opts(CONF) ironic_client.register_opts(CONF) collector.register_opts(CONF) placement_client.register_opts(CONF) python-watcher-4.0.0/watcher/conf/ceilometer_client.py0000664000175000017500000000470713656752270023160 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the 
License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg ceilometer_client = cfg.OptGroup(name='ceilometer_client', title='Configuration Options for Ceilometer') CEILOMETER_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2', deprecated_for_removal=True, deprecated_since="1.13.0", deprecated_reason=""" Ceilometer API is deprecated since Ocata release. Any related configuration options are deprecated too. """, help='Version of Ceilometer API to use in ' 'ceilometerclient.'), cfg.StrOpt('endpoint_type', default='internalURL', deprecated_for_removal=True, deprecated_since="1.13.0", deprecated_reason=""" Ceilometer API is deprecated since Ocata release. Any related configuration options are deprecated too. """, help='Type of endpoint to use in ceilometerclient. ' 'Supported values: internalURL, publicURL, adminURL. ' 'The default is internalURL.'), cfg.StrOpt('region_name', deprecated_for_removal=True, deprecated_since="1.13.0", deprecated_reason=""" Ceilometer API is deprecated since Ocata release. Any related configuration options are deprecated too. 
""", help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(ceilometer_client) conf.register_opts(CEILOMETER_CLIENT_OPTS, group=ceilometer_client) def list_opts(): return [(ceilometer_client, CEILOMETER_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/glance_client.py0000664000175000017500000000307113656752270022252 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg glance_client = cfg.OptGroup(name='glance_client', title='Configuration Options for Glance') GLANCE_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2', help='Version of Glance API to use in glanceclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in glanceclient. ' 'Supported values: internalURL, publicURL, adminURL. 
' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(glance_client) conf.register_opts(GLANCE_CLIENT_OPTS, group=glance_client) def list_opts(): return [(glance_client, GLANCE_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/api.py0000664000175000017500000000535013656752270020236 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg api = cfg.OptGroup(name='api', title='Options for the Watcher API service') AUTH_OPTS = [ cfg.BoolOpt('enable_authentication', default=True, help='This option enables or disables user authentication ' 'via keystone. Default value is True.'), ] API_SERVICE_OPTS = [ cfg.PortOpt('port', default=9322, help='The port for the watcher API server'), cfg.HostAddressOpt('host', default='127.0.0.1', help='The listen IP address for the watcher API server' ), cfg.IntOpt('max_limit', default=1000, help='The maximum number of items returned in a single ' 'response from a collection resource'), cfg.IntOpt('workers', min=1, help='Number of workers for Watcher API service. 
' 'The default is equal to the number of CPUs available ' 'if that can be determined, else a default worker ' 'count of 1 is returned.'), cfg.BoolOpt('enable_ssl_api', default=False, help="Enable the integrated stand-alone API to service " "requests via HTTPS instead of HTTP. If there is a " "front-end service performing HTTPS offloading from " "the service, this option should be False; note, you " "will want to change public API endpoint to represent " "SSL termination URL with 'public_endpoint' option."), cfg.BoolOpt('enable_webhooks_auth', default=True, help='This option enables or disables webhook request ' 'authentication via keystone. Default value is True.'), ] def register_opts(conf): conf.register_group(api) conf.register_opts(API_SERVICE_OPTS, group=api) conf.register_opts(AUTH_OPTS) def list_opts(): return [(api, API_SERVICE_OPTS), ('DEFAULT', AUTH_OPTS)] python-watcher-4.0.0/watcher/conf/opts.py0000664000175000017500000000476313656752270020461 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This is the single point of entry to generate the sample configuration file for Watcher. It collects all the necessary info from the other modules in this package. 
It is assumed that: * every other module in this package has a 'list_opts' function which return a dict where * the keys are strings which are the group names * the value of each key is a list of config options for that group * the watcher.conf package doesn't have further packages with config options * this module is only used in the context of sample file generation """ import importlib import os import pkgutil LIST_OPTS_FUNC_NAME = "list_opts" def list_opts(): """Grouped list of all the Watcher-specific configuration options :return: A list of ``(group, [opt_1, opt_2])`` tuple pairs, where ``group`` is either a group name as a string or an OptGroup object. """ opts = list() module_names = _list_module_names() imported_modules = _import_modules(module_names) for mod in imported_modules: opts.extend(mod.list_opts()) return opts def _list_module_names(): module_names = [] package_path = os.path.dirname(os.path.abspath(__file__)) for __, modname, ispkg in pkgutil.iter_modules(path=[package_path]): if modname == "opts" or ispkg: continue else: module_names.append(modname) return module_names def _import_modules(module_names): imported_modules = [] for modname in module_names: mod = importlib.import_module("watcher.conf." + modname) if not hasattr(mod, LIST_OPTS_FUNC_NAME): msg = "The module 'watcher.conf.%s' should have a '%s' "\ "function which returns the config options." % \ (modname, LIST_OPTS_FUNC_NAME) raise Exception(msg) else: imported_modules.append(mod) return imported_modules python-watcher-4.0.0/watcher/conf/clients_auth.py0000664000175000017500000000207513656752270022150 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from keystoneauth1 import loading as ka_loading WATCHER_CLIENTS_AUTH = 'watcher_clients_auth' def register_opts(conf): ka_loading.register_session_conf_options(conf, WATCHER_CLIENTS_AUTH) ka_loading.register_auth_conf_options(conf, WATCHER_CLIENTS_AUTH) def list_opts(): return [(WATCHER_CLIENTS_AUTH, ka_loading.get_session_conf_options() + ka_loading.get_auth_common_conf_options())] python-watcher-4.0.0/watcher/conf/neutron_client.py0000664000175000017500000000311013656752270022505 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg neutron_client = cfg.OptGroup(name='neutron_client', title='Configuration Options for Neutron') NEUTRON_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2.0', help='Version of Neutron API to use in neutronclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in neutronclient. ' 'Supported values: internalURL, publicURL, adminURL. 
' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(neutron_client) conf.register_opts(NEUTRON_CLIENT_OPTS, group=neutron_client) def list_opts(): return [(neutron_client, NEUTRON_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/exception.py0000664000175000017500000000173513656752270021466 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg EXC_LOG_OPTS = [ cfg.BoolOpt('fatal_exception_format_errors', default=False, help='Make exception message format errors fatal.'), ] def register_opts(conf): conf.register_opts(EXC_LOG_OPTS) def list_opts(): return [('DEFAULT', EXC_LOG_OPTS)] python-watcher-4.0.0/watcher/conf/paths.py0000664000175000017500000000335613656752270020610 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg import os PATH_OPTS = [ cfg.StrOpt('pybasedir', default=os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), help='Directory where the watcher python module is installed.'), cfg.StrOpt('bindir', default='$pybasedir/bin', help='Directory where watcher binaries are installed.'), cfg.StrOpt('state_path', default='$pybasedir', help="Top-level directory for maintaining watcher's state."), ] def basedir_def(*args): """Return an uninterpolated path relative to $pybasedir.""" return os.path.join('$pybasedir', *args) def bindir_def(*args): """Return an uninterpolated path relative to $bindir.""" return os.path.join('$bindir', *args) def state_path_def(*args): """Return an uninterpolated path relative to $state_path.""" return os.path.join('$state_path', *args) def register_opts(conf): conf.register_opts(PATH_OPTS) def list_opts(): return [('DEFAULT', PATH_OPTS)] python-watcher-4.0.0/watcher/conf/grafana_translators.py0000664000175000017500000000316313656752270023520 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg grafana_translators = cfg.OptGroup(name='grafana_translators', title='Configuration Options for Grafana ' 'transalators') GRAFANA_TRANSLATOR_INFLUX_OPTS = [ cfg.DictOpt('retention_periods', default={ 'one_week': 604800, 'one_month': 2592000, 'five_years': 31556952 }, help="Keys are the names of retention periods in InfluxDB and " "the values should correspond with the maximum time they " "can retain in seconds. Example: {'one_day': 86400}")] def register_opts(conf): conf.register_group(grafana_translators) conf.register_opts(GRAFANA_TRANSLATOR_INFLUX_OPTS, group=grafana_translators) def list_opts(): return [(grafana_translators, GRAFANA_TRANSLATOR_INFLUX_OPTS)] python-watcher-4.0.0/watcher/conf/nova_client.py0000775000175000017500000000360313656752270021770 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from watcher.common import clients nova_client = cfg.OptGroup(name='nova_client', title='Configuration Options for Nova') NOVA_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2.56', help=""" Version of Nova API to use in novaclient. Minimum required version: %s Certain Watcher features depend on a minimum version of the compute API being available which is enforced with this option. See https://docs.openstack.org/nova/latest/reference/api-microversion-history.html for the compute API microversion history. """ % clients.MIN_NOVA_API_VERSION), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in novaclient. ' 'Supported values: internalURL, publicURL, adminURL. ' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(nova_client) conf.register_opts(NOVA_CLIENT_OPTS, group=nova_client) def list_opts(): return [(nova_client, NOVA_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/gnocchi_client.py0000664000175000017500000000305513656752270022435 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 Servionica # # Authors: Alexander Chadin # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg gnocchi_client = cfg.OptGroup(name='gnocchi_client', title='Configuration Options for Gnocchi') GNOCCHI_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='1', help='Version of Gnocchi API to use in gnocchiclient.'), cfg.StrOpt('endpoint_type', default='public', help='Type of endpoint to use in gnocchi client. ' 'Supported values: internal, public, admin. ' 'The default is public.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.') ] def register_opts(conf): conf.register_group(gnocchi_client) conf.register_opts(GNOCCHI_CLIENT_OPTS, group=gnocchi_client) def list_opts(): return [(gnocchi_client, GNOCCHI_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/db.py0000664000175000017500000000250013656752270020044 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg from oslo_db import options as oslo_db_options from watcher.conf import paths _DEFAULT_SQL_CONNECTION = 'sqlite:///{0}'.format( paths.state_path_def('watcher.sqlite')) database = cfg.OptGroup(name='database', title='Configuration Options for database') SQL_OPTS = [ cfg.StrOpt('mysql_engine', default='InnoDB', help='MySQL engine to use.') ] def register_opts(conf): oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION) conf.register_group(database) conf.register_opts(SQL_OPTS, group=database) def list_opts(): return [(database, SQL_OPTS)] python-watcher-4.0.0/watcher/conf/ironic_client.py0000775000175000017500000000306513656752270022312 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2017 ZTE Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg ironic_client = cfg.OptGroup(name='ironic_client', title='Configuration Options for Ironic') IRONIC_CLIENT_OPTS = [ cfg.StrOpt('api_version', default=1, help='Version of Ironic API to use in ironicclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in ironicclient. ' 'Supported values: internalURL, publicURL, adminURL. 
' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(ironic_client) conf.register_opts(IRONIC_CLIENT_OPTS, group=ironic_client) def list_opts(): return [(ironic_client, IRONIC_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/decision_engine.py0000664000175000017500000000773013656752270022613 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2015 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg watcher_decision_engine = cfg.OptGroup(name='watcher_decision_engine', title='Defines the parameters of ' 'the module decision engine') WATCHER_DECISION_ENGINE_OPTS = [ cfg.StrOpt('conductor_topic', default='watcher.decision.control', help='The topic name used for ' 'control events, this topic ' 'used for RPC calls'), cfg.ListOpt('notification_topics', default=['nova.versioned_notifications', 'watcher.watcher_notifications'], help='The exchange and topic names from which ' 'notification events will be listened to. 
' 'The exchange should be specified to get ' 'an ability to use pools.'), cfg.StrOpt('publisher_id', default='watcher.decision.api', help='The identifier used by the Watcher ' 'module on the message broker'), cfg.IntOpt('max_audit_workers', default=2, required=True, help='The maximum number of threads that can be used to ' 'execute audits in parallel.'), cfg.IntOpt('max_general_workers', default=4, required=True, help='The maximum number of threads that can be used to ' 'execute general tasks in parallel. The number of general ' 'workers will not increase depending on the number of ' 'audit workers!'), cfg.IntOpt('action_plan_expiry', default=24, mutable=True, help='An expiry timespan(hours). Watcher invalidates any ' 'action plan for which its creation time ' '-whose number of hours has been offset by this value-' ' is older that the current time.'), cfg.IntOpt('check_periodic_interval', default=30 * 60, mutable=True, help='Interval (in seconds) for checking action plan expiry.'), cfg.StrOpt('metric_map_path', default='/etc/watcher/metric_map.yaml', help='Path to metric map yaml formatted file. ' ' ' 'The file contains a map per datasource whose keys ' 'are the metric names as recognized by watcher and the ' 'value is the real name of the metric in the datasource. 
' 'For example:: \n\n' ' monasca:\n' ' instance_cpu_usage: VM_CPU\n' ' gnocchi:\n' ' instance_cpu_usage: cpu_vm_util\n\n' 'This file is optional.'), cfg.IntOpt('continuous_audit_interval', default=10, mutable=True, help='Interval (in seconds) for checking newly created ' 'continuous audits.')] def register_opts(conf): conf.register_group(watcher_decision_engine) conf.register_opts(WATCHER_DECISION_ENGINE_OPTS, group=watcher_decision_engine) def list_opts(): return [(watcher_decision_engine, WATCHER_DECISION_ENGINE_OPTS)] python-watcher-4.0.0/watcher/conf/applier.py0000664000175000017500000000350613656752270021122 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from oslo_config import cfg watcher_applier = cfg.OptGroup(name='watcher_applier', title='Options for the Applier messaging ' 'core') APPLIER_MANAGER_OPTS = [ cfg.IntOpt('workers', default=1, min=1, required=True, help='Number of workers for applier, default value is 1.'), cfg.StrOpt('conductor_topic', default='watcher.applier.control', help='The topic name used for ' 'control events, this topic ' 'used for rpc call '), cfg.StrOpt('publisher_id', default='watcher.applier.api', help='The identifier used by watcher ' 'module on the message broker'), cfg.StrOpt('workflow_engine', default='taskflow', required=True, help='Select the engine to use to execute the workflow'), ] def register_opts(conf): conf.register_group(watcher_applier) conf.register_opts(APPLIER_MANAGER_OPTS, group=watcher_applier) def list_opts(): return [(watcher_applier, APPLIER_MANAGER_OPTS)] python-watcher-4.0.0/watcher/conf/service.py0000664000175000017500000000307113656752270021123 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import socket from oslo_config import cfg from watcher._i18n import _ SERVICE_OPTS = [ cfg.IntOpt('periodic_interval', default=60, mutable=True, help=_('Seconds between running periodic tasks.')), cfg.HostAddressOpt('host', default=socket.gethostname(), help=_('Name of this node. This can be an opaque ' 'identifier. It is not necessarily a hostname, ' 'FQDN, or IP address. 
However, the node name ' 'must be valid within an AMQP key.') ), cfg.IntOpt('service_down_time', default=90, help=_('Maximum time since last check-in for up service.')) ] def register_opts(conf): conf.register_opts(SERVICE_OPTS) def list_opts(): return [ ('DEFAULT', SERVICE_OPTS), ] python-watcher-4.0.0/watcher/conf/grafana_client.py0000664000175000017500000001513213656752270022421 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg grafana_client = cfg.OptGroup(name='grafana_client', title='Configuration Options for Grafana', help="See https://docs.openstack.org/watcher/lat" "est/datasources/grafana.html for details " "on how these options are used.") GRAFANA_CLIENT_OPTS = [ # TODO(Dantali0n) each individual metric could have its own token. # A similar structure to the database_map would solve this. cfg.StrOpt('token', default=None, help="Authentication token to gain access"), # TODO(Dantali0n) each individual metric could have its own base url. # A similar structure to the database_map would solve this. cfg.StrOpt('base_url', default=None, help="First part of the url (including https:// or http://) up " "until project id part. 
" "Example: https://secure.org/api/datasource/proxy/"), cfg.DictOpt('project_id_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana project ids. " "Dictionary values should be positive integers. " "Example: 7465"), cfg.DictOpt('database_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana databases. " "Values should be strings. Example: influx_production"), cfg.DictOpt('attribute_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to resource attributes. " "For a complete list of available attributes see " "https://docs.openstack.org/watcher/latest/datasources/gr" "afana.html#attribute " "Values should be strings. Example: hostname"), cfg.DictOpt('translator_map', default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana translators. " "Values should be strings. 
Example: influxdb"), cfg.DictOpt('query_map', # {0} = aggregate # {1} = attribute # {2} = period # {3} = granularity # {4} = { influxdb: retention_period, } default={ 'host_cpu_usage': None, 'host_ram_usage': None, 'host_outlet_temp': None, 'host_inlet_temp': None, 'host_airflow': None, 'host_power': None, 'instance_cpu_usage': None, 'instance_ram_usage': None, 'instance_ram_allocated': None, 'instance_l3_cache_usage': None, 'instance_root_disk_size': None, }, help="Mapping of datasource metrics to grafana queries. " "Values should be strings for which the .format method " "will transform it. The transformation offers five " "parameters to the query labeled {0} to {4}. {0} will be " "replaced with the aggregate, {1} with the resource " "attribute, {2} with the period, {3} with the " "granularity and {4} with translator specifics for " "InfluxDB this will be the retention period. " "These queries will need to be constructed using tools " "such as Postman. Example: SELECT cpu FROM {4}." "cpu_percent WHERE host == '{1}' AND time > now()-{2}s")] def register_opts(conf): conf.register_group(grafana_client) conf.register_opts(GRAFANA_CLIENT_OPTS, group=grafana_client) def list_opts(): return [(grafana_client, GRAFANA_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/cinder_client.py0000664000175000017500000000307113656752270022265 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg cinder_client = cfg.OptGroup(name='cinder_client', title='Configuration Options for Cinder') CINDER_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='3', help='Version of Cinder API to use in cinderclient.'), cfg.StrOpt('endpoint_type', default='publicURL', help='Type of endpoint to use in cinderclient. ' 'Supported values: internalURL, publicURL, adminURL. ' 'The default is publicURL.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(cinder_client) conf.register_opts(CINDER_CLIENT_OPTS, group=cinder_client) def list_opts(): return [(cinder_client, CINDER_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/monasca_client.py0000664000175000017500000000307413656752270022445 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg monasca_client = cfg.OptGroup(name='monasca_client', title='Configuration Options for Monasca') MONASCA_CLIENT_OPTS = [ cfg.StrOpt('api_version', default='2_0', help='Version of Monasca API to use in monascaclient.'), cfg.StrOpt('interface', default='internal', help='Type of interface used for monasca endpoint. ' 'Supported values: internal, public, admin. 
' 'The default is internal.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(monasca_client) conf.register_opts(MONASCA_CLIENT_OPTS, group=monasca_client) def list_opts(): return [(monasca_client, MONASCA_CLIENT_OPTS)] python-watcher-4.0.0/watcher/conf/plugins.py0000664000175000017500000000457213656752270021153 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 b<>com # # Authors: Vincent FRANCOISE # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import prettytable as ptable from watcher.applier.loading import default as applier_loader from watcher.common import utils from watcher.decision_engine.loading import default as decision_engine_loader PLUGIN_LOADERS = ( applier_loader.DefaultActionLoader, decision_engine_loader.DefaultPlannerLoader, decision_engine_loader.DefaultScoringLoader, decision_engine_loader.DefaultScoringContainerLoader, decision_engine_loader.DefaultStrategyLoader, decision_engine_loader.ClusterDataModelCollectorLoader, applier_loader.DefaultWorkFlowEngineLoader, ) def list_opts(): """Load config options for all Watcher plugins""" plugins_opts = [] for plugin_loader_cls in PLUGIN_LOADERS: plugin_loader = plugin_loader_cls() plugins_map = plugin_loader.list_available() for plugin_name, plugin_cls in plugins_map.items(): plugin_opts = plugin_cls.get_config_opts() if plugin_opts: plugins_opts.append( (plugin_loader.get_entry_name(plugin_name), plugin_opts)) return plugins_opts def _show_plugins_ascii_table(rows): headers = ["Namespace", "Plugin name", "Import path"] table = ptable.PrettyTable(field_names=headers) for row in rows: table.add_row(row) return table.get_string() def show_plugins(): rows = [] for plugin_loader_cls in PLUGIN_LOADERS: plugin_loader = plugin_loader_cls() plugins_map = plugin_loader.list_available() rows += [ (plugin_loader.get_entry_name(plugin_name), plugin_name, utils.get_cls_import_path(plugin_cls)) for plugin_name, plugin_cls in plugins_map.items()] return _show_plugins_ascii_table(rows) python-watcher-4.0.0/watcher/conf/planner.py0000664000175000017500000000243113656752270021121 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2016 Intel Corp # # Authors: Prudhvi Rao Shedimbi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg watcher_planner = cfg.OptGroup(name='watcher_planner', title='Defines the parameters of ' 'the planner') default_planner = 'weight' WATCHER_PLANNER_OPTS = { cfg.StrOpt('planner', default=default_planner, required=True, help='The selected planner used to schedule the actions') } def register_opts(conf): conf.register_group(watcher_planner) conf.register_opts(WATCHER_PLANNER_OPTS, group=watcher_planner) def list_opts(): return [(watcher_planner, WATCHER_PLANNER_OPTS)] python-watcher-4.0.0/watcher/conf/placement_client.py0000664000175000017500000000275113656752270022775 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg placement_group = cfg.OptGroup( 'placement_client', title='Placement Service Options', help="Configuration options for connecting to the placement API service") placement_opts = [ cfg.StrOpt('api_version', default='1.29', help='microversion of placement API when using ' 'placement service.'), cfg.StrOpt('interface', default='public', choices=['internal', 'public', 'admin'], help='Type of endpoint when using placement service.'), cfg.StrOpt('region_name', help='Region in Identity service catalog to use for ' 'communication with the OpenStack service.')] def register_opts(conf): conf.register_group(placement_group) conf.register_opts(placement_opts, group=placement_group) def list_opts(): return [(placement_group, placement_opts)] python-watcher-4.0.0/watcher/conf/collector.py0000664000175000017500000000333113656752270021450 0ustar zuulzuul00000000000000# Copyright (c) 2017 NEC Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg collector = cfg.OptGroup(name='collector', title='Defines the parameters of ' 'the module model collectors') COLLECTOR_OPTS = [ cfg.ListOpt('collector_plugins', default=['compute'], help=""" The cluster data model plugin names. 
Supported in-tree collectors include: * ``compute`` - data model collector for nova * ``storage`` - data model collector for cinder * ``baremetal`` - data model collector for ironic Custom data model collector plugins can be defined with the ``watcher_cluster_data_model_collectors`` extension point. """), cfg.IntOpt('api_call_retries', default=10, help="Number of retries before giving up on external service " "calls."), cfg.IntOpt('api_query_timeout', default=1, help="Time before retry after failed call to external service.") ] def register_opts(conf): conf.register_group(collector) conf.register_opts(COLLECTOR_OPTS, group=collector) def list_opts(): return [(collector, COLLECTOR_OPTS)] python-watcher-4.0.0/watcher/conf/datasources.py0000664000175000017500000000426113656752270022002 0ustar zuulzuul00000000000000# -*- encoding: utf-8 -*- # Copyright (c) 2019 European Organization for Nuclear Research (CERN) # # Authors: Corne Lukken # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_config import cfg from watcher.decision_engine.datasources import manager datasources = cfg.OptGroup(name='watcher_datasources', title='Configuration Options for watcher' ' datasources') possible_datasources = list(manager.DataSourceManager.metric_map.keys()) DATASOURCES_OPTS = [ cfg.ListOpt("datasources", help="Datasources to use in order to query the needed metrics." " If one of strategy metric is not available in the first" " datasource, the next datasource will be chosen. 
This is" " the default for all strategies unless a strategy has a" " specific override.", item_type=cfg.types.String(choices=possible_datasources), default=possible_datasources), cfg.IntOpt('query_max_retries', min=1, default=10, mutable=True, help='How many times Watcher is trying to query again', deprecated_group="gnocchi_client"), cfg.IntOpt('query_timeout', min=0, default=1, mutable=True, help='How many seconds Watcher should wait to do query again', deprecated_group="gnocchi_client") ] def register_opts(conf): conf.register_group(datasources) conf.register_opts(DATASOURCES_OPTS, group=datasources) def list_opts(): return [(datasources, DATASOURCES_OPTS)] python-watcher-4.0.0/.stestr.conf0000664000175000017500000000006013656752270016773 0ustar zuulzuul00000000000000[DEFAULT] test_path=./watcher/tests top_dir=./ python-watcher-4.0.0/devstack/0000775000175000017500000000000013656752352016333 5ustar zuulzuul00000000000000python-watcher-4.0.0/devstack/settings0000664000175000017500000000037013656752270020115 0ustar zuulzuul00000000000000# DevStack settings # Make sure rabbit is enabled enable_service rabbit # Make sure mysql is enabled enable_service mysql # Enable Watcher services enable_service watcher-api enable_service watcher-decision-engine enable_service watcher-applier python-watcher-4.0.0/devstack/files/0000775000175000017500000000000013656752352017435 5ustar zuulzuul00000000000000python-watcher-4.0.0/devstack/files/apache-watcher-api.template0000664000175000017500000000274513656752270024624 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. # This is an example Apache2 configuration file for using the # Watcher API through mod_wsgi. This version assumes you are # running devstack to configure the software. Listen %WATCHER_SERVICE_PORT% WSGIDaemonProcess watcher-api user=%USER% processes=%APIWORKERS% threads=1 display-name=%{GROUP} WSGIScriptAlias / %WATCHER_WSGI_DIR%/app.wsgi WSGIApplicationGroup %{GLOBAL} WSGIProcessGroup watcher-api WSGIPassAuthorization On ErrorLogFormat "%M" ErrorLog /var/log/%APACHE_NAME%/watcher-api.log CustomLog /var/log/%APACHE_NAME%/watcher-api-access.log combined WSGIProcessGroup watcher-api WSGIApplicationGroup %{GLOBAL} = 2.4> Require all granted Order allow,deny Allow from all python-watcher-4.0.0/devstack/local.conf.compute0000664000175000017500000000322113656752270021744 0ustar zuulzuul00000000000000# Sample ``local.conf`` for compute node for Watcher development # NOTE: Copy this file to the root DevStack directory for it to work properly. 
[[local|localrc]] ADMIN_PASSWORD=nomoresecrete DATABASE_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=azertytoken HOST_IP=192.168.42.2 # Change this to this compute node's IP address #HOST_IPV6=2001:db8::7 FLAT_INTERFACE=eth0 FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is NETWORK_GATEWAY=10.254.1.1 # Change this for your network MULTI_HOST=1 SERVICE_HOST=192.168.42.1 # Change this to the IP of your controller node MYSQL_HOST=$SERVICE_HOST RABBIT_HOST=$SERVICE_HOST GLANCE_HOSTPORT=${SERVICE_HOST}:9292 DATABASE_TYPE=mysql # Enable services (including neutron) ENABLED_SERVICES=n-cpu,n-api-meta,c-vol,q-agt,placement-client NOVA_VNC_ENABLED=True NOVNCPROXY_URL="http://$SERVICE_HOST:6080/vnc_auto.html" VNCSERVER_LISTEN=0.0.0.0 VNCSERVER_PROXYCLIENT_ADDRESS=$HOST_IP # or HOST_IPV6 NOVA_INSTANCES_PATH=/opt/stack/data/instances # Enable the Ceilometer plugin for the compute agent enable_plugin ceilometer https://opendev.org/openstack/ceilometer disable_service ceilometer-acentral,ceilometer-collector,ceilometer-api LOGFILE=$DEST/logs/stack.sh.log LOGDAYS=2 [[post-config|$NOVA_CONF]] [DEFAULT] compute_monitors=cpu.virt_driver [notifications] # Enable both versioned and unversioned notifications. Watcher only # uses versioned notifications but ceilometer uses unversioned. We # can change this to just versioned when ceilometer handles versioned # notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449 notification_format=both python-watcher-4.0.0/devstack/plugin.sh0000664000175000017500000000250613656752270020167 0ustar zuulzuul00000000000000#!/bin/bash # # plugin.sh - DevStack plugin script to install watcher # Save trace setting _XTRACE_WATCHER_PLUGIN=$(set +o | grep xtrace) set -o xtrace echo_summary "watcher's plugin.sh was called..." . 
$DEST/watcher/devstack/lib/watcher # Show all of defined environment variables (set -o posix; set) if is_service_enabled watcher-api watcher-decision-engine watcher-applier; then if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then echo_summary "Before Installing watcher" elif [[ "$1" == "stack" && "$2" == "install" ]]; then echo_summary "Installing watcher" install_watcher LIBS_FROM_GIT="${LIBS_FROM_GIT},python-watcherclient" install_watcherclient cleanup_watcher elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then echo_summary "Configuring watcher" configure_watcher if is_service_enabled key; then create_watcher_accounts fi elif [[ "$1" == "stack" && "$2" == "extra" ]]; then # Initialize watcher init_watcher # Start the watcher components echo_summary "Starting watcher" start_watcher fi if [[ "$1" == "unstack" ]]; then stop_watcher fi if [[ "$1" == "clean" ]]; then cleanup_watcher fi fi # Restore xtrace $_XTRACE_WATCHER_PLUGIN python-watcher-4.0.0/devstack/override-defaults0000664000175000017500000000063213656752270021702 0ustar zuulzuul00000000000000# Plug-in overrides # https://docs.openstack.org/devstack/latest/plugins.html#plugin-interface # Enable both versioned and unversioned notifications. Watcher only # uses versioned notifications but ceilometer uses unversioned. 
We # can change this to just versioned when ceilometer handles # versioned notifications from nova: # https://bugs.launchpad.net/ceilometer/+bug/1665449 NOVA_NOTIFICATION_FORMAT=bothpython-watcher-4.0.0/devstack/lib/0000775000175000017500000000000013656752352017101 5ustar zuulzuul00000000000000python-watcher-4.0.0/devstack/lib/watcher0000664000175000017500000002677213656752270020476 0ustar zuulzuul00000000000000#!/bin/bash # # lib/watcher # Functions to control the configuration and operation of the watcher services # Dependencies: # # - ``functions`` file # - ``SERVICE_{TENANT_NAME|PASSWORD}`` must be defined # - ``DEST``, ``DATA_DIR``, ``STACK_USER`` must be defined # ``stack.sh`` calls the entry points in this order: # # - is_watcher_enabled # - install_watcher # - configure_watcher # - create_watcher_conf # - init_watcher # - start_watcher # - stop_watcher # - cleanup_watcher # Save trace setting _XTRACE_WATCHER=$(set +o | grep xtrace) set +o xtrace # Defaults # -------- # Set up default directories WATCHER_REPO=${WATCHER_REPO:-${GIT_BASE}/openstack/watcher.git} WATCHER_BRANCH=${WATCHER_BRANCH:-master} WATCHER_DIR=$DEST/watcher GITREPO["python-watcherclient"]=${WATCHERCLIENT_REPO:-${GIT_BASE}/openstack/python-watcherclient.git} GITBRANCH["python-watcherclient"]=${WATCHERCLIENT_BRANCH:-master} GITDIR["python-watcherclient"]=$DEST/python-watcherclient WATCHER_STATE_PATH=${WATCHER_STATE_PATH:=$DATA_DIR/watcher} WATCHER_AUTH_CACHE_DIR=${WATCHER_AUTH_CACHE_DIR:-/var/cache/watcher} WATCHER_CONF_DIR=/etc/watcher WATCHER_CONF=$WATCHER_CONF_DIR/watcher.conf WATCHER_POLICY_YAML=$WATCHER_CONF_DIR/policy.yaml.sample WATCHER_DEVSTACK_DIR=$WATCHER_DIR/devstack WATCHER_DEVSTACK_FILES_DIR=$WATCHER_DEVSTACK_DIR/files if is_ssl_enabled_service "watcher" || is_service_enabled tls-proxy; then WATCHER_SERVICE_PROTOCOL="https" fi # Support entry points installation of console scripts if [[ -d $WATCHER_DIR/bin ]]; then WATCHER_BIN_DIR=$WATCHER_DIR/bin else 
WATCHER_BIN_DIR=$(get_python_exec_prefix) fi # There are 2 modes, which is "uwsgi" which runs with an apache # proxy uwsgi in front of it, or "mod_wsgi", which runs in # apache. mod_wsgi is deprecated, don't use it. WATCHER_USE_WSGI_MODE=${WATCHER_USE_WSGI_MODE:-$WSGI_MODE} WATCHER_UWSGI=$WATCHER_BIN_DIR/watcher-api-wsgi WATCHER_UWSGI_CONF=$WATCHER_CONF_DIR/watcher-uwsgi.ini if is_suse; then WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/srv/www/htdocs/watcher} else WATCHER_WSGI_DIR=${WATCHER_WSGI_DIR:-/var/www/watcher} fi # Public facing bits WATCHER_SERVICE_HOST=${WATCHER_SERVICE_HOST:-$SERVICE_HOST} WATCHER_SERVICE_PORT=${WATCHER_SERVICE_PORT:-9322} WATCHER_SERVICE_PORT_INT=${WATCHER_SERVICE_PORT_INT:-19322} WATCHER_SERVICE_PROTOCOL=${WATCHER_SERVICE_PROTOCOL:-$SERVICE_PROTOCOL} if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then WATCHER_API_URL="$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST/infra-optim" else WATCHER_API_URL="$WATCHER_SERVICE_PROTOCOL://$WATCHER_SERVICE_HOST:$WATCHER_SERVICE_PORT" fi # Entry Points # ------------ # Test if any watcher services are enabled # is_watcher_enabled function is_watcher_enabled { [[ ,${ENABLED_SERVICES} =~ ,"watcher-" ]] && return 0 return 1 } #_cleanup_watcher_apache_wsgi - Remove wsgi files, #disable and remove apache vhost file function _cleanup_watcher_apache_wsgi { sudo rm -rf $WATCHER_WSGI_DIR sudo rm -f $(apache_site_config_for watcher-api) restart_apache_server } # cleanup_watcher() - Remove residual data files, anything left over from previous # runs that a clean run would need to clean up function cleanup_watcher { sudo rm -rf $WATCHER_STATE_PATH $WATCHER_AUTH_CACHE_DIR if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then remove_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" else _cleanup_watcher_apache_wsgi fi } # configure_watcher() - Set config files, create data dirs, etc function configure_watcher { # Put config files in ``/etc/watcher`` for everyone to find sudo install -d -o $STACK_USER $WATCHER_CONF_DIR 
local project=watcher local project_uc project_uc=$(echo watcher|tr a-z A-Z) local conf_dir="${project_uc}_CONF_DIR" # eval conf dir to get the variable conf_dir="${!conf_dir}" local project_dir="${project_uc}_DIR" # eval project dir to get the variable project_dir="${!project_dir}" local sample_conf_dir="${project_dir}/etc/${project}" local sample_policy_dir="${project_dir}/etc/${project}/policy.d" local sample_policy_generator="${project_dir}/etc/${project}/oslo-policy-generator/watcher-policy-generator.conf" # first generate policy.yaml oslopolicy-sample-generator --config-file $sample_policy_generator # then optionally copy over policy.d if [[ -d $sample_policy_dir ]]; then cp -r $sample_policy_dir $conf_dir/policy.d fi # Rebuild the config file from scratch create_watcher_conf } # create_watcher_accounts() - Set up common required watcher accounts # # Project User Roles # ------------------------------------------------------------------ # SERVICE_TENANT_NAME watcher service function create_watcher_accounts { create_service_user "watcher" "admin" local watcher_service=$(get_or_create_service "watcher" \ "infra-optim" "Watcher Infrastructure Optimization Service") get_or_create_endpoint $watcher_service \ "$REGION_NAME" \ "$WATCHER_API_URL"\ "$WATCHER_API_URL"\ "$WATCHER_API_URL" } # _config_watcher_apache_wsgi() - Set WSGI config files of watcher function _config_watcher_apache_wsgi { local watcher_apache_conf if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then local service_port=$WATCHER_SERVICE_PORT if is_service_enabled tls-proxy; then service_port=$WATCHER_SERVICE_PORT_INT service_protocol="http" fi sudo mkdir -p $WATCHER_WSGI_DIR sudo cp $WATCHER_DIR/watcher/api/app.wsgi $WATCHER_WSGI_DIR/app.wsgi watcher_apache_conf=$(apache_site_config_for watcher-api) sudo cp $WATCHER_DEVSTACK_FILES_DIR/apache-watcher-api.template $watcher_apache_conf sudo sed -e " s|%WATCHER_SERVICE_PORT%|$service_port|g; s|%WATCHER_WSGI_DIR%|$WATCHER_WSGI_DIR|g; 
s|%USER%|$STACK_USER|g; s|%APIWORKERS%|$API_WORKERS|g; s|%APACHE_NAME%|$APACHE_NAME|g; " -i $watcher_apache_conf enable_apache_site watcher-api fi } # create_watcher_conf() - Create a new watcher.conf file function create_watcher_conf { # (Re)create ``watcher.conf`` rm -f $WATCHER_CONF iniset $WATCHER_CONF DEFAULT debug "$ENABLE_DEBUG_LOG_LEVEL" iniset $WATCHER_CONF DEFAULT control_exchange watcher iniset_rpc_backend watcher $WATCHER_CONF iniset $WATCHER_CONF database connection $(database_connection_url watcher) iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)" if is_service_enabled tls-proxy; then iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)" iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT_INT" # iniset $WATCHER_CONF api enable_ssl_api "True" else if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then iniset $WATCHER_CONF api host "$(ipv6_unquote $WATCHER_SERVICE_HOST)" iniset $WATCHER_CONF api port "$WATCHER_SERVICE_PORT" fi fi iniset $WATCHER_CONF oslo_policy policy_file $WATCHER_POLICY_YAML iniset $WATCHER_CONF oslo_messaging_notifications driver "messagingv2" configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR configure_auth_token_middleware $WATCHER_CONF watcher $WATCHER_AUTH_CACHE_DIR "watcher_clients_auth" if is_fedora || is_suse; then # watcher defaults to /usr/local/bin, but fedora and suse pip like to # install things in /usr/bin iniset $WATCHER_CONF DEFAULT bindir "/usr/bin" fi if [ -n "$WATCHER_STATE_PATH" ]; then iniset $WATCHER_CONF DEFAULT state_path "$WATCHER_STATE_PATH" iniset $WATCHER_CONF oslo_concurrency lock_path "$WATCHER_STATE_PATH" fi if [ "$SYSLOG" != "False" ]; then iniset $WATCHER_CONF DEFAULT use_syslog "True" fi # Format logging setup_logging $WATCHER_CONF #config apache files if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then write_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" "/infra-optim" else _config_watcher_apache_wsgi fi # Register SSL certificates 
if provided if is_ssl_enabled_service watcher; then ensure_certificates WATCHER iniset $WATCHER_CONF DEFAULT ssl_cert_file "$WATCHER_SSL_CERT" iniset $WATCHER_CONF DEFAULT ssl_key_file "$WATCHER_SSL_KEY" iniset $WATCHER_CONF DEFAULT enabled_ssl_apis "$WATCHER_ENABLED_APIS" fi } # create_watcher_cache_dir() - Part of the init_watcher() process function create_watcher_cache_dir { # Create cache dir sudo install -d -o $STACK_USER $WATCHER_AUTH_CACHE_DIR rm -rf $WATCHER_AUTH_CACHE_DIR/* } # init_watcher() - Initialize databases, etc. function init_watcher { # clean up from previous (possibly aborted) runs # create required data files if is_service_enabled $DATABASE_BACKENDS && is_service_enabled watcher-api; then # (Re)create watcher database recreate_database watcher # Create watcher schema $WATCHER_BIN_DIR/watcher-db-manage --config-file $WATCHER_CONF upgrade fi create_watcher_cache_dir } # install_watcherclient() - Collect source and prepare function install_watcherclient { if use_library_from_git "python-watcherclient"; then git_clone_by_name "python-watcherclient" setup_dev_lib "python-watcherclient" fi } # install_watcher() - Collect source and prepare function install_watcher { git_clone $WATCHER_REPO $WATCHER_DIR $WATCHER_BRANCH setup_develop $WATCHER_DIR if [[ "$WATCHER_USE_WSGI_MODE" == "mod_wsgi" ]]; then install_apache_wsgi fi } # start_watcher_api() - Start the API process ahead of other things function start_watcher_api { # Get right service port for testing local service_port=$WATCHER_SERVICE_PORT local service_protocol=$WATCHER_SERVICE_PROTOCOL local watcher_url if is_service_enabled tls-proxy; then service_port=$WATCHER_SERVICE_PORT_INT service_protocol="http" fi if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then run_process "watcher-api" "$WATCHER_BIN_DIR/uwsgi --procname-prefix watcher-api --ini $WATCHER_UWSGI_CONF" watcher_url=$service_protocol://$SERVICE_HOST/infra-optim else watcher_url=$service_protocol://$SERVICE_HOST:$service_port 
enable_apache_site watcher-api restart_apache_server # Start proxies if enabled if is_service_enabled tls-proxy; then start_tls_proxy watcher '*' $WATCHER_SERVICE_PORT $WATCHER_SERVICE_HOST $WATCHER_SERVICE_PORT_INT fi fi echo "Waiting for watcher-api to start..." if ! wait_for_service $SERVICE_TIMEOUT $watcher_url; then die $LINENO "watcher-api did not start" fi } # start_watcher() - Start running processes, including screen function start_watcher { # ``run_process`` checks ``is_service_enabled``, it is not needed here start_watcher_api run_process watcher-decision-engine "$WATCHER_BIN_DIR/watcher-decision-engine --config-file $WATCHER_CONF" run_process watcher-applier "$WATCHER_BIN_DIR/watcher-applier --config-file $WATCHER_CONF" } # stop_watcher() - Stop running processes (non-screen) function stop_watcher { if [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]]; then stop_process watcher-api else disable_apache_site watcher-api restart_apache_server fi for serv in watcher-decision-engine watcher-applier; do stop_process $serv done } # Restore xtrace $_XTRACE_WATCHER # Tell emacs to use shell-script-mode ## Local variables: ## mode: shell-script ## End: python-watcher-4.0.0/devstack/upgrade/0000775000175000017500000000000013656752352017762 5ustar zuulzuul00000000000000python-watcher-4.0.0/devstack/upgrade/settings0000664000175000017500000000146213656752270021547 0ustar zuulzuul00000000000000register_project_for_upgrade watcher register_db_to_save watcher devstack_localrc base enable_plugin watcher https://opendev.org/openstack/watcher $BASE_DEVSTACK_BRANCH devstack_localrc target enable_plugin watcher https://opendev.org/openstack/watcher devstack_localrc base enable_service watcher-api watcher-decision-engine watcher-applier devstack_localrc target enable_service watcher-api watcher-decision-engine watcher-applier BASE_RUN_SMOKE=False TARGET_RUN_SMOKE=False # Enable both versioned and unversioned notifications. 
Watcher only # uses versioned notifications but ceilometer uses unversioned. We # can change this to just versioned when ceilometer handles # versioned notifications from nova: # https://bugs.launchpad.net/ceilometer/+bug/1665449 devstack_localrc base NOVA_NOTIFICATION_FORMAT=both python-watcher-4.0.0/devstack/upgrade/shutdown.sh0000775000175000017500000000115413656752270022174 0ustar zuulzuul00000000000000#!/bin/bash set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions # We need base DevStack functions for this source $BASE_DEVSTACK_DIR/functions source $BASE_DEVSTACK_DIR/stackrc # needed for status directory source $BASE_DEVSTACK_DIR/lib/tls source $BASE_DEVSTACK_DIR/lib/apache WATCHER_DEVSTACK_DIR=$(dirname $(dirname $0)) source $WATCHER_DEVSTACK_DIR/settings source $WATCHER_DEVSTACK_DIR/plugin.sh source $WATCHER_DEVSTACK_DIR/lib/watcher set -o xtrace stop_watcher # sanity check that service is actually down ensure_services_stopped watcher-api watcher-decision-engine watcher-applier python-watcher-4.0.0/devstack/upgrade/resources.sh0000775000175000017500000000636413656752270022343 0ustar zuulzuul00000000000000#!/bin/bash set -o errexit source $GRENADE_DIR/grenaderc source $GRENADE_DIR/functions source $TOP_DIR/openrc admin demo set -o xtrace function _wait_for_status { while : do state=$("${@:2}" -f value -c State) [[ $state == "SUCCEEDED" ]] && break if [ $state == "ERROR" ]; then die $LINENO "ERROR creating audit" fi sleep 10 done } function create_audit_template { at_id=$(openstack optimize audittemplate create d1 dummy -s dummy -f value -c UUID) resource_save watcher at_id $at_id } function create_audit { audit_id=$(openstack optimize audit create -s dummy -g dummy -f value -c UUID) resource_save watcher audit_id $audit_id } function create_audit_with_autotrigger { audit_at_id=$(openstack optimize audit create -s dummy -g dummy -f value -c UUID --auto-trigger) resource_save watcher audit_at_id $audit_at_id } function 
verify_audit_template { local at_id=$(resource_get watcher at_id) openstack optimize audittemplate show $at_id } function verify_audit_with_autotrigger { local audit_at_id=$(resource_get watcher audit_at_id) _wait_for_status "SUCCEEDED" openstack optimize audit show $audit_at_id local actionplan_at_id=$(openstack optimize actionplan list --audit $audit_at_id -c UUID -f value) resource_save watcher actionplan_at $actionplan_at_id actionplan_at_state=$(openstack optimize actionplan show $actionplan_at_id -c State -f value) if [ $actionplan_at_state != "SUCCEEDED" ]; then die $LINENO "ERROR executing actionplan" fi } function verify_audit { local audit_id=$(resource_get watcher audit_id) _wait_for_status "SUCCEEDED" openstack optimize audit show $audit_id local actionplan_id=$(openstack optimize actionplan list --audit $audit_id -c UUID -f value) resource_save watcher actionplan $actionplan_id actionplan_state=$(openstack optimize actionplan show $actionplan_id -c State -f value) if [ $actionplan_state != "RECOMMENDED" ]; then die $LINENO "ERROR creating actionplan" fi } function verify_noapi { # currently no good way : } function delete_audit { local audit_id=$(resource_get watcher audit_id) local actionplan_id=$(resource_get watcher actionplan) watcher actionplan delete $actionplan_id openstack optimize audit delete $audit_id } function delete_audit_with_autotrigger { local audit_at_id=$(resource_get watcher audit_at_id) local actionplan_id=$(resource_get watcher actionplan_at) watcher actionplan delete $actionplan_id openstack optimize audit delete $audit_at_id } function delete_audit_template { local at_id=$(resource_get watcher at_id) openstack optimize audittemplate delete $at_id } function create { create_audit_template create_audit create_audit_with_autotrigger } function verify { verify_audit_template verify_audit verify_audit_with_autotrigger } function destroy { delete_audit_template delete_audit delete_audit_with_autotrigger } # Dispatcher case $1 in 
"create") create ;; "verify_noapi") verify_noapi ;; "verify") verify ;; "destroy") destroy ;; "force_destroy") set +o errexit destroy ;; esac python-watcher-4.0.0/devstack/upgrade/upgrade.sh0000775000175000017500000000466513656752270021762 0ustar zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-watcher`` echo "*********************************************************************" echo "Begin $0" echo "*********************************************************************" # Clean up any resources that may be in use cleanup() { set +o errexit echo "********************************************************************" echo "ERROR: Abort $0" echo "********************************************************************" # Kill ourselves to signal any calling process trap 2; kill -2 $$ } trap cleanup SIGHUP SIGINT SIGTERM # Keep track of the grenade directory RUN_DIR=$(cd $(dirname "$0") && pwd) # Source params source $GRENADE_DIR/grenaderc # Import common functions source $GRENADE_DIR/functions # This script exits on an error so that errors don't compound and you see # only the first error that occurred. set -o errexit # Upgrade watcher # ============ # Get functions from current DevStack source $TARGET_DEVSTACK_DIR/stackrc source $TARGET_DEVSTACK_DIR/lib/apache source $TARGET_DEVSTACK_DIR/lib/tls source $TARGET_DEVSTACK_DIR/lib/keystone source $TOP_DIR/openrc admin admin source $(dirname $(dirname $BASH_SOURCE))/settings source $(dirname $(dirname $BASH_SOURCE))/plugin.sh # Print the commands being run so that we can see the command that triggers # an error. It is also useful for following allowing as the install occurs. set -o xtrace # Save current config files for posterity [[ -d $SAVE_DIR/etc.watcher ]] || cp -pr $WATCHER_CONF_DIR $SAVE_DIR/etc.watcher # Install the target watcher install_watcher # calls upgrade-watcher for specific release upgrade_project watcher $RUN_DIR $BASE_DEVSTACK_BRANCH $TARGET_DEVSTACK_BRANCH if [[ ! 
-f "$WATCHER_UWSGI_CONF" ]] && [[ "$WATCHER_USE_WSGI_MODE" == "uwsgi" ]] then write_uwsgi_config "$WATCHER_UWSGI_CONF" "$WATCHER_UWSGI" "/infra-optim" endpoints=$(openstack endpoint list --service watcher -c ID -f value) for id in $endpoints; do openstack endpoint delete $id done create_watcher_accounts fi # Migrate the database watcher-db-manage upgrade || die $LINO "DB migration error" start_watcher # Don't succeed unless the services come up ensure_services_started watcher-api watcher-decision-engine watcher-applier set +o xtrace echo "*********************************************************************" echo "SUCCESS: End $0" echo "*********************************************************************" python-watcher-4.0.0/devstack/upgrade/from_rocky/0000775000175000017500000000000013656752352022134 5ustar zuulzuul00000000000000python-watcher-4.0.0/devstack/upgrade/from_rocky/upgrade-watcher0000664000175000017500000000064213656752270025142 0ustar zuulzuul00000000000000#!/usr/bin/env bash # ``upgrade-watcher`` function configure_watcher_upgrade { XTRACE=$(set +o | grep xtrace) set -o xtrace # Copy release-specific files sudo cp $TARGET_RELEASE_DIR/watcher/etc/watcher/watcher.conf $WATCHER_CONF_DIR/watcher.conf sudo cp $TARGET_RELEASE_DIR/watcher/etc/watcher/policy.yaml.sample $WATCHER_CONF_DIR/policy.yaml.sample # reset to previous state $XTRACE } python-watcher-4.0.0/devstack/local.conf.controller0000664000175000017500000000334313656752270022460 0ustar zuulzuul00000000000000# Sample ``local.conf`` for controller node for Watcher development # NOTE: Copy this file to the root DevStack directory for it to work properly. 
[[local|localrc]] ADMIN_PASSWORD=nomoresecrete DATABASE_PASSWORD=stackdb RABBIT_PASSWORD=stackqueue SERVICE_PASSWORD=$ADMIN_PASSWORD SERVICE_TOKEN=azertytoken HOST_IP=192.168.42.1 # Change this to your controller node IP address #HOST_IPV6=2001:db8::7 FLAT_INTERFACE=eth0 FIXED_RANGE=10.254.1.0/24 # Change this to whatever your network is NETWORK_GATEWAY=10.254.1.1 # Change this for your network MULTI_HOST=1 #Set this to FALSE if do not want to run watcher-api behind mod-wsgi #WATCHER_USE_MOD_WSGI=TRUE # This is the controller node, so disable nova-compute disable_service n-cpu # Enable the Watcher Dashboard plugin enable_plugin watcher-dashboard https://opendev.org/openstack/watcher-dashboard # Enable the Watcher plugin enable_plugin watcher https://opendev.org/openstack/watcher # Enable the Ceilometer plugin enable_plugin ceilometer https://opendev.org/openstack/ceilometer # This is the controller node, so disable the ceilometer compute agent disable_service ceilometer-acompute # Enable the ceilometer api explicitly(bug:1667678) enable_service ceilometer-api # Enable the Gnocchi plugin enable_plugin gnocchi https://github.com/gnocchixyz/gnocchi LOGFILE=$DEST/logs/stack.sh.log LOGDAYS=2 [[post-config|$NOVA_CONF]] [DEFAULT] compute_monitors=cpu.virt_driver [notifications] # Enable both versioned and unversioned notifications. Watcher only # uses versioned notifications but ceilometer uses unversioned. We # can change this to just versioned when ceilometer handles versioned # notifications from nova: https://bugs.launchpad.net/ceilometer/+bug/1665449 notification_format=both python-watcher-4.0.0/LICENSE0000664000175000017500000002363713656752270015546 0ustar zuulzuul00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. 
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. 
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the 
Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. 
Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. 
python-watcher-4.0.0/playbooks/0000775000175000017500000000000013656752352016532 5ustar zuulzuul00000000000000python-watcher-4.0.0/playbooks/legacy/0000775000175000017500000000000013656752352017776 5ustar zuulzuul00000000000000python-watcher-4.0.0/playbooks/legacy/grenade-devstack-watcher/0000775000175000017500000000000013656752352024640 5ustar zuulzuul00000000000000python-watcher-4.0.0/playbooks/legacy/grenade-devstack-watcher/run.yaml0000664000175000017500000000434313656752270026333 0ustar zuulzuul00000000000000- hosts: all name: legacy-grenade-dsvm-watcher tasks: - name: Ensure legacy workspace directory file: path: '{{ ansible_user_dir }}/workspace' state: directory - shell: cmd: | set -e set -x cat > clonemap.yaml << EOF clonemap: - name: openstack/devstack-gate dest: devstack-gate EOF /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \ https://opendev.org \ openstack/devstack-gate executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' - shell: cmd: | set -e set -x export PYTHONUNBUFFERED=true export PROJECTS="openstack/grenade $PROJECTS" export PROJECTS="openstack/watcher $PROJECTS" export PROJECTS="openstack/watcher-tempest-plugin $PROJECTS" export PROJECTS="openstack/python-watcherclient $PROJECTS" export DEVSTACK_PROJECT_FROM_GIT="python-watcherclient $DEVSTACK_PROJECT_FROM_GIT" export GRENADE_PLUGINRC="enable_grenade_plugin watcher https://opendev.org/openstack/watcher" export DEVSTACK_LOCAL_CONFIG+=$'\n'"export TEMPEST_PLUGINS='/opt/stack/new/watcher-tempest-plugin'" export DEVSTACK_GATE_TEMPEST_NOTESTS=1 export DEVSTACK_GATE_GRENADE=pullup export DEVSTACK_GATE_USE_PYTHON3=True export BRANCH_OVERRIDE=default if [ "$BRANCH_OVERRIDE" != "default" ] ; then export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE fi # Add configuration values for enabling security features in local.conf function pre_test_hook { if [ -f /opt/stack/old/watcher-tempest-plugin/tools/pre_test_hook.sh ] ; then . 
/opt/stack/old/watcher-tempest-plugin/tools/pre_test_hook.sh fi } export -f pre_test_hook cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh executable: /bin/bash chdir: '{{ ansible_user_dir }}/workspace' environment: '{{ zuul | zuul_legacy_vars }}' python-watcher-4.0.0/playbooks/legacy/grenade-devstack-watcher/post.yaml0000664000175000017500000000063313656752270026512 0ustar zuulzuul00000000000000- hosts: primary tasks: - name: Copy files from {{ ansible_user_dir }}/workspace/ on node synchronize: src: '{{ ansible_user_dir }}/workspace/' dest: '{{ zuul.executor.log_root }}' mode: pull copy_links: true verify_host: true rsync_opts: - --include=/logs/** - --include=*/ - --exclude=* - --prune-empty-dirs python-watcher-4.0.0/.coveragerc0000664000175000017500000000026513656752270016652 0ustar zuulzuul00000000000000[run] branch = True source = watcher omit = watcher/tests/* watcher/hacking/* [report] ignore_errors = True exclude_lines = @abc.abstract raise NotImplementedError