===== swift-2.17.0/babel.cfg =====

[python: **.py]

===== swift-2.17.0/doc/saio/swift/swift.conf =====

[swift-hash]
# random unique strings that can never change (DO NOT LOSE)
# Use only printable chars (python -c "import string; print(string.printable)")
swift_hash_path_prefix = changeme
swift_hash_path_suffix = changeme

[storage-policy:0]
name = gold
policy_type = replication
default = yes

[storage-policy:1]
name = silver
policy_type = replication

[storage-policy:2]
name = ec42
policy_type = erasure_coding
ec_type = liberasurecode_rs_vand
ec_num_data_fragments = 4
ec_num_parity_fragments = 2
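One illustrative way to generate the two swift-hash secrets before first use
(any random printable strings work, but they must never change once data has
been stored in the cluster):

  $ python -c "import uuid; print(uuid.uuid4().hex)"   # value for swift_hash_path_prefix
  $ python -c "import uuid; print(uuid.uuid4().hex)"   # value for swift_hash_path_suffix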
===== swift-2.17.0/doc/saio/swift/container-sync-realms.conf =====

[saio]
key = changeme
key2 = changeme
cluster_saio_endpoint = http://127.0.0.1:8080/v1/

===== swift-2.17.0/doc/saio/swift/container-server/3.conf =====

[DEFAULT]
devices = /srv/3/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.3
bind_port = 6031
workers = 1
user =
log_facility = LOG_LOCAL4
recon_cache_path = /var/cache/swift3
eventlet_debug = true

[pipeline:main]
pipeline = recon container-server

[app:container-server]
use = egg:swift#container

[filter:recon]
use = egg:swift#recon

[container-replicator]
rsync_module = {replication_ip}::container{replication_port}

[container-updater]

[container-auditor]

[container-sync]

===== swift-2.17.0/doc/saio/swift/container-server/2.conf =====

[DEFAULT]
devices = /srv/2/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.2
bind_port = 6021
workers = 1
user =
log_facility = LOG_LOCAL3
recon_cache_path = /var/cache/swift2
eventlet_debug = true

[pipeline:main]
pipeline = recon container-server

[app:container-server]
use = egg:swift#container

[filter:recon]
use = egg:swift#recon

[container-replicator]
rsync_module = {replication_ip}::container{replication_port}

[container-updater]

[container-auditor]

[container-sync]

===== swift-2.17.0/doc/saio/swift/container-server/4.conf =====

[DEFAULT]
devices = /srv/4/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.4
bind_port = 6041
workers = 1
user =
log_facility = LOG_LOCAL5
recon_cache_path = /var/cache/swift4
eventlet_debug = true

[pipeline:main]
pipeline = recon container-server

[app:container-server]
use = egg:swift#container

[filter:recon]
use = egg:swift#recon

[container-replicator]
rsync_module = {replication_ip}::container{replication_port}

[container-updater]

[container-auditor]

[container-sync]

===== swift-2.17.0/doc/saio/swift/container-server/1.conf =====

[DEFAULT]
devices = /srv/1/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.1
bind_port = 6011
workers = 1
user =
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
eventlet_debug = true

[pipeline:main]
pipeline = recon container-server

[app:container-server]
use = egg:swift#container

[filter:recon]
use = egg:swift#recon

[container-replicator]
rsync_module = {replication_ip}::container{replication_port}

[container-updater]

[container-auditor]

[container-sync]
===== swift-2.17.0/doc/saio/swift/object-server/3.conf =====

[DEFAULT]
devices = /srv/3/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.3
bind_port = 6030
workers = 1
user =
log_facility = LOG_LOCAL4
recon_cache_path = /var/cache/swift3
eventlet_debug = true

[pipeline:main]
pipeline = recon object-server

[app:object-server]
use = egg:swift#object

[filter:recon]
use = egg:swift#recon

[object-replicator]
rsync_module = {replication_ip}::object{replication_port}

[object-reconstructor]

[object-updater]

[object-auditor]

===== swift-2.17.0/doc/saio/swift/object-server/2.conf =====

[DEFAULT]
devices = /srv/2/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.2
bind_port = 6020
workers = 1
user =
log_facility = LOG_LOCAL3
recon_cache_path = /var/cache/swift2
eventlet_debug = true

[pipeline:main]
pipeline = recon object-server

[app:object-server]
use = egg:swift#object

[filter:recon]
use = egg:swift#recon

[object-replicator]
rsync_module = {replication_ip}::object{replication_port}

[object-reconstructor]

[object-updater]

[object-auditor]

===== swift-2.17.0/doc/saio/swift/object-server/4.conf =====

[DEFAULT]
devices = /srv/4/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.4
bind_port = 6040
workers = 1
user =
log_facility = LOG_LOCAL5
recon_cache_path = /var/cache/swift4
eventlet_debug = true

[pipeline:main]
pipeline = recon object-server

[app:object-server]
use = egg:swift#object

[filter:recon]
use = egg:swift#recon

[object-replicator]
rsync_module = {replication_ip}::object{replication_port}

[object-reconstructor]

[object-updater]

[object-auditor]

===== swift-2.17.0/doc/saio/swift/object-server/1.conf =====

[DEFAULT]
devices = /srv/1/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.1
bind_port = 6010
workers = 1
user =
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
eventlet_debug = true

[pipeline:main]
pipeline = recon object-server

[app:object-server]
use = egg:swift#object

[filter:recon]
use = egg:swift#recon

[object-replicator]
rsync_module = {replication_ip}::object{replication_port}

[object-reconstructor]

[object-updater]

[object-auditor]
===== swift-2.17.0/doc/saio/swift/container-reconciler.conf =====

[DEFAULT]
# swift_dir = /etc/swift
user =
# You can specify default log routing here if you want:
# log_name = swift
# log_facility = LOG_LOCAL0
# log_level = INFO
# log_address = /dev/log
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =

[container-reconciler]
# reclaim_age = 604800
# interval = 300
# request_tries = 3

[pipeline:main]
pipeline = catch_errors proxy-logging cache proxy-server

[app:proxy-server]
use = egg:swift#proxy
# See proxy-server.conf-sample for options

[filter:cache]
use = egg:swift#memcache
# See proxy-server.conf-sample for options

[filter:proxy-logging]
use = egg:swift#proxy_logging

[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options

===== swift-2.17.0/doc/saio/swift/object-expirer.conf =====

[DEFAULT]
# swift_dir = /etc/swift
user =
# You can specify default log routing here if you want:
log_name = object-expirer
log_facility = LOG_LOCAL6
log_level = INFO
#log_address = /dev/log
#
# comma separated list of functions to call to setup custom log handlers.
# functions get passed: conf, name, log_to_console, log_route, fmt, logger,
# adapted_logger
# log_custom_handlers =
#
# If set, log_udp_host will override log_address
# log_udp_host =
# log_udp_port = 514
#
# You can enable StatsD logging here:
# log_statsd_host =
# log_statsd_port = 8125
# log_statsd_default_sample_rate = 1.0
# log_statsd_sample_rate_factor = 1.0
# log_statsd_metric_prefix =

[object-expirer]
interval = 300
# auto_create_account_prefix = .
# report_interval = 300
# concurrency is the level of concurrency to use to do the work, this value
# must be set to at least 1
# concurrency = 1
# processes is how many parts to divide the work into, one part per process
# that will be doing the work
# processes set 0 means that a single process will be doing all the work
# processes can also be specified on the command line and will override the
# config value
# processes = 0
# process is which of the parts a particular process will work on
# process can also be specified on the command line and will override the config
# value
# process is "zero based", if you want to use 3 processes, you should run
# processes with process set to 0, 1, and 2
# process = 0

[pipeline:main]
pipeline = catch_errors cache proxy-server

[app:proxy-server]
use = egg:swift#proxy
# See proxy-server.conf-sample for options

[filter:cache]
use = egg:swift#memcache
# See proxy-server.conf-sample for options

[filter:catch_errors]
use = egg:swift#catch_errors
# See proxy-server.conf-sample for options
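As a hedged illustration of what the expirer acts on (assumes a SAIO proxy at
127.0.0.1:8080, a valid token in $TOKEN, and an existing object; the container
and object names here are made up):

  $ curl -X POST -H "X-Auth-Token: $TOKEN" -H "X-Delete-After: 300" \
        http://127.0.0.1:8080/v1/AUTH_test/mycontainer/myobject

The object stops being served after roughly 300 seconds, and a later pass of
swift-object-expirer removes it from disk.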
===== swift-2.17.0/doc/saio/swift/proxy-server.conf =====

[DEFAULT]
bind_ip = 127.0.0.1
bind_port = 8080
workers = 1
user =
log_facility = LOG_LOCAL1
eventlet_debug = true

[pipeline:main]
# Yes, proxy-logging appears twice. This is so that
# middleware-originated requests get logged too.
pipeline = catch_errors gatekeeper healthcheck proxy-logging cache listing_formats bulk tempurl ratelimit crossdomain container_sync tempauth staticweb copy container-quotas account-quotas slo dlo versioned_writes symlink proxy-logging proxy-server

[filter:catch_errors]
use = egg:swift#catch_errors

[filter:healthcheck]
use = egg:swift#healthcheck

[filter:proxy-logging]
use = egg:swift#proxy_logging

[filter:bulk]
use = egg:swift#bulk

[filter:ratelimit]
use = egg:swift#ratelimit

[filter:crossdomain]
use = egg:swift#crossdomain

[filter:dlo]
use = egg:swift#dlo

[filter:slo]
use = egg:swift#slo

[filter:container_sync]
use = egg:swift#container_sync
current = //saio/saio_endpoint

[filter:tempurl]
use = egg:swift#tempurl

[filter:tempauth]
use = egg:swift#tempauth
user_admin_admin = admin .admin .reseller_admin
user_test_tester = testing .admin
user_test2_tester2 = testing2 .admin
user_test_tester3 = testing3

[filter:staticweb]
use = egg:swift#staticweb

[filter:account-quotas]
use = egg:swift#account_quotas

[filter:container-quotas]
use = egg:swift#container_quotas

[filter:cache]
use = egg:swift#memcache

[filter:gatekeeper]
use = egg:swift#gatekeeper

[filter:versioned_writes]
use = egg:swift#versioned_writes
allow_versioned_writes = true

[filter:copy]
use = egg:swift#copy

[filter:listing_formats]
use = egg:swift#listing_formats

[filter:symlink]
use = egg:swift#symlink

[app:proxy-server]
use = egg:swift#proxy
allow_account_management = true
account_autocreate = true
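With the tempauth users above, a quick smoke test of the proxy (illustrative;
uses the test:tester account defined in this file on the default SAIO port):

  $ curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' \
        http://127.0.0.1:8080/auth/v1.0

A successful response carries X-Auth-Token and X-Storage-Url headers for use
in follow-up requests.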
===== swift-2.17.0/doc/saio/swift/account-server/3.conf =====

[DEFAULT]
devices = /srv/3/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.3
bind_port = 6032
workers = 1
user =
log_facility = LOG_LOCAL4
recon_cache_path = /var/cache/swift3
eventlet_debug = true

[pipeline:main]
pipeline = recon account-server

[app:account-server]
use = egg:swift#account

[filter:recon]
use = egg:swift#recon

[account-replicator]
rsync_module = {replication_ip}::account{replication_port}

[account-auditor]

[account-reaper]

===== swift-2.17.0/doc/saio/swift/account-server/2.conf =====

[DEFAULT]
devices = /srv/2/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.2
bind_port = 6022
workers = 1
user =
log_facility = LOG_LOCAL3
recon_cache_path = /var/cache/swift2
eventlet_debug = true

[pipeline:main]
pipeline = recon account-server

[app:account-server]
use = egg:swift#account

[filter:recon]
use = egg:swift#recon

[account-replicator]
rsync_module = {replication_ip}::account{replication_port}

[account-auditor]

[account-reaper]

===== swift-2.17.0/doc/saio/swift/account-server/4.conf =====

[DEFAULT]
devices = /srv/4/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.4
bind_port = 6042
workers = 1
user =
log_facility = LOG_LOCAL5
recon_cache_path = /var/cache/swift4
eventlet_debug = true

[pipeline:main]
pipeline = recon account-server

[app:account-server]
use = egg:swift#account

[filter:recon]
use = egg:swift#recon

[account-replicator]
rsync_module = {replication_ip}::account{replication_port}

[account-auditor]

[account-reaper]

===== swift-2.17.0/doc/saio/swift/account-server/1.conf =====

[DEFAULT]
devices = /srv/1/node
mount_check = false
disable_fallocate = true
bind_ip = 127.0.0.1
bind_port = 6012
workers = 1
user =
log_facility = LOG_LOCAL2
recon_cache_path = /var/cache/swift
eventlet_debug = true

[pipeline:main]
pipeline = recon account-server

[app:account-server]
use = egg:swift#account

[filter:recon]
use = egg:swift#recon

[account-replicator]
rsync_module = {replication_ip}::account{replication_port}

[account-auditor]

[account-reaper]
===== swift-2.17.0/doc/saio/rsyslog.d/10-swift.conf =====

# Uncomment the following to have a log containing all logs together
#local1,local2,local3,local4,local5.*   /var/log/swift/all.log

# Uncomment the following to have hourly proxy logs for stats processing
#$template HourlyProxyLog,"/var/log/swift/hourly/%$YEAR%%$MONTH%%$DAY%%$HOUR%"
#local1.*;local1.!notice ?HourlyProxyLog

local1.*;local1.!notice /var/log/swift/proxy.log
local1.notice           /var/log/swift/proxy.error
local1.*                ~

local2.*;local2.!notice /var/log/swift/storage1.log
local2.notice           /var/log/swift/storage1.error
local2.*                ~

local3.*;local3.!notice /var/log/swift/storage2.log
local3.notice           /var/log/swift/storage2.error
local3.*                ~

local4.*;local4.!notice /var/log/swift/storage3.log
local4.notice           /var/log/swift/storage3.error
local4.*                ~

local5.*;local5.!notice /var/log/swift/storage4.log
local5.notice           /var/log/swift/storage4.error
local5.*                ~

local6.*;local6.!notice /var/log/swift/expirer.log
local6.notice           /var/log/swift/expirer.error
local6.*                ~

===== swift-2.17.0/doc/saio/rsyncd.conf =====

uid =
gid =
log file = /var/log/rsyncd.log
pid file = /var/run/rsyncd.pid
address = 0.0.0.0

[account6012]
max connections = 25
path = /srv/1/node/
read only = false
lock file = /var/lock/account6012.lock

[account6022]
max connections = 25
path = /srv/2/node/
read only = false
lock file = /var/lock/account6022.lock

[account6032]
max connections = 25
path = /srv/3/node/
read only = false
lock file = /var/lock/account6032.lock

[account6042]
max connections = 25
path = /srv/4/node/
read only = false
lock file = /var/lock/account6042.lock

[container6011]
max connections = 25
path = /srv/1/node/
read only = false
lock file = /var/lock/container6011.lock

[container6021]
max connections = 25
path = /srv/2/node/
read only = false
lock file = /var/lock/container6021.lock

[container6031]
max connections = 25
path = /srv/3/node/
read only = false
lock file = /var/lock/container6031.lock

[container6041]
max connections = 25
path = /srv/4/node/
read only = false
lock file = /var/lock/container6041.lock

[object6010]
max connections = 25
path = /srv/1/node/
read only = false
lock file = /var/lock/object6010.lock

[object6020]
max connections = 25
path = /srv/2/node/
read only = false
lock file = /var/lock/object6020.lock

[object6030]
max connections = 25
path = /srv/3/node/
read only = false
lock file = /var/lock/object6030.lock

[object6040]
max connections = 25
path = /srv/4/node/
read only = false
lock file = /var/lock/object6040.lock
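A common sanity check for this setup (assumes rsyncd is running on the local
host and module listing has not been disabled):

  $ rsync rsync://localhost/

This should list the twelve account/container/object modules defined above.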
===== swift-2.17.0/doc/saio/bin/startrest =====

#!/bin/bash
set -e
swift-init rest start

===== swift-2.17.0/doc/saio/bin/startmain =====

#!/bin/bash
set -e
swift-init main start

===== swift-2.17.0/doc/saio/bin/resetswift =====

#!/bin/bash
set -e

swift-init all kill

# Remove the following line if you did not set up rsyslog for individual logging:
sudo find /var/log/swift -type f -exec rm -f {} \;
if cut -d' ' -f2 /proc/mounts | grep -q /mnt/sdb1 ; then
    sudo umount /mnt/sdb1
fi
# If you are using a loopback device set SAIO_BLOCK_DEVICE to "/srv/swift-disk"
sudo mkfs.xfs -f ${SAIO_BLOCK_DEVICE:-/dev/sdb1}
sudo mount /mnt/sdb1
sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
sudo chown ${USER}:${USER} /mnt/sdb1/*
mkdir -p /srv/1/node/sdb1 /srv/1/node/sdb5 \
         /srv/2/node/sdb2 /srv/2/node/sdb6 \
         /srv/3/node/sdb3 /srv/3/node/sdb7 \
         /srv/4/node/sdb4 /srv/4/node/sdb8
sudo rm -f /var/log/debug /var/log/messages /var/log/rsyncd.log /var/log/syslog
# Quote the glob so find receives the pattern rather than the shell expanding it:
find /var/cache/swift* -type f -name '*.recon' -exec rm -f {} \;
if [ "`type -t systemctl`" == "file" ]; then
    sudo systemctl restart rsyslog
    sudo systemctl restart memcached
else
    sudo service rsyslog restart
    sudo service memcached restart
fi
===== swift-2.17.0/doc/saio/bin/remakerings =====

#!/bin/bash
set -e

cd /etc/swift

rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz

swift-ring-builder object.builder create 10 3 1
swift-ring-builder object.builder add r1z1-127.0.0.1:6010/sdb1 1
swift-ring-builder object.builder add r1z2-127.0.0.2:6020/sdb2 1
swift-ring-builder object.builder add r1z3-127.0.0.3:6030/sdb3 1
swift-ring-builder object.builder add r1z4-127.0.0.4:6040/sdb4 1
swift-ring-builder object.builder rebalance
swift-ring-builder object-1.builder create 10 2 1
swift-ring-builder object-1.builder add r1z1-127.0.0.1:6010/sdb1 1
swift-ring-builder object-1.builder add r1z2-127.0.0.2:6020/sdb2 1
swift-ring-builder object-1.builder add r1z3-127.0.0.3:6030/sdb3 1
swift-ring-builder object-1.builder add r1z4-127.0.0.4:6040/sdb4 1
swift-ring-builder object-1.builder rebalance
swift-ring-builder object-2.builder create 10 6 1
swift-ring-builder object-2.builder add r1z1-127.0.0.1:6010/sdb1 1
swift-ring-builder object-2.builder add r1z1-127.0.0.1:6010/sdb5 1
swift-ring-builder object-2.builder add r1z2-127.0.0.2:6020/sdb2 1
swift-ring-builder object-2.builder add r1z2-127.0.0.2:6020/sdb6 1
swift-ring-builder object-2.builder add r1z3-127.0.0.3:6030/sdb3 1
swift-ring-builder object-2.builder add r1z3-127.0.0.3:6030/sdb7 1
swift-ring-builder object-2.builder add r1z4-127.0.0.4:6040/sdb4 1
swift-ring-builder object-2.builder add r1z4-127.0.0.4:6040/sdb8 1
swift-ring-builder object-2.builder rebalance
swift-ring-builder container.builder create 10 3 1
swift-ring-builder container.builder add r1z1-127.0.0.1:6011/sdb1 1
swift-ring-builder container.builder add r1z2-127.0.0.2:6021/sdb2 1
swift-ring-builder container.builder add r1z3-127.0.0.3:6031/sdb3 1
swift-ring-builder container.builder add r1z4-127.0.0.4:6041/sdb4 1
swift-ring-builder container.builder rebalance
swift-ring-builder account.builder create 10 3 1
swift-ring-builder account.builder add r1z1-127.0.0.1:6012/sdb1 1
swift-ring-builder account.builder add r1z2-127.0.0.2:6022/sdb2 1
swift-ring-builder account.builder add r1z3-127.0.0.3:6032/sdb3 1
swift-ring-builder account.builder add r1z4-127.0.0.4:6042/sdb4 1
swift-ring-builder account.builder rebalance
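After remakerings completes, each builder can be inspected (standard
swift-ring-builder usage, run from /etc/swift):

  $ swift-ring-builder object.builder
  $ swift-ring-builder object-2.builder

Invoked with only the builder file, swift-ring-builder prints the partition
count, replica count, and device table, which makes it easy to confirm that
the ring for the 4+2 EC policy (object-2) really has eight devices.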
.\" .TH SWIFT-RING-BUILDER-ANALYZER "1" "August 2016" "OpenStack Swift" .SH NAME swift\-ring\-builder\-analyzer \- put the OpenStack Swift ring builder through its paces .SH SYNOPSIS .B swift\-ring\-builder\-analyzer [\fIoptions\fR] \fIscenario_path\fR .SH DESCRIPTION .PP This is a tool to help developers quantify changes to the ring builder. It takes a scenario (JSON file) describing the builder's basic parameters (part_power, replicas, etc.) and a number of "rounds", where each round is a set of operations to perform on the builder. For each round, the operations are applied, and then the builder is rebalanced until it reaches a steady state. .SH OPTIONS .TP .I scenario_path Path to the scenario file .TP \fB\-h\fR, \fB\-\-help\fR Show this help message and exit .TP \fB\-\-check\fR, \fB\-c\fR Just check the scenario, don't execute it. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift\-ring\-builder\-analyzer and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org swift-2.17.0/doc/manpages/swift-dispersion-populate.10000666000175100017510000001011613236061617022551 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-dispersion-populate 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-dispersion-populate \- OpenStack Swift dispersion populate .SH SYNOPSIS .LP .B swift-dispersion-populate [--container-suffix-start] [--object-suffix-start] [--container-only|--object-only] [--insecure] [conf_file] .SH DESCRIPTION .PP This is one of the swift-dispersion utilities that is used to evaluate the overall cluster health. This is accomplished by checking if a set of deliberately distributed containers and objects are currently in their proper places within the cluster. .PP For instance, a common deployment has three replicas of each object. The health of that object can be measured by checking if each replica is in its proper place. If only 2 of the 3 is in place the object's health can be said to be at 66.66%, where 100% would be perfect. .PP We need to place the containers and objects throughout the system so that they are on distinct partitions. The \fBswift-dispersion-populate\fR tool does this by making up random container and object names until they fall on distinct partitions. Last, and repeatedly for the life of the cluster, we need to run the \fBswift-dispersion-report\fR tool to check the health of each of these containers and objects. .PP These tools need direct access to the entire cluster and to the ring files. Installing them on a proxy server will probably do or a box used for swift administration purposes that also contains the common swift packages and ring. Both \fBswift-dispersion-populate\fR and \fBswift-dispersion-report\fR use the same configuration file, /etc/swift/dispersion.conf . 
===== swift-2.17.0/doc/manpages/swift-object-expirer.1 =====

.\"
.\" Author: Joao Marcelo Martins
.\" Copyright (c) 2012 OpenStack Foundation.
.\"
.\" Licensed under the Apache License, Version 2.0 (the "License");
.\" you may not use this file except in compliance with the License.
.\" You may obtain a copy of the License at
.\"
.\"    http://www.apache.org/licenses/LICENSE-2.0
.\"
.\" Unless required by applicable law or agreed to in writing, software
.\" distributed under the License is distributed on an "AS IS" BASIS,
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
.\" implied.
.\" See the License for the specific language governing permissions and
.\" limitations under the License.
.\"
.TH swift-object-expirer 1 "3/15/2012" "Linux" "OpenStack Swift"
.SH NAME
.LP
.B swift-object-expirer
\- OpenStack Swift object expirer
.SH SYNOPSIS
.LP
.B swift-object-expirer
[CONFIG] [-h|--help] [-v|--verbose] [-o|--once]
.SH DESCRIPTION
.PP
The swift-object-expirer offers scheduled deletion of objects. The Swift
client would use the X-Delete-At or X-Delete-After headers during an object
PUT or POST and the cluster would automatically quit serving that object at
the specified time and would shortly thereafter remove the object from the
system.

The X-Delete-At header takes a Unix Epoch timestamp, in integer form; for
example: 1317070737 represents Mon Sep 26 20:58:57 2011 UTC.

The X-Delete-After header takes an integer number of seconds. The proxy
server that receives the request will convert this header into an
X-Delete-At header using its current time plus the value given.

The options are as follows:
.RS 4
.PD 0
.IP "-v"
.IP "--verbose"
.RS 4
.IP "log to console"
.RE
.IP "-o"
.IP "--once"
.RS 4
.IP "only run one pass of daemon"
.RE
.PD
.RE
.SH DOCUMENTATION
.LP
More in depth documentation in regards to
.BI swift-object-expirer
can be found at
.BI https://docs.openstack.org/swift/latest/overview_expiring_objects.html
and also about OpenStack Swift as a whole can be found at
.BI https://docs.openstack.org/swift/latest/
.SH "SEE ALSO"
.BR object-expirer.conf(5)
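Example run against the SAIO expirer config shipped earlier in this tree (one
verbose pass; the flags are those listed in the synopsis):

  $ swift-object-expirer /etc/swift/object-expirer.conf -v -o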
===== swift-2.17.0/doc/manpages/swift-init.1 =====

.\"
.\" Author: Joao Marcelo Martins
.\" Copyright (c) 2010-2011 OpenStack Foundation.
.\"
.\" Licensed under the Apache License, Version 2.0 (the "License");
.\" you may not use this file except in compliance with the License.
.\" You may obtain a copy of the License at
.\"
.\"    http://www.apache.org/licenses/LICENSE-2.0
.\"
.\" Unless required by applicable law or agreed to in writing, software
.\" distributed under the License is distributed on an "AS IS" BASIS,
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
.\" implied.
.\" See the License for the specific language governing permissions and
.\" limitations under the License.
.\"
.TH swift-init 1 "8/26/2011" "Linux" "OpenStack Swift"
.SH NAME
.LP
.B swift-init
\- OpenStack Swift swift-init tool
.SH SYNOPSIS
.LP
.B swift-init
<server> [<server> ...] <command> [options]
.SH DESCRIPTION
.PP
The swift-init tool can be used to initialize all swift daemons available as
part of OpenStack Swift. Instead of calling individual init scripts for each
swift daemon, one can just use swift-init. With swift-init you can initialize
just one swift service, such as the "proxy", or a combination of them. The
tool also allows one to use keywords such as "all", "main" and "rest" for the
<server> argument.

\fBServers:\fR
.PD 0
.RS 4
.IP "\fIproxy\fR" "4"
.IP " - Initializes the swift proxy daemon"
.RE
.RS 4
.IP "\fIobject\fR, \fIobject-replicator\fR, \fIobject-auditor\fR, \fIobject-updater\fR"
.IP " - Initializes the swift object daemons above"
.RE
.RS 4
.IP "\fIcontainer\fR, \fIcontainer-updater\fR, \fIcontainer-replicator\fR, \fIcontainer-auditor\fR"
.IP " - Initializes the swift container daemons above"
.RE
.RS 4
.IP "\fIaccount\fR, \fIaccount-auditor\fR, \fIaccount-reaper\fR, \fIaccount-replicator\fR"
.IP " - Initializes the swift account daemons above"
.RE
.RS 4
.IP "\fIall\fR"
.IP " - Initializes \fBall\fR the swift daemons"
.RE
.RS 4
.IP "\fImain\fR"
.IP " - Initializes all the \fBmain\fR swift daemons"
.IP " (proxy, container, account and object servers)"
.RE
.RS 4
.IP "\fIrest\fR"
.IP " - Initializes all the other \fBswift background daemons\fR"
.IP " (updater, replicator, auditor, reaper, etc)"
.RE
.PD

\fBCommands:\fR
.RS 4
.PD 0
.IP "\fIforce-reload\fR: \t\t alias for reload"
.IP "\fIno-daemon\fR: \t\t start a server interactively"
.IP "\fIno-wait\fR: \t\t\t spawn server and return immediately"
.IP "\fIonce\fR: \t\t\t start server and run one pass on supporting daemons"
.IP "\fIreload\fR: \t\t\t graceful shutdown then restart on supporting servers"
.IP "\fIrestart\fR: \t\t\t stops then restarts server"
.IP "\fIshutdown\fR: \t\t allow current requests to finish on supporting servers"
.IP "\fIstart\fR: \t\t\t starts a server"
.IP "\fIstatus\fR: \t\t\t display status of tracked pids for server"
.IP "\fIstop\fR: \t\t\t stops a server"
.PD
.RE

\fBOptions:\fR
.RS 4
.PD 0
.IP "-h, --help \t\t\t show this help message and exit"
.IP "-v, --verbose \t\t\t display verbose output"
.IP "-w, --no-wait \t\t\t won't wait for server to start before returning"
.IP "-o, --once \t\t\t only run one pass of daemon"
.IP "-n, --no-daemon \t\t start server interactively"
.IP "-g, --graceful \t\t send SIGHUP to supporting servers"
.IP "-c N, --config-num=N \t send command to the Nth server only"
.IP "-k N, --kill-wait=N \t wait N seconds for processes to die (default 15)"
.IP "-r RUN_DIR, --run-dir=RUN_DIR directory where the pids will be stored (default /var/run/swift)"
.IP "--strict return non-zero status code if some config is missing. Default mode if server is explicitly named."
.IP "--non-strict return zero status code even if some config is missing. Default mode if server is one of aliases `all`, `main` or `rest`."
.IP "--kill-after-timeout kill daemon and all children after kill-wait period."
.PD
.RE
.SH DOCUMENTATION
.LP
More documentation about OpenStack Swift can be found at
.BI https://docs.openstack.org/swift/latest/
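A few invocations built from the servers and commands above (assuming a
SAIO-style install, as in the scripts earlier in this tree):

  $ swift-init main start
  $ swift-init object-updater once
  $ swift-init all status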
===== swift-2.17.0/doc/manpages/swift-ring-builder-analyzer.1 =====

.\"
.\" Copyright (c) 2016 OpenStack Foundation.
.\"
.\" Licensed under the Apache License, Version 2.0 (the "License");
.\" you may not use this file except in compliance with the License.
.\" You may obtain a copy of the License at
.\"
.\"    http://www.apache.org/licenses/LICENSE-2.0
.\"
.\" Unless required by applicable law or agreed to in writing, software
.\" distributed under the License is distributed on an "AS IS" BASIS,
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
.\" implied.
.\" See the License for the specific language governing permissions and
.\" limitations under the License.
.\"
.TH SWIFT-RING-BUILDER-ANALYZER "1" "August 2016" "OpenStack Swift"
.SH NAME
swift\-ring\-builder\-analyzer \- put the OpenStack Swift ring builder through its paces
.SH SYNOPSIS
.B swift\-ring\-builder\-analyzer
[\fIoptions\fR] \fIscenario_path\fR
.SH DESCRIPTION
.PP
This is a tool to help developers quantify changes to the ring builder. It
takes a scenario (JSON file) describing the builder's basic parameters
(part_power, replicas, etc.) and a number of "rounds", where each round is a
set of operations to perform on the builder. For each round, the operations
are applied, and then the builder is rebalanced until it reaches a steady
state.
.SH OPTIONS
.TP
.I scenario_path
Path to the scenario file
.TP
\fB\-h\fR, \fB\-\-help\fR
Show this help message and exit
.TP
\fB\-\-check\fR, \fB\-c\fR
Just check the scenario, don't execute it.
.SH DOCUMENTATION
.LP
More in depth documentation in regards to
.BI swift\-ring\-builder\-analyzer
and also about OpenStack Swift as a whole can be found at
.BI https://docs.openstack.org/swift/latest/
and
.BI https://docs.openstack.org

===== swift-2.17.0/doc/manpages/swift-dispersion-populate.1 =====

.\"
.\" Author: Joao Marcelo Martins
.\" Copyright (c) 2010-2011 OpenStack Foundation.
.\"
.\" Licensed under the Apache License, Version 2.0 (the "License");
.\" you may not use this file except in compliance with the License.
.\" You may obtain a copy of the License at
.\"
.\"    http://www.apache.org/licenses/LICENSE-2.0
.\"
.\" Unless required by applicable law or agreed to in writing, software
.\" distributed under the License is distributed on an "AS IS" BASIS,
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
.\" implied.
.\" See the License for the specific language governing permissions and
.\" limitations under the License.
.\"
.TH swift-dispersion-populate 1 "8/26/2011" "Linux" "OpenStack Swift"
.SH NAME
.LP
.B swift-dispersion-populate
\- OpenStack Swift dispersion populate
.SH SYNOPSIS
.LP
.B swift-dispersion-populate
[--container-suffix-start] [--object-suffix-start] [--container-only|--object-only] [--insecure] [conf_file]
.SH DESCRIPTION
.PP
This is one of the swift-dispersion utilities that is used to evaluate the
overall cluster health. This is accomplished by checking if a set of
deliberately distributed containers and objects are currently in their
proper places within the cluster.
.PP
For instance, a common deployment has three replicas of each object. The
health of that object can be measured by checking if each replica is in its
proper place. If only 2 of the 3 is in place the object's health can be said
to be at 66.66%, where 100% would be perfect.
.PP
We need to place the containers and objects throughout the system so that
they are on distinct partitions. The \fBswift-dispersion-populate\fR tool
does this by making up random container and object names until they fall on
distinct partitions. Last, and repeatedly for the life of the cluster, we
need to run the \fBswift-dispersion-report\fR tool to check the health of
each of these containers and objects.
.PP
These tools need direct access to the entire cluster and to the ring files.
Installing them on a proxy server, or on a box used for swift administration
purposes that also contains the common swift packages and ring, will
probably do. Both \fBswift-dispersion-populate\fR and
\fBswift-dispersion-report\fR use the same configuration file,
/etc/swift/dispersion.conf. The account used by this tool should be a
dedicated account for the dispersion stats and should also have admin
privileges.
.SH OPTIONS
.RS 0
.PD 1
.IP "\fB--insecure\fR"
Allow accessing insecure keystone server. The keystone's certificate will
not be verified.
.IP "\fB--container-suffix-start=NUMBER\fR"
Start container suffix at NUMBER and resume population at this point;
default: 0
.IP "\fB--object-suffix-start=NUMBER\fR"
Start object suffix at NUMBER and resume population at this point;
default: 0
.IP "\fB--object-only\fR"
Only run object population
.IP "\fB--container-only\fR"
Only run container population
.IP "\fB--no-overlap\fR"
Increase coverage by amount in dispersion_coverage option with no overlap of
existing partitions (if run more than once)
.IP "\fB-P, --policy-name\fR"
Specify storage policy name
.SH CONFIGURATION
.PD 0
Example \fI/etc/swift/dispersion.conf\fR:
.RS 3
.IP "[dispersion]"
.IP "auth_url = https://127.0.0.1:443/auth/v1.0"
.IP "auth_user = dpstats:dpstats"
.IP "auth_key = dpstats"
.IP "swift_dir = /etc/swift"
.IP "# project_name = dpstats"
.IP "# project_domain_name = default"
.IP "# user_domain_name = default"
.IP "# dispersion_coverage = 1.0"
.IP "# retries = 5"
.IP "# concurrency = 25"
.IP "# endpoint_type = publicURL"
.RE
.PD
.SH EXAMPLE
.PP
.PD 0
$ swift-dispersion-populate
.RS 1
.IP "Created 2621 containers for dispersion reporting, 38s, 0 retries"
.IP "Created 2621 objects for dispersion reporting, 27s, 0 retries"
.RE
.PD
.SH DOCUMENTATION
.LP
More in depth documentation about the swift-dispersion utilities and also
OpenStack Swift as a whole can be found at
.BI https://docs.openstack.org/swift/latest/admin_guide.html#dispersion-report
and
.BI https://docs.openstack.org/swift/latest/
.SH "SEE ALSO"
.BR swift-dispersion-report(1),
.BR dispersion.conf(5)
.\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH SWIFT-DRIVE-AUDIT "1" "August 2016" "OpenStack Swift" .SH NAME swift\-drive\-audit \- OpenStack Swift drive audit cron job .SH SYNOPSIS .B swift\-drive\-audit \fICONFIG\fR .SH DESCRIPTION .PP Tool that can be run by using cron to watch for bad drives. If errors are detected, it unmounts the bad drive, so that Swift can work around it. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift\-drive\-audit and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org swift-2.17.0/doc/manpages/swift-recon.10000666000175100017510000001014513236061617017653 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-recon 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-recon \- OpenStack Swift recon middleware cli tool .SH SYNOPSIS .LP .B swift-recon \ [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] .SH DESCRIPTION .PP The swift-recon cli tool can be used to retrieve various metrics and telemetry information about a cluster that has been collected by the swift-recon middleware. In order to make use of the swift-recon middleware, update the object-server.conf file and enable the recon middleware by adding a pipeline entry and setting its option(s). You can view more information in the example section below. .SH OPTIONS .RS 0 .PD 1 .IP "\fB\fR" account|container|object - Defaults to object server. 
.IP "\fB-h, --help\fR" show this help message and exit .IP "\fB-v, --verbose\fR" Print verbose information .IP "\fB--suppress\fR" Suppress most connection related errors .IP "\fB-a, --async\fR" Get async stats .IP "\fB--auditor\fR" Get auditor stats .IP "\fB--updater\fR" Get updater stats .IP "\fB--expirer\fR" Get expirer stats .IP "\fB-r, --replication\fR" Get replication stats .IP "\fB-u, --unmounted\fR" Check cluster for unmounted devices .IP "\fB-d, --diskusage\fR" Get disk usage stats .IP "\fB--top=COUNT\fR" Also show the top COUNT entries in rank order .IP "\fB--lowest=COUNT\fR" Also show the lowest COUNT entries in rank order .IP "\fB--human-readable\fR" Use human readable suffix for disk usage stats .IP "\fB-l, --loadstats\fR" Get cluster load average stats .IP "\fB-q, --quarantined\fR" Get cluster quarantine stats .IP "\fB--validate-servers\fR" Validate servers on the ring .IP "\fB--md5\fR" Get md5sum of servers ring and compare to local copy .IP "\fB--sockstat\fR" Get cluster socket usage stats .IP "\fB--driveaudit\fR" Get drive audit error stats .IP "\fB-T, --time\fR" Check time synchronization .IP "\fB--swift-versions\fR" Check swift version .IP "\fB--all\fR" Perform all checks. Equivalent to \-arudlqT \-\-md5 \-\-sockstat \-\-auditor \-\-updater \-\-expirer \-\-driveaudit \-\-validate\-servers \-\-swift-versions .IP "\fB--region=REGION\fR" Only query servers in specified region .IP "\fB-z ZONE, --zone=ZONE\fR" Only query servers in specified zone .IP "\fB-t SECONDS, --timeout=SECONDS\fR" Time to wait for a response from a server .IP "\fB--swiftdir=PATH\fR" Default = /etc/swift .PD .RE .SH EXAMPLE .LP .PD 0 .RS 0 .IP "ubuntu:~$ swift-recon -q --zone 3" .IP "=================================================================" .IP "[2011-10-18 19:36:00] Checking quarantine dirs on 1 hosts... " .IP "[Quarantined objects] low: 4, high: 4, avg: 4, total: 4 " .IP "[Quarantined accounts] low: 0, high: 0, avg: 0, total: 0 " .IP "[Quarantined containers] low: 0, high: 0, avg: 0, total: 0 " .IP "=================================================================" .RE .RS 0 Finally if you also wish to track asynchronous pending's you will need to setup a cronjob to run the swift-recon-cron script periodically: .IP "*/5 * * * * swift /usr/bin/swift-recon-cron /etc/swift/object-server.conf" .RE .SH DOCUMENTATION .LP More documentation about OpenStack Swift can be found at .BI https://docs.openstack.org/swift/latest/ Also more specific documentation about swift-recon can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html#cluster-telemetry-and-monitoring .SH "SEE ALSO" .BR object-server.conf(5), swift-2.17.0/doc/manpages/swift-get-nodes.10000666000175100017510000000645113236061617020437 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. 
.\" .TH swift-get-nodes 1 "10/25/2016" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-get-nodes \- OpenStack Swift get-nodes tool .SH SYNOPSIS .LP .B swift-get-nodes \ [options] [ []] Or .B swift-get-nodes [options] -p Or .B swift-get-nodes \ [options] -P policy_name .SH DESCRIPTION .PP The swift-get-nodes tool can be used to find out the location where a particular account, container or object item is located within the swift cluster nodes. For example, if you have the account hash and a container name that belongs to that account, you can use swift-get-nodes to lookup where the container resides by using the container ring. .SH OPTIONS .TP \fB\-h --help \fR Shows the help message and exit .TP \fB\-a, --all\fR Show all handoff nodes .TP \fB\-p PARTITION, --partition=PARTITION\fR Show nodes for a given partition .TP \fB\-P POLICY_NAME, --policy-name=POLICY_NAME \fR Specify storage policy name .TP \fB\-d SWIFT_DIR, --swift-dir=SWIFT_DIR\fR Pass location of swift configuration file if different from the default location /etc/swift .RS 0 .IP "\fIExample:\fR" .RE .RS 4 .PD 0 .IP "$ swift-get-nodes /etc/swift/account.ring.gz MyAccount-12ac01446be2" .PD 0 .IP "Account MyAccount-12ac01446be2" .IP "Container None" .IP "Object None" .IP "Partition 221082" .IP "Hash d7e6ba68cfdce0f0e4ca7890e46cacce" .IP "Server:Port Device 172.24.24.29:6202 sdd" .IP "Server:Port Device 172.24.24.27:6202 sdr" .IP "Server:Port Device 172.24.24.32:6202 sde" .IP "Server:Port Device 172.24.24.26:6202 sdv [Handoff]" .IP "curl -I -XHEAD http://172.24.24.29:6202/sdd/221082/MyAccount-12ac01446be2" .IP "curl -I -XHEAD http://172.24.24.27:6202/sdr/221082/MyAccount-12ac01446be2" .IP "curl -I -XHEAD http://172.24.24.32:6202/sde/221082/MyAccount-12ac01446be2" .IP "curl -I -XHEAD http://172.24.24.26:6202/sdv/221082/MyAccount-12ac01446be2 # [Handoff]" .IP "ssh 172.24.24.29 ls -lah /srv/node/sdd/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/ " .IP "ssh 172.24.24.27 ls -lah /srv/node/sdr/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/" .IP "ssh 172.24.24.32 ls -lah /srv/node/sde/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/" .IP "ssh 172.24.24.26 ls -lah /srv/node/sdv/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/ # [Handoff] " .PD .RE .SH DOCUMENTATION .LP More documentation about OpenStack Swift can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-account-info(1), .BR swift-container-info(1), .BR swift-object-info(1), .BR swift-ring-builder(1) swift-2.17.0/doc/manpages/swift-config.10000666000175100017510000000262113236061617020012 0ustar zuulzuul00000000000000.\" .\" Copyright (c) 2016 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH SWIFT-CONFIG "1" "August 2016" "OpenStack Swift" .SH NAME swift\-config \- OpenStack Swift config parser .SH SYNOPSIS .B swift\-config [\fIoptions\fR] \fISERVER\fR .SH DESCRIPTION .PP Combine Swift configuration files and print result. 
===== swift-2.17.0/doc/manpages/swift-get-nodes.1 =====

.\"
.\" Author: Joao Marcelo Martins
.\" Copyright (c) 2010-2011 OpenStack Foundation.
.\"
.\" Licensed under the Apache License, Version 2.0 (the "License");
.\" you may not use this file except in compliance with the License.
.\" You may obtain a copy of the License at
.\"
.\"    http://www.apache.org/licenses/LICENSE-2.0
.\"
.\" Unless required by applicable law or agreed to in writing, software
.\" distributed under the License is distributed on an "AS IS" BASIS,
.\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
.\" implied.
.\" See the License for the specific language governing permissions and
.\" limitations under the License.
.\"
.TH swift-get-nodes 1 "10/25/2016" "Linux" "OpenStack Swift"
.SH NAME
.LP
.B swift-get-nodes
\- OpenStack Swift get-nodes tool
.SH SYNOPSIS
.LP
.B swift-get-nodes
\ <ring.gz> [options] <account> [<container> [<object>]]

Or

.B swift-get-nodes
[options] -p <partition> <ring.gz>

Or

.B swift-get-nodes
\ [options] -P policy_name <account> <container> <object>
.SH DESCRIPTION
.PP
The swift-get-nodes tool can be used to find out where a particular account,
container or object item is located within the swift cluster nodes. For
example, if you have the account hash and a container name that belongs to
that account, you can use swift-get-nodes to look up where the container
resides by using the container ring.
.SH OPTIONS
.TP
\fB\-h --help\fR
Shows the help message and exit
.TP
\fB\-a, --all\fR
Show all handoff nodes
.TP
\fB\-p PARTITION, --partition=PARTITION\fR
Show nodes for a given partition
.TP
\fB\-P POLICY_NAME, --policy-name=POLICY_NAME\fR
Specify storage policy name
.TP
\fB\-d SWIFT_DIR, --swift-dir=SWIFT_DIR\fR
Pass location of swift configuration file if different from the default
location /etc/swift
.RS 0
.IP "\fIExample:\fR"
.RE
.RS 4
.PD 0
.IP "$ swift-get-nodes /etc/swift/account.ring.gz MyAccount-12ac01446be2"
.IP "Account MyAccount-12ac01446be2"
.IP "Container None"
.IP "Object None"
.IP "Partition 221082"
.IP "Hash d7e6ba68cfdce0f0e4ca7890e46cacce"
.IP "Server:Port Device 172.24.24.29:6202 sdd"
.IP "Server:Port Device 172.24.24.27:6202 sdr"
.IP "Server:Port Device 172.24.24.32:6202 sde"
.IP "Server:Port Device 172.24.24.26:6202 sdv [Handoff]"
.IP "curl -I -XHEAD http://172.24.24.29:6202/sdd/221082/MyAccount-12ac01446be2"
.IP "curl -I -XHEAD http://172.24.24.27:6202/sdr/221082/MyAccount-12ac01446be2"
.IP "curl -I -XHEAD http://172.24.24.32:6202/sde/221082/MyAccount-12ac01446be2"
.IP "curl -I -XHEAD http://172.24.24.26:6202/sdv/221082/MyAccount-12ac01446be2 # [Handoff]"
.IP "ssh 172.24.24.29 ls -lah /srv/node/sdd/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/"
.IP "ssh 172.24.24.27 ls -lah /srv/node/sdr/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/"
.IP "ssh 172.24.24.32 ls -lah /srv/node/sde/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/"
.IP "ssh 172.24.24.26 ls -lah /srv/node/sdv/accounts/221082/cce/d7e6ba68cfdce0f0e4ca7890e46cacce/ # [Handoff]"
.PD
.RE
.SH DOCUMENTATION
.LP
More documentation about OpenStack Swift can be found at
.BI https://docs.openstack.org/swift/latest/
.SH "SEE ALSO"
.BR swift-account-info(1),
.BR swift-container-info(1),
.BR swift-object-info(1),
.BR swift-ring-builder(1)
.\" .TH swift-object-replicator 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-object-replicator \- OpenStack Swift object replicator .SH SYNOPSIS .LP .B swift-object-replicator [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP Replication is designed to keep the system in a consistent state in the face of temporary error conditions like network outages or drive failures. The replication processes compare local data with each remote copy to ensure they all contain the latest version. Object replication uses a hash list to quickly compare subsections of each partition. .PP Replication updates are push based. For object replication, updating is just a matter of rsyncing files to the peer. The replicator also ensures that data is removed from the system. When an object item is deleted a tombstone is set as the latest version of the item. The replicator will see the tombstone and ensure that the item is removed from the entire system. .SH OPTIONS .TP \fB\-h\fR, \fB\-\-help\fR Show this help message and exit .TP \fB\-d\fR \fIDEVICES\fR, \fB\-\-devices\fR=\fIDEVICES\fR Replicate only given devices. Comma\-separated list. Only has effect if \-\-once is used. .TP \fB\-p\fR \fIPARTITIONS\fR, \fB\-\-partitions\fR=\fIPARTITIONS\fR Replicate only given partitions. Comma\-separated list. Only has effect if \-\-once is used. .TP \fB\-i\fR \fIPOLICIES\fR, \fB\-\-policies\fR=\fIPOLICIES\fR Replicate only given policy indices. Comma\-separated list. Only has effect if \-\-once is used. .TP \fB\-v\fR, \fB\-\-verbose\fR Log to console .TP \fB\-o\fR, \fB\-\-once\fR Only run one pass of daemon .PP .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-object-replicator and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR object-server.conf(5) swift-2.17.0/doc/manpages/object-server.conf.50000666000175100017510000006030613236061617021121 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH object-server.conf 5 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B object-server.conf \- configuration file for the OpenStack Swift object server .SH SYNOPSIS .LP .B object-server.conf .SH DESCRIPTION .PP This is the configuration file used by the object server and other object background services, such as; replicator, reconstructor, updater and auditor. The configuration file follows the python-pastedeploy syntax. The file is divided into sections, which are enclosed by square brackets. Each section will contain a certain number of key/value parameters which are described later. Any line that begins with a '#' symbol is ignored. You can find more information about python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION .PD 1 .RS 0 This is indicated by section named [DEFAULT]. 
Below are the parameters that are acceptable within this section. .IP "\fBbind_ip\fR" IP address the object server should bind to. The default is 0.0.0.0, which will make it bind to all available addresses. .IP "\fBbind_port\fR" TCP port the object server should bind to. The default is 6200. .IP "\fBbind_timeout\fR" Timeout to bind socket. The default is 30. .IP \fBbacklog\fR TCP backlog. Maximum number of allowed pending connections. The default value is 4096. .IP \fBworkers\fR The number of pre-forked processes that will accept connections. Zero means no fork. The default is auto, which will make the server try to match the number of effective cpu cores if python multiprocessing is available (included with most python distributions >= 2.6) or fall back to one. It's worth noting that individual workers will use many eventlet co-routines to service multiple concurrent requests. .IP \fBmax_clients\fR Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. The default is 1024. .IP \fBuser\fR The system user that the object server will run as. The default is swift. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBdevices\fR Parent directory of where devices are mounted. Default is /srv/node. .IP \fBmount_check\fR Whether or not to check if the devices are mounted, to prevent accidentally writing to the root device. The default is set to true. .IP \fBdisable_fallocate\fR Disable pre-allocating disk space for a file. The default is false. .IP \fBexpiring_objects_container_divisor\fR The default is 86400. .IP \fBexpiring_objects_account_name\fR The default is 'expiring_objects'. .IP \fBservers_per_port\fR Make object-server run this many worker processes per unique port of "local" ring devices across all storage policies. The default value of 0 disables this feature. .IP \fBlog_name\fR Label used when logging. The default is swift. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBlog_max_line_length\fR Caps the length of log lines at the value given; no limit if set to 0, the default. .IP \fBlog_custom_handlers\fR Comma separated list of functions to call to setup custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger. The default is empty. .IP \fBlog_udp_host\fR If set, log_udp_host will override log_address. .IP "\fBlog_udp_port\fR" UDP log port, the default is 514. .IP \fBlog_statsd_host\fR StatsD server. IPv4/IPv6 addresses and hostnames are supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. .IP \fBlog_statsd_port\fR The default is 8125. .IP \fBlog_statsd_default_sample_rate\fR The default is 1. .IP \fBlog_statsd_sample_rate_factor\fR The default is 1. .IP \fBlog_statsd_metric_prefix\fR The default is empty. .IP \fBeventlet_debug\fR Debug mode for eventlet library. The default is false. .IP \fBfallocate_reserve\fR You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. The default is 1%. .IP \fBnode_timeout\fR Request timeout to external services. The default is 3 seconds.
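.PP
For example, a minimal [DEFAULT] section combining the parameters described above might look like the following sketch; all values shown are illustrative, not recommendations:
.RS 4
.nf
[DEFAULT]
bind_ip = 0.0.0.0
bind_port = 6200
workers = auto
user = swift
devices = /srv/node
mount_check = true
.fi
.RE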
.IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBcontainer_update_timeout\fR Time to wait while sending a container update on object update. The default is 1 second. .IP \fBclient_timeout\fR Time to wait while receiving each chunk of data from a client or another backend node. The default is 60. .IP \fBnetwork_chunk_size\fR The default is 65536. .IP \fBdisk_chunk_size\fR The default is 65536. .IP \fBreclaim_age\fR Time elapsed in seconds before an object can be reclaimed. The default is 604800 seconds. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH PIPELINE SECTION .PD 1 .RS 0 This is indicated by the section name [pipeline:main]. Below are the parameters that are acceptable within this section. .IP "\fBpipeline\fR" It is used when you need to apply a number of filters. It is a list of filters ended by an application. The normal pipeline is "healthcheck recon object-server". .RE .PD .SH APP SECTION .PD 1 .RS 0 This is indicated by the section name [app:object-server]. Below are the parameters that are acceptable within this section. .IP "\fBuse\fR" Entry point for paste.deploy for the object server. This is the reference to the installed python egg. This is normally \fBegg:swift#object\fR. .IP "\fBset log_name\fR" Label used when logging. The default is object-server. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR" Logging level. The default is INFO. .IP "\fBset log_requests\fR" Enables request logging. The default is True. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBmax_upload_time\fR" The default is 86400. .IP "\fBslow\fR" The default is 0. .IP "\fBkeep_cache_size\fR" Objects smaller than this are not evicted from the buffercache once read. The default is 5242880. .IP "\fBkeep_cache_private\fR" If true, objects for authenticated GET requests may be kept in buffer cache if small enough. The default is false. .IP "\fBmb_per_sync\fR" On PUTs, sync data every n MB. The default is 512. .IP "\fBallowed_headers\fR" Comma separated list of headers that can be set in metadata on an object. This list is in addition to X-Object-Meta-* headers and cannot include Content-Type, etag, Content-Length, or deleted. The default is 'Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object'. .IP "\fBauto_create_account_prefix\fR" The default is '.'. .IP "\fBreplication_server\fR" Configures which verbs this server will handle. To handle all verbs, including replication verbs, do not specify "replication_server" (this is the default). To only handle replication, set to a True value (e.g. "True" or "1"). To handle only non-replication verbs, set to "False". Unless you have a separate replication network, you should not specify any value for "replication_server".
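.PP
Putting the pipeline and app sections together, a minimal sketch (values are illustrative only, taken from the defaults described above) might be:
.RS 4
.nf
[pipeline:main]
pipeline = healthcheck recon object-server

[app:object-server]
use = egg:swift#object
set log_name = object-server
.fi
.RE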
.IP "\fBreplication_concurrency\fR" Set to restrict the number of concurrent incoming SSYNC requests. Set to 0 for unlimited (the default is 4). Note that SSYNC requests are only used by the object reconstructor or the object replicator when configured to use ssync. .IP "\fBreplication_concurrency_per_device\fR" Set to restrict the number of concurrent incoming SSYNC requests per device; set to 0 for unlimited requests per device. This can help control I/O to each device. This does not override replication_concurrency described above, so you may need to adjust both parameters depending on your hardware or network capacity. Defaults to 1. .IP "\fBreplication_lock_timeout\fR" Number of seconds to wait for an existing replication device lock before giving up. The default is 15. .IP "\fBreplication_failure_threshold\fR" .IP "\fBreplication_failure_ratio\fR" These two settings control when the SSYNC subrequest handler will abort an incoming SSYNC attempt. An abort will occur if there are at least threshold number of failures and the value of failures / successes exceeds the ratio. The defaults of 100 and 1.0 mean that at least 100 failures have to occur and there have to be more failures than successes for an abort to occur. .IP "\fBsplice\fR" Use splice() for zero-copy object GETs. This requires Linux kernel version 3.0 or greater. If you set "splice = yes" but the kernel does not support it, error messages will appear in the object server logs at startup, but your object servers should continue to function. The default is false. .IP \fBnode_timeout\fR Request timeout to external services. The default is 3 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBcontainer_update_timeout\fR Time to wait while sending a container update on object update. The default is 1 second. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH FILTER SECTION .PD 1 .RS 0 Any section that has its name prefixed by "filter:" indicates a filter section. Filters are used to specify configuration parameters for specific swift middlewares. Below are the filters available and their respective acceptable parameters. .IP "\fB[filter:healthcheck]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the healthcheck middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#healthcheck\fR. .IP "\fBdisable_path\fR" An optional filesystem path which, if present, will cause the healthcheck URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". .RE .RS 0 .IP "\fB[filter:recon]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the recon middleware. This is the reference to the installed python egg.
This is normally \fBegg:swift#recon\fR. .IP "\fBrecon_cache_path\fR" The recon_cache_path simply sets the directory where stats for a few items will be stored. Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write access. The default is /var/cache/swift. .IP "\fBrecon_lock_path\fR" The default is /var/lock. .RE .PD .RS 0 .IP "\fB[filter:xprofile]\fR" .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#xprofile\fR. .IP "\fBprofile_module\fR" This option enables you to switch profilers, which should inherit from the python standard profiler. Currently the supported values are 'cProfile', 'eventlet.green.profile', etc. .IP "\fBlog_filename_prefix\fR" This prefix will be used to combine process ID and timestamp to name the profile data file. Make sure the executing user has permission to write into this path (missing path segments will be created, if necessary). If you enable profiling in more than one type of daemon, you must override it with a unique value for each. The default is /var/log/swift/profile/account.profile. .IP "\fBdump_interval\fR" The profile data will be dumped to local disk based on the above naming rule in this interval. The default is 5.0. .IP "\fBdump_timestamp\fR" Be careful, this option will enable the profiler to dump data into files with a timestamp, which means there will be lots of files piled up in the directory. The default is false. .IP "\fBpath\fR" This is the path of the URL to access the mini web UI. The default is __profile__. .IP "\fBflush_at_shutdown\fR" Clear the data when the wsgi server shuts down. The default is false. .IP "\fBunwind\fR" Unwind the iterator of applications. Default is false. .RE .PD .SH ADDITIONAL SECTIONS .PD 1 .RS 0 The following sections are used by other swift-object services, such as the replicator, updater, auditor. .IP "\fB[object-replicator]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is object-replicator. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBdaemonize\fR Whether or not to run replication as a daemon. The default is yes. .IP "\fBrun_pause [deprecated]\fR" Time in seconds to wait between replication passes. The default is 30. .IP \fBinterval\fR Time in seconds to wait between replication passes. The default is 30. .IP \fBconcurrency\fR Number of replication workers to spawn. The default is 1. .IP \fBstats_interval\fR Interval in seconds between logging replication statistics. The default is 300. .IP \fBsync_method\fR The sync method to use; default is rsync but you can use ssync to try the EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified as having performance comparable to, or better than, rsync, we plan to deprecate rsync so we can move on with more features for replication. .IP \fBrsync_timeout\fR Max duration of a partition rsync. The default is 900 seconds. .IP \fBrsync_io_timeout\fR Passed to rsync for I/O OP timeout. The default is 30 seconds. .IP \fBrsync_compress\fR Allow rsync to compress data which is transmitted to the destination node during sync. However, this is applicable only when the destination node is in a different region than the local one. NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might slow down the syncing process. The default is false.
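.PP
As an illustration, an [object-replicator] section using the rsync-related options above might look like this sketch (all values are simply the documented defaults):
.RS 4
.nf
[object-replicator]
concurrency = 1
sync_method = rsync
rsync_timeout = 900
rsync_io_timeout = 30
rsync_compress = false
.fi
.RE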
.IP \fBrsync_module\fR Format of the rsync module where the replicator will send data. See etc/rsyncd.conf-sample for some usage examples. The default is empty. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. .IP \fBrsync_bwlimit\fR Passed to rsync for bandwidth limit in kB/s. The default is 0 (unlimited). .IP \fBhttp_timeout\fR Max duration of an HTTP request. The default is 60 seconds. .IP \fBlockup_timeout\fR Attempts to kill all workers if nothing replicates for lockup_timeout seconds. The default is 1800 seconds. .IP \fBring_check_interval\fR The default is 15. .IP \fBrsync_error_log_line_length\fR Limits how long rsync error log lines are. 0 (default) means to log the entire line. .IP "\fBrecon_cache_path\fR" The recon_cache_path simply sets the directory where stats for a few items will be stored. Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write access. The default is /var/cache/swift. .IP "\fBhandoffs_first\fR" The flag to replicate handoffs prior to canonical partitions. It allows one to force syncing and deleting handoffs quickly. If set to a True value (e.g. "True" or "1"), partitions that are not supposed to be on the node will be replicated first. The default is false. .IP "\fBhandoff_delete\fR" The number of replicas which are ensured in swift. If a number less than the number of replicas is set, the object-replicator may delete local handoffs even though not all replicas are ensured in the cluster. The object-replicator will remove local handoff partition directories after syncing a partition when the number of successful responses is greater than or equal to this number. By default (auto), handoff partitions will be removed when they have been successfully replicated to all the canonical nodes. The handoffs_first and handoff_delete are options for a special case such as disks filling up in the cluster. These two options SHOULD NOT BE CHANGED, except in such extreme situations (e.g. disks are full or are about to fill up; in any case, DO NOT let your drives fill up). .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .RS 0 .IP "\fB[object-reconstructor]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is object-reconstructor. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBdaemonize\fR Whether or not to run replication as a daemon. The default is yes. .IP "\fBrun_pause [deprecated]\fR" Time in seconds to wait between replication passes. The default is 30. .IP \fBinterval\fR Time in seconds to wait between replication passes. The default is 30. .IP \fBconcurrency\fR Number of replication workers to spawn.
The default is 1. .IP \fBstats_interval\fR Interval in seconds between logging replication statistics. The default is 300. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. .IP \fBhttp_timeout\fR Max duration of an HTTP request. The default is 60 seconds. .IP \fBlockup_timeout\fR Attempts to kill all workers if nothing replicates for lockup_timeout seconds. The default is 1800 seconds. .IP \fBring_check_interval\fR The default is 15. .IP "\fBrecon_cache_path\fR" The recon_cache_path simply sets the directory where stats for a few items will be stored. Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write access. The default is /var/cache/swift. .IP "\fBhandoffs_first\fR" The flag to replicate handoffs prior to canonical partitions. It allows one to force syncing and deleting handoffs quickly. If set to a True value (e.g. "True" or "1"), partitions that are not supposed to be on the node will be replicated first. The default is false. .RE .PD .RS 0 .IP "\fB[object-updater]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is object-updater. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBinterval\fR Minimum time for a pass to take. The default is 300 seconds. .IP \fBconcurrency\fR Number of updater workers to spawn. The default is 1. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. .IP \fBobjects_per_second\fR Maximum objects updated per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 50. .IP \fBslowdown\fR The updater will sleep this amount of time between objects. The default is 0.01 seconds. Deprecated in favor of objects_per_second. .IP "\fBrecon_cache_path\fR" The recon_cache_path simply sets the directory where stats for a few items will be stored. Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write access. The default is /var/cache/swift. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .RS 0 .IP "\fB[object-auditor]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is object-auditor. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBdisk_chunk_size\fR The default is 65536. .IP \fBfiles_per_second\fR Maximum files audited per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 20. .IP \fBbytes_per_second\fR Maximum bytes audited per second.
Should be tuned according to individual system specs. 0 is unlimited. The default is 10000000. .IP \fBconcurrency\fR Number of auditor workers to spawn. The default is 1. .IP \fBlog_time\fR The default is 3600 seconds. .IP \fBzero_byte_files_per_second\fR The default is 50. .IP "\fBrecon_cache_path\fR" The recon_cache_path simply sets the directory where stats for a few items will be stored. Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write access. The default is /var/cache/swift. .IP \fBobject_size_stats\fR Takes a comma separated list of ints. If set, the object auditor will increment a counter for every object whose size is less than or equal to the given break points and report the result after a full scan. .IP \fBrsync_tempfile_timeout\fR Time elapsed in seconds before rsync tempfiles will be unlinked. Config value of "auto" will try to use object-replicator's rsync_timeout + 900 or fall back to 86400 (1 day). .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .SH DOCUMENTATION .LP More in depth documentation about the swift-object-server and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-object-server(1), swift-2.17.0/doc/manpages/swift-orphans.10000666000175100017510000000361513236061617020223 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-orphans 1 "3/15/2012" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-orphans \- OpenStack Swift orphans tool .SH SYNOPSIS .LP .B swift-orphans [-h|--help] [-a|--age] [-k|--kill] [-w|--wide] [-r|--run-dir] .SH DESCRIPTION .PP Lists and optionally kills orphaned Swift processes. This is done by scanning /var/run/swift or the directory specified with the \-r switch for .pid files and listing any processes that look like Swift processes but aren't associated with the pids in those .pid files. Any Swift processes running with the 'once' parameter are ignored, as those are usually for full-speed audit scans and such.
Example (sends SIGTERM to all orphaned Swift processes older than two hours): swift-orphans \-a 2 \-k TERM The options are as follows: .RS 4 .PD 0 .IP "-a HOURS" .IP "--age=HOURS" .RS 4 .IP "Look for processes at least HOURS old; default: 24" .RE .IP "-k SIGNAL" .IP "--kill=SIGNAL" .RS 4 .IP "Send SIGNAL to matched processes; default: just list process information" .RE .IP "-w" .IP "--wide" .RS 4 .IP "Don't clip the listing at 80 characters" .RE .PD .RE .SH DOCUMENTATION .LP More documentation about OpenStack Swift can be found at .BI https://docs.openstack.org/swift/latest/ swift-2.17.0/doc/manpages/proxy-server.conf.50000666000175100017510000012754313236061617021033 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH proxy-server.conf 5 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B proxy-server.conf \- configuration file for the OpenStack Swift proxy server .SH SYNOPSIS .LP .B proxy-server.conf .SH DESCRIPTION .PP This is the configuration file used by the proxy server and other proxy middlewares. The configuration file follows the python-pastedeploy syntax. The file is divided into sections, which are enclosed by square brackets. Each section will contain a certain number of key/value parameters which are described later. Any line that begins with a '#' symbol is ignored. You can find more information about python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION .PD 1 .RS 0 This is indicated by the section named [DEFAULT]. Below are the parameters that are acceptable within this section. .IP "\fBbind_ip\fR" IP address the proxy server should bind to. The default is 0.0.0.0, which will make it bind to all available addresses. .IP "\fBbind_port\fR" TCP port the proxy server should bind to. The default is 80. .IP "\fBbind_timeout\fR" Timeout to bind socket. The default is 30. .IP \fBbacklog\fR TCP backlog. Maximum number of allowed pending connections. The default value is 4096. .IP \fBadmin_key\fR Key to use for admin calls that are HMAC signed. Default is empty, which will disable admin calls to /info. .IP \fBdisallowed_sections\fR Allows the ability to withhold sections from showing up in the public calls to /info. You can withhold subsections by separating the dict level with a ".". For example, a value of "container_quotas, tempurl, bulk_delete.max_failed_deletes" would cause the sections 'container_quotas' and 'tempurl' to not be listed, and the key max_failed_deletes to be removed from bulk_delete. Default value is 'swift.valid_api_versions' which allows all registered features to be listed via HTTP GET /info except swift.valid_api_versions information. .IP \fBworkers\fR The number of pre-forked processes that will accept connections. Zero means no fork. The default is auto, which will make the server try to match the number of effective cpu cores if python multiprocessing is available (included with most python distributions >= 2.6) or fall back to one. It's worth noting that individual workers will use many eventlet co-routines to service multiple concurrent requests.
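.PP
For example, a minimal proxy [DEFAULT] section might look like this sketch (values are illustrative only, taken from the defaults described here):
.RS 4
.nf
[DEFAULT]
bind_ip = 0.0.0.0
bind_port = 80
workers = auto
user = swift
.fi
.RE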
.IP \fBmax_clients\fR Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. The default is 1024. .IP \fBuser\fR The system user that the proxy server will run as. The default is swift. .IP \fBexpose_info\fR Enables exposing configuration settings via HTTP GET /info. The default is true. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBcert_file\fR Location of the SSL certificate file. The default path is /etc/swift/proxy.crt. This is disabled by default. .IP \fBkey_file\fR Location of the SSL certificate key file. The default path is /etc/swift/proxy.key. This is disabled by default. .IP \fBexpiring_objects_container_divisor\fR The default is 86400. .IP \fBexpiring_objects_account_name\fR The default is 'expiring_objects'. .IP \fBlog_name\fR Label used when logging. The default is swift. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBlog_max_line_length\fR Caps the length of log lines at the value given. No limit if set to 0, the default. .IP \fBlog_headers\fR The default is false. .IP \fBlog_custom_handlers\fR Comma separated list of functions to call to setup custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger. The default is empty. .IP \fBlog_udp_host\fR If set, log_udp_host will override log_address. .IP "\fBlog_udp_port\fR" UDP log port, the default is 514. .IP \fBlog_statsd_host\fR StatsD server. IPv4/IPv6 addresses and hostnames are supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. .IP \fBlog_statsd_port\fR The default is 8125. .IP \fBlog_statsd_default_sample_rate\fR The default is 1. .IP \fBlog_statsd_sample_rate_factor\fR The default is 1. .IP \fBlog_statsd_metric_prefix\fR The default is empty. .IP \fBclient_timeout\fR Time to wait while receiving each chunk of data from a client or another backend node. The default is 60. .IP \fBeventlet_debug\fR Debug mode for eventlet library. The default is false. .IP \fBtrans_id_suffix\fR This optional suffix (default is empty) is appended to the swift transaction id and allows one to easily figure out which cluster an X-Trans-Id belongs to. This is very useful when managing more than one swift cluster. .IP \fBcors_allow_origin\fR Use a comma separated list of full URLs (http://foo.bar:1234,https://foo.bar). .IP \fBstrict_cors_mode\fR The default is true. .IP \fBcors_expose_headers\fR Comma separated list of headers to expose through Access-Control-Expose-Headers. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes.
I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH PIPELINE SECTION .PD 1 .RS 0 This is indicated by the section name [pipeline:main]. Below are the parameters that are acceptable within this section. .IP "\fBpipeline\fR" It is used when you need to apply a number of filters. It is a list of filters ended by an application. The normal pipeline is "catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server". Note: The double proxy-logging in the pipeline is not a mistake. The left-most proxy-logging is there to log requests that were handled in middleware and never made it through to the right-most middleware (and proxy server). Double logging is prevented for normal requests. See proxy-logging docs. .RE .PD .SH FILTER SECTION .PD 1 .RS 0 Any section that has its name prefixed by "filter:" indicates a filter section. Filters are used to specify configuration parameters for specific swift middlewares. Below are the filters available and their respective acceptable parameters. .IP "\fB[filter:healthcheck]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the healthcheck middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#healthcheck\fR. .IP "\fBdisable_path\fR" An optional filesystem path which, if present, will cause the healthcheck URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". .RE .PD .RS 0 .IP "\fB[filter:tempauth]\fR" .RE .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the tempauth middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#tempauth\fR. .IP "\fBset log_name\fR" Label used when logging. The default is tempauth. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBset log_headers\fR " Enables the ability to log request headers. The default is False. .IP \fBreseller_prefix\fR The reseller prefix will verify a token begins with this prefix before even attempting to validate it. Also, with authorization, only Swift storage accounts with this prefix will be authorized by this middleware. Useful if multiple auth systems are in use for one Swift cluster. The default is AUTH. .IP \fBauth_prefix\fR The auth prefix will cause requests beginning with this prefix to be routed to the auth subsystem, for granting tokens, etc. The default is /auth/. .IP \fBrequire_group\fR The require_group parameter names a group that must be presented by either X-Auth-Token or X-Service-Token. Usually this parameter is used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah). By default, no group is needed. Do not use .admin. .IP \fBtoken_life\fR This is the time in seconds before the token expires. The default is 86400. .IP \fBallow_overrides\fR This allows middleware higher in the WSGI pipeline to override auth processing, useful for middleware such as tempurl and formpost. If you know you're not going to use such middleware and you want a bit of extra security, you can set this to false. The default is true.
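.PP
A minimal [filter:tempauth] section using the options above might look like this sketch (all values are simply the documented defaults):
.RS 4
.nf
[filter:tempauth]
use = egg:swift#tempauth
reseller_prefix = AUTH
auth_prefix = /auth/
token_life = 86400
.fi
.RE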
.IP \fBstorage_url_scheme\fR This specifies what scheme to return with storage urls: http, https, or default (chooses based on what the server is running as). This can be useful with an SSL load balancer in front of a non-SSL server. .IP \fBuser_<account>_<user>\fR Lastly, you need to list all the accounts/users you want here. The format is: user_<account>_<user> = <key> [group] [group] [...] [storage_url] or if you want underscores in <account> or <user>, you can base64 encode them (with no equal signs) and use this format: user64_<account_b64>_<user_b64> = <key> [group] [group] [...] [storage_url] There are special groups of: \fI.reseller_admin\fR who can do anything to any account for this auth and also \fI.admin\fR who can do anything within the account. If neither of these groups are specified, the user can only access containers that have been explicitly allowed for them by a \fI.admin\fR or \fI.reseller_admin\fR. The trailing optional storage_url allows you to specify an alternate URL to hand back to the user upon authentication. If not specified, this defaults to \fIhttp[s]://<ip>:<port>/v1/<reseller_prefix>_<account>\fR where http or https depends on whether cert_file is specified in the [DEFAULT] section, <ip> and <port> are based on the [DEFAULT] section's bind_ip and bind_port (falling back to 127.0.0.1 and 8080), <reseller_prefix> is from this section, and <account> is from the user_<account>_<user> name. Here are example entries, required for running the tests: .RE .PD 0 .RS 10 .IP "user_admin_admin = admin .admin .reseller_admin" .IP "user_test_tester = testing .admin" .IP "user_test2_tester2 = testing2 .admin" .IP "user_test_tester3 = testing3" .RE .PD .RS 0 .IP "\fB[filter:authtoken]\fR" .RE To enable Keystone authentication you need to configure the auth token middleware first. An example is given below; please refer to the keystone documentation for details about the different settings. You will also need the keystoneauth middleware enabled in your main pipeline, so instead of having tempauth in there you can change it to: authtoken keystoneauth The auth credentials ("project_domain_name", "user_domain_name", "username", "project_name", "password") must match the Keystone credentials for the Swift service. The example values shown here assume a user named "swift" with admin role on a project named "service", both being in the Keystone domain with id "default". Refer to the KeystoneMiddleware documentation at .BI https://docs.openstack.org/keystonemiddleware/latest/middlewarearchitecture.html#configuration for other examples. .PD 0 .RS 10 .IP "paste.filter_factory = keystonemiddleware.auth_token:filter_factory" .IP "auth_uri = http://keystonehost:5000" .IP "auth_url = http://keystonehost:35357" .IP "auth_plugin = password" .IP "project_domain_id = default" .IP "user_domain_id = default" .IP "project_name = service" .IP "username = swift" .IP "password = password" .IP "" .IP "# delay_auth_decision defaults to False, but leaving it as false will" .IP "# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from" .IP "# working. This value must be explicitly set to True." .IP "delay_auth_decision = False" .IP .IP "cache = swift.cache" .IP "include_service_catalog = False" .RE .PD .RS 0 .IP "\fB[filter:keystoneauth]\fR" .RE Keystone authentication middleware. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the keystoneauth middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#keystoneauth\fR. .IP \fBreseller_prefix\fR The reseller_prefix option lists account namespaces that this middleware is responsible for.
The prefix is placed before the Keystone project id. For example, for project 12345678, and prefix AUTH, the account is named AUTH_12345678 (i.e., the path is /v1/AUTH_12345678/...). Several prefixes are allowed by specifying a comma-separated list as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a single blank/empty prefix. If an empty prefix is required in a list of prefixes, a value of '' (two single quote characters) indicates a blank/empty prefix. Except for the blank/empty prefix, an underscore ('_') character is appended to the value unless already present. .IP \fBoperator_roles\fR The user must have at least one role named by operator_roles on a project in order to create, delete and modify containers and objects and to set and read privileged headers such as ACLs. If there are several reseller prefix items, you can prefix the parameter so it applies only to those accounts (for example the parameter SERVICE_operator_roles applies to the /v1/SERVICE_ path). If you omit the prefix, the option applies to all reseller prefix items. For the blank/empty prefix, prefix with '' (do not put an underscore after the two single quote characters). .IP \fBreseller_admin_role\fR The reseller admin role has the ability to create and delete accounts. .IP \fBallow_overrides\fR This allows middleware higher in the WSGI pipeline to override auth processing, useful for middleware such as tempurl and formpost. If you know you're not going to use such middleware and you want a bit of extra security, you can set this to false. .IP \fBservice_roles\fR If the service_roles parameter is present, an X-Service-Token must be present in the request that, when validated, grants at least one role listed in the parameter. The X-Service-Token may be scoped to any project. If there are several reseller prefix items, you can prefix the parameter so it applies only to those accounts (for example the parameter SERVICE_service_roles applies to the /v1/SERVICE_ path). If you omit the prefix, the option applies to all reseller prefix items. For the blank/empty prefix, prefix with '' (do not put an underscore after the two single quote characters). By default, no service_roles are required. .IP \fBdefault_domain_id\fR For backwards compatibility, keystoneauth will match names in cross-tenant access control lists (ACLs) when both the requesting user and the tenant are in the default domain, i.e. the domain to which existing tenants are migrated. The default_domain_id value configured here should be the same as the value used during migration of tenants to keystone domains. .IP \fBallow_names_in_acls\fR For a new installation, or an installation in which keystone projects may move between domains, you should disable backwards compatible name matching in ACLs by setting allow_names_in_acls to false. .RE .PD .RS 0 .IP "\fB[filter:cache]\fR" .RE Caching middleware that manages caching in swift. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the memcache middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#memcache\fR. .IP "\fBset log_name\fR" Label used when logging. The default is memcache. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBset log_headers\fR" Enables the ability to log request headers. The default is False.
.IP \fBmemcache_max_connections\fR Sets the maximum number of connections to each memcached server per worker. .IP \fBmemcache_servers\fR If not set in the configuration file, the value for memcache_servers will be read from /etc/swift/memcache.conf (see memcache.conf-sample) or, lacking that file, it will default to 127.0.0.1:11211. You can specify multiple servers separated with commas, as in: 10.1.2.3:11211,10.1.2.4:11211. (IPv6 addresses must follow rfc3986 section-3.2.2, i.e. [::1]:11211) .IP \fBmemcache_serialization_support\fR This sets how memcache values are serialized and deserialized: .RE .PD 0 .RS 10 .IP "0 = older, insecure pickle serialization" .IP "1 = json serialization but pickles can still be read (still insecure)" .IP "2 = json serialization only (secure and the default)" .RE .RS 10 To avoid an instant full cache flush, existing installations should upgrade with 0, then set to 1 and reload, then after some time (24 hours) set to 2 and reload. In the future, the ability to use pickle serialization will be removed. If not set in the configuration file, the value for memcache_serialization_support will be read from /etc/swift/memcache.conf if it exists (see memcache.conf-sample). Otherwise, the default value as indicated above will be used. .RE .PD .RS 0 .IP "\fB[filter:ratelimit]\fR" .RE Rate limits requests on both an Account and Container level. Limits are configurable. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the ratelimit middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#ratelimit\fR. .IP "\fBset log_name\fR" Label used when logging. The default is ratelimit. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBset log_headers\fR " Enables the ability to log request headers. The default is False. .IP \fBclock_accuracy\fR This should represent how accurate the proxy servers' system clocks are with each other. 1000 means that all the proxies' clocks are accurate to each other within 1 millisecond. No ratelimit should be higher than the clock accuracy. The default is 1000. .IP \fBmax_sleep_time_seconds\fR App will immediately return a 498 response if the necessary sleep time ever exceeds the given max_sleep_time_seconds. The default is 60 seconds. .IP \fBlog_sleep_time_seconds\fR To allow visibility into rate limiting, set this value > 0 and all sleeps greater than the number will be logged. A value of 0 means disabled. The default is 0. .IP \fBrate_buffer_seconds\fR Number of seconds the rate counter can drop and be allowed to catch up (at a faster than listed rate). A larger number will result in larger spikes in rate but better average accuracy. The default is 5. .IP \fBaccount_ratelimit\fR If set, will limit PUT and DELETE requests to /account_name/container_name. Number is in requests per second. A value of 0 means disabled. The default is 0. .IP \fBcontainer_ratelimit_size\fR When set with container_ratelimit_x = r: for containers of size x, limit requests per second to r. Will limit PUT, DELETE, and POST requests to /a/c/o. The default is ''. .IP \fBcontainer_listing_ratelimit_size\fR Similarly to the above container-level write limits, the following will limit container GET (listing) requests.
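.PP
For example, the following sketch would limit account-level writes to 20 requests per second and apply a container-level write limit of 100 requests per second to containers of size 1000 (the specific sizes and rates are illustrative only):
.RS 4
.nf
[filter:ratelimit]
use = egg:swift#ratelimit
clock_accuracy = 1000
account_ratelimit = 20
container_ratelimit_1000 = 100
.fi
.RE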
.RE .PD .RS 0 .IP "\fB[filter:domain_remap]\fR" .RE Middleware that translates container and account parts of a domain to path parameters that the proxy server understands. The container.account.storageurl/object gets translated to container.account.storageurl/path_root/account/container/object and account.storageurl/path_root/container/object gets translated to account.storageurl/path_root/account/container/object .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the domain_remap middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#domain_remap\fR. .IP "\fBset log_name\fR" Label used when logging. The default is domain_remap. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBset log_headers\fR " Enables the ability to log request headers. The default is False. .IP \fBstorage_domain\fR The domain to be used by the middleware. Multiple domains can be specified separated by a comma. .IP \fBpath_root\fR The path root value for the storage URL. The default is v1. .IP \fBreseller_prefixes\fR Browsers can convert a host header to lowercase, so check that reseller prefix on the account is the correct case. This is done by comparing the items in the reseller_prefixes config option to the found prefix. If they match except for case, the item from reseller_prefixes will be used instead of the found reseller prefix. When none match, the default reseller prefix is used. When no default reseller prefix is configured, any request with an account prefix not in that list will be ignored by this middleware. Defaults to 'AUTH'. .IP \fBdefault_reseller_prefix\fR The default reseller prefix. This is used when none of the configured reseller_prefixes match. When not set, no reseller prefix is added. .RE .PD .RS 0 .IP "\fB[filter:catch_errors]\fR" .RE .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the catch_errors middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#catch_errors\fR. .IP "\fBset log_name\fR" Label used when logging. The default is catch_errors. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR " Logging address. The default is /dev/log. .IP "\fBset log_headers\fR" Enables the ability to log request headers. The default is False. .RE .PD .RS 0 .IP "\fB[filter:cname_lookup]\fR" .RE Note: this middleware requires python-dnspython .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the cname_lookup middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#cname_lookup\fR. .IP "\fBset log_name\fR" Label used when logging. The default is cname_lookup. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBset log_headers\fR" Enables the ability to log request headers. The default is False. .IP \fBstorage_domain\fR The domain to be used by the middleware. .IP \fBlookup_depth\fR How deep in the CNAME chain to look for something that matches the storage domain. The default is 1. .IP \fBnameservers\fR Specify the nameservers to use to do the CNAME resolution. If unset, the system configuration is used. 
Multiple nameservers can be specified separated by a comma. Default is unset. .RE .PD .RS 0 .IP "\fB[filter:staticweb]\fR" .RE Note: Put staticweb just after your auth filter(s) in the pipeline .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the staticweb middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#staticweb\fR. .IP "\fBset log_name\fR" Label used when logging. The default is staticweb. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR " Logging address. The default is /dev/log. .IP "\fBset log_headers\fR" Enables the ability to log request headers. The default is False. .RE .PD .RS 0 .IP "\fB[filter:tempurl]\fR" .RE Note: Put tempurl before slo, dlo, and your auth filter(s) in the pipeline .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the tempurl middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#tempurl\fR. .IP \fBmethods\fR The methods allowed with Temp URLs. The default is 'GET HEAD PUT POST DELETE'. .IP \fBincoming_remove_headers\fR The headers to remove from incoming requests. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. incoming_allow_headers is a list of exceptions to these removals. .IP \fBincoming_allow_headers\fR The headers allowed as exceptions to incoming_remove_headers. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. .IP "\fBoutgoing_remove_headers\fR" The headers to remove from outgoing responses. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. outgoing_allow_headers is a list of exceptions to these removals. .IP "\fBoutgoing_allow_headers\fR" The headers allowed as exceptions to outgoing_remove_headers. Simply a whitespace delimited list of header names and names can optionally end with '*' to indicate a prefix match. .RE .PD .RS 0 .IP "\fB[filter:formpost]\fR" .RE Note: Put formpost just before your auth filter(s) in the pipeline .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the formpost middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#formpost\fR. .RE .PD .RS 0 .IP "\fB[filter:name_check]\fR" .RE Note: Just needs to be placed before the proxy-server in the pipeline. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the name_check middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#name_check\fR. .IP \fBforbidden_chars\fR Characters that will not be allowed in a name. The default is '"`<>. .IP \fBmaximum_length\fR Maximum number of characters that can be in the name. The default is 255. .IP \fBforbidden_regexp\fR Python regular expressions of substrings that will not be allowed in a name. The default is /\./|/\.\./|/\.$|/\.\.$. .RE .PD .RS 0 .IP "\fB[filter:list-endpoints]\fR" .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the list_endpoints middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#list_endpoints\fR. .IP \fBlist_endpoints_path\fR The default is '/endpoints/'. .RE .PD .RS 0 .IP "\fB[filter:proxy-logging]\fR" .RE Logging for the proxy server now lives in this middleware. If the access_* variables are not set, logging directives from [DEFAULT] without "access_" will be used. 
.RS 3 .IP \fBuse\fR Entry point for paste.deploy for the proxy_logging middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#proxy_logging\fR. .IP "\fBaccess_log_name\fR" Label used when logging. The default is proxy-server. .IP "\fBaccess_log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBaccess_log_level\fR " Logging level. The default is INFO. .IP \fBaccess_log_address\fR Default is /dev/log. .IP \fBaccess_log_udp_host\fR If set, access_log_udp_host will override access_log_address. Default is unset. .IP \fBaccess_log_udp_port\fR Default is 514. .IP \fBaccess_log_statsd_host\fR You can use log_statsd_* from [DEFAULT], or override them here. StatsD server. IPv4/IPv6 addresses and hostnames are supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. .IP \fBaccess_log_statsd_port\fR Default is 8125. .IP \fBaccess_log_statsd_default_sample_rate\fR Default is 1. .IP \fBaccess_log_statsd_sample_rate_factor\fR The default is 1. .IP \fBaccess_log_statsd_metric_prefix\fR Default is "" (empty-string). .IP \fBaccess_log_headers\fR Default is False. .IP \fBaccess_log_headers_only\fR If access_log_headers is True and access_log_headers_only is set, only these headers are logged. Multiple headers can be defined as a comma separated list, like this: access_log_headers_only = Host, X-Object-Meta-Mtime .IP \fBreveal_sensitive_prefix\fR By default, the X-Auth-Token is logged. To obscure the value, set reveal_sensitive_prefix to the number of characters to log. For example, if set to 12, only the first 12 characters of the token appear in the log. An unauthorized access of the log file won't allow unauthorized usage of the token. However, the first 12 or so characters are unique enough that you can trace/debug token usage. Set to 0 to suppress the token completely (replaced by '...' in the log). The default is 16 chars. Note: reveal_sensitive_prefix will not affect the value logged with access_log_headers=True. .IP \fBlog_statsd_valid_http_methods\fR What HTTP methods are allowed for StatsD logging (comma-sep); request methods not in this list will have "BAD_METHOD" for the portion of the metric. Default is "GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS". .RE .PD .RS 0 .IP "\fB[filter:bulk]\fR" .RE Note: Put before both ratelimit and auth in the pipeline. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the bulk middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#bulk\fR. .IP \fBmax_containers_per_extraction\fR The default is 10000. .IP \fBmax_failed_extractions\fR The default is 1000. .IP \fBmax_deletes_per_request\fR The default is 10000. .IP \fBmax_failed_deletes\fR The default is 1000. .IP \fByield_frequency\fR In order to keep a connection active during a potentially long bulk request, Swift may return whitespace prepended to the actual response body. This whitespace will be yielded no more than every yield_frequency seconds. The default is 10. .IP \fBdelete_container_retry_count\fR Note: This parameter is used during a bulk delete of objects and their container. Deleting the container would frequently fail because it is very likely that not all replicated objects have been deleted by the time the middleware gets a successful response. The number of retries can be configured, and the number of seconds to wait between each retry will be 1.5**retry. The default is 0. .RE .PD
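.PP
An example [filter:bulk] section using the limits above might look like this sketch (all values are simply the documented defaults):
.RS 4
.nf
[filter:bulk]
use = egg:swift#bulk
max_containers_per_extraction = 10000
max_deletes_per_request = 10000
yield_frequency = 10
.fi
.RE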
.RS 0 .IP "\fB[filter:slo]\fR" .RE Note: Put after auth and staticweb in the pipeline. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the slo middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#slo\fR. .IP \fBmax_manifest_segments\fR The default is 1000. .IP \fBmax_manifest_size\fR The default is 2097152. .IP \fBmin_segment_size\fR The default is 1048576. .IP \fBrate_limit_after_segment\fR Start rate-limiting object segments after the Nth segment of a segmented object. The default is 10 segments. .IP \fBrate_limit_segments_per_sec\fR Once segment rate-limiting kicks in for an object, limit segments served to N per second. The default is 1. .IP \fBmax_get_time\fR Time limit on GET requests (seconds). The default is 86400. .RE .PD .RS 0 .IP "\fB[filter:dlo]\fR" .RE Note: Put after auth and staticweb in the pipeline. If you don't put it in the pipeline, it will be inserted for you. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the dlo middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#dlo\fR. .IP \fBrate_limit_after_segment\fR Start rate-limiting object segments after the Nth segment of a segmented object. The default is 10 segments. .IP \fBrate_limit_segments_per_sec\fR Once segment rate-limiting kicks in for an object, limit segments served to N per second. The default is 1. .IP \fBmax_get_time\fR Time limit on GET requests (seconds). The default is 86400. .RE .PD .RS 0 .IP "\fB[filter:container-quotas]\fR" .RE Note: Put after auth in the pipeline. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the container_quotas middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#container_quotas\fR. .RE .PD .RS 0 .IP "\fB[filter:account-quotas]\fR" .RE Note: Put after auth in the pipeline. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the account_quotas middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#account_quotas\fR. .RE .PD .RS 0 .IP "\fB[filter:gatekeeper]\fR" .RE .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the gatekeeper middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#gatekeeper\fR. .IP "\fBset log_name\fR" Label used when logging. The default is gatekeeper. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR " Logging level. The default is INFO. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBset log_headers\fR" Enables the ability to log request headers. The default is False. .RE .PD .RS 0 .IP "\fB[filter:container_sync]\fR" .RE .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the container_sync middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#container_sync\fR. .IP \fBallow_full_urls\fR Set this to false if you want to disallow any full URL values to be set for any new X-Container-Sync-To headers. This will keep any new full urls from coming in, but won't change any existing values already in the cluster. Updating those will have to be done manually, as knowing what the true realm endpoint should be cannot always be guessed. The default is true. .IP \fBcurrent\fR Set this to specify this cluster's //realm/cluster as "current" in /info. .RE .PD .RS 0 .IP "\fB[filter:xprofile]\fR" .RE Note: Put it at the beginning of the pipeline to profile all middleware. But it is safer to put this after healthcheck.
.RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#xprofile\fR. .IP "\fBprofile_module\fR" This option enable you to switch profilers which should inherit from python standard profiler. Currently the supported value can be 'cProfile', 'eventlet.green.profile' etc. .IP "\fBlog_filename_prefix\fR" This prefix will be used to combine process ID and timestamp to name the profile data file. Make sure the executing user has permission to write into this path (missing path segments will be created, if necessary). If you enable profiling in more than one type of daemon, you must override it with an unique value like, the default is /var/log/swift/profile/account.profile. .IP "\fBdump_interval\fR" The profile data will be dumped to local disk based on above naming rule in this interval. The default is 5.0. .IP "\fBdump_timestamp\fR" Be careful, this option will enable profiler to dump data into the file with time stamp which means there will be lots of files piled up in the directory. The default is false .IP "\fBpath\fR" This is the path of the URL to access the mini web UI. The default is __profile__. .IP "\fBflush_at_shutdown\fR" Clear the data when the wsgi server shutdown. The default is false. .IP "\fBunwind\fR" Unwind the iterator of applications. Default is false. .RE .PD .RS 0 .IP "\fB[filter:versioned_writes]\fR" .RE Note: Put after slo, dlo in the pipeline. If you don't put it in the pipeline, it will be inserted automatically. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the versioned_writes middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#versioned_writes\fR. .IP \fBallow_versioned_writes\fR Enables using versioned writes middleware and exposing configuration settings via HTTP GET /info. WARNING: Setting this option bypasses the "allow_versions" option in the container configuration file, which will be eventually deprecated. See documentation for more details. .RE .PD .SH APP SECTION .PD 1 .RS 0 This is indicated by section name [app:proxy-server]. Below are the parameters that are acceptable within this section. .IP \fBuse\fR Entry point for paste.deploy for the proxy server. This is the reference to the installed python egg. This is normally \fBegg:swift#proxy\fR. .IP "\fBset log_name\fR" Label used when logging. The default is proxy-server. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR" Logging level. The default is INFO. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP \fBlog_handoffs\fR Log when handoff locations are used. Default is True. .IP \fBrecheck_account_existence\fR Cache timeout in seconds to send memcached for account existence. The default is 60 seconds. .IP \fBrecheck_container_existence\fR Cache timeout in seconds to send memcached for container existence. The default is 60 seconds. .IP \fBobject_chunk_size\fR Chunk size to read from object servers. The default is 8192. .IP \fBclient_chunk_size\fR Chunk size to read from clients. The default is 8192. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. .IP \fBrecoverable_node_timeout\fR How long the proxy server will wait for an initial response and to read a chunk of data from the object servers while serving GET / HEAD requests. 
Timeouts from these requests can be recovered from, so setting this to something lower than node_timeout would provide quicker error recovery while allowing for a longer timeout for non-recoverable requests (PUTs). Defaults to node_timeout; it should be overridden if node_timeout is set to a high number, to prevent client timeouts from firing before the proxy server has a chance to retry. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBpost_quorum_timeout\fR How long to wait for requests to finish after a quorum has been established. The default is 0.5 seconds. .IP \fBerror_suppression_interval\fR Time in seconds that must elapse since the last error for a node to be considered no longer error limited. The default is 60 seconds. .IP \fBerror_suppression_limit\fR Error count to consider a node error limited. The default is 10. .IP \fBallow_account_management\fR Whether account PUTs and DELETEs are even callable. If set to 'true' any authorized user may create and delete accounts; if 'false' no one, even an authorized user, can. The default is false. .IP \fBaccount_autocreate\fR If set to 'true' authorized accounts that do not yet exist within the Swift cluster will be automatically created. The default is false. .IP \fBauto_create_account_prefix\fR Prefix used when automatically creating accounts. The default is '.'. .IP \fBmax_containers_per_account\fR If set to a positive value, trying to create a container when the account already has at least this many containers will result in a 403 Forbidden. Note: This is a soft limit, meaning a user might exceed the cap for up to recheck_account_existence seconds before the 403s kick in. .IP \fBmax_containers_whitelist\fR This is a comma separated list of account hashes that ignore the max_containers_per_account cap. .IP \fBdeny_host_headers\fR Comma separated list of Host headers to which the proxy will deny requests. The default is empty. .IP \fBput_queue_depth\fR Depth of the proxy put queue. The default is 10. .IP \fBsorting_method\fR Storage nodes can be chosen at random (shuffle - default), by using timing measurements (timing), or by using an explicit match (affinity). Using timing measurements may allow for lower overall latency, while using affinity allows for finer control. In both the timing and affinity cases, equally-sorting nodes are still randomly chosen to spread load. The valid values for sorting_method are "affinity", "shuffle", and "timing". .IP \fBtiming_expiry\fR If the "timing" sorting_method is used, the timings will only be valid for the number of seconds configured by timing_expiry. The default is 300. .IP \fBconcurrent_gets\fR If "on" then use replica count number of threads concurrently during a GET/HEAD and return with the first successful response. In the EC case, this parameter only affects an EC HEAD, as an EC GET behaves differently. Default is "off". .IP \fBconcurrency_timeout\fR This parameter controls how long to wait before firing off the next concurrent_get thread. A value of 0 would be fully concurrent; any other number will stagger the firing of the threads. This number should be between 0 and node_timeout. The default is the value of conn_timeout (0.5). .IP \fBrequest_node_count\fR Set to the number of nodes to contact for a normal request. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request. The default is '2 * replicas'.
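.PP As an illustrative excerpt only (the values are examples, not recommendations), several of the timeout and concurrency options above would be set in the [app:proxy-server] section like this: .PD 0 .RS 3 .IP "[app:proxy-server]" .IP "use = egg:swift#proxy" .IP "node_timeout = 10" .IP "recoverable_node_timeout = 3" .IP "conn_timeout = 0.5" .IP "concurrent_gets = on" .IP "concurrency_timeout = 0.5" .IP "request_node_count = 2 * replicas" .RE .PD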
.IP \fBread_affinity\fR Specifies which backend servers to prefer on reads. Format is a comma separated list of affinity descriptors of the form <selection>=<priority>. The <selection> may be r<N> for selecting nodes in region N or r<N>z<M> for selecting nodes in region N, zone M. The <priority> value should be a whole number that represents the priority to be given to the selection; lower numbers are higher priority. Default is empty, meaning no preference. Example: first read from region 1 zone 1, then region 1 zone 2, then anything in region 2, then everything else: .PD 0 .RS 10 .IP "read_affinity = r1z1=100, r1z2=200, r2=300" .RE .PD .IP \fBwrite_affinity\fR Specifies which backend servers to prefer on writes. Format is a comma separated list of affinity descriptors of the form r<N> for region N or r<N>z<M> for region N, zone M. If this is set, then when handling an object PUT request, some number (see setting write_affinity_node_count) of local backend servers will be tried before any nonlocal ones. Default is empty, meaning no preference. Example: try to write to regions 1 and 2 before writing to any other nodes: .PD 0 .RS 10 write_affinity = r1, r2 .RE .PD .IP \fBwrite_affinity_node_count\fR The number of local (as governed by the write_affinity setting) nodes to attempt to contact first on writes, before any non-local ones. The value should be an integer number, or use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request. The default is '2 * replicas'. .IP \fBswift_owner_headers\fR These are the headers whose values will only be shown to swift_owners. The exact definition of a swift_owner is up to the auth system in use, but usually indicates administrative responsibilities. The default is 'x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control'. .IP \fBrate_limit_after_segment\fR Start rate-limiting object segments after the Nth segment of a segmented object. The default is 10 segments. .IP \fBrate_limit_segments_per_sec\fR Once segment rate-limiting kicks in for an object, limit segments served to N per second. The default is 1. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH DOCUMENTATION .LP More in depth documentation about the swift-proxy-server and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-proxy-server(1) swift-2.17.0/doc/manpages/account-server.conf.50000666000175100017510000004137713236061617021306 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation.
.\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH account-server.conf 5 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B account-server.conf \- configuration file for the OpenStack Swift account server .SH SYNOPSIS .LP .B account-server.conf .SH DESCRIPTION .PP This is the configuration file used by the account server and other account background services, such as; replicator, auditor and reaper. The configuration file follows the python-pastedeploy syntax. The file is divided into sections, which are enclosed by square brackets. Each section will contain a certain number of key/value parameters which are described later. Any line that begins with a '#' symbol is ignored. You can find more information about python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION .PD 1 .RS 0 This is indicated by section named [DEFAULT]. Below are the parameters that are acceptable within this section. .IP "\fBbind_ip\fR" IP address the account server should bind to. The default is 0.0.0.0 which will make it bind to all available addresses. .IP "\fBbind_port\fR" TCP port the account server should bind to. The default is 6202. .IP "\fBbind_timeout\fR" Timeout to bind socket. The default is 30. .IP \fBbacklog\fR TCP backlog. Maximum number of allowed pending connections. The default value is 4096. .IP \fBworkers\fR The number of pre-forked processes that will accept connections. Zero means no fork. The default is auto which will make the server try to match the number of effective cpu cores if python multiprocessing is available (included with most python distributions >= 2.6) or fallback to one. It's worth noting that individual workers will use many eventlet co-routines to service multiple concurrent requests. .IP \fBmax_clients\fR Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. The default is 1024. .IP \fBuser\fR The system user that the account server will run as. The default is swift. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBdevices\fR Parent directory of where devices are mounted. Default is /srv/node. .IP \fBmount_check\fR Whether or not check if the devices are mounted to prevent accidentally writing to the root device. The default is set to true. .IP \fBdisable_fallocate\fR Disable pre-allocate disk space for a file. The default is false. .IP \fBlog_name\fR Label used when logging. The default is swift. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP "\fBlog_address\fR Logging address. The default is /dev/log. .IP \fBlog_max_line_length\fR The following caps the length of log lines to the value given; no limit if set to 0, the default. .IP \fBlog_custom_handlers\fR Comma separated list of functions to call to setup custom log handlers. 
functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger. The default is empty. .IP \fBlog_udp_host\fR If set, log_udp_host will override log_address. .IP "\fBlog_udp_port\fR" UDP log port, the default is 514. .IP \fBlog_statsd_host\fR StatsD server. IPv4/IPv6 addresses and hostnames are supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. .IP \fBlog_statsd_port\fR The default is 8125. .IP \fBlog_statsd_default_sample_rate\fR The default is 1. .IP \fBlog_statsd_sample_rate_factor\fR The default is 1. .IP \fBlog_statsd_metric_prefix\fR The default is empty. .IP \fBdb_preallocation\fR If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation. The default is false. .IP \fBeventlet_debug\fR Debug mode for the eventlet library. The default is false. .IP \fBfallocate_reserve\fR You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. The default is 1%. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH PIPELINE SECTION .PD 1 .RS 0 This is indicated by the section name [pipeline:main]. Below are the parameters that are acceptable within this section. .IP "\fBpipeline\fR" It is used when you need to apply a number of filters. It is a list of filters ended by an application. The normal pipeline is "healthcheck recon account-server". .RE .PD .SH APP SECTION .PD 1 .RS 0 This is indicated by the section name [app:account-server]. Below are the parameters that are acceptable within this section. .IP "\fBuse\fR" Entry point for paste.deploy for the account server. This is the reference to the installed python egg. This is normally \fBegg:swift#account\fR. .IP "\fBset log_name\fR" Label used when logging. The default is account-server. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR" Logging level. The default is INFO. .IP "\fBset log_requests\fR" Enables request logging. The default is True. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP "\fBauto_create_account_prefix\fR" The default is ".". .IP "\fBreplication_server\fR" Parameter for configuring a server dedicated to specific request types. To handle all verbs, including replication verbs, do not specify "replication_server" (this is the default). To only handle replication, set to a true value (e.g. "true" or "1"). To handle only non-replication verbs, set to "false". Unless you have a separate replication network, you should not specify any value for "replication_server". The default is empty. .IP \fBnice_priority\fR Modify scheduling priority of server processes.
Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH FILTER SECTION .PD 1 .RS 0 Any section that has its name prefixed by "filter:" indicates a filter section. Filters are used to specify configuration parameters for specific swift middlewares. Below are the filters available and their respective acceptable parameters. .IP "\fB[filter:healthcheck]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the healthcheck middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#healthcheck\fR. .IP "\fBdisable_path\fR" An optional filesystem path which, if present, will cause the healthcheck URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". .RE .RS 0 .IP "\fB[filter:recon]\fR" .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the recon middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#recon\fR. .IP "\fBrecon_cache_path\fR" The recon_cache_path simply sets the directory where stats for a few items will be stored. Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write access to it. The default is /var/cache/swift. .RE .PD .RS 0 .IP "\fB[filter:xprofile]\fR" .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#xprofile\fR. .IP "\fBprofile_module\fR" This option enables you to switch profilers, which should inherit from the python standard profiler. Currently the supported values are 'cProfile', 'eventlet.green.profile', etc. .IP "\fBlog_filename_prefix\fR" This prefix will be combined with the process ID and a timestamp to name the profile data file. Make sure the executing user has permission to write into this path (missing path segments will be created, if necessary). If you enable profiling in more than one type of daemon, you must override it with a unique value for each. The default is /var/log/swift/profile/account.profile. .IP "\fBdump_interval\fR" The profile data will be dumped to local disk based on the above naming rule in this interval. The default is 5.0. .IP "\fBdump_timestamp\fR" Be careful: this option will enable the profiler to dump data into files with timestamps, which means lots of files will pile up in the directory. The default is false. .IP "\fBpath\fR" This is the path of the URL to access the mini web UI. The default is __profile__. .IP "\fBflush_at_shutdown\fR" Clear the data when the wsgi server shuts down. The default is false. .IP "\fBunwind\fR" Unwind the iterator of applications. Default is false. .RE .PD .SH ADDITIONAL SECTIONS .PD 1 .RS 0 The following sections are used by other swift-account services, such as the replicator, auditor and reaper. .IP "\fB[account-replicator]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is account-replicator.
.IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBper_diff\fR Maximum number of database rows that will be sync'd in a single HTTP replication request. The default is 1000. .IP \fBmax_diffs\fR This caps how long the replicator will spend trying to sync a given database per pass so the other databases don't get starved. The default is 100. .IP \fBconcurrency\fR Number of replication workers to spawn. The default is 8. .IP "\fBrun_pause [deprecated]\fR" Time in seconds to wait between replication passes. The default is 30. .IP \fBinterval\fR Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBreclaim_age\fR Time elapsed in seconds before an account can be reclaimed. The default is 604800 seconds. .IP \fBrsync_compress\fR Allow rsync to compress data which is transmitted to the destination node during sync. However, this is applicable only when the destination node is in a different region than the local one. The default is false. .IP \fBrsync_module\fR Format of the rsync module where the replicator will send data. See etc/rsyncd.conf-sample for some usage examples. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .RS 0 .IP "\fB[account-auditor]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is account-auditor. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBinterval\fR Will audit, at most, 1 account per device per interval. The default is 1800 seconds. .IP \fBaccounts_per_second\fR Maximum accounts audited per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 200. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority.
.IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .RS 0 .IP "\fB[account-reaper]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is account-reaper. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBconcurrency\fR Number of reaper workers to spawn. The default is 25. .IP \fBinterval\fR Minimum time for a pass to take. The default is 3600 seconds. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBdelay_reaping\fR Normally, the reaper begins deleting account information for deleted accounts immediately; however, you can set this to delay its work. The value is in seconds. The default is 0. The sum of this value and the container-updater interval should be less than the account-replicator reclaim_age. This ensures that once the account-reaper has deleted a container there is sufficient time for the container-updater to report to the account before the account DB is removed. .IP \fBreap_warn_after\fR If the account fails to be reaped due to a persistent error, the account reaper will log a message such as: Account <name> has not been reaped since <time> You can search logs for this message if space is not being reclaimed after you delete account(s). Default is 2592000 seconds (30 days). This is in addition to any time requested by delay_reaping. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH DOCUMENTATION .LP More in depth documentation about the swift-account-server and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-account-server(1) swift-2.17.0/doc/manpages/swift-ring-builder.10000666000175100017510000001742713236061617021132 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied.
.\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-ring-builder 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-ring-builder \- OpenStack Swift ring builder .SH SYNOPSIS .LP .B swift-ring-builder <...> .SH DESCRIPTION .PP The swift-ring-builder utility is used to create, search and manipulate the swift storage ring. The ring-builder assigns partitions to devices and writes an optimized Python structure to a gzipped, pickled file on disk for shipping out to the servers. The server processes just check the modification time of the file occasionally and reload their in-memory copies of the ring structure as needed. Because of how the ring-builder manages changes to the ring, using a slightly older ring usually just means one of the three replicas for a subset of the partitions will be incorrect, which can be easily worked around. .PP The ring-builder also keeps its own builder file with the ring information and additional data required to build future rings. It is very important to keep multiple backup copies of these builder files. One option is to copy the builder files out to every server while copying the ring files themselves. Another is to upload the builder files into the cluster itself. Complete loss of a builder file will mean creating a new ring from scratch, nearly all partitions will end up assigned to different devices, and therefore nearly all data stored will have to be replicated to new locations. So, recovery from a builder file loss is possible, but data will definitely be unreachable for an extended time. .PP If invoked as 'swift-ring-builder-safe' the directory containing the builder file provided will be locked (via a .lock file in the files parent directory). This provides a basic safe guard against multiple instances of the swift-ring-builder (or other utilities that observe this lock) from attempting to write to or read the builder/ring files while operations are in progress. This can be useful in environments where ring management has been automated but the operator still needs to interact with the rings manually. .SH SEARCH .PD 0 .IP "\fB\fR" .RS 5 .IP "Can be of the form:" .IP "drz-:/_" .IP "Any part is optional, but you must include at least one, examples:" .RS 3 .IP "d74 Matches the device id 74" .IP "z1 Matches devices in zone 1" .IP "z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4" .IP "1.2.3.4 Matches devices in any zone with the ip 1.2.3.4" .IP "r1z1:5678 Matches devices in zone 1 present in region 1 using port 5678" .IP "z1:5678 Matches devices in zone 1 using port 5678" .IP ":5678 Matches devices that use port 5678" .IP "/sdb1 Matches devices with the device name sdb1" .IP "_shiny Matches devices with shiny in the meta data" .IP "_'snet: 5.6.7.8' Matches devices with snet: 5.6.7.8 in the meta data" .IP "[::1] Matches devices in any zone with the ip ::1" .IP "z1-[::1]:5678 Matches devices in zone 1 with ip ::1 and port 5678" .RE Most specific example: .RS 3 d74z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8" .RE Nerd explanation: .RS 3 .IP "All items require their single character prefix except the ip, in which case the - is optional unless the device id or zone is also included." .RE .RE .PD .SH OPTIONS .TP .I "\-y, \-\-yes" Assume a yes response to all questions .SH COMMANDS .PD 0 .IP "\fB\fR" .RS 5 Shows information about the ring and the devices within. .RE .IP "\fBsearch\fR " .RS 5 Shows information about matching devices. 
.RE .IP "\fBadd\fR z-:/_ " .IP "\fBadd\fR rz-:/_ " .IP "\fBadd\fR -r -z -i -p -d -m -w " .RS 5 Adds a device to the ring with the given information. No partitions will be assigned to the new device until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. .RE .IP "\fBcreate\fR " .RS 5 Creates with 2^ partitions and . is number of hours to restrict moving a partition more than once. .RE .IP "\fBlist_parts\fR [] .." .RS 5 Returns a 2 column list of all the partitions that are assigned to any of the devices matching the search values given. The first column is the assigned partition number and the second column is the number of device matches for that partition. The list is ordered from most number of matches to least. If there are a lot of devices to match against, this command could take a while to run. .RE .IP "\fBrebalance\fR" .RS 5 Attempts to rebalance the ring by reassigning partitions that haven't been recently reassigned. .RE .IP "\fBremove\fR " .RS 5 Removes the device(s) from the ring. This should normally just be used for a device that has failed. For a device you wish to decommission, it's best to set its weight to 0, wait for it to drain all its data, then use this remove command. This will not take effect until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. .RE .IP "\fBset_info\fR :/_" .RS 5 Resets the device's information. This information isn't used to assign partitions, so you can use 'write_ring' afterward to rewrite the current ring with the newer device information. Any of the parts are optional in the final :/_ parameter; just give what you want to change. For instance set_info d74 _"snet: 5.6.7.8" would just update the meta data for device id 74. .RE .IP "\fBset_min_part_hours\fR " .RS 5 Changes the to the given . This should be set to however long a full replication/update cycle takes. We're working on a way to determine this more easily than scanning logs. .RE .IP "\fBset_weight\fR " .RS 5 Resets the device's weight. No partitions will be reassigned to or from the device until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. .RE .IP "\fBvalidate\fR" .RS 5 Just runs the validation routines on the ring. .RE .IP "\fBwrite_ring\fR" .RS 5 Just rewrites the distributable ring file. This is done automatically after a successful rebalance, so really this is only useful after one or more 'set_info' calls when no rebalance is needed but you want to send out the new device information. .RE \fBQuick list:\fR add create list_parts rebalance remove search set_info set_min_part_hours set_weight validate write_ring \fBExit codes:\fR 0 = ring changed, 1 = ring did not change, 2 = error .PD .SH DOCUMENTATION .LP More in depth documentation about the swift ring and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/overview_ring.html .BI https://docs.openstack.org/swift/latest/admin_guide.html#managing-the-rings and .BI https://docs.openstack.org/swift/latest/ swift-2.17.0/doc/manpages/container-sync-realms.conf.50000666000175100017510000001124513236061617022562 0ustar zuulzuul00000000000000.\" .\" Author: HCLTech-SSW .\" Copyright (c) 2010-2017 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. 
.\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH container-sync-realms.conf 5 "10/09/2017" "Linux" "OpenStack Swift" .SH NAME .LP .B container-sync-realms.conf \- configuration file for the OpenStack Swift container sync realms .SH SYNOPSIS .LP .B container-sync-realms.conf .SH DESCRIPTION .PP This is the configuration file used by the Object storage Swift to perform container to container synchronization. This configuration file is used to configure clusters to allow/accept sync requests to/from other clusters. Using this configuration file, the user specifies where to sync their container to along with a secret synchronization key. You can find more information about container to container synchronization at \fIhttps://docs.openstack.org/swift/latest/overview_container_sync.html\fR The configuration file follows the python-pastedeploy syntax. The file is divided into sections, which are enclosed by square brackets. Each section will contain a certain number of key/value parameters which are described later. Any line that begins with a '#' symbol is ignored. You can find more information about python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION .PD 1 .RS 0 This is indicated by section named [DEFAULT]. Below are the parameters that are acceptable within this section. .IP "\fBmtime_check_interval\fR" The number of seconds between checking the modified time of this config file for changes and therefore reloading it. The default value is 300. .RE .PD .SH REALM SECTIONS .PD 1 .RS 0 Each section name is the name of a sync realm, for example [realm1]. A sync realm is a set of clusters that have agreed to allow container syncing with each other. Realm names will be considered case insensitive. Below are the parameters that are acceptable within this section. .IP "\fBcluster_clustername1\fR" Any values in the realm section whose name begin with cluster_ will indicate the name and endpoint of a cluster and will be used by external users in their container's X-Container-Sync-To metadata header values with the format as "realm_name/cluster_name/container_name". The Realm and cluster names are considered to be case insensitive. .IP "\fBcluster_clustername2\fR" Any values in the realm section whose name begin with cluster_ will indicate the name and endpoint of a cluster and will be used by external users in their container's X-Container-Sync-To metadata header values with the format as "realm_name/cluster_name/container_name". The Realm and cluster names are considered to be case insensitive. The endpoint is what the container sync daemon will use when sending out requests to that cluster. Keep in mind this endpoint must be reachable by all container servers, since that is where the container sync daemon runs. Note that the endpoint ends with /v1/ and that the container sync daemon will then add the account/container/obj name after that. .IP "\fBkey\fR" The key is the overall cluster-to-cluster key used in combination with the external users' key that they set on their containers' X-Container-Sync-Key metadata header values. 
These keys will be used to sign each request the container sync daemon makes and to validate each incoming container sync request. .IP "\fBkey2\fR" key2 is optional and is an additional key incoming requests will be checked against. This is so you can rotate keys if you wish; you move the existing key to key2 and make a new key value. .RE .PD .SH EXAMPLE .nf .RS 0 [DEFAULT] mtime_check_interval = 300 [realm1] key = realm1key key2 = realm1key2 cluster_clustername1 = https://host1/v1/ cluster_clustername2 = https://host2/v1/ [realm2] key = realm2key key2 = realm2key2 cluster_clustername3 = https://host3/v1/ cluster_clustername4 = https://host4/v1/ .RE .fi .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-container-sync and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/overview_container_sync.html and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-container-sync(1) swift-2.17.0/doc/manpages/swift-object-updater.10000666000175100017510000000475213236061617021464 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-object-updater 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-object-updater \- OpenStack Swift object updater .SH SYNOPSIS .LP .B swift-object-updater [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP The object updater is responsible for updating object information in container listings. It will check to see if there are any locally queued updates on the filesystem of each device, also known as async pending files, walk each one and update the container listing. For example, suppose a container server is under load and a new object is put into the system. The object will be immediately available for reads as soon as the proxy server responds to the client with success. However, the object server may not have been able to update the object listing in the container server; in that case, the update is queued locally for a later attempt. Container listings, therefore, may not immediately contain the object. This is where an eventual consistency window will most likely come into play. In practice, the consistency window is only as large as the frequency at which the updater runs, and it may not even be noticed, as the proxy server will route listing requests to the first container server which responds. The server under load may not be the one that serves subsequent listing requests; one of the other two replicas may handle the listing.
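.PP As a rough illustration (the device name sdb1 and the default /srv/node devices path are assumptions for the example, not fixed values), queued updates can be inspected on disk and drained with a single verbose pass of the daemon: .PD 0 .RS 3 .IP "ls /srv/node/sdb1/async_pending*" .IP "swift-object-updater /etc/swift/object-server.conf -o -v" .RE .PD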
The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-object-updater and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR object-server.conf(5) swift-2.17.0/doc/manpages/swift-container-reconciler.10000666000175100017510000000336213236061617022655 0ustar zuulzuul00000000000000.\" .\" Copyright (c) 2016 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH SWIFT-CONTAINER-RECONCILER "1" "August 2016" "OpenStack Swift" .SH NAME swift\-container\-reconciler \- OpenStack Swift container reconciler .SH SYNOPSIS .B swift\-container\-reconciler \fICONFIG \fR[\fIoptions\fR] .SH DESCRIPTION .PP This daemon will take objects that are in the wrong storage policy and move them to the right one, or take delete requests that went to the wrong storage policy and apply them to the right one. It operates on a queue similar to the object-expirer's queue. Discovering that an object is in the wrong policy is done by the container replicator; the container reconciler is the daemon that handles such objects once they are discovered. Like the object expirer, you only need to run one of these per cluster. .SH OPTIONS .TP \fB\-h\fR, \fB\-\-help\fR Show this help message and exit .TP \fB\-v\fR, \fB\-\-verbose\fR Log to console .TP \fB\-o\fR, \fB\-\-once\fR Only run one pass of daemon .PP .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift\-container\-reconciler and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org swift-2.17.0/doc/manpages/swift-reconciler-enqueue.10000666000175100017510000000311413236061617022335 0ustar zuulzuul00000000000000.\" .\" Copyright (c) 2016 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH SWIFT-RECONCILER-ENQUEUE "1" "August 2016" "OpenStack Swift" .SH NAME swift\-reconciler\-enqueue \- OpenStack Swift reconciler enqueue .SH SYNOPSIS .B swift\-reconciler\-enqueue \fIpolicy_index\fR \fI/a/c/o\fR \fItimestamp\fR \fR[\fIoptions\fR] .SH DESCRIPTION .PP This script enqueues an object to be evaluated by the reconciler. .SH OPTIONS .TP \fIpolicy_index\fR The policy the object is currently stored in.
.TP \fI/a/c/o\fR The full path of the object \- UTF\-8 .TP \fItimestamp\fR The timestamp of the datafile/tombstone. .TP \fB\-h\fR, \fB\-\-help\fR Show this help message and exit .TP \fB\-X\fR \fIOP\fR, \fB\-\-op\fR=\fIOP\fR The method of the misplaced operation .TP \fB\-f\fR, \fB\-\-force\fR Force an object to be re\-enqueued .PP .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift\-reconciler\-enqueue and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org swift-2.17.0/doc/manpages/swift-recon-cron.10000666000175100017510000000217613236061617020617 0ustar zuulzuul00000000000000.\" .\" Copyright (c) 2016 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH SWIFT-RECON-CRON "1" "August 2016" "OpenStack Swift" .SH NAME swift\-recon\-cron \- OpenStack Swift recon cron job .SH SYNOPSIS .B swift\-recon\-cron \fI<config_file>\fR .SH DESCRIPTION .PP Tool that can be run from cron to fill the recon cache. Recon data can be read with the \fBswift-recon\fR tool. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift\-recon\-cron and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org swift-2.17.0/doc/manpages/swift-container-auditor.10000666000175100017510000000315613236061617022200 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-container-auditor 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-container-auditor \- OpenStack Swift container auditor .SH SYNOPSIS .LP .B swift-container-auditor [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP The container auditor crawls the local container system checking the integrity of container objects. If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica.
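.PP For example, assuming the default configuration path, a single verbose audit pass could be run with: .PD 0 .RS 3 .IP "swift-container-auditor /etc/swift/container-server.conf -o -v" .RE .PD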
The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-container-auditor and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR container-server.conf(5) swift-2.17.0/doc/manpages/swift-account-reaper.10000666000175100017510000000356713236061617021463 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-account-reaper 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-account-reaper \- OpenStack Swift account reaper .SH SYNOPSIS .LP .B swift-account-reaper [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP Removes data from status=DELETED accounts. These are accounts that have been asked to be removed by the reseller via the services remove_storage_account XMLRPC call. .PP The account is not deleted immediately by the services call; instead the account is simply marked for deletion by setting the status column in the account_stat table of the account database. The account reaper scans for such accounts and removes the data in the background. The background deletion process will occur on the primary account server for the account. The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-account-reaper and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR account-server.conf(5) swift-2.17.0/doc/manpages/swift-dispersion-report.10000666000175100017510000001001013236061617022226 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License.
.\" .TH swift-dispersion-report 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-dispersion-report \- OpenStack Swift dispersion report .SH SYNOPSIS .LP .B swift-dispersion-report [-d|--debug] [-j|--dump-json] [-p|--partitions] [--container-only|--object-only] [--insecure] [conf_file] .SH DESCRIPTION .PP This is one of the swift-dispersion utilities that is used to evaluate the overall cluster health. This is accomplished by checking if a set of deliberately distributed containers and objects are currently in their proper places within the cluster. .PP For instance, a common deployment has three replicas of each object. The health of that object can be measured by checking if each replica is in its proper place. If only 2 of the 3 is in place the object's health can be said to be at 66.66%, where 100% would be perfect. .PP Once the \fBswift-dispersion-populate\fR has been used to populate the dispersion account, one should run the \fBswift-dispersion-report\fR tool repeatedly for the life of the cluster, in order to check the health of each of these containers and objects. .PP These tools need direct access to the entire cluster and to the ring files. Installing them on a proxy server will probably do or a box used for swift administration purposes that also contains the common swift packages and ring. Both \fBswift-dispersion-populate\fR and \fBswift-dispersion-report\fR use the same configuration file, /etc/swift/dispersion.conf . The account used by these tool should be a dedicated account for the dispersion stats and also have admin privileges. .SH OPTIONS .RS 0 .PD 1 .IP "\fB-d, --debug\fR" output any 404 responses to standard error .IP "\fB-j, --dump-json\fR" output dispersion report in json format .IP "\fB-p, --partitions\fR" output the partition numbers that have any missing replicas .IP "\fB--container-only\fR" Only run the container report .IP "\fB--object-only\fR" Only run the object report .IP "\fB--insecure\fR" Allow accessing insecure keystone server. The keystone's certificate will not be verified. 
.IP "\fB-P, --policy-name\fR" Specify storage policy name .SH CONFIGURATION .PD 0 Example \fI/etc/swift/dispersion.conf\fR: .RS 3 .IP "[dispersion]" .IP "auth_url = https://127.0.0.1:443/auth/v1.0" .IP "auth_user = dpstats:dpstats" .IP "auth_key = dpstats" .IP "swift_dir = /etc/swift" .IP "# project_name = dpstats" .IP "# project_domain_name = default" .IP "# user_domain_name = default" .IP "# dispersion_coverage = 1.0" .IP "# retries = 5" .IP "# concurrency = 25" .IP "# dump_json = no" .IP "# endpoint_type = publicURL" .RE .PD .SH EXAMPLE .PP .PD 0 $ swift-dispersion-report .RS 1 .IP "Queried 2622 containers for dispersion reporting, 31s, 0 retries" .IP "100.00% of container copies found (7866 of 7866)" .IP "Sample represents 1.00% of the container partition space" .IP "Queried 2621 objects for dispersion reporting, 22s, 0 retries" .IP "100.00% of object copies found (7863 of 7863)" .IP "Sample represents 1.00% of the object partition space" .RE .PD .SH DOCUMENTATION .LP More in depth documentation about the swift-dispersion utilities and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html#dispersion-report and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-dispersion-populate(1), .BR dispersion.conf(5) swift-2.17.0/doc/manpages/swift-account-auditor.10000666000175100017510000000313313236061617021645 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-account-auditor 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-account-auditor \- OpenStack Swift account auditor .SH SYNOPSIS .LP .B swift-account-auditor [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP The account auditor crawls the local account system checking the integrity of accounts objects. If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica. The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-account-auditor and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR account-server.conf(5) swift-2.17.0/doc/manpages/swift-account-server.10000666000175100017510000000260013236061617021502 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. 
.\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-account-server 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-account-server \- OpenStack Swift account server .SH SYNOPSIS .LP .B swift-account-server [CONFIG] [-h|--help] [-v|--verbose] .SH DESCRIPTION .PP The Account Server's primary job is to handle listings of containers. The listings are stored as sqlite database files, and replicated across the cluster similar to how objects are. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-account-server and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org .SH "SEE ALSO" .BR account-server.conf(5) swift-2.17.0/doc/manpages/swift-container-info.10000666000175100017510000000405613236061617021464 0ustar zuulzuul00000000000000.\" .\" Author: Madhuri Kumari .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-container-info 1 "10/25/2016" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-container-info \- OpenStack Swift container-info tool .SH SYNOPSIS .LP .B swift-container-info [options] .SH DESCRIPTION .PP This is a very simple swift tool that allows a swiftop engineer to retrieve information about a container that is located on the storage node. One calls the tool with a given container db file as it is stored on the storage node system. It will then return several information about that container such as; .PD 0 .IP "- Account it belongs to" .IP "- Container " .IP "- Created timestamp " .IP "- Put timestamp " .IP "- Delete timestamp " .IP "- Object count " .IP "- Bytes used " .IP "- Reported put timestamp " .IP "- Reported delete timestamp " .IP "- Reported object count " .IP "- Reported bytes used " .IP "- Hash " .IP "- ID " .IP "- User metadata " .IP "- X-Container-Sync-Point 1 " .IP "- X-Container-Sync-Point 2 " .IP "- Location on the ring " .PD .SH OPTIONS .TP \fB\-h, --help \fR Shows the help message and exit .TP \fB\-d SWIFT_DIR, --swift-dir=SWIFT_DIR\fR Pass location of swift configuration file if different from the default location /etc/swift .SH DOCUMENTATION .LP More documentation about OpenStack Swift can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-get-nodes(1), .BR swift-object-info(1) swift-2.17.0/doc/manpages/swift-oldies.10000666000175100017510000000313013236061617020020 0ustar zuulzuul00000000000000.\" .\" Author: Paul Dardeau .\" Copyright (c) 2016 OpenStack Foundation. 
.\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-oldies 1 "8/04/2016" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-oldies \- OpenStack Swift oldies tool .SH SYNOPSIS .LP .B swift-oldies [-h|--help] [-a|--age] .SH DESCRIPTION .PP Lists Swift processes that have been running more than a specific length of time (in hours). This is done by scanning the list of currently executing processes (via ps command) and examining the execution time of those python processes whose program names begin with 'swift-'. Example (see all Swift processes older than two days): swift-oldies \-a 48 The options are as follows: .RS 4 .PD 0 .IP "-a HOURS" .IP "--age=HOURS" .RS 4 .IP "Look for processes at least HOURS old; default: 720 (30 days)" .RE .PD 0 .IP "-h" .IP "--help" .RS 4 .IP "Display program help and exit" .PD .RE .SH DOCUMENTATION .LP More documentation about OpenStack Swift can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-orphans(1) swift-2.17.0/doc/manpages/swift-account-replicator.10000666000175100017510000000415413236061617022346 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-account-replicator 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-account-replicator \- OpenStack Swift account replicator .SH SYNOPSIS .LP .B swift-account-replicator [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP Replication is designed to keep the system in a consistent state in the face of temporary error conditions like network outages or drive failures. The replication processes compare local data with each remote copy to ensure they all contain the latest version. Account replication uses a combination of hashes and shared high water marks to quickly compare subsections of each partition. .PP Replication updates are push based. Account replication push missing records over HTTP or rsync whole database files. The replicator also ensures that data is removed from the system. When an account item is deleted a tombstone is set as the latest version of the item. The replicator will see the tombstone and ensure that the item is removed from the entire system. 
The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-account-replicator and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR account-server.conf(5) swift-2.17.0/doc/manpages/swift-container-sync.10000666000175100017510000000361513236061617021505 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-container-sync 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-container-sync \- OpenStack Swift container sync .SH SYNOPSIS .LP .B swift-container-sync [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP Swift has a feature where all the contents of a container can be mirrored to another container through background synchronization. Swift cluster operators configure their cluster to allow/accept sync requests to/from other clusters, and the user specifies where to sync their container, along with a secret synchronization key. .PP The swift-container-sync daemon does the job of sending updates to the remote container. This is done by scanning the local devices for container databases and checking for x-container-sync-to and x-container-sync-key metadata values. If they exist, newer rows since the last sync will trigger PUTs or DELETEs to the other container. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-container-sync and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/overview_container_sync.html and .BI https://docs.openstack.org .LP .SH "SEE ALSO" .BR container-server.conf(5) swift-2.17.0/doc/manpages/object-expirer.conf.50000666000175100017510000002207113236061617021266 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins .\" Copyright (c) 2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH object-expirer.conf 5 "03/15/2012" "Linux" "OpenStack Swift" .SH NAME .LP .B object-expirer.conf \- configuration file for the OpenStack Swift object expirer daemon .SH SYNOPSIS .LP .B object-expirer.conf .SH DESCRIPTION .PP This is the configuration file used by the object expirer daemon.
The daemon's function is to query the internal hidden expiring_objects_account to discover objects that need to be deleted and then delete them. The configuration file follows the python-pastedeploy syntax. The file is divided into sections, which are enclosed by square brackets. Each section will contain a certain number of key/value parameters which are described later. Any line that begins with a '#' symbol is ignored. You can find more information about the python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION .PD 1 .RS 0 This is indicated by the section named [DEFAULT]. Below are the parameters that are acceptable within this section. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBuser\fR The system user that the object expirer will run as. The default is swift. .IP \fBlog_name\fR Label used when logging. The default is swift. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBlog_max_line_length\fR Caps the length of log lines to the value given; no limit if set to 0, the default. .IP \fBlog_custom_handlers\fR Comma separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger. The default is empty. .IP \fBlog_udp_host\fR If set, log_udp_host will override log_address. .IP "\fBlog_udp_port\fR" UDP log port, the default is 514. .IP \fBlog_statsd_host\fR StatsD server. IPv4/IPv6 addresses and hostnames are supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. .IP \fBlog_statsd_port\fR The default is 8125. .IP \fBlog_statsd_default_sample_rate\fR The default is 1. .IP \fBlog_statsd_sample_rate_factor\fR The default is 1. .IP \fBlog_statsd_metric_prefix\fR The default is empty. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only together with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only together with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH PIPELINE SECTION .PD 1 .RS 0 This is indicated by the section name [pipeline:main]. Below are the parameters that are acceptable within this section. .IP "\fBpipeline\fR" It is used when you need to apply a number of filters. It is a list of filters ended by an application. The default is \fB"catch_errors cache proxy-server"\fR .RE .PD .SH APP SECTION .PD 1 .RS 0 This is indicated by the section name [app:proxy-server]. Below are the parameters that are acceptable within this section. .IP "\fBuse\fR" Entry point for paste.deploy for the proxy server. This is the reference to the installed python egg. The default is \fBegg:swift#proxy\fR. See proxy-server.conf-sample for options, or see the proxy-server.conf manpage. .IP \fBnice_priority\fR Modify scheduling priority of server processes.
Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only together with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only together with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH FILTER SECTION .PD 1 .RS 0 Any section that has its name prefixed by "filter:" indicates a filter section. Filters are used to specify configuration parameters for specific swift middlewares. Below are the filters available and respective acceptable parameters. .RS 0 .IP "\fB[filter:cache]\fR" .RE Caching middleware that manages caching in swift. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the memcache middleware. This is the reference to the installed python egg. The default is \fBegg:swift#memcache\fR. See proxy-server.conf-sample for options, or see the proxy-server.conf manpage. .RE .RS 0 .IP "\fB[filter:catch_errors]\fR" .RE .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the catch_errors middleware. This is the reference to the installed python egg. The default is \fBegg:swift#catch_errors\fR. See proxy-server.conf-sample for options, or see the proxy-server.conf manpage. .RE .RS 0 .IP "\fB[filter:proxy-logging]\fR" .RE Logging for the proxy server now lives in this middleware. If the access_* variables are not set, logging directives from [DEFAULT] without "access_" will be used. .RS 3 .IP \fBuse\fR Entry point for paste.deploy for the proxy_logging middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#proxy_logging\fR. See proxy-server.conf-sample for options, or see the proxy-server.conf manpage. .RE .PD .SH OBJECT EXPIRER SECTION .PD 1 .RS 0 .IP "\fB[object-expirer]\fR" .RE .RS 3 .IP \fBinterval\fR Replaces run_pause with the more standard "interval", which means the expirer won't pause unless a pass takes less than the interval set. The default is 300. .IP "\fBauto_create_account_prefix\fR" The default is ".". .IP \fBexpiring_objects_account_name\fR The default is 'expiring_objects'. .IP \fBreport_interval\fR The default is 300 seconds. .IP \fBrequest_tries\fR The number of times the expirer's internal client will attempt any given request in the event of failure. The default is 3. .IP \fBconcurrency\fR Number of expirer workers to spawn. The default is 1. .IP \fBprocesses\fR Processes is how many parts to divide the work into, one part per process that will be doing the work. Setting processes to 0 means that a single process will do all the work. Processes can also be specified on the command line and will override the config value. The default is 0. .IP \fBprocess\fR Process is which of the parts a particular process will work on. It can also be specified on the command line, overriding the config value. Process is zero-based; if you want to use 3 processes, you should run three processes with process set to 0, 1, and 2. The default is 0. .IP \fBreclaim_age\fR If the source object is not available, the expirer will re-attempt expiring for up to reclaim_age seconds before it gives up and deletes the entry in the queue. The default is 604800 seconds.
.IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only together with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only together with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH DOCUMENTATION .LP More in depth documentation about the swift-object-expirer and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR proxy-server.conf(5) swift-2.17.0/doc/manpages/swift-form-signature.10000666000175100017510000000356213236061617021514 0ustar zuulzuul00000000000000.\" .\" Copyright (c) 2016 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH SWIFT-FORM-SIGNATURE "1" "August 2016" "OpenStack Swift" .SH NAME swift\-form\-signature \- compute the expires and signature for OpenStack Swift Form POST middleware .SH SYNOPSIS .B swift\-form\-signature \fIpath\fR \fIredirect\fR \fImax_file_size\fR \fImax_file_count\fR \fIseconds\fR \fIkey\fR .SH DESCRIPTION .PP Tool to compute the expires and signature values needed to upload objects directly to Swift from a browser, using the form POST middleware. .SH OPTIONS .TP .I path The prefix to use for form uploaded objects. For example: \fI/v1/account/container/object_prefix_\fP would ensure all form uploads have that path prepended to the browser\-given file name. .TP .I redirect The URL to redirect the browser to after the uploads have completed. .TP .I max_file_size The maximum file size per file uploaded. .TP .I max_file_count The maximum number of uploaded files allowed. .TP .I seconds The number of seconds from now to allow the form post to begin. .TP .I key The X\-Account\-Meta\-Temp\-URL\-Key for the account. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift\-form\-signature and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org swift-2.17.0/doc/manpages/container-reconciler.conf.50000666000175100017510000001153313236061617022452 0ustar zuulzuul00000000000000.\" .\" Author: HCLTech-SSW .\" Copyright (c) 2010-2017 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License.
.\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH container-reconciler.conf 5 "10/25/2017" "Linux" "OpenStack Swift" .SH NAME .LP .B container-reconciler.conf \- configuration file for the OpenStack Swift container reconciler .SH SYNOPSIS .LP .B container-reconciler.conf .SH DESCRIPTION .PP This is the configuration file used by the container reconciler. The configuration file follows the python-pastedeploy syntax. The file is divided into sections, which are enclosed by square brackets. Each section will contain a certain number of key/value parameters which are described later. Any line that begins with a '#' symbol is ignored. You can find more information about python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION .PD 1 .RS 0 This is indicated by section named [DEFAULT]. Below are the parameters that are acceptable within this section. .IP "\fBlog_address\fR" Location where syslog sends the logs to. The default is /dev/log. .IP "\fBlog_custom_handlers \fR" Comma-separated list of functions to call to setup custom log handlers. .IP "\fBlog_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBlog_level\fR" Log level used for logging. The default is INFO. .IP "\fBlog_name\fR" Label used when logging. The default is swift. .IP "\fBlog_statsd_default_sample_rate\fR" Defines the probability of sending a sample for any given event or timing measurement. The default is 1.0. .IP "\fBlog_statsd_host\fR" If not set, the StatsD feature is disabled. The default is localhost. .IP "\fBlog_statsd_metric_prefix\fR" Value will be prepended to every metric sent to the StatsD server. .IP "\fBlog_statsd_port\fR" The port value for the StatsD server. The default is 8125. .IP "\fBlog_statsd_sample_rate_factor\fR" It is not recommended to set this to a value less than 1.0, if frequency of logging is too high, tune the log_statsd_default_sample_rate instead. The default value is 1.0. .IP "\fBlog_udp_host\fR" If not set, the UDP receiver for syslog is disabled. .IP "\fBlog_udp_port\fR" Port value for UDP receiver, if enabled. The default is 514. .IP "\fBswift_dir\fR" Swift configuration directory. The default is /etc/swift. .IP "\fBuser\fR" User to run as. The default is swift. .RE .PD .SH CONTAINER RECONCILER SECTION .PD 1 .RS 0 .IP "\fB[container-reconciler]\fR" .RE .RS 3 .IP "\fBinterval\fR" Minimum time for a pass to take. The default is 30 seconds. .IP "\fBreclaim_age\fR" Time elapsed in seconds before an object can be reclaimed. The default is 604800 seconds. .IP "\fBrequest_tries\fR" Server errors from requests will be retried by default. The default is 3. .RE .PD .SH PIPELINE SECTION .PD 1 .RS 0 .IP "\fB[pipeline:main]\fR" .RE .RS 3 .IP "\fBpipeline\fR" Pipeline to use for processing operations. The default is "catch_errors proxy-logging cache proxy-server". .RE .PD .SH APP SECTION .PD 1 .RS 0 \fBFor details of the available options see proxy-server.conf.5.\fR .RS 0 .IP "\fB[app:proxy-server]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy in the server. This is normally \fBegg:swift#proxy\fR. 
.RE .PD .SH FILTER SECTIONS .PD 1 .RS 0 Any section that has its name prefixed by "filter:" indicates a filter section. Filters are used to specify configuration parameters for specific swift middlewares. Below are the filters available and respective acceptable parameters. \fBFor details of the available options for each filter section see proxy-server.conf.5.\fR .RS 0 .IP "\fB[filter:cache]\fR" .RE Caching middleware that manages caching in swift. .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy in the server. This is normally \fBegg:swift#memcache\fR. .RE .PD .RS 0 .IP "\fB[filter:catch_errors]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy in the server. This is normally \fBegg:swift#catch_errors\fR. .RE .PD .RS 0 .IP "\fB[filter:proxy-logging]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy in the server. This is normally \fBegg:swift#proxy_logging\fR. .RE .PD .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-container-reconciler and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/overview_policies.html. .SH "SEE ALSO" .BR swift-container-reconciler(1) swift-2.17.0/doc/manpages/dispersion.conf.50000666000175100017510000000632213236061617020524 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH dispersion.conf 5 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B dispersion.conf \- configuration file for the OpenStack Swift dispersion tools .SH SYNOPSIS .LP .B dispersion.conf .SH DESCRIPTION .PP This is the configuration file used by the dispersion populate and report tools. The file format consists of a '[dispersion]' section header followed by the available parameters. Any line that begins with a '#' symbol is ignored. .SH PARAMETERS .PD 1 .RS 0 .IP "\fBauth_version\fR" Authentication system API version. The default is 1.0. .IP "\fBauth_url\fR" Authentication system URL .IP "\fBauth_user\fR" Authentication system account/user name .IP "\fBauth_key\fR" Authentication system account/user password .IP "\fBproject_name\fR" Project name in case of keystone auth version 3 .IP "\fBproject_domain_name\fR" Project domain name in case of keystone auth version 3 .IP "\fBuser_domain_name\fR" User domain name in case of keystone auth version 3 .IP "\fBendpoint_type\fR" The default is 'publicURL'. .IP "\fBkeystone_api_insecure\fR" The default is false. .IP "\fBswift_dir\fR" Location of OpenStack Swift configuration and ring files .IP "\fBdispersion_coverage\fR" Percentage of partition coverage to use. The default is 1.0. .IP "\fBretries\fR" Maximum number of attempts. The default is 5. .IP "\fBconcurrency\fR" Concurrency to use. The default is 25. .IP "\fBcontainer_populate\fR" The default is true. .IP "\fBobject_populate\fR" The default is true. .IP "\fBdump_json\fR" Whether to output in json format. The default is no.
.IP "\fBcontainer_report\fR" Whether to run the container report. The default is yes. .IP "\fBobject_report\fR" Whether to run the object report. The default is yes. .RE .PD .SH SAMPLE .PD 0 .RS 0 .IP "[dispersion]" .IP "auth_url = https://127.0.0.1:443/auth/v1.0" .IP "auth_user = dpstats:dpstats" .IP "auth_key = dpstats" .IP "swift_dir = /etc/swift" .IP "# keystone_api_insecure = no" .IP "# project_name = dpstats" .IP "# project_domain_name = default" .IP "# user_domain_name = default" .IP "# dispersion_coverage = 1.0" .IP "# retries = 5" .IP "# concurrency = 25" .IP "# dump_json = no" .IP "# container_report = yes" .IP "# object_report = yes" .RE .PD .SH DOCUMENTATION .LP More in depth documentation about the swift-dispersion utilities and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html#dispersion-report and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-dispersion-report(1), .BR swift-dispersion-populate(1) swift-2.17.0/doc/manpages/swift-object-server.10000666000175100017510000000400213236061617021312 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-object-server 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-object-server \- OpenStack Swift object server. .SH SYNOPSIS .LP .B swift-object-server [CONFIG] [-h|--help] [-v|--verbose] .SH DESCRIPTION .PP The Object Server is a very simple blob storage server that can store, retrieve and delete objects stored on local devices. Objects are stored as binary files on the filesystem with metadata stored in the file's extended attributes (xattrs). This requires that the underlying filesystem choice for object servers support xattrs on files. Some filesystems, like ext3, have xattrs turned off by default. Each object is stored using a path derived from the object name's hash and the operation's timestamp. Last write always wins, and ensures that the latest object version will be served. A deletion is also treated as a version of the file (a 0 byte file ending with ".ts", which stands for tombstone). This ensures that deleted files are replicated correctly and older versions don't magically reappear due to failure scenarios. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-object-server and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org .SH "SEE ALSO" .BR object-server.conf(5) swift-2.17.0/doc/manpages/swift-object-info.10000666000175100017510000000377113236061617020753 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. 
.\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-object-info 1 "10/25/2016" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-object-info \- OpenStack Swift object-info tool .SH SYNOPSIS .LP .B swift-object-info [options] .SH DESCRIPTION .PP This is a very simple swift tool that allows a swiftop engineer to retrieve information about an object that is located on the storage node. One calls the tool with a given object file as it is stored on the storage node system. It will then return several information about that object such as; .PD 0 .IP "- Account it belongs to" .IP "- Container " .IP "- Object hash " .IP "- Content Type " .IP "- timestamp " .IP "- Etag " .IP "- Content Length " .IP "- User Metadata " .IP "- Location on the ring " .PD .SH OPTIONS .TP \fB\-h --help \fR Shows the help message and exit .TP \fB\-n, --no-check-etag\fR Don't verify file contents against stored etag .TP \fB\-d SWIFT_DIR, --swift-dir=SWIFT_DIR\fR Pass location of swift configuration file if different from the default location /etc/swift .TP \fB\-P POLICY_NAME, --policy-name=POLICY_NAME \fR Specify storage policy name .SH DOCUMENTATION .LP More documentation about OpenStack Swift can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-account-info(1), .BR swift-container-info(1), .BR swift-get-nodes(1) swift-2.17.0/doc/manpages/swift-container-replicator.10000666000175100017510000000417613236061617022700 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-container-replicator 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-container-replicator \- OpenStack Swift container replicator .SH SYNOPSIS .LP .B swift-container-replicator [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP Replication is designed to keep the system in a consistent state in the face of temporary error conditions like network outages or drive failures. The replication processes compare local data with each remote copy to ensure they all contain the latest version. Container replication uses a combination of hashes and shared high water marks to quickly compare subsections of each partition. .PP Replication updates are push based. Container replication push missing records over HTTP or rsync whole database files. The replicator also ensures that data is removed from the system. When an container item is deleted a tombstone is set as the latest version of the item. 
The replicator will see the tombstone and ensure that the item is removed from the entire system. The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-container-replicator and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR container-server.conf(5) swift-2.17.0/doc/manpages/swift-object-reconstructor.10000666000175100017510000000365213236061617022730 0ustar zuulzuul00000000000000.\" .\" Copyright (c) 2016 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH SWIFT-OBJECT-RECONSTRUCTOR "1" "August 2016" "OpenStack Swift" .SH NAME swift\-object\-reconstructor \- OpenStack Swift EC object reconstructor .SH SYNOPSIS .B swift\-object\-reconstructor \fICONFIG \fR[\fIoptions\fR] .SH DESCRIPTION .PP Daemon for reconstruction of EC objects. Once a pair of nodes has determined the need to replace a missing object fragment, instead of pushing over a copy like replication would do, the reconstructor has to read in enough surviving fragments from other nodes and perform a local reconstruction before it has the correct data to push to the other node. .SH OPTIONS .TP \fB\-h\fR, \fB\-\-help\fR Show this help message and exit .TP \fB\-d\fR \fIDEVICES\fR, \fB\-\-devices\fR=\fIDEVICES\fR Reconstruct only given devices. Comma\-separated list. Only has effect if \-\-once is used. .TP \fB\-p\fR \fIPARTITIONS\fR, \fB\-\-partitions\fR=\fIPARTITIONS\fR Reconstruct only given partitions. Comma\-separated list. Only has effect if \-\-once is used. .TP \fB\-v\fR, \fB\-\-verbose\fR Log to console .TP \fB\-o\fR, \fB\-\-once\fR Only run one pass of daemon .PP .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift\-object\-reconstructor and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org swift-2.17.0/doc/manpages/swift-container-updater.10000666000175100017510000000423313236061617022172 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License.
.\" .TH swift-container-updater 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-container-updater \- OpenStack Swift container updater .SH SYNOPSIS .LP .B swift-container-updater [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] .SH DESCRIPTION .PP The container updater is responsible for updating container information in the account database. It will walk the container path in the system looking for container DBs and sending updates to the account server as needed as it goes along. There are times when account data can not be immediately updated. This usually occurs during failure scenarios or periods of high load. This is where an eventual consistency window will most likely come in to play. In practice, the consistency window is only as large as the frequency at which the updater runs and may not even be noticed as the proxy server will route listing requests to the first account server which responds. The server under load may not be the one that serves subsequent listing requests – one of the other two replicas may handle the listing. The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-container-updater and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR container-server.conf(5) swift-2.17.0/doc/manpages/swift-container-server.10000666000175100017510000000313113236061617022030 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-container-server 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-container-server \- OpenStack Swift container server .SH SYNOPSIS .LP .B swift-container-server [CONFIG] [-h|--help] [-v|--verbose] .SH DESCRIPTION .PP The Container Server's primary job is to handle listings of objects. It doesn't know where those objects are, just what objects are in a specific container. The listings are stored as sqlite database files, and replicated across the cluster similar to how objects are. Statistics are also tracked that include the total number of objects, and total storage usage for that container. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-container-server and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ and .BI https://docs.openstack.org .LP .SH "SEE ALSO" .BR container-server.conf(5) swift-2.17.0/doc/manpages/swift-object-auditor.10000666000175100017510000000333713236061617021465 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. 
.\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-object-auditor 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-object-auditor \- OpenStack Swift object auditor .SH SYNOPSIS .LP .B swift-object-auditor [CONFIG] [-h|--help] [-v|--verbose] [-o|--once] [-z|--zero_byte_fps] .SH DESCRIPTION .PP The object auditor crawls the local object system checking the integrity of objects. If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica. The options are as follows: .RS 4 .PD 0 .IP "-v" .IP "--verbose" .RS 4 .IP "log to console" .RE .IP "-o" .IP "--once" .RS 4 .IP "only run one pass of daemon" .RE .IP "-z ZERO_BYTE_FPS" .IP "--zero_byte_fps=ZERO_BYTE_FPS" .RS 4 .IP "Audit only zero byte files at specified files/sec" .RE .PD .RE .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-object-auditor and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR object-server.conf(5) swift-2.17.0/doc/manpages/container-server.conf.50000666000175100017510000004514413236061617021640 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2012 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. .\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH container-server.conf 5 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B container-server.conf \- configuration file for the OpenStack Swift container server .SH SYNOPSIS .LP .B container-server.conf .SH DESCRIPTION .PP This is the configuration file used by the container server and other container background services, such as; replicator, updater, auditor and sync. The configuration file follows the python-pastedeploy syntax. The file is divided into sections, which are enclosed by square brackets. Each section will contain a certain number of key/value parameters which are described later. Any line that begins with a '#' symbol is ignored. You can find more information about python-pastedeploy configuration format at \fIhttp://pythonpaste.org/deploy/#config-format\fR .SH GLOBAL SECTION .PD 1 .RS 0 This is indicated by section named [DEFAULT]. Below are the parameters that are acceptable within this section. .IP "\fBbind_ip\fR" IP address the container server should bind to. The default is 0.0.0.0 which will make it bind to all available addresses. .IP "\fBbind_port\fR" TCP port the container server should bind to. The default is 6201. .IP "\fBbind_timeout\fR" Timeout to bind socket. The default is 30. .IP \fBbacklog\fR TCP backlog. 
Maximum number of allowed pending connections. The default value is 4096. .IP \fBworkers\fR The number of pre-forked processes that will accept connections. Zero means no fork. The default is auto which will make the server try to match the number of effective cpu cores if python multiprocessing is available (included with most python distributions >= 2.6) or fall back to one. It's worth noting that individual workers will use many eventlet co-routines to service multiple concurrent requests. .IP \fBmax_clients\fR Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. The default is 1024. .IP \fBallowed_sync_hosts\fR This is a comma separated list of hosts allowed in the X-Container-Sync-To field for containers. This is the old style of using container sync. It is strongly recommended to use the new style of a separate container-sync-realms.conf -- see container-sync-realms.conf-sample. The default is 127.0.0.1. .IP \fBuser\fR The system user that the container server will run as. The default is swift. .IP \fBswift_dir\fR Swift configuration directory. The default is /etc/swift. .IP \fBdevices\fR Parent directory of where devices are mounted. Default is /srv/node. .IP \fBmount_check\fR Whether or not to check if the devices are mounted to prevent accidentally writing to the root device. The default is set to true. .IP \fBdisable_fallocate\fR Disable pre-allocation of disk space for a file. The default is false. .IP \fBlog_name\fR Label used when logging. The default is swift. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBlog_max_line_length\fR Caps the length of log lines to the value given; no limit if set to 0, the default. .IP \fBlog_custom_handlers\fR Comma separated list of functions to call to set up custom log handlers. Functions get passed: conf, name, log_to_console, log_route, fmt, logger, adapted_logger. The default is empty. .IP \fBlog_udp_host\fR If set, log_udp_host will override log_address. .IP "\fBlog_udp_port\fR" UDP log port, the default is 514. .IP \fBlog_statsd_host\fR StatsD server. IPv4/IPv6 addresses and hostnames are supported. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. .IP \fBlog_statsd_port\fR The default is 8125. .IP \fBlog_statsd_default_sample_rate\fR The default is 1. .IP \fBlog_statsd_sample_rate_factor\fR The default is 1. .IP \fBlog_statsd_metric_prefix\fR The default is empty. .IP \fBdb_preallocation\fR If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation. The default is false. .IP \fBeventlet_debug\fR Debug mode for eventlet library. The default is false. .IP \fBfallocate_reserve\fR You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. The default is 1%. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes.
I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only together with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only together with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH PIPELINE SECTION .PD 1 .RS 0 This is indicated by the section name [pipeline:main]. Below are the parameters that are acceptable within this section. .IP "\fBpipeline\fR" It is used when you need to apply a number of filters. It is a list of filters ended by an application. The normal pipeline is "healthcheck recon container-server". .RE .PD .SH APP SECTION .PD 1 .RS 0 This is indicated by the section name [app:container-server]. Below are the parameters that are acceptable within this section. .IP "\fBuse\fR" Entry point for paste.deploy for the container server. This is the reference to the installed python egg. This is normally \fBegg:swift#container\fR. .IP "\fBset log_name\fR" Label used when logging. The default is container-server. .IP "\fBset log_facility\fR" Syslog log facility. The default is LOG_LOCAL0. .IP "\fBset log_level\fR" Logging level. The default is INFO. .IP "\fBset log_requests\fR" Enables request logging. The default is True. .IP "\fBset log_address\fR" Logging address. The default is /dev/log. .IP \fBnode_timeout\fR Request timeout to external services. The default is 3 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBallow_versions\fR The default is false. .IP \fBauto_create_account_prefix\fR The default is '.'. .IP \fBreplication_server\fR Configures whether this server handles replication verbs, non-replication verbs, or both. To handle all verbs, including replication verbs, do not specify "replication_server" (this is the default). To only handle replication, set to a True value (e.g. "True" or "1"). To handle only non-replication verbs, set to "False". Unless you have a separate replication network, you should not specify any value for "replication_server". .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only together with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only together with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH FILTER SECTION .PD 1 .RS 0 Any section that has its name prefixed by "filter:" indicates a filter section. Filters are used to specify configuration parameters for specific swift middlewares. Below are the filters available and respective acceptable parameters. .IP "\fB[filter:healthcheck]\fR" .RE .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the healthcheck middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#healthcheck\fR.
.IP "\fBdisable_path\fR" An optional filesystem path which, if present, will cause the healthcheck URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". .RE .RS 0 .IP "\fB[filter:recon]\fR" .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the recon middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#recon\fR. .IP "\fBrecon_cache_path\fR" The recon_cache_path simply sets the directory where stats for a few items will be stored. Depending on the method of deployment you may need to create this directory manually and ensure that swift has read/write. The default is /var/cache/swift. .RE .PD .RS 0 .IP "\fB[filter:xprofile]\fR" .RS 3 .IP "\fBuse\fR" Entry point for paste.deploy for the xprofile middleware. This is the reference to the installed python egg. This is normally \fBegg:swift#xprofile\fR. .IP "\fBprofile_module\fR" This option enable you to switch profilers which should inherit from python standard profiler. Currently the supported value can be 'cProfile', 'eventlet.green.profile' etc. .IP "\fBlog_filename_prefix\fR" This prefix will be used to combine process ID and timestamp to name the profile data file. Make sure the executing user has permission to write into this path (missing path segments will be created, if necessary). If you enable profiling in more than one type of daemon, you must override it with an unique value like, the default is /var/log/swift/profile/account.profile. .IP "\fBdump_interval\fR" The profile data will be dumped to local disk based on above naming rule in this interval. The default is 5.0. .IP "\fBdump_timestamp\fR" Be careful, this option will enable profiler to dump data into the file with time stamp which means there will be lots of files piled up in the directory. The default is false .IP "\fBpath\fR" This is the path of the URL to access the mini web UI. The default is __profile__. .IP "\fBflush_at_shutdown\fR" Clear the data when the wsgi server shutdown. The default is false. .IP "\fBunwind\fR" Unwind the iterator of applications. Default is false. .RE .PD .SH ADDITIONAL SECTIONS .PD 1 .RS 0 The following sections are used by other swift-container services, such as replicator, updater, auditor and sync. .IP "\fB[container-replicator]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is container-replicator. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBper_diff\fR Maximum number of database rows that will be sync'd in a single HTTP replication request. The default is 1000. .IP \fBmax_diffs\fR This caps how long the replicator will spend trying to sync a given database per pass so the other databases don't get starved. The default is 100. .IP \fBconcurrency\fR Number of replication workers to spawn. The default is 8. .IP "\fBrun_pause [deprecated]\fR" Time in seconds to wait between replication passes. The default is 30. .IP \fBinterval\fR Replaces run_pause with the more standard "interval", which means the replicator won't pause unless it takes less than the interval set. The default is 30. .IP \fBnode_timeout\fR Request timeout to external services. The default is 10 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBreclaim_age\fR Time elapsed in seconds before an container can be reclaimed. The default is 604800 seconds. 
.IP \fBrsync_compress\fR Allow rsync to compress data which is transmitted to the destination node during sync. However, this is applicable only when the destination node is in a different region than the local one. The default is false. .IP \fBrsync_module\fR Format of the rsync module where the replicator will send data. See etc/rsyncd.conf-sample for some usage examples. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only together with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only together with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .RS 0 .IP "\fB[container-updater]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is container-updater. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBinterval\fR Minimum time for a pass to take. The default is 300 seconds. .IP \fBconcurrency\fR Number of updater workers to spawn. The default is 4. .IP \fBnode_timeout\fR Request timeout to external services. The default is 3 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 0.5 seconds. .IP \fBcontainers_per_second\fR Maximum containers updated per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 50. .IP \fBslowdown\fR The updater will sleep this amount of time between containers. The default is 0.01 seconds. Deprecated in favor of containers_per_second. .IP \fBaccount_suppression_time\fR Seconds to suppress updating an account that has generated an error. The default is 60 seconds. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Works only together with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Works only together with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .RS 0 .IP "\fB[container-auditor]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is container-auditor. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBinterval\fR Will audit, at most, 1 container per device per interval.
The default is 1800 seconds. .IP \fBcontainers_per_second\fR Maximum containers audited per second. Should be tuned according to individual system specs. 0 is unlimited. The default is 200. .IP \fBrecon_cache_path\fR Path to recon cache directory. The default is /var/cache/swift. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .RS 0 .IP "\fB[container-sync]\fR" .RE .RS 3 .IP \fBlog_name\fR Label used when logging. The default is container-sync. .IP \fBlog_facility\fR Syslog log facility. The default is LOG_LOCAL0. .IP \fBlog_level\fR Logging level. The default is INFO. .IP \fBlog_address\fR Logging address. The default is /dev/log. .IP \fBsync_proxy\fR If you need to use an HTTP Proxy, set it here; defaults to no proxy. .IP \fBinterval\fR Will audit, at most, each container once per interval. The default is 300 seconds. .IP \fBcontainer_time\fR Maximum amount of time to spend syncing each container per pass. The default is 60 seconds. .IP \fBconn_timeout\fR Connection timeout to external services. The default is 5 seconds. .IP \fBrequest_tries\fR Server errors from requests will be retried by default. The default is 3. .IP \fBinternal_client_conf_path\fR Internal client config file path. .IP \fBnice_priority\fR Modify scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. .IP \fBionice_class\fR Modify I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Work only with ionice_priority. .IP \fBionice_priority\fR Modify I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. .RE .PD .SH DOCUMENTATION .LP More in depth documentation about the swift-container-server and also OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/admin_guide.html and .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR swift-container-server(1) swift-2.17.0/doc/manpages/swift-proxy-server.10000666000175100017510000000343113236061617021232 0ustar zuulzuul00000000000000.\" .\" Author: Joao Marcelo Martins or .\" Copyright (c) 2010-2011 OpenStack Foundation. .\" .\" Licensed under the Apache License, Version 2.0 (the "License"); .\" you may not use this file except in compliance with the License. 
.\" You may obtain a copy of the License at .\" .\" http://www.apache.org/licenses/LICENSE-2.0 .\" .\" Unless required by applicable law or agreed to in writing, software .\" distributed under the License is distributed on an "AS IS" BASIS, .\" WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or .\" implied. .\" See the License for the specific language governing permissions and .\" limitations under the License. .\" .TH swift-proxy-server 1 "8/26/2011" "Linux" "OpenStack Swift" .SH NAME .LP .B swift-proxy-server \- OpenStack Swift proxy server. .SH SYNOPSIS .LP .B swift-proxy-server [CONFIG] [-h|--help] [-v|--verbose] .SH DESCRIPTION .PP The Swift Proxy Server is responsible for tying together the rest of the Swift architecture. For each request, it will look up the location of the account, container, or object in the ring and route the request accordingly. The public API is also exposed through the Proxy Server. A large number of failures are also handled in the Proxy Server. For example, if a server is unavailable for an object PUT, it will ask the ring for a handoff server and route there instead. When objects are streamed to or from an object server, they are streamed directly through the proxy server to or from the user the proxy server does not spool them. .SH DOCUMENTATION .LP More in depth documentation in regards to .BI swift-proxy-server and also about OpenStack Swift as a whole can be found at .BI https://docs.openstack.org/swift/latest/ .SH "SEE ALSO" .BR proxy-server.conf(5) swift-2.17.0/doc/source/0000775000175100017510000000000013236061751015032 5ustar zuulzuul00000000000000swift-2.17.0/doc/source/development_auth.rst0000666000175100017510000004644413236061617021146 0ustar zuulzuul00000000000000========================== Auth Server and Middleware ========================== -------------------------------------------- Creating Your Own Auth Server and Middleware -------------------------------------------- The included swift/common/middleware/tempauth.py is a good example of how to create an auth subsystem with proxy server auth middleware. The main points are that the auth middleware can reject requests up front, before they ever get to the Swift Proxy application, and afterwards when the proxy issues callbacks to verify authorization. It's generally good to separate the authentication and authorization procedures. Authentication verifies that a request actually comes from who it says it does. Authorization verifies the 'who' has access to the resource(s) the request wants. Authentication is performed on the request before it ever gets to the Swift Proxy application. The identity information is gleaned from the request, validated in some way, and the validation information is added to the WSGI environment as needed by the future authorization procedure. What exactly is added to the WSGI environment is solely dependent on what the installed authorization procedures need; the Swift Proxy application itself needs no specific information, it just passes it along. Convention has environ['REMOTE_USER'] set to the authenticated user string but often more information is needed than just that. The included TempAuth will set the REMOTE_USER to a comma separated list of groups the user belongs to. The first group will be the "user's group", a group that only the user belongs to. The second group will be the "account's group", a group that includes all users for that auth account (different than the storage account). 
The third group is optional and is the storage account string. If the user does not have admin access to the account, the third group will be omitted. It is highly recommended that authentication server implementers prefix their tokens and Swift storage accounts they create with a configurable reseller prefix (`AUTH_` by default with the included TempAuth). This prefix will avoid conflicts with other authentication servers that might be using the same Swift cluster. Otherwise, the Swift cluster will have to try all the resellers until one validates a token or all fail. A restriction with group names is that no group name should begin with a period '.' as that is reserved for internal Swift use (such as the .r for referrer designations as you'll see later). Example Authentication with TempAuth: * Token AUTH_tkabcd is given to the TempAuth middleware in a request's X-Auth-Token header. * The TempAuth middleware validates the token AUTH_tkabcd and discovers it matches the "tester" user within the "test" account for the storage account "AUTH_storage_xyz". * The TempAuth middleware sets the REMOTE_USER to "test:tester,test,AUTH_storage_xyz" * Now this user will have full access (via authorization procedures later) to the AUTH_storage_xyz Swift storage account and access to containers in other storage accounts, provided the storage account begins with the same `AUTH_` reseller prefix and the container has an ACL specifying at least one of those three groups. Authorization is performed through callbacks by the Swift Proxy server to the WSGI environment's swift.authorize value, if one is set. The swift.authorize value should simply be a function that takes a Request as an argument and returns None if access is granted or returns a callable(environ, start_response) if access is denied. This callable is a standard WSGI callable. Generally, you should return 403 Forbidden for requests by an authenticated user and 401 Unauthorized for an unauthenticated request. For example, here's an authorize function that only allows GETs (in this case you'd probably return 405 Method Not Allowed, but ignore that for the moment).:: from swift.common.swob import HTTPForbidden, HTTPUnauthorized def authorize(req): if req.method == 'GET': return None if req.remote_user: return HTTPForbidden(request=req) else: return HTTPUnauthorized(request=req) Adding the swift.authorize callback is often done by the authentication middleware as authentication and authorization are often paired together. But, you could create separate authorization middleware that simply sets the callback before passing on the request. To continue our example above:: from swift.common.swob import HTTPForbidden, HTTPUnauthorized class Authorization(object): def __init__(self, app, conf): self.app = app self.conf = conf def __call__(self, environ, start_response): environ['swift.authorize'] = self.authorize return self.app(environ, start_response) def authorize(self, req): if req.method == 'GET': return None if req.remote_user: return HTTPForbidden(request=req) else: return HTTPUnauthorized(request=req) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def auth_filter(app): return Authorization(app, conf) return auth_filter The Swift Proxy server will call swift.authorize after some initial work, but before truly trying to process the request. Positive authorization at this point will cause the request to be fully processed immediately. 
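To make the authentication half of that flow concrete, here is a minimal
sketch of such middleware. This is not TempAuth itself: the in-memory token
table is a purely illustrative stand-in for whatever validation (a memcache
lookup, a call to an external auth service) your implementation performs::

    from swift.common.swob import HTTPUnauthorized

    # Hypothetical token-to-groups mapping; a real implementation would
    # validate the token against memcache or an external auth service.
    TOKENS = {'AUTH_tkabcd': 'test:tester,test,AUTH_storage_xyz'}

    class Authentication(object):
        def __init__(self, app, conf):
            self.app = app
            self.conf = conf

        def __call__(self, environ, start_response):
            token = environ.get('HTTP_X_AUTH_TOKEN')
            groups = TOKENS.get(token)
            if groups is None:
                # Reject the request up front, before it ever reaches the
                # Swift Proxy application.
                return HTTPUnauthorized()(environ, start_response)
            # Record the validated identity for later authorization checks.
            environ['REMOTE_USER'] = groups
            return self.app(environ, start_response)

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def auth_filter(app):
            return Authentication(app, conf)
        return auth_filter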
Authorization is performed through callbacks by the Swift Proxy server to the
WSGI environment's swift.authorize value, if one is set. The swift.authorize
value should simply be a function that takes a Request as an argument and
returns None if access is granted or returns a callable(environ,
start_response) if access is denied. This callable is a standard WSGI
callable. Generally, you should return 403 Forbidden for requests by an
authenticated user and 401 Unauthorized for an unauthenticated request. For
example, here's an authorize function that only allows GETs (in this case
you'd probably return 405 Method Not Allowed, but ignore that for the
moment)::

    from swift.common.swob import HTTPForbidden, HTTPUnauthorized

    def authorize(req):
        if req.method == 'GET':
            return None
        if req.remote_user:
            return HTTPForbidden(request=req)
        else:
            return HTTPUnauthorized(request=req)

Adding the swift.authorize callback is often done by the authentication
middleware, as authentication and authorization are often paired together.
But you could create separate authorization middleware that simply sets the
callback before passing on the request. To continue our example above::

    from swift.common.swob import HTTPForbidden, HTTPUnauthorized

    class Authorization(object):
        def __init__(self, app, conf):
            self.app = app
            self.conf = conf

        def __call__(self, environ, start_response):
            environ['swift.authorize'] = self.authorize
            return self.app(environ, start_response)

        def authorize(self, req):
            if req.method == 'GET':
                return None
            if req.remote_user:
                return HTTPForbidden(request=req)
            else:
                return HTTPUnauthorized(request=req)

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def auth_filter(app):
            return Authorization(app, conf)
        return auth_filter

The Swift Proxy server will call swift.authorize after some initial work, but
before truly trying to process the request. Positive authorization at this
point will cause the request to be fully processed immediately. A denial at
this point will immediately send the denial response for most operations.

But for some operations that might be approved with more information, the
additional information will be gathered and added to the WSGI environment and
then swift.authorize will be called once more. These are called delay_denial
requests and currently include container read requests and object read and
write requests. For these requests, the read or write access control string
(X-Container-Read and X-Container-Write) will be fetched and set as the 'acl'
attribute in the Request passed to swift.authorize.

The delay_denial procedures allow skipping possibly expensive access control
string retrievals for requests that can be approved without that information,
such as administrator or account owner requests.

To further our example, we will now approve all requests that have the access
control string set to the same value as the authenticated user string. Note
that you probably wouldn't do this exactly as the access control string
represents a list rather than a single user, but it'll suffice for this
example::

    from swift.common.swob import HTTPForbidden, HTTPUnauthorized

    class Authorization(object):
        def __init__(self, app, conf):
            self.app = app
            self.conf = conf

        def __call__(self, environ, start_response):
            environ['swift.authorize'] = self.authorize
            return self.app(environ, start_response)

        def authorize(self, req):
            # Allow anyone to perform GET requests
            if req.method == 'GET':
                return None
            # Allow any request where the acl equals the authenticated user
            if getattr(req, 'acl', None) == req.remote_user:
                return None
            if req.remote_user:
                return HTTPForbidden(request=req)
            else:
                return HTTPUnauthorized(request=req)

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def auth_filter(app):
            return Authorization(app, conf)
        return auth_filter

The access control string has a standard format included with Swift, though
this can be overridden if desired. The standard format can be parsed with
swift.common.middleware.acl.parse_acl, which converts the string into two
arrays of strings: (referrers, groups). The referrers allow comparing the
request's Referer header to control access. The groups allow comparing the
request.remote_user (or other sources of group information) to control
access. Checking referrer access can be accomplished by using the
swift.common.middleware.acl.referrer_allowed function. Checking group access
is usually a simple string comparison.
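As a quick illustration of the standard format (a sketch; see
swift.common.middleware.acl for the authoritative parsing rules), ``.r:``
entries become referrers and everything else becomes groups::

    >>> from swift.common.middleware.acl import parse_acl
    >>> parse_acl('.r:*.example.com,.rlistings,test:tester')
    (['*.example.com'], ['.rlistings', 'test:tester'])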
Let's continue our example to use parse_acl and referrer_allowed. Now we'll
only allow GETs after a referrer check and any requests after a group check::

    from swift.common.middleware.acl import parse_acl, referrer_allowed
    from swift.common.swob import HTTPForbidden, HTTPUnauthorized

    class Authorization(object):
        def __init__(self, app, conf):
            self.app = app
            self.conf = conf

        def __call__(self, environ, start_response):
            environ['swift.authorize'] = self.authorize
            return self.app(environ, start_response)

        def authorize(self, req):
            if hasattr(req, 'acl'):
                referrers, groups = parse_acl(req.acl)
                if req.method == 'GET' and referrer_allowed(req, referrers):
                    return None
                if req.remote_user and groups and req.remote_user in groups:
                    return None
            if req.remote_user:
                return HTTPForbidden(request=req)
            else:
                return HTTPUnauthorized(request=req)

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def auth_filter(app):
            return Authorization(app, conf)
        return auth_filter

The access control strings are set with PUTs and POSTs to containers with the
X-Container-Read and X-Container-Write headers. Swift allows these strings to
be set to any value, though it's very useful to validate that the strings
meet the desired format and return a useful error to the user if they don't.

To support this validation, the Swift Proxy application will call the WSGI
environment's swift.clean_acl callback whenever one of these headers is to be
written. The callback should take a header name and value as its arguments.
It should return the cleaned value to save if valid, or raise a ValueError
with a reasonable error message if not.

There is an included swift.common.middleware.acl.clean_acl that validates the
standard Swift format. Let's improve our example by making use of that::

    from swift.common.middleware.acl import \
        clean_acl, parse_acl, referrer_allowed
    from swift.common.swob import HTTPForbidden, HTTPUnauthorized

    class Authorization(object):
        def __init__(self, app, conf):
            self.app = app
            self.conf = conf

        def __call__(self, environ, start_response):
            environ['swift.authorize'] = self.authorize
            environ['swift.clean_acl'] = clean_acl
            return self.app(environ, start_response)

        def authorize(self, req):
            if hasattr(req, 'acl'):
                referrers, groups = parse_acl(req.acl)
                if req.method == 'GET' and referrer_allowed(req, referrers):
                    return None
                if req.remote_user and groups and req.remote_user in groups:
                    return None
            if req.remote_user:
                return HTTPForbidden(request=req)
            else:
                return HTTPUnauthorized(request=req)

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def auth_filter(app):
            return Authorization(app, conf)
        return auth_filter

Now, if you want to override the format for access control strings you'll
have to provide your own clean_acl function and you'll have to do your own
parsing and authorization checking for that format. It's highly recommended
you use the standard format simply to support the widest range of external
tools, but sometimes that's less important than meeting certain ACL
requirements.
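However you implement them, authentication and authorization middleware are
wired into the proxy like any other paste.deploy filter. A hypothetical
proxy-server.conf fragment might look like the following, where the egg name
``mypkg`` and the filter name ``myauth`` are placeholders for your own
package and are not part of Swift itself::

    [pipeline:main]
    pipeline = catch_errors cache myauth proxy-server

    [filter:myauth]
    use = egg:mypkg#myauth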
----------------------------
Integrating With repoze.what
----------------------------

Here's an example of integration with repoze.what, though honestly I'm no
repoze.what expert by any stretch; this is just included here to hopefully
give folks a start on their own code if they want to use repoze.what::

    from time import time

    from eventlet.timeout import Timeout
    from repoze.what.adapters import BaseSourceAdapter
    from repoze.what.middleware import setup_auth
    from repoze.what.predicates import in_any_group, NotAuthorizedError

    from swift.common.bufferedhttp import http_connect_raw as http_connect
    from swift.common.middleware.acl import clean_acl, parse_acl, referrer_allowed
    from swift.common.utils import cache_from_env, split_path
    from swift.common.swob import HTTPForbidden, HTTPUnauthorized

    class DevAuthorization(object):
        def __init__(self, app, conf):
            self.app = app
            self.conf = conf

        def __call__(self, environ, start_response):
            environ['swift.authorize'] = self.authorize
            environ['swift.clean_acl'] = clean_acl
            return self.app(environ, start_response)

        def authorize(self, req):
            version, account, container, obj = split_path(req.path, 1, 4, True)
            if not account:
                return self.denied_response(req)
            referrers, groups = parse_acl(getattr(req, 'acl', None))
            if referrer_allowed(req, referrers):
                return None
            try:
                in_any_group(account, *groups).check_authorization(req.environ)
            except NotAuthorizedError:
                return self.denied_response(req)
            return None

        def denied_response(self, req):
            if req.remote_user:
                return HTTPForbidden(request=req)
            else:
                return HTTPUnauthorized(request=req)

    class DevIdentifier(object):
        def __init__(self, conf):
            self.conf = conf

        def identify(self, env):
            return {'token': env.get('HTTP_X_AUTH_TOKEN',
                                     env.get('HTTP_X_STORAGE_TOKEN'))}

        def remember(self, env, identity):
            return []

        def forget(self, env, identity):
            return []

    class DevAuthenticator(object):
        def __init__(self, conf):
            self.conf = conf
            self.auth_host = conf.get('ip', '127.0.0.1')
            self.auth_port = int(conf.get('port', 11000))
            self.ssl = \
                conf.get('ssl', 'false').lower() in ('true', 'on', '1', 'yes')
            self.auth_prefix = conf.get('prefix', '/')
            self.timeout = float(conf.get('node_timeout', 10))

        def authenticate(self, env, identity):
            token = identity.get('token')
            if not token:
                return None
            memcache_client = cache_from_env(env)
            key = 'devauth/%s' % token
            cached_auth_data = memcache_client.get(key)
            if cached_auth_data:
                start, expiration, user = cached_auth_data
                if time() - start <= expiration:
                    return user
            with Timeout(self.timeout):
                conn = http_connect(self.auth_host, self.auth_port, 'GET',
                                    '%stoken/%s' % (self.auth_prefix, token),
                                    ssl=self.ssl)
                resp = conn.getresponse()
                resp.read()
                conn.close()
            if resp.status == 204:
                expiration = float(resp.getheader('x-auth-ttl'))
                user = resp.getheader('x-auth-user')
                memcache_client.set(key, (time(), expiration, user),
                                    time=expiration)
                return user
            return None

    class DevChallenger(object):
        def __init__(self, conf):
            self.conf = conf

        def challenge(self, env, status, app_headers, forget_headers):
            def no_challenge(env, start_response):
                start_response(str(status), [])
                return []
            return no_challenge

    class DevGroupSourceAdapter(BaseSourceAdapter):
        def __init__(self, *args, **kwargs):
            super(DevGroupSourceAdapter, self).__init__(*args, **kwargs)
            self.sections = {}

        def _get_all_sections(self):
            return self.sections

        def _get_section_items(self, section):
            return self.sections[section]

        def _find_sections(self, credentials):
            return credentials['repoze.what.userid'].split(',')

        def _include_items(self, section, items):
            self.sections[section] |= items

        def _exclude_items(self, section, items):
            for item in items:
                self.sections[section].remove(item)

        def _item_is_included(self, section, item):
            return item in self.sections[section]

        def _create_section(self, section):
            self.sections[section] = set()

        def _edit_section(self, section, new_section):
            self.sections[new_section] = self.sections[section]
            del self.sections[section]

        def _delete_section(self, section):
            del self.sections[section]

        def _section_exists(self, section):
            return section in self.sections

    class DevPermissionSourceAdapter(BaseSourceAdapter):
        def __init__(self, *args, **kwargs):
            super(DevPermissionSourceAdapter, self).__init__(*args, **kwargs)
            self.sections = {}

        def _get_all_sections(self):
            return self.sections

        def _get_section_items(self, section):
            return self.sections[section]

        def _find_sections(self, group_name):
            return set([n for (n, p) in self.sections.items()
                        if group_name in p])

        def _include_items(self, section, items):
            self.sections[section] |= items

        def _exclude_items(self, section, items):
            for item in items:
                self.sections[section].remove(item)

        def _item_is_included(self, section, item):
            return item in self.sections[section]

        def _create_section(self, section):
            self.sections[section] = set()

        def _edit_section(self, section, new_section):
            self.sections[new_section] = self.sections[section]
            del self.sections[section]

        def _delete_section(self, section):
            del self.sections[section]

        def _section_exists(self, section):
            return section in self.sections

    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def auth_filter(app):
            return setup_auth(
                DevAuthorization(app, conf),
                group_adapters={'all_groups': DevGroupSourceAdapter()},
                permission_adapters={'all_perms': DevPermissionSourceAdapter()},
                identifiers=[('devauth', DevIdentifier(conf))],
                authenticators=[('devauth', DevAuthenticator(conf))],
                challengers=[('devauth', DevChallenger(conf))])
        return auth_filter

-----------------------
Allowing CORS with Auth
-----------------------

Cross Origin Resource Sharing (CORS) requires that the auth system allow the
OPTIONS method to pass through without a token. The preflight request will
make an OPTIONS call against the object or container and will not work if
the auth system stops it. See TempAuth for an example of how OPTIONS requests
are handled.
swift-2.17.0/doc/source/ops_runbook/0000775000175100017510000000000013236061751017372 5ustar zuulzuul00000000000000swift-2.17.0/doc/source/ops_runbook/maintenance.rst0000666000175100017510000003777513236061617022424 0ustar zuulzuul00000000000000==================
Server maintenance
==================

General assumptions
~~~~~~~~~~~~~~~~~~~

- It is assumed that anyone attempting to replace hardware components will
  have already read and understood the appropriate maintenance and service
  guides.
- It is assumed that where servers need to be taken off-line for hardware
  replacement, this will be done in series, bringing each server back
  on-line before taking the next off-line.
- It is assumed that the operations directed procedure will be used for
  identifying hardware for replacement.

Assessing the health of Swift
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can run the swift-recon tool on a Swift proxy node to get a quick check
of how Swift is doing. Please note that the numbers below are necessarily
somewhat subjective. Sometimes parameters for which we say 'low values are
good' will have pretty high values for a time. Often if you wait a while
things get better.

For example:

.. code::

   sudo swift-recon -rla
   ===============================================================================
   [2012-03-10 12:57:21] Checking async pendings on 384 hosts...
   Async stats: low: 0, high: 1, avg: 0, total: 1
   ===============================================================================
   [2012-03-10 12:57:22] Checking replication times on 384 hosts...
   [Replication Times] shortest: 1.4113877813, longest: 36.8293570836, avg: 4.86278064749
   ===============================================================================
   [2012-03-10 12:57:22] Checking load avg's on 384 hosts...
   [5m load average] lowest: 2.22, highest: 9.5, avg: 4.59578125
   [15m load average] lowest: 2.36, highest: 9.45, avg: 4.62622395833
   [1m load average] lowest: 1.84, highest: 9.57, avg: 4.5696875
   ===============================================================================

In the example above we ask for information on replication times (-r), load
averages (-l) and async pendings (-a). This is a healthy Swift system.

Rules-of-thumb for 'good' recon output are:

- Nodes that respond are up and running Swift. If all nodes respond, that
  is a good sign. But some nodes may time out. For example:

  .. code::

     -> [http://.29:6200/recon/load:]
     -> [http://.31:6200/recon/load:]

  That could be okay or could require investigation.

- Low values (say < 10 for high and average) for async pendings are good.
  Higher values occur when disks are down and/or when the system is heavily
  loaded. Many simultaneous PUTs to the same container can drive async
  pendings up. This may be normal, and may resolve itself after a while. If
  it persists, one way to track down the problem is to find a node with high
  async pendings (with ``swift-recon -av | sort -n -k4``), then check its
  Swift logs. Often async pendings are high because a node cannot write to a
  container on another node. Often this is because the node or disk is
  offline or bad. This may be okay if we know about it.

- Low values for replication times are good. These values rise when new
  rings are pushed, and when nodes and devices are brought back on line.

- Our 'high' load average values are typically in the 9-15 range. If they
  are a lot bigger it is worth having a look at the systems pushing the
  average up. Run ``swift-recon -av`` to get the individual averages. To
  sort the entries with the highest at the end, run
  ``swift-recon -av | sort -n -k4``.

For comparison here is the recon output for the same system above when two
entire racks of Swift are down:

.. code::

   [2012-03-10 16:56:33] Checking async pendings on 384 hosts...
   -> http://.22:6200/recon/async:
   -> http://.18:6200/recon/async:
   -> http://.16:6200/recon/async:
   -> http://.13:6200/recon/async:
   -> http://.30:6200/recon/async:
   -> http://.6:6200/recon/async:
   .........
   -> http://.5:6200/recon/async:
   -> http://.15:6200/recon/async:
   -> http://.9:6200/recon/async:
   -> http://.27:6200/recon/async:
   -> http://.4:6200/recon/async:
   -> http://.8:6200/recon/async:
   Async stats: low: 243, high: 659, avg: 413, total: 132275
   ===============================================================================
   [2012-03-10 16:57:48] Checking replication times on 384 hosts...
   -> http://.22:6200/recon/replication:
   -> http://.18:6200/recon/replication:
   -> http://.16:6200/recon/replication:
   -> http://.13:6200/recon/replication:
   -> http://.30:6200/recon/replication:
   -> http://.6:6200/recon/replication:
   ............
   -> http://.5:6200/recon/replication:
   -> http://.15:6200/recon/replication:
   -> http://.9:6200/recon/replication:
   -> http://.27:6200/recon/replication:
   -> http://.4:6200/recon/replication:
   -> http://.8:6200/recon/replication:
   [Replication Times] shortest: 1.38144306739, longest: 112.620954418, avg: 10.2859475361
   ===============================================================================
   [2012-03-10 16:59:03] Checking load avg's on 384 hosts...
   -> http://.22:6200/recon/load:
   -> http://.18:6200/recon/load:
   -> http://.16:6200/recon/load:
   -> http://.13:6200/recon/load:
   -> http://.30:6200/recon/load:
   -> http://.6:6200/recon/load:
   ............
   -> http://.15:6200/recon/load:
   -> http://.9:6200/recon/load:
   -> http://.27:6200/recon/load:
   -> http://.4:6200/recon/load:
   -> http://.8:6200/recon/load:
   [5m load average] lowest: 1.71, highest: 4.91, avg: 2.486375
   [15m load average] lowest: 1.79, highest: 5.04, avg: 2.506125
   [1m load average] lowest: 1.46, highest: 4.55, avg: 2.4929375
   ===============================================================================

.. note::

   The replication times and load averages are within reasonable
   parameters, even with 80 object stores down. Async pendings, however,
   are quite high. This is due to the fact that the containers on the
   servers which are down cannot be updated. When those servers come back
   up, async pendings should drop. If async pendings were at this level
   without an explanation, we have a problem.

Recon examples
~~~~~~~~~~~~~~

Here is an example of noting and tracking down a problem with recon.
Running recon shows some async pendings:

.. code::

   bob@notso:~/swift-1.4.4/swift$ ssh -q .132.7 sudo swift-recon -alr
   ===============================================================================
   [2012-03-14 17:25:55] Checking async pendings on 384 hosts...
   Async stats: low: 0, high: 23, avg: 8, total: 3356
   ===============================================================================
   [2012-03-14 17:25:55] Checking replication times on 384 hosts...
   [Replication Times] shortest: 1.49303831657, longest: 39.6982825994, avg: 4.2418222066
   ===============================================================================
   [2012-03-14 17:25:56] Checking load avg's on 384 hosts...
   [5m load average] lowest: 2.35, highest: 8.88, avg: 4.45911458333
   [15m load average] lowest: 2.41, highest: 9.11, avg: 4.504765625
   [1m load average] lowest: 1.95, highest: 8.56, avg: 4.40588541667
   ===============================================================================

Why? Running recon again with ``-av`` (not shown here) tells us that the
node with the highest (23) is .72.61. Looking at the log files on .72.61
we see:

.. code::

   souzab@:~$ sudo tail -f /var/log/swift/background.log | grep -i ERROR
   Mar 14 17:28:06 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201}
   Mar 14 17:28:06 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201}
   Mar 14 17:28:09 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:11 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:13 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201}
   Mar 14 17:28:13 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201}
   Mar 14 17:28:15 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:15 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:19 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:19 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:20 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.119', 'id': 5481, 'meta': '', 'device': 'disk6', 'port': 6201}
   Mar 14 17:28:21 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:21 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}
   Mar 14 17:28:22 container-replicator ERROR Remote drive not mounted {'zone': 5, 'weight': 1952.0, 'ip': '.204.20', 'id': 2311, 'meta': '', 'device': 'disk5', 'port': 6201}

That is why this node has a lot of async pendings: a bunch of disks that are
not mounted on and . There may be other issues, but clearing this up will
likely drop the async pendings a fair bit, as other nodes will be having the
same problem.

Assessing the availability risk when multiple storage servers are down
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note::

   This procedure will tell you if you have a problem; however, in practice
   you will find that you will not use this procedure frequently.

If three storage nodes (or, more precisely, three disks on three different
storage nodes) are down, there is a small but nonzero probability that user
objects, containers, or accounts will not be available.

Procedure
---------

.. note::

   Swift has three rings: one each for objects, containers and accounts.
   This procedure should be run three times, each time specifying the
   appropriate ``*.builder`` file.
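Since the check is identical for each ring, one convenient way to drive all
three passes is a small shell loop (a sketch; the node addresses are
placeholders for the servers under consideration):

.. code::

   % for b in object container account; do
   >   sudo swift-ring-builder /etc/swift/${b}.builder list_parts <node1> <node2> <node3>
   > done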
#. Determine whether all three nodes are in different Swift zones by
   running the ring builder on a proxy node to determine which zones the
   storage nodes are in. For example:

   .. code::

      % sudo swift-ring-builder /etc/swift/object.builder
      /etc/swift/object.builder, build version 1467
      2097152 partitions, 3 replicas, 5 zones, 1320 devices, 0.02 balance
      The minimum number of hours before a partition can be reassigned is 24
      Devices:  id  zone  ip address  port  name  weight  partitions  balance  meta
                 0     1          .4  6200  disk0  1708.00       4259    -0.00
                 1     1          .4  6200  disk1  1708.00       4260     0.02
                 2     1          .4  6200  disk2  1952.00       4868     0.01
                 3     1          .4  6200  disk3  1952.00       4868     0.01
                 4     1          .4  6200  disk4  1952.00       4867    -0.01

#. Here, node .4 is in zone 1. If two or more of the three nodes under
   consideration are in the same Swift zone, they do not have any ring
   partitions in common; there is little/no data availability risk if all
   three nodes are down.

#. If the nodes are in three distinct Swift zones it is necessary to
   determine whether the nodes have ring partitions in common. Run
   ``swift-ring-builder`` again, this time with the ``list_parts`` option,
   and specify the nodes under consideration. For example:

   .. code::

      % sudo swift-ring-builder /etc/swift/object.builder list_parts .8 .15 .72.2
      Partition   Matches
      91          2
      729         2
      3754        2
      3769        2
      3947        2
      5818        2
      7918        2
      8733        2
      9509        2
      10233       2

#. The ``list_parts`` option to the ring builder indicates how many ring
   partitions the nodes have in common. If, as in this case, the first
   entry in the list has a 'Matches' column of 2 or less, there is no data
   availability risk if all three nodes are down.

#. If the 'Matches' column has entries equal to 3, there is some data
   availability risk if all three nodes are down. The risk is generally
   small, and is proportional to the number of entries that have a 3 in the
   Matches column. For example:

   .. code::

      Partition   Matches
      26865       3
      362367      3
      745940      3
      778715      3
      797559      3
      820295      3
      822118      3
      839603      3
      852332      3
      855965      3
      858016      3

#. A quick way to count the number of rows with 3 matches is:

   .. code::

      % sudo swift-ring-builder /etc/swift/object.builder list_parts .8 .15 .72.2 | grep "3$" | wc -l
      30

#. In this case the nodes have 30 out of a total of 2097152 partitions in
   common; about 0.001%. The risk is small but nonzero. Recall that a
   partition is simply a portion of the ring mapping space, not actual
   data. So having partitions in common is a necessary but not sufficient
   condition for data unavailability.

   .. note::

      We should not bring down a node for repair if it shows Matches
      entries of 3 with other nodes that are also down. If three nodes
      that have 3 partitions in common are all down, there is a nonzero
      probability that data are unavailable and we should work to bring
      some or all of the nodes up ASAP.

Swift startup/shutdown
~~~~~~~~~~~~~~~~~~~~~~

- Use reload - not stop/start/restart.
- Try to roll sets of servers (especially proxy) in groups of less than
  20% of your servers.
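For example, a rolling reload of the proxy servers, one node at a time,
would use:

.. code::

   sudo swift-init proxy reload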
swift-2.17.0/doc/source/ops_runbook/troubleshooting.rst0000666000175100017510000002515013236061617023361 0ustar zuulzuul00000000000000====================
Troubleshooting tips
====================

Diagnose: Customer complains they receive an HTTP status 500 when trying to browse containers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

This entry is prompted by a real customer issue and exclusively focused on
how that problem was identified. There are many reasons why an HTTP status
of 500 could be returned. If there are no obvious problems with the Swift
object store, then it may be necessary to take a closer look at the user's
transactions. After finding the user's Swift account, you can search the
Swift proxy logs on each Swift proxy server for transactions from this user.
The Linux ``bzgrep`` command can be used to search all the proxy log files
on a node, including the ``.bz2`` compressed files. For example:

.. code::

   $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh \
     -w .68.[4-11,132-139 4-11,132-139],.132.[4-11,132-139] \
     'sudo bzgrep -w AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log*' | dshbak -c
   .
   .
   ----------------
   .132.6
   ----------------
   Feb 29 08:51:57 sw-aw2az2-proxy011 proxy-server .16.132 .66.8 29/Feb/2012/08/51/57 GET /v1.0/AUTH_redacted-4962-4692-98fb-52ddda82a5af
   /%3Fformat%3Djson HTTP/1.0 404 - - _4f4d50c5e4b064d88bd7ab82 - - -
   tx429fc3be354f434ab7f9c6c4206c1dc3 - 0.0130

This shows a ``GET`` operation on the user's account.

.. note::

   The HTTP status returned is 404, Not found, rather than 500 as reported
   by the user.

Using the transaction ID, ``tx429fc3be354f434ab7f9c6c4206c1dc3``, you can
search the Swift object servers' log files for this transaction ID:

.. code::

   $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh \
     -w .72.[4-67|4-67],.[4-67|4-67],.[4-67|4-67],.204.[4-131] \
     'sudo bzgrep tx429fc3be354f434ab7f9c6c4206c1dc3 /var/log/swift/server.log*' | dshbak -c
   .
   .
   ----------------
   .72.16
   ----------------
   Feb 29 08:51:57 sw-aw2az1-object013 account-server .132.6 - - [29/Feb/2012:08:51:57 +0000] "GET /disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0016 ""
   ----------------
   .31
   ----------------
   Feb 29 08:51:57 node-az2-object060 account-server .132.6 - - [29/Feb/2012:08:51:57 +0000] "GET /disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0011 ""
   ----------------
   .204.70
   ----------------
   Feb 29 08:51:57 sw-aw2az3-object0067 account-server .132.6 - - [29/Feb/2012:08:51:57 +0000] "GET /disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" 404 - "tx429fc3be354f434ab7f9c6c4206c1dc3" "-" "-" 0.0014 ""

.. note::

   There are 3 ``GET`` operations, one to each of the 3 object servers that
   hold the 3 replicas of this user's account. Each ``GET`` returns an HTTP
   status of 404, Not found.

Next, use the ``swift-get-nodes`` command to determine exactly where the
user's account data is stored:

.. code::

   $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_redacted-4962-4692-98fb-52ddda82a5af
   Account  AUTH_redacted-4962-4692-98fb-52ddda82a5af
   Container  None
   Object  None

   Partition 198875
   Hash  1846d99185f8a0edaf65cfbf37439696

   Server:Port Device  .31:6202 disk6
   Server:Port Device  .204.70:6202 disk6
   Server:Port Device  .72.16:6202 disk9
   Server:Port Device  .204.64:6202 disk11 [Handoff]
   Server:Port Device  .26:6202 disk11 [Handoff]
   Server:Port Device  .72.27:6202 disk11 [Handoff]

   curl -I -XHEAD "http://.31:6202/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
   curl -I -XHEAD "http://.204.70:6202/disk6/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
   curl -I -XHEAD "http://.72.16:6202/disk9/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af"
   curl -I -XHEAD "http://.204.64:6202/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" # [Handoff]
   curl -I -XHEAD "http://.26:6202/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" # [Handoff]
   curl -I -XHEAD "http://.72.27:6202/disk11/198875/AUTH_redacted-4962-4692-98fb-52ddda82a5af" # [Handoff]

   ssh .31 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
   ssh .204.70 "ls -lah /srv/node/disk6/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
   ssh .72.16 "ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/"
   ssh .204.64 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
   ssh .26 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]
   ssh .72.27 "ls -lah /srv/node/disk11/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/" # [Handoff]

Check each of the primary servers, .31, .204.70 and .72.16, for this user's
account. For example on .72.16:

.. code::

   $ ls -lah /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/
   total 1.0M
   drwxrwxrwx 2 swift swift  98 2012-02-23 14:49 .
   drwxrwxrwx 3 swift swift  45 2012-02-03 23:28 ..
   -rw------- 1 swift swift 15K 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db
   -rw-rw-rw- 1 swift swift   0 2012-02-23 14:49 1846d99185f8a0edaf65cfbf37439696.db.pending

So this user's account DB, an SQLite DB, is present. Use sqlite to check out
the account:

.. code::

   $ sudo cp /srv/node/disk9/accounts/198875/696/1846d99185f8a0edaf65cfbf37439696/1846d99185f8a0edaf65cfbf37439696.db /tmp
   $ sudo sqlite3 /tmp/1846d99185f8a0edaf65cfbf37439696.db
   sqlite> .mode line
   sqlite> select * from account_stat;
   account = AUTH_redacted-4962-4692-98fb-52ddda82a5af
   created_at = 1328311738.42190
   put_timestamp = 1330000873.61411
   delete_timestamp = 1330001026.00514
   container_count = 0
   object_count = 0
   bytes_used = 0
   hash = eb7e5d0ea3544d9def940b19114e8b43
   id = 2de8c8a8-cef9-4a94-a421-2f845802fe90
   status = DELETED
   status_changed_at = 1330001026.00514
   metadata =

.. note::

   The status is ``DELETED``. So this account was deleted. This explains
   why the GET operations are returning 404, Not found.

Check the account delete date/time:

.. code::

   $ python
   >>> import time
   >>> time.ctime(1330001026.00514)
   'Thu Feb 23 12:43:46 2012'
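If you prefer to stay in the shell, GNU ``date`` gives the same answer
(shown here in UTC; ``time.ctime`` above printed local time):

.. code::

   $ date -ud @1330001026.00514
   Thu Feb 23 12:43:46 UTC 2012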
Next, try to find the ``DELETE`` operation for this account in the proxy
server logs:

.. code::

   $ PDSH_SSH_ARGS_APPEND="-o StrictHostKeyChecking=no" pdsh -l -R ssh \
     -w .68.[4-11,132-139 4-11,132-139],.132.[4-11,132-139|4-11,132-139] \
     'sudo bzgrep AUTH_redacted-4962-4692-98fb-52ddda82a5af /var/log/swift/proxy.log* \
     | grep -w DELETE | awk "{print $3,$10,$12}"' | dshbak -c
   .
   .
   Feb 23 12:43:46 sw-aw2az2-proxy001 proxy-server .66.7 23/Feb/2012/12/43/46 DELETE /v1.0/AUTH_redacted-4962-4692-98fb-
   52ddda82a5af/ HTTP/1.0 204 - Apache-HttpClient/4.1.2%20%28java%201.5%29 _4f458ee4e4b02a869c3aad02 - - -
   tx4471188b0b87406899973d297c55ab53 - 0.0086

From this you can see the operation that resulted in the account being
deleted.

Procedure: Deleting objects
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Simple case - deleting a small number of objects and containers
---------------------------------------------------------------

.. note::

   ``swift-direct`` is specific to the Hewlett Packard Enterprise Helion
   Public Cloud. Use ``swiftly`` as an alternative (see the sketch at the
   end of this procedure).

.. note::

   Object and container names are in UTF8. Swift direct accepts UTF8
   directly, not URL-encoded UTF8 (the REST API expects UTF8 that is then
   URL-encoded). In practice, cutting and pasting foreign language strings
   into a terminal window will produce the right result.

   Hint: Use the ``head`` command before any destructive commands.

To delete a small number of objects, log into any proxy node and proceed as
follows.

Examine the object in question:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct head 132345678912345 container_name obj_name

If ``X-Object-Manifest`` or ``X-Static-Large-Object`` is set, this is a
manifest object and the segment objects may be in another container.

If the ``X-Object-Manifest`` attribute is set, the object is a DLO and you
need to find the names of the segment objects. For example, if
``X-Object-Manifest`` is ``container2/seg-blah``, list the contents of the
container container2 as follows:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct show 132345678912345 container2

Pick out the objects whose names start with ``seg-blah``. Delete the segment
objects as follows:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container2 seg-blah01
   $ sudo -u swift /opt/hp/swift/bin/swift-direct delete 132345678912345 container2 seg-blah02
   etc

If ``X-Static-Large-Object`` is set, the object is an SLO and you need to
read its contents. Do this by:

- Using swift-get-nodes to get the details of the object's location.
- Changing the ``-X HEAD`` to ``-X GET`` and running ``curl`` against one
  copy.
- This returns a JSON body listing containers and object names.
- Deleting the objects as described above for DLO segments.

Once the segments are deleted, you can delete the object using
``swift-direct`` as described above.

Finally, use ``swift-direct`` to delete the container.
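If you are not on a system with ``swift-direct``, a roughly equivalent
sequence for the DLO example above can be sketched with the standard
``python-swiftclient`` CLI, assuming you can authenticate as the owner of
the account:

.. code::

   $ swift list container2 --prefix seg-blah    # find the segment objects
   $ swift delete container2 seg-blah01 seg-blah02
   $ swift delete container2    # deletes any remaining objects, then the container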
Procedure: Decommissioning swift nodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Should Swift nodes need to be decommissioned (e.g., where they are being
re-purposed), it is very important to follow these steps.

#. In the case of object servers, follow the procedure for removing the
   node from the rings.
#. In the case of Swift proxy servers, have the network team remove the
   node from the load balancers.
#. Open a network ticket to have the node removed from network firewalls.
#. Make sure that you remove the ``/etc/swift`` directory and everything
   in it.
swift-2.17.0/doc/source/ops_runbook/procedures.rst0000666000175100017510000003412713236061617022305 0ustar zuulzuul00000000000000=================================
Software configuration procedures
=================================

.. _fix_broken_gpt_table:

Fix broken GPT table (broken disk partition)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

- If a GPT table is broken, a message like the following may be observed
  when the command

  .. code::

     $ sudo parted -l

  is run:

  .. code::

     ...
     Error: The backup GPT table is corrupt, but the primary appears OK,
     so that will be used.
     OK/Cancel?

#. To fix this, first install the ``gdisk`` program:

   .. code::

      $ sudo aptitude install gdisk

#. Run ``gdisk`` for the particular drive with the damaged partition:

   .. code::

      $ sudo gdisk /dev/sd*a-l*
      GPT fdisk (gdisk) version 0.6.14

      Caution: invalid backup GPT header, but valid main header; regenerating
      backup header from main header.

      Warning! One or more CRCs don't match. You should repair the disk!

      Partition table scan:
        MBR: protective
        BSD: not present
        APM: not present
        GPT: damaged
      /dev/sd
      *****************************************************************************
      Caution: Found protective or hybrid MBR and corrupt GPT. Using GPT, but disk
      verification and recovery are STRONGLY recommended.
      *****************************************************************************

#. On the command prompt, type ``r`` (recovery and transformation
   options), followed by ``d`` (use main GPT header), ``v`` (verify disk)
   and finally ``w`` (write table to disk and exit). You will also need to
   enter ``Y`` when prompted in order to confirm actions.

   .. code::

      Command (? for help): r

      Recovery/transformation command (? for help): d

      Recovery/transformation command (? for help): v

      Caution: The CRC for the backup partition table is invalid. This table may
      be corrupt. This program will automatically create a new backup partition
      table when you save your partitions.

      Caution: Partition 1 doesn't begin on a 8-sector boundary. This may
      result in degraded performance on some modern (2009 and later) hard disks.

      Caution: Partition 2 doesn't begin on a 8-sector boundary. This may
      result in degraded performance on some modern (2009 and later) hard disks.

      Caution: Partition 3 doesn't begin on a 8-sector boundary. This may
      result in degraded performance on some modern (2009 and later) hard disks.

      Identified 1 problems!

      Recovery/transformation command (? for help): w

      Final checks complete. About to write GPT data. THIS WILL OVERWRITE EXISTING
      PARTITIONS!!

      Do you want to proceed, possibly destroying your data? (Y/N): Y

      OK; writing new GUID partition table (GPT).
      The operation has completed successfully.

#. Running the following command should now show that the partition is
   recovered and healthy again:

   .. code::

      $ sudo parted /dev/sd#

#. Finally, uninstall ``gdisk`` from the node:

   .. code::

      $ sudo aptitude remove gdisk

.. _fix_broken_xfs_filesystem:

Procedure: Fix broken XFS filesystem
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

#. A filesystem may be corrupt or broken if the following output is
   observed when checking its label:

   .. code::

      $ sudo xfs_admin -l /dev/sd#
        cache_node_purge: refcount was 1, not zero (node=0x25d5ee0)
        xfs_admin: cannot read root inode (117)
        cache_node_purge: refcount was 1, not zero (node=0x25d92b0)
        xfs_admin: cannot read realtime bitmap inode (117)
        bad sb magic # 0 in AG 1
        failed to read label in AG 1

#. Run the following commands to remove and replace the broken/corrupt
   filesystem. (This example uses the filesystem ``/dev/sdb2``.) First,
   replace the partition:

   .. code::

      $ sudo parted
      GNU Parted 2.3
      Using /dev/sda
      Welcome to GNU Parted! Type 'help' to view a list of commands.
      (parted) select /dev/sdb
      Using /dev/sdb
      (parted) p
      Model: HP LOGICAL VOLUME (scsi)
      Disk /dev/sdb: 2000GB
      Sector size (logical/physical): 512B/512B
      Partition Table: gpt

      Number  Start   End     Size    File system  Name                      Flags
       1      17.4kB  1024MB  1024MB  ext3                                   boot
       2      1024MB  1751GB  1750GB  xfs          sw-aw2az1-object045-disk1
       3      1751GB  2000GB  249GB                                          lvm

      (parted) rm 2
      (parted) mkpart primary 2 -1
      Warning: You requested a partition from 2000kB to 2000GB.
      The closest location we can manage is 1024MB to 1751GB.
      Is this still acceptable to you?
      Yes/No? Yes
      Warning: The resulting partition is not properly aligned for best performance.
      Ignore/Cancel? Ignore
      (parted) p
      Model: HP LOGICAL VOLUME (scsi)
      Disk /dev/sdb: 2000GB
      Sector size (logical/physical): 512B/512B
      Partition Table: gpt

      Number  Start   End     Size    File system  Name     Flags
       1      17.4kB  1024MB  1024MB  ext3                  boot
       2      1024MB  1751GB  1750GB  xfs          primary
       3      1751GB  2000GB  249GB                         lvm

      (parted) quit

#. The next step is to scrub the filesystem and format:

   .. code::

      $ sudo dd if=/dev/zero of=/dev/sdb2 bs=$((1024*1024)) count=1
      1+0 records in
      1+0 records out
      1048576 bytes (1.0 MB) copied, 0.00480617 s, 218 MB/s
      $ sudo /sbin/mkfs.xfs -f -i size=1024 /dev/sdb2
      meta-data=/dev/sdb2              isize=1024   agcount=4, agsize=106811524 blks
               =                       sectsz=512   attr=2, projid32bit=0
      data     =                       bsize=4096   blocks=427246093, imaxpct=5
               =                       sunit=0      swidth=0 blks
      naming   =version 2              bsize=4096   ascii-ci=0
      log      =internal log           bsize=4096   blocks=208616, version=2
               =                       sectsz=512   sunit=0 blks, lazy-count=1
      realtime =none                   extsz=4096   blocks=0, rtextents=0

#. You should now label and mount your filesystem.

#. You can now check to see if the filesystem is mounted using the command:

   .. code::

      $ mount

.. _checking_if_account_ok:

Procedure: Checking if an account is okay
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note::

   ``swift-direct`` is only available in the HPE Helion Public Cloud.
   Use ``swiftly`` as an alternate (or use ``swift-get-nodes`` as explained
   here).

You must know the tenant/project ID. You can check if the account is okay as
follows from a proxy:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_

The response will either be similar to a swift list of the account
containers, or an error indicating that the resource could not be found.

Alternatively, you can use ``swift-get-nodes`` to find the account database
files. Run the following on a proxy:

.. code::

   $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_

The response will print curl/ssh commands that will list the replicated
account databases. Use the indicated ``curl`` or ``ssh`` commands to check
the status and existence of the account.

Procedure: Getting swift account stats
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

.. note::

   ``swift-direct`` is specific to the HPE Helion Public Cloud. Use
   ``swiftly`` as an alternate, or use ``swift-get-nodes`` as explained in
   :ref:`checking_if_account_ok`.

This procedure describes how you determine the Swift usage for a given
account: that is, the number of containers, the number of objects and the
total bytes used. To do this you will need the project ID.

Log onto one of the Swift proxy servers. Use swift-direct to show this
account's usage:

.. code::

   $ sudo -u swift /opt/hp/swift/bin/swift-direct show AUTH_
   Status: 200
   Content-Length: 0
   Accept-Ranges: bytes
   X-Timestamp: 1379698586.88364
   X-Account-Bytes-Used: 67440225625994
   X-Account-Container-Count: 1
   Content-Type: text/plain; charset=utf-8
   X-Account-Object-Count: 8436776
   Status: 200
   name: my_container count: 8436776 bytes: 67440225625994

This account has 1 container. That container has 8436776 objects. The total
bytes used is 67440225625994.
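As a quick sanity check, such byte counts can be converted to TiB with a
one-liner (a convenience sketch, not part of ``swift-direct``):

.. code::

   $ python -c "print('%.1f TiB' % (67440225625994 / 2.0**40))"
   61.3 TiB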
Procedure: Revive a deleted account
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Swift accounts are normally not recreated. If a tenant/project is deleted,
the account can then be deleted. If the user wishes to use Swift again, the
normal process is to create a new tenant/project -- and hence a new Swift
account.

However, if the Swift account is deleted, but the tenant/project is not
deleted from Keystone, the user can no longer access the account. This is
because the account is marked deleted in Swift. You can revive the account
as described in this process.

.. note::

   The containers and objects in the "old" account cannot be listed
   anymore. In addition, if the Account Reaper process has not finished
   reaping the containers and objects in the "old" account, these are
   effectively orphaned and it is virtually impossible to find and delete
   them to free up disk space.

The solution is to delete the account database files and re-create the
account as follows:

#. You must know the tenant/project ID. The account name is
   AUTH_<project-id>. In this example, the tenant/project is
   ``4ebe3039674d4864a11fe0864ae4d905`` so the Swift account name is
   ``AUTH_4ebe3039674d4864a11fe0864ae4d905``.

#. Use ``swift-get-nodes`` to locate the account's database files (on
   three servers). The output has been truncated so we can focus on the
   important pieces of data:

   .. code::

      $ sudo swift-get-nodes /etc/swift/account.ring.gz AUTH_4ebe3039674d4864a11fe0864ae4d905
      ...
      curl -I -XHEAD "http://192.168.245.5:6202/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
      curl -I -XHEAD "http://192.168.245.3:6202/disk0/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
      curl -I -XHEAD "http://192.168.245.4:6202/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
      ...
      Use your own device location of servers:
      such as "export DEVICE=/srv/node"
      ssh 192.168.245.5 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
      ssh 192.168.245.3 "ls -lah ${DEVICE:-/srv/node*}/disk0/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
      ssh 192.168.245.4 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
      ...
      note: `/srv/node*` is used as default value of `devices`, the real value is set in the config file on each storage node.

#. Before proceeding, check that the account is really deleted by using
   curl. Execute the commands printed by ``swift-get-nodes``. For example:

   .. code::

      $ curl -I -XHEAD "http://192.168.245.5:6202/disk1/3934/AUTH_4ebe3039674d4864a11fe0864ae4d905"
      HTTP/1.1 404 Not Found
      Content-Length: 0
      Content-Type: text/html; charset=utf-8

   Repeat for the other two servers (192.168.245.3 and 192.168.245.4).
   A ``404 Not Found`` indicates that the account is deleted (or never
   existed). If you get a ``204 No Content`` response, do **not** proceed.

#. Use the ssh commands printed by ``swift-get-nodes`` to check if
   database files exist. For example:

   .. code::

      $ ssh 192.168.245.5 "ls -lah ${DEVICE:-/srv/node*}/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052"
      total 20K
      drwxr-xr-x 2 swift swift 110 Mar  9 10:22 .
      drwxr-xr-x 3 swift swift  45 Mar  9 10:18 ..
      -rw------- 1 swift swift 17K Mar  9 10:22 f5ecf8b40de3e1b0adb0dbe576874052.db
      -rw-r--r-- 1 swift swift   0 Mar  9 10:22 f5ecf8b40de3e1b0adb0dbe576874052.db.pending
      -rwxr-xr-x 1 swift swift   0 Mar  9 10:18 .lock

   Repeat for the other two servers (192.168.245.3 and 192.168.245.4).
   If no files exist, no further action is needed.

#. Stop Swift processes on all nodes listed by ``swift-get-nodes``
   (in this example, that is 192.168.245.3, 192.168.245.4 and
   192.168.245.5).

#. We recommend you make backup copies of the database files.

#. Delete the database files. For example:

   .. code::

      $ ssh 192.168.245.5
      $ cd /srv/node/disk1/accounts/3934/052/f5ecf8b40de3e1b0adb0dbe576874052
      $ sudo rm *

   Repeat for the other two servers (192.168.245.3 and 192.168.245.4).

#. Restart Swift on all three servers.

At this stage, the account is fully deleted. If you enable the auto-create
option, the next time the user attempts to access the account, the account
will be created. You may also use swiftly to recreate the account.

Procedure: Temporarily stop load balancers from directing traffic to a proxy server
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can stop the load balancers sending requests to a proxy server as
follows. This can be useful when a proxy is misbehaving but you need Swift
running to help diagnose the problem. By removing it from the load
balancers, customers are not impacted by the misbehaving proxy.

#. Ensure that in /etc/swift/proxy-server.conf the ``disable_path`` variable
   is set to ``/etc/swift/disabled-by-file``.

#. Log onto the proxy node.

#. Shut down Swift as follows:

   .. code::

      sudo swift-init proxy shutdown

   .. note::

      Shutdown, not stop.

#. Create the ``/etc/swift/disabled-by-file`` file. For example:

   .. code::

      sudo touch /etc/swift/disabled-by-file

#. Optionally, restart Swift:

   .. code::

      sudo swift-init proxy start

It works because the healthcheck middleware looks for
/etc/swift/disabled-by-file. If it exists, the middleware will return
503/error instead of 200/OK. This means the load balancer should stop
sending traffic to the proxy.

Procedure: Ad-Hoc disk performance test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can get an idea of whether a disk drive is performing well as follows:

.. code::

   sudo dd bs=1M count=256 if=/dev/zero conv=fdatasync of=/srv/node/disk11/remember-to-delete-this-later

You can expect ~600MB/sec. If you get a low number, repeat many times, as
Swift itself may also read or write to the disk, hence giving a lower
number.
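As the file name reminds you, remember to remove the test file when you
have finished:

.. code::

   sudo rm /srv/node/disk11/remember-to-delete-this-later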
#. From a server in your data center, use ``curl`` to check
   ``/healthcheck`` (see below).

#. If you have a monitoring system, check your monitoring system.

#. Check your hardware load balancers infrastructure.

#. Run swift-recon on a proxy node.

Functional tests usage
-----------------------

We would recommend that you set up the functional tests to run against your
production system. Run regularly, they can be a useful tool to validate
that the system is configured correctly. In addition, they can provide
early warning about failures in your system (if the functional tests stop
working, user applications will also probably stop working).

A script for running the functional tests is located in ``swift/.functests``.

External monitoring
-------------------

We use pingdom.com to monitor the external Swift API. We suggest the
following:

- Do a GET on ``/healthcheck``

- Create a container, make it public (x-container-read:
  .r:*,.rlistings), create a small file in the container; do a GET on the
  object

Diagnose: General approach
--------------------------

- Look at service status in your monitoring system.

- In addition to system monitoring tools and issue logging by users, swift
  errors will often result in log entries (see :ref:`swift_logs`).

- Look at any logs your deployment tool produces.

- Log files should be reviewed for error signatures (see below) that may
  point to a known issue, or root cause issues reported by the diagnostics
  tools, prior to escalation.

Dependencies
^^^^^^^^^^^^

The Swift software is dependent on overall system health. Operating system
level issues with network connectivity, domain name resolution, user
management, hardware and system configuration and capacity in terms of
memory and free disk space, may result in secondary Swift issues. System
level issues should be resolved prior to diagnosis of swift issues.

Diagnose: Swift-dispersion-report
---------------------------------

The swift-dispersion-report is a useful tool to gauge the general health of
the system. Configure the ``swift-dispersion`` report to cover at a minimum
every disk drive in your system (usually 1% coverage). See
:ref:`dispersion_report` for details of how to configure and use the
dispersion reporting tool.

The ``swift-dispersion-report`` tool can take a long time to run, especially
if any servers are down. We suggest you run it regularly (e.g., in a cron
job) and save the results. This makes it easy to refer to the last report
without having to wait for a long-running command to complete.

Diagnose: Is system responding to /healthcheck?
-----------------------------------------------

When you want to establish if a swift endpoint is running, run ``curl -k``
against https://*[ENDPOINT]*/healthcheck.

.. _swift_logs:

Diagnose: Interpreting messages in ``/var/log/swift/`` files
------------------------------------------------------------

.. note::

   In the Hewlett Packard Enterprise Helion Public Cloud we send logs to
   ``proxy.log`` (proxy-server logs), ``server.log`` (object-server,
   account-server, container-server logs), ``background.log`` (all other
   servers [object-replicator, etc]).

The following table lists known issues:

.. list-table::
   :widths: 25 25 25 25
   :header-rows: 1

   * - **Logfile**
     - **Signature**
     - **Issue**
     - **Steps to take**
   * - /var/log/syslog
     - kernel: [] sd .... [csbu:sd...] Sense Key: Medium Error
     - Suggests disk surface issues
     - Run ``swift-drive-audit`` on the target node to check for disk
       errors, repair disk errors
   * - /var/log/syslog
     - kernel: [] sd .... [csbu:sd...]
       Sense Key: Hardware Error
     - Suggests storage hardware issues
     - Run diagnostics on the target node to check for disk failures,
       replace failed disks
   * - /var/log/syslog
     - kernel: [] .... I/O error, dev sd.... ,sector ....
     -
     - Run diagnostics on the target node to check for disk errors
   * - /var/log/syslog
     - pound: NULL get_thr_arg
     - Multiple threads woke up
     - Noise, safe to ignore
   * - /var/log/swift/proxy.log
     - .... ERROR .... ConnectionTimeout ....
     - A storage node is not responding in a timely fashion
     - Check if node is down, not running Swift, unconfigured, storage
       off-line or for network issues between the proxy and the
       non-responding node
   * - /var/log/swift/proxy.log
     - proxy-server .... HTTP/1.0 500 ....
     - A proxy server has reported an internal server error
     - Examine the logs for any errors at the time the error was reported
       to attempt to understand the cause of the error.
   * - /var/log/swift/server.log
     - .... ERROR .... ConnectionTimeout ....
     - A storage server is not responding in a timely fashion
     - Check if node is down, not running Swift, unconfigured, storage
       off-line or for network issues between the server and the
       non-responding node
   * - /var/log/swift/server.log
     - .... ERROR .... Remote I/O error: '/srv/node/disk....
     - A storage device is not responding as expected
     - Run ``swift-drive-audit`` and check the filesystem named in the
       error for corruption (unmount & xfs_repair). Check if the
       filesystem is mounted and working.
   * - /var/log/swift/background.log
     - object-server ERROR container update failed .... Connection refused
     - A container server node could not be contacted
     - Check if node is down, not running Swift, unconfigured, storage
       off-line or for network issues between the server and the
       non-responding node
   * - /var/log/swift/background.log
     - object-updater ERROR with remote .... ConnectionTimeout
     - The remote container server is busy
     - If the container is very large, some errors updating it can be
       expected. However, this error can also occur if there is a
       networking issue.
   * - /var/log/swift/background.log
     - account-reaper STDOUT: .... error: ECONNREFUSED
     - Network connectivity issue or the target server is down.
     - Resolve network issue or reboot the target server
   * - /var/log/swift/background.log
     - .... ERROR .... ConnectionTimeout
     - A storage server is not responding in a timely fashion
     - The target server may be busy. However, this error can also occur
       if there is a networking issue.
   * - /var/log/swift/background.log
     - .... ERROR syncing .... Timeout
     - A timeout occurred syncing data to another node.
     - The target server may be busy. However, this error can also occur
       if there is a networking issue.
   * - /var/log/swift/background.log
     - .... ERROR Remote drive not mounted ....
     - A storage server disk is unavailable
     - Repair and remount the file system (on the remote node)
   * - /var/log/swift/background.log
     - object-replicator .... responded as unmounted
     - A storage server disk is unavailable
     - Repair and remount the file system (on the remote node)
   * - /var/log/swift/\*.log
     - STDOUT: EXCEPTION IN
     - An unexpected error occurred
     - Read the Traceback details; if it matches known issues (e.g. active
       network/disk issues), check for re-occurrences after the primary
       issues have been resolved
   * - /var/log/rsyncd.log
     - rsync: mkdir "/disk....failed: No such file or directory....
     - A local storage server disk is unavailable
     - Run diagnostics on the node to check for a failed or unmounted disk
   * - /var/log/swift*
     - Exception: Could not bind to 0.0.0.0:6xxx
     - Possible Swift process restart issue.
       This indicates an old swift process is still running.
     - Restart Swift services. If some swift services are reported down,
       check if they left residual processes behind.

Diagnose: Parted reports the backup GPT table is corrupt
--------------------------------------------------------

- If a GPT table is broken, a message like the following should be
  observed when the following command is run:

  .. code::

     $ sudo parted -l

  .. code::

     Error: The backup GPT table is corrupt, but the primary appears OK,
     so that will be used.

     OK/Cancel?

  To fix, go to :ref:`fix_broken_gpt_table`

Diagnose: Drives diagnostic reports a FS label is not acceptable
----------------------------------------------------------------

If diagnostics reports something like "FS label: obj001dsk011 is not
acceptable", it indicates that a partition has a valid disk label, but an
invalid filesystem label. In such cases proceed as follows:

#. Verify that the disk labels are correct:

   .. code::

      FS=/dev/sd#1

      sudo parted -l | grep object

#. If partition labels are inconsistent, then resolve the disk label
   issues before proceeding:

   .. code::

      sudo parted -s ${FS} name ${PART_NO} ${PART_NAME} #Partition Label
      #PART_NO is 1 for object disks and 3 for OS disks
      #PART_NAME follows the convention seen in "sudo parted -l | grep object"

#. If the Filesystem label is missing then create it with care:

   .. code::

      sudo xfs_admin -l ${FS} #Filesystem label (12 Char limit)

      #Check for the existence of a FS label

      OBJNO=<3 Length Object No.>

      #I.E OBJNO for sw-stbaz3-object0007 would be 007

      DISKNO=<3 Length Disk No.>

      #I.E DISKNO for /dev/sdb would be 001, /dev/sdc would be 002 etc.

      sudo xfs_admin -L "obj${OBJNO}dsk${DISKNO}" ${FS} #Create a FS Label

Diagnose: Failed LUNs
---------------------

.. note::

   The HPE Helion Public Cloud uses direct attach SmartArray
   controllers/drives. The information here is specific to that
   environment. The hpacucli utility mentioned here may be called
   hpssacli in your environment.

The ``swift_diagnostics`` mount checks may return a warning that a LUN has
failed, typically accompanied by DriveAudit check failures and device
errors. Such cases are usually caused by a drive failure, and if the drive
check also reports a failed status for the underlying drive, then follow
the procedure to replace the disk. Otherwise, the LUN can be re-enabled as
follows:

#. Generate a hpssacli diagnostic report. This report allows the DC team
   to troubleshoot potential cabling or hardware issues so it is imperative
   that you run it immediately when troubleshooting a failed LUN. You will
   come back later and grep this file for more details, but just generate
   it for now.

   .. code::

      sudo hpssacli controller all diag file=/tmp/hpacu.diag ris=on xml=off zip=off

Export the following variables using the instructions below before
proceeding further.

#. Print a list of logical drives and their numbers and take note of the
   failed drive's number and array value (example output: "array A
   logicaldrive 1..." would be exported as LDRIVE=1):

   .. code::

      sudo hpssacli controller slot=1 ld all show

#. Export the number of the logical drive that was retrieved from the
   previous command into the LDRIVE variable:

   .. code::

      export LDRIVE=<Logical Drive No.>

#. Print the array value and Port:Box:Bay for all drives and take note of
   the Port:Box:Bay for the failed drive (example output: " array A
   physicaldrive 2C:1:1..." would be exported as PBOX=2C:1:1). Match the
   array value of this output with the array value obtained from the
   previous command to be sure you are working on the same drive.
   Also, the array value usually matches the device name (for example,
   /dev/sdc in the case of "array c"), but we will run a different command
   to be sure we are operating on the correct device.

   .. code::

      sudo hpssacli controller slot=1 pd all show

   .. note::

      Sometimes a LUN may appear to have failed because it is not mounted
      and cannot be mounted, while the hpssacli/parted commands show no
      problems with the LUNs/drives. In this case, the filesystem may be
      corrupt and it may be necessary to run
      ``sudo xfs_check /dev/sd[a-l][1-2]`` to see if there is an xfs
      issue. The results of running this command may require that
      ``xfs_repair`` is run.

#. Export the Port:Box:Bay for the failed drive into the PBOX variable:

   .. code::

      export PBOX=<Port:Box:Bay>

#. Print the physical device information and take note of the Disk Name
   (example output: "Disk Name: /dev/sdk" would be exported as
   DEV=/dev/sdk):

   .. code::

      sudo hpssacli controller slot=1 ld ${LDRIVE} show detail | grep -i "Disk Name"

#. Export the device name variable from the preceding command (example:
   /dev/sdk):

   .. code::

      export DEV=<Disk Name>

#. Export the filesystem variable. Disks that are split between the
   operating system and data storage, typically sda and sdb, should only
   have repairs done on their data filesystem, usually /dev/sda2 and
   /dev/sdb2. Other data-only disks have just one partition on the device,
   so the filesystem will be on partition 1. In any case you should verify
   the data filesystem by running ``df -h | grep /srv/node`` and using the
   listed data filesystem for the device in question as the export. For
   example: /dev/sdk1.

   .. code::

      export FS=<Filesystem>

#. Verify the LUN is failed, and the device is not:

   .. code::

      sudo hpssacli controller slot=1 ld all show
      sudo hpssacli controller slot=1 pd all show
      sudo hpssacli controller slot=1 ld ${LDRIVE} show detail
      sudo hpssacli controller slot=1 pd ${PBOX} show detail

#. Stop the swift and rsync service:

   .. code::

      sudo service rsync stop
      sudo swift-init shutdown all

#. Unmount the problem drive:

   .. code::

      sudo umount ${FS}

#. If the umount fails, run ``lsof`` to search for the mount point and
   kill any lingering processes before repeating the unmount. Then
   re-enable the LUN and repair the filesystem:

   .. code::

      sudo hpacucli controller slot=1 ld ${LDRIVE} modify reenable
      sudo xfs_repair ${FS}

#. If the ``xfs_repair`` complains about possible journal data, use the
   ``xfs_repair -L`` option to zeroise the journal log.

#. Once complete, test-mount the filesystem, and tidy up its lost and
   found area.

   .. code::

      sudo mount ${FS} /mnt
      sudo rm -rf /mnt/lost+found/
      sudo umount /mnt

#. Mount the filesystem and restart swift and rsync.

#. Run the following to determine if a DC ticket is needed to check the
   cables on the node:

   .. code::

      grep -y media.exchanged /tmp/hpacu.diag
      grep -y hot.plug.count /tmp/hpacu.diag

#. If the output reports any non 0x00 values, it suggests that the cables
   should be checked. For example, log a DC ticket to check the SAS cables
   between the drive and the expander.

.. _diagnose_slow_disk_drives:

Diagnose: Slow disk devices
---------------------------

.. note::

   collectl is an open-source performance gathering/analysis tool.

If the diagnostics report a message such as ``sda: drive is slow``, you
should log onto the node and run the following command (remove the ``-c 1``
option to continuously monitor the data):

.. code::

   $ /usr/bin/collectl -s D -c 1
   waiting for 1 second sample...
   # DISK STATISTICS (/sec)
   #      <---------reads---------><---------writes---------><--------averages--------> Pct
   #Name  KBytes Merged IOs Size   KBytes Merged IOs Size    RWSize QLen Wait SvcTim    Util
   sdb       204      0  33    6       43      0   4   11         6    1    7      6     23
   sda        84      0  13    6      108     21   6   18        10    1    7      7     13
   sdc       100      0  16    6        0      0   0    0         6    1    7      6      9
   sdd       140      0  22    6       22      0   2   11         6    1    9      9     22
   sde        76      0  12    6      255      0  52    5         5    1    2      1     10
   sdf       276      0  44    6        0      0   0    0         6    1   11      8     38
   sdg       112      0  17    7       18      0   2    9         6    1    7      7     13
   sdh      3552      0  73   49        0      0   0    0        48    1    9      8     62
   sdi        72      0  12    6        0      0   0    0         6    1    8      8     10
   sdj       112      0  17    7       22      0   2   11         7    1   10      9     18
   sdk       120      0  19    6       21      0   2   11         6    1    8      8     16
   sdl       144      0  22    7       18      0   2    9         6    1    9      7     18
   dm-0        0      0   0    0        0      0   0    0         0    0    0      0      0
   dm-1        0      0   0    0       60      0  15    4         4    0    0      0      0
   dm-2        0      0   0    0       48      0  12    4         4    0    0      0      0
   dm-3        0      0   0    0        0      0   0    0         0    0    0      0      0
   dm-4        0      0   0    0        0      0   0    0         0    0    0      0      0
   dm-5        0      0   0    0        0      0   0    0         0    0    0      0      0

Look at the ``Wait`` and ``SvcTim`` values. It is not normal for these
values to exceed 50msec. This is known to impact customer performance
(upload/download). For a controller problem, many/all drives will show long
wait and service times. A reboot may correct the problem; otherwise
hardware replacement is needed.

Another way to look at the data is as follows:

.. code::

   $ /opt/hp/syseng/disk-anal.pl -d
   Disk: sda  Wait: 54580  371   65   25   12    6    6    0    1    2    0   46
   Disk: sdb  Wait: 54532  374   96   36   16    7    4    1    0    2    0   46
   Disk: sdc  Wait: 54345  554  105   29   15    4    7    1    4    4    0   46
   Disk: sdd  Wait: 54175  553  254   31   20   11    6    6    2    2    1   53
   Disk: sde  Wait: 54923   66   56   15    8    7    7    0    1    0    2   29
   Disk: sdf  Wait: 50952  941  565  403  426  366  442  447  338   99   38   97
   Disk: sdg  Wait: 50711  689  808  562  642  675  696  185   43   14    7   82
   Disk: sdh  Wait: 51018  668  688  483  575  542  692  275   55   22    9   87
   Disk: sdi  Wait: 51012 1011  849  672  568  240  344  280   38   13    6   81
   Disk: sdj  Wait: 50724  743  770  586  662  509  684  283   46   17   11   79
   Disk: sdk  Wait: 50886  700  585  517  633  511  729  352   89   23    8   81
   Disk: sdl  Wait: 50106  617  794  553  604  504  532  501  288  234  165  216
   Disk: sda  Time: 55040   22   16    6    1    1   13    0    0    0    3   12
   Disk: sdb  Time: 55014   41   19    8    3    1    8    0    0    0    3   17
   Disk: sdc  Time: 55032   23   14    8    9    2    6    1    0    0    0   19
   Disk: sdd  Time: 55022   29   17   12    6    2   11    0    0    0    1   14
   Disk: sde  Time: 55018   34   15   11   12    1    9    0    0    0    2   12
   Disk: sdf  Time: 54809  250   45    7    1    0    0    0    0    0    1    1
   Disk: sdg  Time: 55070   36    6    2    0    0    0    0    0    0    0    0
   Disk: sdh  Time: 55079   33    2    0    0    0    0    0    0    0    0    0
   Disk: sdi  Time: 55074   28    7    2    0    0    2    0    0    0    0    1
   Disk: sdj  Time: 55067   35   10    0    1    0    0    0    0    0    0    1
   Disk: sdk  Time: 55068   31   10    3    0    0    1    0    0    0    0    1
   Disk: sdl  Time: 54905  130   61    7    3    4    1    0    0    0    0    3

This shows the historical distribution of the wait and service times over a
day. This is how you read it:

- sda did 54580 operations with a short wait time, 371 operations with a
  longer wait time and 65 with an even longer wait time.

- sdl did 50106 operations with a short wait time, but as you can see many
  took longer.

There is a clear pattern that sdf to sdl have a problem. Actually, sda to
sde would more normally have lots of zeros in their data. But maybe this is
a busy system. In this example it is worth changing the controller as the
individual drives may be ok. After the controller is changed, use collectl
-s D as described above to see if the problem has cleared. disk-anal.pl
will continue to show historical data. You can look at recent data as
follows. It only looks at data from 13:15 to 14:15. As you can see, this is
a relatively clean system (few if any long wait or service times):

.. code::

   $ /opt/hp/syseng/disk-anal.pl -d -t 13:15-14:15
   Disk: sda  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdb  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdc  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdd  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sde  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdf  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdg  Wait: 3594    6    0    0    0    0    0    0    0    0    0    0
   Disk: sdh  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdi  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdj  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdk  Wait: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdl  Wait: 3599    1    0    0    0    0    0    0    0    0    0    0
   Disk: sda  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdb  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdc  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdd  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sde  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdf  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdg  Time: 3594    6    0    0    0    0    0    0    0    0    0    0
   Disk: sdh  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdi  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdj  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdk  Time: 3600    0    0    0    0    0    0    0    0    0    0    0
   Disk: sdl  Time: 3599    1    0    0    0    0    0    0    0    0    0    0

For long wait times where the service time appears normal, check the
logical drive cache status. While the cache may be enabled, it can be
disabled on a per-drive basis.

Diagnose: Slow network link - Measuring network performance
-----------------------------------------------------------

Network faults can cause performance between Swift nodes to degrade.
Testing with ``netperf`` is recommended. Other methods (such as copying
large files) may also work, but can produce inconclusive results.

Install ``netperf`` on all systems if not already installed. Check that
the UFW rules for its control port are in place. However, there are no
pre-opened ports for netperf's data connection. Pick a port number. In
this example, 12866 is used because it is one higher than netperf's
default control port number, 12865. If you get very strange results,
including zero values, the data port may not be open in UFW at the target,
or the netperf command line may be wrong.

Pick a ``source`` and ``target`` node. The source is often a proxy node
and the target is often an object node. Using the same source proxy you
can test communication to different object nodes in different AZs to
identify possible bottlenecks.

Running tests
^^^^^^^^^^^^^

#. Prepare the ``target`` node as follows:

   .. code::

      sudo iptables -I INPUT -p tcp -j ACCEPT

   Or, do:

   .. code::

      sudo ufw allow 12866/tcp

#. On the ``source`` node, run the following command to check throughput.
   Note the double-dash before the -P option. The command takes 10 seconds
   to complete. The ``target`` node is 192.168.245.5.

   .. code::

      $ netperf -H 192.168.245.5 -- -P 12866
      MIGRATED TCP STREAM TEST from 0.0.0.0 (0.0.0.0) port 12866 AF_INET to
      .72.4 (.72.4) port 12866 AF_INET : demo
      Recv   Send    Send
      Socket Socket  Message  Elapsed
      Size   Size    Size     Time     Throughput
      bytes  bytes   bytes    secs.    10^6bits/sec

      87380  16384  16384    10.02     923.69

#. On the ``source`` node, run the following command to check latency:

   .. code::

      $ netperf -H 192.168.245.5 -t TCP_RR -- -P 12866
      MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 12866
      AF_INET to .72.4 (.72.4) port 12866 AF_INET : demo : first burst 0
      Local /Remote
      Socket Size   Request  Resp.   Elapsed  Trans.
      Send   Recv   Size     Size    Time     Rate
      bytes  Bytes  bytes    bytes   secs.    per sec

      16384  87380  1        1       10.00    11753.37
      16384  87380

Expected results
^^^^^^^^^^^^^^^^

Faults will show up as differences between different pairs of nodes.
However, for reference, here are some expected numbers:

- For throughput, proxy to proxy, expect ~9300 Mbit/sec (proxies have a
  10Ge link).

- For throughput, proxy to object, expect ~920 Mbit/sec (at time of
  writing this, object nodes have a 1Ge link).

- For throughput, object to object, expect ~920 Mbit/sec.

- For latency (all types), expect ~11000 transactions/sec.

Diagnose: Remapping sectors experiencing UREs
---------------------------------------------

#. Find the bad sector, device, and filesystem in ``kern.log``.

#. Set the environment variables SEC, DEV & FS, for example:

   .. code::

      SEC=2930954256
      DEV=/dev/sdi
      FS=/dev/sdi1

#. Verify that the sector is bad:

   .. code::

      sudo dd if=${DEV} of=/dev/null bs=512 count=1 skip=${SEC}

#. If the sector is bad this command will output an input/output error:

   .. code::

      dd: reading `/dev/sdi`: Input/output error
      0+0 records in
      0+0 records out

#. Prevent chef from attempting to re-mount the filesystem while the
   repair is in progress:

   .. code::

      sudo mv /etc/chef/client.pem /etc/chef/xx-client.xx-pem

#. Stop the swift and rsync service:

   .. code::

      sudo service rsync stop
      sudo swift-init shutdown all

#. Unmount the problem drive:

   .. code::

      sudo umount ${FS}

#. Overwrite/remap the bad sector:

   .. code::

      sudo dd_rescue -d -A -m8b -s ${SEC}b ${DEV} ${DEV}

#. This command should report an input/output error the first time it is
   run. Run the command a second time; if it successfully remapped the bad
   sector, it should not report an input/output error.

#. Verify the sector is now readable:

   .. code::

      sudo dd if=${DEV} of=/dev/null bs=512 count=1 skip=${SEC}

#. If the sector is now readable this command should not report an
   input/output error.

#. If more than one problem sector is listed, set the SEC environment
   variable to the next sector in the list:

   .. code::

      SEC=123456789

#. Repeat from step 8.

#. Repair the filesystem:

   .. code::

      sudo xfs_repair ${FS}

#. If ``xfs_repair`` reports that the filesystem has valuable metadata
   changes:

   .. code::

      sudo xfs_repair ${FS}
      Phase 1 - find and verify superblock...
      Phase 2 - using internal log
              - zero log...
      ERROR: The filesystem has valuable metadata changes in a log which
      needs to be replayed. Mount the filesystem to replay the log, and
      unmount it before re-running xfs_repair. If you are unable to mount
      the filesystem, then use the -L option to destroy the log and
      attempt a repair. Note that destroying the log may cause corruption
      -- please attempt a mount of the filesystem before doing this.

#. You should attempt to mount the filesystem, and clear the lost+found
   area:

   .. code::

      sudo mount $FS /mnt
      sudo rm -rf /mnt/lost+found/*
      sudo umount /mnt

#. If the filesystem fails to mount then you will need to use the
   ``xfs_repair -L`` option to force log zeroing. Repeat step 11.

#. If ``xfs_repair`` reports that an additional input/output error has
   been encountered, get the sector details as follows:

   .. code::

      sudo grep "I/O error" /var/log/kern.log | grep sector | tail -1

#. If a new input/output error is reported, set the SEC environment
   variable to the problem sector number:

   .. code::

      SEC=234567890

#. Repeat from step 8.

#. Remount the filesystem and restart swift and rsync.

- If all UREs in the kern.log have been fixed and you are still unable to
  repair the disk with ``xfs_repair``, it is possible that the UREs have
  corrupted the filesystem or possibly destroyed the drive altogether. In
  this case, the first step is to re-format the filesystem and if this
  fails, get the disk replaced.
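
When several sectors are listed in ``kern.log``, the verify/remap steps
above lend themselves to a small helper. The following is a minimal sketch
using only the commands shown above; the device and the sector list are
placeholder assumptions you must replace with values from your own
``kern.log``, and the drive must already be unmounted with swift and rsync
stopped, exactly as in the manual procedure:

.. code::

   #!/bin/bash
   # Sketch: verify and remap a list of bad sectors on one device.
   # DEV and the sector list are placeholders - take the real values
   # from kern.log.
   DEV=/dev/sdi
   for SEC in 2930954256 2930954257; do
       # dd exits non-zero if the sector is unreadable.
       if ! sudo dd if=${DEV} of=/dev/null bs=512 count=1 skip=${SEC}; then
           # Overwrite/remap the bad sector; as described above, the
           # first run may report an input/output error, so run it twice.
           sudo dd_rescue -d -A -m8b -s ${SEC}b ${DEV} ${DEV}
           sudo dd_rescue -d -A -m8b -s ${SEC}b ${DEV} ${DEV}
       fi
   done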

Diagnose: High system latency
-----------------------------

.. note::

   The latency measurements described here are specific to the HPE Helion
   Public Cloud.

- A bad NIC on a proxy server. However, as explained above, this usually
  causes the peak to rise, but the average should remain near normal
  parameters. A quick fix is to shut down the proxy.

- A stuck memcache server. Accepts connections, but then will not respond.
  Expect to see timeout messages in ``/var/log/proxy.log`` (port 11211).
  Swift Diags will also report this as a failed node/port. A quick fix is
  to shut down the proxy server.

- A bad/broken object server can also cause problems if the accounts used
  by the monitor program happen to live on the bad object server.

- A general network problem within the data center. Compare the results
  with the Pingdom monitors to see if they also have a problem.

Diagnose: Interface reports errors
----------------------------------

Should a network interface on a Swift node begin reporting network errors,
it may well indicate a cable, switch, or network issue.

Get an overview of the interface with:

.. code::

   sudo ifconfig eth{n}
   sudo ethtool eth{n}

The ``Link Detected:`` indicator will read ``yes`` if the NIC is cabled.

Establish the adapter type with:

.. code::

   sudo ethtool -i eth{n}

Gather the interface statistics with:

.. code::

   sudo ethtool -S eth{n}

If the NIC supports a self-test, this can be performed with:

.. code::

   sudo ethtool -t eth{n}

Self-tests should read ``PASS`` if the NIC is operating correctly.

NIC module drivers can be re-initialised by carefully removing and
re-installing the modules (this avoids rebooting the server). For example,
Mellanox drivers use a two-part driver, mlx4_en and mlx4_core. To reload
these you must carefully remove the mlx4_en (ethernet) module, then the
mlx4_core module, and reinstall them in the reverse order. As the
interface will be disabled while the modules are unloaded, you must be
very careful not to lock yourself out, so it may be better to script this.

Diagnose: Hung swift object replicator
--------------------------------------

A replicator reports in its log that the remaining time exceeds 100 hours.
This may indicate that the swift ``object-replicator`` is stuck and not
making progress. Another useful way to check this is with the
``swift-recon -r`` command on a swift proxy server:

.. code::

   sudo swift-recon -r
   ===============================================================================
   --> Starting reconnaissance on 384 hosts
   ===============================================================================
   [2013-07-17 12:56:19] Checking on replication
   [replication_time] low: 2, high: 80, avg: 28.8, total: 11037, Failed: 0.0%, no_result: 0, reported: 383
   Oldest completion was 2013-06-12 22:46:50 (12 days ago) by 192.168.245.3:6200.
   Most recent completion was 2013-07-17 12:56:19 (5 seconds ago) by 192.168.245.5:6200.
   ===============================================================================

The ``Oldest completion`` line in this example indicates that the
object-replicator on swift object server 192.168.245.3 has not completed
the replication cycle in 12 days. This replicator is stuck. The object
replicator cycle is generally less than 1 hour, though a replicator cycle
of 15-20 hours can occur if nodes are added to the system and a new ring
has been deployed.

You can further check if the object replicator is stuck by logging on the
object server and checking the object replicator progress with the
following command:

.. code::

   # sudo grep object-rep /var/log/swift/background.log | grep -e "Starting object replication" -e "Object replication complete" -e "partitions rep"
   Jul 16 06:25:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69018.48s (0.22/sec, 22h remaining)
   Jul 16 06:30:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69318.58s (0.22/sec, 22h remaining)
   Jul 16 06:35:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69618.63s (0.22/sec, 23h remaining)
   Jul 16 06:40:46 192.168.245.4 object-replicator 15344/16450 (93.28%) partitions replicated in 69918.73s (0.22/sec, 23h remaining)
   Jul 16 06:45:46 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70218.75s (0.22/sec, 24h remaining)
   Jul 16 06:50:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70518.85s (0.22/sec, 24h remaining)
   Jul 16 06:55:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 70818.95s (0.22/sec, 25h remaining)
   Jul 16 07:00:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71119.05s (0.22/sec, 25h remaining)
   Jul 16 07:05:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71419.15s (0.21/sec, 26h remaining)
   Jul 16 07:10:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 71719.25s (0.21/sec, 26h remaining)
   Jul 16 07:15:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72019.27s (0.21/sec, 27h remaining)
   Jul 16 07:20:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72319.37s (0.21/sec, 27h remaining)
   Jul 16 07:25:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72619.47s (0.21/sec, 28h remaining)
   Jul 16 07:30:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 72919.56s (0.21/sec, 28h remaining)
   Jul 16 07:35:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 73219.67s (0.21/sec, 29h remaining)
   Jul 16 07:40:47 192.168.245.4 object-replicator 15348/16450 (93.30%) partitions replicated in 73519.76s (0.21/sec, 29h remaining)

The above status is output every 5 minutes to
``/var/log/swift/background.log``.

.. note::

   The 'remaining' time is increasing as time goes on; normally the time
   remaining should be decreasing. Also note the partition number. For
   example, 15344 remains the same for several status lines. Eventually
   the object replicator detects the hang and attempts to make progress by
   killing the problem thread. The replicator then progresses to the next
   partition but quite often it again gets stuck on the same partition.

One of the reasons for the object replicator hanging like this is
filesystem corruption on the drive. The following is a typical log entry
of a corrupted filesystem detected by the object replicator:

.. code::

   # sudo bzgrep "Remote I/O error" /var/log/swift/background.log* | grep srv | tail -1
   Jul 12 03:33:30 192.168.245.4 object-replicator STDOUT: ERROR:root:Error hashing suffix#012Traceback (most recent call last):#012 File
   "/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 199, in get_hashes#012 hashes[suffix] = hash_suffix(suffix_dir,
   reclaim_age)#012 File "/usr/lib/python2.7/dist-packages/swift/obj/replicator.py", line 84, in hash_suffix#012 path_contents =
   sorted(os.listdir(path))#012OSError: [Errno 121] Remote I/O error: '/srv/node/disk4/objects/1643763/b51'

An ``ls`` of the problem file or directory usually shows something like
the following:

.. code::

   # ls -l /srv/node/disk4/objects/1643763/b51
   ls: cannot access /srv/node/disk4/objects/1643763/b51: Remote I/O error

If no entry with ``Remote I/O error`` occurs in the ``background.log`` it
is not possible to determine why the object-replicator is hung. It may be
that the ``Remote I/O error`` entry is older than 7 days and so has been
rotated out of the logs. In this scenario it may be best to simply restart
the object-replicator.

#. Stop the object-replicator:

   .. code::

      # sudo swift-init object-replicator stop

#. Make sure the object replicator has stopped; if it has hung, the stop
   command will not stop the hung process:

   .. code::

      # ps auxww | grep swift-object-replicator

#. If the previous ps shows the object-replicator is still running, kill
   the process:

   .. code::

      # kill -9 <pid>

#. Start the object-replicator:

   .. code::

      # sudo swift-init object-replicator start

If the above grep did find a ``Remote I/O error`` then it may be possible
to repair the problem filesystem.

#. Stop swift and rsync:

   .. code::

      # sudo swift-init all shutdown
      # sudo service rsync stop

#. Make sure all swift processes have stopped:

   .. code::

      # ps auxww | grep swift | grep python

#. Kill any swift processes still running.

#. Unmount the problem filesystem:

   .. code::

      # sudo umount /srv/node/disk4

#. Repair the filesystem:

   .. code::

      # sudo xfs_repair -P /dev/sde1

#. If the ``xfs_repair`` fails then it may be necessary to re-format the
   filesystem. See :ref:`fix_broken_xfs_filesystem`. If the ``xfs_repair``
   is successful, re-enable chef and replication should commence again.

Diagnose: High CPU load
-----------------------

The CPU load average on an object server, as shown with the ``uptime``
command, is typically under 10 when the server is lightly to moderately
loaded:

.. code::

   $ uptime
   07:59:26 up 99 days, 5:57, 1 user, load average: 8.59, 8.39, 8.32

During times of increased activity, due to user transactions or object
replication, the CPU load average can increase to around 30.

However, sometimes the CPU load average can increase significantly. The
following is an example of an object server that has extremely high CPU
load:

.. code::

   $ uptime
   07:44:02 up 18:22, 1 user, load average: 407.12, 406.36, 404.59

Further issues and resolutions
------------------------------

.. note::

   The urgency levels in each **Action** column indicate whether or not it
   is required to take immediate action, or if the problem can be worked
   on during business hours.

.. list-table::
   :widths: 33 33 33
   :header-rows: 1

   * - **Scenario**
     - **Description**
     - **Action**
   * - ``/healthcheck`` latency is high.
     - The ``/healthcheck`` test does not tax the proxy very much, so any
       drop in value is probably related to network issues, rather than
       the proxies being very busy.
       A very slow proxy might impact the average number, but it would
       need to be very slow to shift the number that much.
     - Check networks. Do a
       ``curl https://<ip-address>:<port>/healthcheck`` where
       ``<ip-address>`` is the IP address of an individual proxy server.
       Repeat this for every proxy server to see if you can pinpoint the
       problem.

       Urgency: If there are other indications that your system is slow,
       you should treat this as an urgent problem.
   * - Swift process is not running.
     - You can use ``swift-init status`` to check if swift processes are
       running on any given server.
     - Run this command:

       .. code::

          sudo swift-init all start

       Examine messages in the swift log files to see if there are any
       error messages related to any of the swift processes since the time
       you ran the ``swift-init`` command. Take any corrective actions
       that seem necessary.

       Urgency: If this only affects one server, and you have more than
       one, identifying and fixing the problem can wait until business
       hours. If this same problem affects many servers, then you need to
       take corrective action immediately.
   * - ntpd is not running.
     - NTP is not running.
     - Configure and start NTP.

       Urgency: For proxy servers, this is vital.
   * - Host clock is not synced to an NTP server.
     - Node time settings do not match NTP server time. This may take some
       time to sync after a reboot.
     - Assuming NTP is configured and running, you have to wait until the
       times sync.
   * - A swift process has hundreds to thousands of open file descriptors.
     - May happen to any of the swift processes. Known to have happened
       with a ``rsyslogd`` restart and where ``/tmp`` was hanging.
     - Restart the swift processes on the affected node:

       .. code::

          % sudo swift-init all reload

       Urgency: If known performance problem: Immediate. If system seems
       fine: Medium.
   * - A swift process is not owned by the swift user.
     - If the UID of the swift user has changed, then the processes might
       not be owned by that UID.
     - Urgency: If this only affects one server, and you have more than
       one, identifying and fixing the problem can wait until business
       hours. If this same problem affects many servers, then you need to
       take corrective action immediately.
   * - Object account or container files not owned by swift.
     - This typically happens if, during a reinstall or a re-image of a
       server, the UID of the swift user was changed. The data files in
       the object account and container directories are owned by the
       original swift UID. As a result, the current swift user does not
       own these files.
     - Correct the UID of the swift user to reflect that of the original
       UID. An alternate action is to change the ownership of every file
       on all file systems. This alternate action is often impractical and
       will take considerable time.

       Urgency: If this only affects one server, and you have more than
       one, identifying and fixing the problem can wait until business
       hours. If this same problem affects many servers, then you need to
       take corrective action immediately.
   * - A disk drive has a high IO wait or service time.
     - If high IO wait times are seen for a single disk, then the disk
       drive is the problem. If most/all devices are slow, the controller
       is probably the source of the problem. The controller cache may
       also be misconfigured, which will cause similar long wait or
       service times.
     - As a first step, if your controllers have a cache, check that it is
       enabled and that its battery/capacitor is working. Second, reboot
       the server. If the problem persists, file a DC ticket to have the
       drive or controller replaced.
       See :ref:`diagnose_slow_disk_drives` on how to check the drive wait
       or service times.

       Urgency: Medium
   * - The network interface is not up.
     - Use the ``ifconfig`` and ``ethtool`` commands to determine the
       network state.
     - You can try restarting the interface. However, generally the
       interface (or cable) is probably broken, especially if the
       interface is flapping.

       Urgency: If this only affects one server, and you have more than
       one, identifying and fixing the problem can wait until business
       hours. If this same problem affects many servers, then you need to
       take corrective action immediately.
   * - Network interface card (NIC) is not operating at the expected
       speed.
     - The NIC is running at a slower speed than its nominal rated speed.
       For example, it is running at 100 Mb/s and the NIC is a 1Ge NIC.
     - 1. Try resetting the interface with:

          .. code::

             sudo ethtool -s eth0 speed 1000

          ... and then run:

          .. code::

             sudo lshw -class network

          See if the interface comes up at the expected speed. Failing
          that, check hardware (NIC cable/switch port).

       2. If persistent, consider shutting down the server (especially if
          a proxy) until the problem is identified and resolved. If you
          leave this server running it can have a large impact on overall
          performance.

       Urgency: High
   * - The interface RX/TX error count is non-zero.
     - A value of 0 is typical, but counts of 1 or 2 do not indicate a
       problem.
     - 1. For low numbers (for example, 1 or 2), you can simply ignore
          them. Numbers in the range 3-30 probably indicate that the error
          count has crept up slowly over a long time. Consider rebooting
          the server to remove the report from the noise. Typically, when
          a cable or interface is bad, the error count goes to 400+; that
          is, it stands out. There may be other symptoms such as the
          interface going up and down or not running at the correct speed.
          A server with a high error count should be watched.

       2. If the error count continues to climb, consider taking the
          server down until it can be properly investigated. In any case,
          a reboot should be done to clear the error count.

       Urgency: High, if the error count is increasing.
   * - In a swift log you see a message that a process has not replicated
       in over 24 hours.
     - The replicator has not successfully completed a run in the last 24
       hours. This indicates that the replicator has probably hung.
     - Use ``swift-init`` to stop and then restart the replicator process.

       Urgency: Low. However, if you recently added or replaced disk
       drives then you should treat this urgently.
   * - Container Updater has not run in 4 hour(s).
     - The service may appear to be running; however, it may be hung.
       Examine the swift logs to see if there are any error messages
       relating to the container updater. This may potentially explain why
       the updater is not running.
     - Urgency: Medium

       This may have been triggered by a recent restart of the rsyslog
       daemon. Restart the service with:

       .. code::

          sudo swift-init container-updater reload
   * - Object replicator: Reports the remaining time and that time is more
       than 100 hours.
     - Each replication cycle, the object replicator writes a log message
       reporting statistics about the current cycle. This includes an
       estimate for the remaining time needed to replicate all objects. If
       this time is longer than 100 hours, there is a problem with the
       replication process.
     - Urgency: Medium

       Restart the service with:

       .. code::

          sudo swift-init object-replicator reload

       Check that the remaining replication time is going down.
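
As a quick way to confirm that a restarted replicator is actually making
progress, you can compare successive status lines in the log and take a
cluster-wide view with ``swift-recon``. The following is a minimal sketch,
assuming the log layout described earlier in this document:

.. code::

   # The "remaining" estimate and the partition counts should change
   # between successive samples if the replicator is healthy.
   sudo grep object-rep /var/log/swift/background.log | grep "partitions rep" | tail -5

   # Cluster-wide replication completion ages, run from a proxy node.
   sudo swift-recon -r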
swift-2.17.0/doc/source/overview_global_cluster.rst0000666000175100017510000001407213236061617022520 0ustar zuulzuul00000000000000===============
Global Clusters
===============

--------
Overview
--------

Swift's default configuration is currently designed to work in a single
region, where a region is defined as a group of machines with
high-bandwidth, low-latency links between them. However, configuration
options exist that make running a performant multi-region Swift cluster
possible.

For the rest of this section, we will assume a two-region Swift cluster:
region 1 in San Francisco (SF), and region 2 in New York (NY). Each region
shall contain within it 3 zones, numbered 1, 2, and 3, for a total of 6
zones.

.. _configuring_global_clusters:

---------------------------
Configuring Global Clusters
---------------------------

.. note::

   The proxy-server configuration options described below can be given
   generic settings in the ``[app:proxy-server]`` configuration section
   and/or given specific settings for individual policies using
   :ref:`proxy_server_per_policy_config`.

~~~~~~~~~~~~~
read_affinity
~~~~~~~~~~~~~

This setting, combined with the sorting_method setting, makes the proxy
server prefer local backend servers for GET and HEAD requests over
non-local ones. For example, it is preferable for an SF proxy server to
service object GET requests by talking to SF object servers, as the client
will receive lower latency and higher throughput.

By default, Swift randomly chooses one of the three replicas to give to
the client, thereby spreading the load evenly. In the case of a
geographically-distributed cluster, the administrator is likely to
prioritize keeping traffic local over even distribution of results. This
is where the read_affinity setting comes in.

Example::

    [app:proxy-server]
    sorting_method = affinity
    read_affinity = r1=100

This will make the proxy attempt to service GET and HEAD requests from
backends in region 1 before contacting any backends in region 2. However,
if no region 1 backends are available (due to replica placement, failed
hardware, or other reasons), then the proxy will fall back to backend
servers in other regions.

Example::

    [app:proxy-server]
    sorting_method = affinity
    read_affinity = r1z1=100, r1=200

This will make the proxy attempt to service GET and HEAD requests from
backends in region 1 zone 1, then backends in region 1, then any other
backends. If a proxy is physically close to a particular zone or zones,
this can provide bandwidth savings. For example, if a zone corresponds to
servers in a particular rack, and the proxy server is in that same rack,
then setting read_affinity to prefer reads from within the rack will
result in less traffic between the top-of-rack switches.

The read_affinity setting may contain any number of region/zone
specifiers; the priority number (after the equals sign) determines the
ordering in which backend servers will be contacted. A lower number means
higher priority.

Note that read_affinity only affects the ordering of primary nodes (see
ring docs for definition of primary node), not the ordering of handoff
nodes.

~~~~~~~~~~~~~~
write_affinity
~~~~~~~~~~~~~~

This setting makes the proxy server prefer local backend servers for
object PUT requests over non-local ones. For example, it may be preferable
for an SF proxy server to service object PUT requests by talking to SF
object servers, as the client will receive lower latency and higher
throughput.
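
As mentioned in the note at the start of this section, these affinity
options can also be scoped to a single storage policy. The following is a
minimal sketch, not taken from this document: it assumes a policy with
index 0 and uses the per-policy configuration section syntax referenced in
:ref:`proxy_server_per_policy_config`::

    [app:proxy-server]
    sorting_method = affinity
    read_affinity = r1=100

    [proxy-server:policy:0]
    # Overrides for policy 0 only; other policies keep the generic
    # [app:proxy-server] settings.
    write_affinity = r1
    write_affinity_node_count = 2 * replicas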

However, if this setting is used, note that a NY proxy server handling a
GET request for an object that was PUT using write affinity may have to
fetch it across the WAN link, as the object won't immediately have any
replicas in NY. However, replication will move the object's replicas to
their proper homes in both SF and NY.

One potential issue with write_affinity is that an end user may get a 404
error when deleting objects before they have been replicated. The
write_affinity_handoff_delete_count setting is used together with
write_affinity in order to solve that issue. With its default
configuration, Swift will calculate the proper number of handoff nodes to
send requests to.

Note that only object PUT/DELETE requests are affected by the
write_affinity setting; POST, GET, HEAD, OPTIONS, and account/container
PUT requests are not affected.

This setting lets you trade data distribution for throughput. If
write_affinity is enabled, then object replicas will initially be stored
all within a particular region or zone, thereby decreasing the quality of
the data distribution, but the replicas will be distributed over fast WAN
links, giving higher throughput to clients. Note that the replicators will
eventually move objects to their proper, well-distributed homes.

The write_affinity setting is useful only when you don't typically read
objects immediately after writing them. For example, consider a workload
of mainly backups: if you have a bunch of machines in NY that periodically
write backups to Swift, then odds are that you don't then immediately read
those backups in SF. If your workload doesn't look like that, then you
probably shouldn't use write_affinity.

The write_affinity_node_count setting is only useful in conjunction with
write_affinity; it governs how many local object servers will be tried
before falling back to non-local ones.

Example::

    [app:proxy-server]
    write_affinity = r1
    write_affinity_node_count = 2 * replicas

Assuming 3 replicas, this configuration will make object PUTs try storing
the object's replicas on up to 6 disks ("2 * replicas") in region 1
("r1"). The proxy server tries to find 3 devices for storing the object.
If a device is unavailable, it queries the ring for the 4th device, and so
on up to the 6th device. If the 6th device is still unavailable, the last
replica will be sent to another region. This does not mean there will be 6
replicas in region 1.

You should be aware that, if you have data coming into SF faster than your
replicators are transferring it to NY, then your cluster's data
distribution will get worse and worse over time as objects pile up in SF.
If this happens, it is recommended to disable write_affinity and simply
let object PUTs traverse the WAN link, as that will naturally limit the
object growth rate to what your WAN link can handle.
swift-2.17.0/doc/source/overview_acl.rst0000666000175100017510000003353213236061617020260 0ustar zuulzuul00000000000000===========================
Access Control Lists (ACLs)
===========================

Normally to create, read and modify containers and objects, you must have
the appropriate roles on the project associated with the account, i.e.,
you must be the owner of the account. However, an owner can grant access
to other users by using an Access Control List (ACL).

There are two types of ACLs:

- :ref:`container_acls`. These are specified on a container and apply to
  that container only and the objects in the container.

- :ref:`account_acls`. These are specified at the account level and apply
  to all containers and objects in the account.

.. _container_acls:

--------------
Container ACLs
--------------

Container ACLs are stored in the ``X-Container-Write`` and
``X-Container-Read`` metadata. The scope of the ACL is limited to the
container where the metadata is set and the objects in the container. In
addition:

- ``X-Container-Write`` grants the ability to perform PUT, POST and DELETE
  operations on objects within a container. It does not grant the ability
  to perform POST or DELETE operations on the container itself. Some ACL
  elements also grant the ability to perform HEAD or GET operations on the
  container.

- ``X-Container-Read`` grants the ability to perform GET and HEAD
  operations on objects within a container. Some of the ACL elements also
  grant the ability to perform HEAD or GET operations on the container
  itself. However, a container ACL does not allow access to privileged
  metadata (such as ``X-Container-Sync-Key``).

Container ACLs use the "V1" ACL syntax which is a comma separated string
of elements as shown in the following example::

    .r:*,.rlistings,7ec59e87c6584c348b563254aae4c221:*

Spaces may occur between elements as shown in the following example::

    .r : *, .rlistings, 7ec59e87c6584c348b563254aae4c221:*

However, these spaces are removed from the value stored in the
``X-Container-Write`` and ``X-Container-Read`` metadata. In addition, the
``.r:`` string can be written as ``.referrer:``, but is stored as ``.r:``.

While all auth systems use the same syntax, the meaning of some elements
is different because of the different concepts used by different auth
systems as explained in the following sections:

- :ref:`acl_common_elements`
- :ref:`acl_keystone_elements`
- :ref:`acl_tempauth_elements`

.. _acl_common_elements:

Common ACL Elements
-------------------

The following table describes elements of an ACL that are supported by
both Keystone auth and TempAuth. These elements should only be used with
``X-Container-Read`` (with the exception of ``.rlistings``; an error will
occur if used with ``X-Container-Write``):

============================== ================================================
Element                        Description
============================== ================================================
``.r:*``                       Any user has access to objects. No token is
                               required in the request.
``.r:<referrer>``              The referrer is granted access to objects. The
                               referrer is identified by the ``Referer``
                               request header in the request. No token is
                               required.
``.r:-<referrer>``             This syntax (with "-" prepended to the
                               referrer) is supported. However, it does not
                               deny access if another element (e.g.,
                               ``.r:*``) grants access.
``.rlistings``                 Any user can perform a HEAD or GET operation
                               on the container provided the user also has
                               read access on objects (e.g., also has
                               ``.r:*`` or ``.r:<referrer>``). No token is
                               required.
============================== ================================================

.. _acl_keystone_elements:

Keystone Auth ACL Elements
--------------------------

The following table describes elements of an ACL that are supported only
by Keystone auth. Keystone auth also supports the elements described in
:ref:`acl_common_elements`.

A token must be included in the request for any of these ACL elements to
take effect.

============================== ================================================
Element                        Description
============================== ================================================
``<project-id>:<user-id>``     The specified user, provided a token scoped to
                               the project is included in the request, is
                               granted access. Access to the container is
                               also granted when used in
                               ``X-Container-Read``.
``<project-id>:*``             Any user with a role in the specified Keystone
                               project has access. A token scoped to the
                               project must be included in the request.
                               Access to the container is also granted when
                               used in ``X-Container-Read``.
``*:<user-id>``                The specified user has access. A token for the
                               user (scoped to any project) must be included
                               in the request. Access to the container is
                               also granted when used in
                               ``X-Container-Read``.
``*:*``                        Any user has access. Access to the container
                               is also granted when used in
                               ``X-Container-Read``. The ``*:*`` element
                               differs from the ``.r:*`` element because
                               ``*:*`` requires that a valid token is
                               included in the request whereas ``.r:*`` does
                               not require a token. In addition, ``.r:*``
                               does not grant access to the container
                               listing.
``<role_name>``                A user with the specified role *name* on the
                               project within which the container is stored
                               is granted access. A user token scoped to the
                               project must be included in the request.
                               Access to the container is also granted when
                               used in ``X-Container-Read``.
============================== ================================================

.. note::

   Keystone project (tenant) or user *names* (i.e.,
   ``<project-name>:<user-name>``) must no longer be used in ACLs because
   with the introduction of domains in keystone they are not globally
   unique; use project and user *ids* instead.

.. _acl_tempauth_elements:

TempAuth ACL Elements
---------------------

The following table describes elements of an ACL that are supported only
by TempAuth:

============================== ================================================
Element                        Description
============================== ================================================
``<user-name>``                The named user is granted access. The
                               wildcard ("*") character is not supported. A
                               token from the user must be included in the
                               request.
============================== ================================================

----------------------
Container ACL Examples
----------------------

Container ACLs may be set by including ``X-Container-Write`` and/or
``X-Container-Read`` headers with a PUT or a POST request to the container
URL. The following examples use the ``swift`` command line client which
supports setting these headers via its ``--write-acl`` and ``--read-acl``
options.

Example: Public Container
-------------------------

The following allows anybody to list objects in the ``www`` container and
download objects. The users do not need to include a token in their
request. This ACL is commonly referred to as making the container
"public". It is useful when used with :ref:`staticweb`::

    swift post www --read-acl ".r:*,.rlistings"

Example: Shared Writable Container
----------------------------------

The following allows anybody to upload or download objects. However, to
download an object, the exact name of the object must be known since users
cannot list the objects in the container. The users must include a
Keystone token in the upload request. However, it does not need to be
scoped to the project associated with the container::

    swift post www --read-acl ".r:*" --write-acl "*:*"

Example: Sharing a Container with Project Members
-------------------------------------------------

The following allows any member of the ``77b8f82565f14814bece56e50c4c240f``
project to upload and download objects or to list the contents of the
``www`` container. A token scoped to the
``77b8f82565f14814bece56e50c4c240f`` project must be included in the
request::

    swift post www --read-acl "77b8f82565f14814bece56e50c4c240f:*" \
      --write-acl "77b8f82565f14814bece56e50c4c240f:*"

Example: Sharing a Container with Users having a specified Role
---------------------------------------------------------------

The following allows any user that has been assigned the
``my_read_access_role`` on the project within which the ``www`` container
is stored to download objects or to list the contents of the ``www``
container.
A user token scoped to the project must be included in the download or
list request::

    swift post www --read-acl "my_read_access_role"

Example: Allowing a Referrer Domain to Download Objects
-------------------------------------------------------

The following allows any request from the ``example.com`` domain to access
an object in the container::

    swift post www --read-acl ".r:.example.com"

However, the request from the user **must** contain the appropriate
`Referer` header as shown in this example request::

    curl -i $publicURL/www/document --head -H "Referer: http://www.example.com/index.html"

.. note::

   The `Referer` header is included in requests by many browsers. However,
   since it is easy to create a request with any desired value in the
   `Referer` header, the referrer ACL has very weak security.

.. _account_acls:

------------
Account ACLs
------------

.. note::

   Account ACLs are not currently supported by Keystone auth

The ``X-Account-Access-Control`` header is used to specify account-level
ACLs in a format specific to the auth system. These headers are visible
and settable only by account owners (those for whom ``swift_owner`` is
true). Behavior of account ACLs is auth-system-dependent. In the case of
TempAuth, if an authenticated user has membership in a group which is
listed in the ACL, then the user is allowed the access level of that ACL.

Account ACLs use the "V2" ACL syntax, which is a JSON dictionary with keys
named "admin", "read-write", and "read-only". (Note the case sensitivity.)
An example value for the ``X-Account-Access-Control`` header looks like
this, where ``a``, ``b`` and ``c`` are user names::

    {"admin":["a","b"],"read-only":["c"]}

Keys may be absent (as shown in the above example).

The recommended way to generate ACL strings is as follows::

    from swift.common.middleware.acl import format_acl
    acl_data = { 'admin': ['alice'], 'read-write': ['bob', 'carol'] }
    acl_string = format_acl(version=2, acl_dict=acl_data)

Using the :func:`format_acl` method will ensure that JSON is encoded as
ASCII (using e.g. '\u1234' for Unicode). While it's permissible to
manually send ``curl`` commands containing ``X-Account-Access-Control``
headers, you should exercise caution when doing so, due to the potential
for human error.

Within the JSON dictionary stored in ``X-Account-Access-Control``, the
keys have the following meanings:

============ ==============================================================
Access Level Description
============ ==============================================================
read-only    These identities can read *everything* (except privileged
             headers) in the account. Specifically, a user with read-only
             account access can get a list of containers in the account,
             list the contents of any container, retrieve any object, and
             see the (non-privileged) headers of the account, any
             container, or any object.
read-write   These identities can read or write (or create) any container.
             A user with read-write account access can create new
             containers, set any unprivileged container headers, overwrite
             objects, delete containers, etc. A read-write user can NOT
             set account headers (or perform any PUT/POST/DELETE requests
             on the account).
admin        These identities have "swift_owner" privileges. A user with
             admin account access can do anything the account owner can,
             including setting account headers and any privileged headers
             -- and thus granting read-only, read-write, or admin access
             to other users.
============ ============================================================== For more details, see :mod:`swift.common.middleware.tempauth`. For details on the ACL format, see :mod:`swift.common.middleware.acl`.swift-2.17.0/doc/source/admin/0000775000175100017510000000000013236061751016122 5ustar zuulzuul00000000000000swift-2.17.0/doc/source/admin/objectstorage-features.rst0000666000175100017510000000406213236061617023330 0ustar zuulzuul00000000000000===================== Features and benefits ===================== .. list-table:: :header-rows: 1 :widths: 10 40 * - Features - Benefits * - Leverages commodity hardware - No lock-in, lower price/GB. * - HDD/node failure agnostic - Self-healing, reliable, data redundancy protects from failures. * - Unlimited storage - Large and flat namespace, highly scalable read/write access, able to serve content directly from storage system. * - Multi-dimensional scalability - Scale-out architecture: Scale vertically and horizontally-distributed storage. Backs up and archives large amounts of data with linear performance. * - Account/container/object structure - No nesting, not a traditional file system: Optimized for scale, it scales to multiple petabytes and billions of objects. * - Built-in replication 3✕ + data redundancy (compared with 2✕ on RAID) - A configurable number of accounts, containers and object copies for high availability. * - Easily add capacity (unlike RAID resize) - Elastic data scaling with ease. * - No central database - Higher performance, no bottlenecks. * - RAID not required - Handle many small, random reads and writes efficiently. * - Built-in management utilities - Account management: Create, add, verify, and delete users; Container management: Upload, download, and verify; Monitoring: Capacity, host, network, log trawling, and cluster health. * - Drive auditing - Detect drive failures preempting data corruption. * - Expiring objects - Users can set an expiration time or a TTL on an object to control access. * - Direct object access - Enable direct browser access to content, such as for a control panel. * - Realtime visibility into client requests - Know what users are requesting. * - Supports S3 API - Utilize tools that were designed for the popular S3 API. * - Restrict containers per account - Limit access to control usage by user. swift-2.17.0/doc/source/admin/objectstorage-intro.rst0000666000175100017510000000230613236061617022644 0ustar zuulzuul00000000000000============================== Introduction to Object Storage ============================== OpenStack Object Storage (swift) is used for redundant, scalable data storage using clusters of standardized servers to store petabytes of accessible data. It is a long-term storage system for large amounts of static data which can be retrieved and updated. Object Storage uses a distributed architecture with no central point of control, providing greater scalability, redundancy, and permanence. Objects are written to multiple hardware devices, with the OpenStack software responsible for ensuring data replication and integrity across the cluster. Storage clusters scale horizontally by adding new nodes. Should a node fail, OpenStack works to replicate its content from other active nodes. Because OpenStack uses software logic to ensure data replication and distribution across different devices, inexpensive commodity hard drives and servers can be used in lieu of more expensive equipment. Object Storage is ideal for cost effective, scale-out storage. 
It provides a fully distributed, API-accessible storage platform that can
be integrated directly into applications or used for backup, archiving,
and data retention.
swift-2.17.0/doc/source/admin/objectstorage-tenant-specific-image-storage.rst0000666000175100017510000241213236061617027305 0ustar zuulzuul00000000000000==============================================================
Configure project-specific image locations with Object Storage
==============================================================

For some deployers, it is not ideal to store all images in one place to
enable all projects and users to access them. You can configure the Image
service to store image data in project-specific image locations. Then,
only the following projects can use the Image service to access the
created image:

- The project that owns the image
- Projects that are defined in ``swift_store_admin_tenants`` and that
  have admin-level accounts

**To configure project-specific image locations**

#. Configure swift as your ``default_store`` in the ``glance-api.conf``
   file.

#. Set these configuration options in the ``glance-api.conf`` file:

   - swift_store_multi_tenant
     Set to ``True`` to enable tenant-specific storage locations.
     Default is ``False``.

   - swift_store_admin_tenants
     Specify a list of tenant IDs that can grant read and write access
     to all Object Storage containers that are created by the Image
     service.

With this configuration, images are stored in an Object Storage service
(swift) endpoint that is pulled from the service catalog for the
authenticated user.
swift-2.17.0/doc/source/admin/objectstorage-account-reaper.rst0000666000175100017510000444713236061617024425 0ustar zuulzuul00000000000000==============
Account reaper
==============

The purpose of the account reaper is to remove data from deleted
accounts.

A reseller marks an account for deletion by issuing a ``DELETE`` request
on the account's storage URL. This action sets the ``status`` column of
the account_stat table in the account database and replicas to
``DELETED``, marking the account's data for deletion.

Typically, no specific retention time or undelete support is provided.
However, you can set a ``delay_reaping`` value in the ``[account-reaper]``
section of the ``account-server.conf`` file to delay the actual deletion
of data. At this time, to undelete you have to update the account
database replicas directly, setting the status column to an empty string
and updating the put_timestamp to be greater than the delete_timestamp.

.. note::

   It is on the development to-do list to write a utility that performs
   this task, preferably through a REST call.

The account reaper runs on each account server and scans the server
occasionally for account databases marked for deletion. It only fires up
on the accounts for which the server is the primary node, so that
multiple account servers aren't trying to do it simultaneously. Using
multiple servers to delete one account might improve the deletion speed
but requires coordination to avoid duplication. Speed really is not a big
concern with data deletion, and large accounts aren't deleted often.

Deleting an account is simple. For each account container, all objects
are deleted and then the container is deleted. Deletion requests that
fail will not stop the overall process but will cause the overall process
to fail eventually (for example, if an object delete times out, you will
not be able to delete the container or the account).
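The bottom-up order of that sweep can be pictured in a few lines of
Python. This is only an illustrative sketch of the flow described above,
not the actual implementation (which lives in ``swift/account/reaper.py``);
the callables passed in are hypothetical stand-ins for the direct requests
the reaper makes to storage nodes:

.. code-block:: python

   def reap_account(account, list_containers, list_objects,
                    delete_object, delete_container):
       # Delete every object in every container, bottom-up; a failed
       # delete does not abort the sweep, since the reaper retries the
       # account on a later pass until it is empty.
       for container in list_containers(account):
           for obj in list_objects(account, container):
               delete_object(account, container, obj)
           delete_container(account, container)
       # The emptied account database itself is reclaimed later by the
       # db_replicator.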
The account reaper keeps trying to delete an account until it is empty,
at which point the database reclaim process within the db\_replicator
will remove the database files.

A persistent error state may prevent the deletion of an object or
container. If this happens, you will see a message in the log, for
example:

.. code-block:: console

   Account <name> has not been reaped since <date>

You can control when this is logged with the ``reap_warn_after`` value in
the ``[account-reaper]`` section of the ``account-server.conf`` file. The
default value is 30 days.
swift-2.17.0/doc/source/admin/objectstorage-troubleshoot.rst0000666000175100017510001626313236061617024245 0ustar zuulzuul00000000000000===========================
Troubleshoot Object Storage
===========================

For Object Storage, everything is logged in ``/var/log/syslog`` (or
``messages`` on some distros). Several settings enable further
customization of logging, such as ``log_name``, ``log_facility``, and
``log_level``, within the object server configuration files.

Drive failure
~~~~~~~~~~~~~

Problem
-------

Drive failure can prevent Object Storage from performing replication.

Solution
--------

In the event that a drive has failed, the first step is to make sure the
drive is unmounted. This will make it easier for Object Storage to work
around the failure until it has been resolved. If the drive is going to
be replaced immediately, then it is just best to replace the drive,
format it, remount it, and let replication fill it up.

If you cannot replace the drive immediately, then it is best to leave it
unmounted and remove the drive from the ring. This will allow all the
replicas that were on that drive to be replicated elsewhere until the
drive is replaced. Once the drive is replaced, it can be re-added to the
ring.

You can look at error messages in the ``/var/log/kern.log`` file for
hints of drive failure.

Server failure
~~~~~~~~~~~~~~

Problem
-------

The server is potentially offline and may have failed or require a
reboot.

Solution
--------

If a server is having hardware issues, it is a good idea to make sure the
Object Storage services are not running. This will allow Object Storage
to work around the failure while you troubleshoot.

If the server just needs a reboot, or a small amount of work that should
only last a couple of hours, then it is probably best to let Object
Storage work around the failure and get the machine fixed and back
online. When the machine comes back online, replication will make sure
that anything missed during the downtime gets updated.

If the server has more serious issues, then it is probably best to remove
all of the server's devices from the ring. Once the server has been
repaired and is back online, the server's devices can be added back into
the ring. It is important that the devices are reformatted before putting
them back into the ring, as they are likely to be responsible for a
different set of partitions than before.

Detect failed drives
~~~~~~~~~~~~~~~~~~~~

Problem
-------

When drives fail, it can be difficult to detect that a drive has failed
and to determine the details of the failure.

Solution
--------

It has been our experience that when a drive is about to fail, error
messages appear in the ``/var/log/kern.log`` file. There is a script
called ``swift-drive-audit`` that can be run via cron to watch for bad
drives. If errors are detected, it will unmount the bad drive, so that
Object Storage can work around it. The script takes a configuration file
with the following settings:
.. list-table:: **Description of configuration options for [drive-audit] in drive-audit.conf**
   :header-rows: 1

   * - Configuration option = Default value
     - Description
   * - ``device_dir = /srv/node``
     - Directory devices are mounted under
   * - ``error_limit = 1``
     - Number of errors to find before a device is unmounted
   * - ``log_address = /dev/log``
     - Location where syslog sends the logs to
   * - ``log_facility = LOG_LOCAL0``
     - Syslog log facility
   * - ``log_file_pattern = /var/log/kern.*[!.][!g][!z]``
     - Location of the log file, with a globbing pattern, used to locate
       device blocks with errors in the log file
   * - ``log_level = INFO``
     - Logging level
   * - ``log_max_line_length = 0``
     - Caps the length of log lines to the value given; no limit if set
       to 0, the default.
   * - ``log_to_console = False``
     - No help text available for this option.
   * - ``minutes = 60``
     - Number of minutes to look back in ``/var/log/kern.log``
   * - ``recon_cache_path = /var/cache/swift``
     - Directory where stats for a few items will be stored
   * - ``regex_pattern_1 = \berror\b.*\b(dm-[0-9]{1,2}\d?)\b``
     - No help text available for this option.
   * - ``unmount_failed_device = True``
     - No help text available for this option.

.. warning::

   This script has only been tested on Ubuntu 10.04; use with caution on
   other operating systems in production.

Emergency recovery of ring builder files
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Problem
-------

An emergency might prevent a successful backup from restoring the cluster
to operational status.

Solution
--------

You should always keep a backup of swift ring builder files. However, if
an emergency occurs, this procedure may assist in returning your cluster
to an operational state.

Using existing swift tools, there is no way to recover a builder file
from a ``ring.gz`` file. However, if you have knowledge of Python, it is
possible to construct a builder file that is pretty close to the one you
have lost.

.. warning::

   This procedure is a last resort for emergency circumstances. It
   requires knowledge of the swift Python code and may not succeed.

#. Load the ring and a new ringbuilder object in a Python REPL:

   .. code-block:: python

      >>> from swift.common.ring import RingData, RingBuilder
      >>> ring = RingData.load('/path/to/account.ring.gz')

#. Start copying the data we have in the ring into the builder:

   .. code-block:: python

      >>> import math
      >>> partitions = len(ring._replica2part2dev_id[0])
      >>> replicas = len(ring._replica2part2dev_id)

      >>> builder = RingBuilder(int(math.log(partitions, 2)), replicas, 1)
      >>> builder.devs = ring.devs
      >>> builder._replica2part2dev = ring._replica2part2dev_id
      >>> builder._last_part_moves_epoch = 0
      >>> from array import array
      >>> builder._last_part_moves = array('B', (0 for _ in xrange(partitions)))
      >>> builder._set_parts_wanted()
      >>> for d in builder._iter_devs(): d['parts'] = 0
      >>> for p2d in builder._replica2part2dev:
      ...     for dev_id in p2d:
      ...         builder.devs[dev_id]['parts'] += 1

   This is the extent of the recoverable fields.

#. For ``min_part_hours`` you either have to remember what the value you
   used was, or just make up a new one:

   .. code-block:: python

      >>> builder.change_min_part_hours(24)  # or whatever you want it to be

#. Validate the builder. If this raises an exception, check your previous
   code:

   .. code-block:: python

      >>> builder.validate()

#. After it validates, save the builder and create a new
   ``account.builder``:

   .. code-block:: python

      >>> import pickle
      >>> pickle.dump(builder.to_dict(), open('account.builder', 'wb'), protocol=2)
      >>> exit()

#.
You should now have a file called ``account.builder`` in the current working directory. Run :command:`swift-ring-builder account.builder write_ring` and compare the new ``account.ring.gz`` to the ``account.ring.gz`` that you started from. They probably are not byte-for-byte identical, but if you load them in a REPL and their ``_replica2part2dev_id`` and ``devs`` attributes are the same (or nearly so), then you are in good shape. #. Repeat the procedure for ``container.ring.gz`` and ``object.ring.gz``, and you might get usable builder files. swift-2.17.0/doc/source/admin/objectstorage-admin.rst0000666000175100017510000000077713236061617022613 0ustar zuulzuul00000000000000======================================== System administration for Object Storage ======================================== By understanding Object Storage concepts, you can better monitor and administer your storage solution. The majority of the administration information is maintained in the :doc:`developer documentation `. See the `OpenStack Configuration Reference `__ for a list of configuration options for Object Storage. swift-2.17.0/doc/source/admin/objectstorage-auditors.rst0000666000175100017510000000170513236061617023345 0ustar zuulzuul00000000000000============== Object Auditor ============== On system failures, the XFS file system can sometimes truncate files it is trying to write and produce zero-byte files. The object-auditor will catch these problems but in the case of a system crash it is advisable to run an extra, less rate limited sweep, to check for these specific files. You can run this command as follows: .. code-block:: console $ swift-object-auditor /path/to/object-server/config/file.conf once -z 1000 .. note:: "-z" means to only check for zero-byte files at 1000 files per second. It is useful to run the object auditor on a specific device or set of devices. You can run the object-auditor once as follows: .. code-block:: console $ swift-object-auditor /path/to/object-server/config/file.conf once \ --devices=sda,sdb .. note:: This will run the object auditor on only the ``sda`` and ``sdb`` devices. This parameter accepts a comma-separated list of values. swift-2.17.0/doc/source/admin/objectstorage-characteristics.rst0000666000175100017510000000320713236061617024665 0ustar zuulzuul00000000000000============================== Object Storage characteristics ============================== The key characteristics of Object Storage are that: - All objects stored in Object Storage have a URL. - "Storage Policies" may be used to define different levels of durability for objects stored in the cluster. These policies support not only complete replicas but also erasure-coded fragments. - All replicas or fragments for an object are stored in as-unique-as-possible zones to increase durability and availability. - All objects have their own metadata. - Developers interact with the object storage system through a RESTful HTTP API. - Object data can be located anywhere in the cluster. - The cluster scales by adding additional nodes without sacrificing performance, which allows a more cost-effective linear storage expansion than fork-lift upgrades. - Data does not have to be migrated to an entirely new storage system. - New nodes can be added to the cluster without downtime. - Failed nodes and disks can be swapped out without downtime. - It runs on industry-standard hardware, such as Dell, HP, and Supermicro. .. _objectstorage-figure: Object Storage (swift) .. 
figure:: figures/objectstorage.png Developers can either write directly to the Swift API or use one of the many client libraries that exist for all of the popular programming languages, such as Java, Python, Ruby, and C#. Amazon S3 and RackSpace Cloud Files users should be very familiar with Object Storage. Users new to object storage systems will have to adjust to a different approach and mindset than those required for a traditional filesystem. swift-2.17.0/doc/source/admin/objectstorage-monitoring.rst0000666000175100017510000002264113236061617023702 0ustar zuulzuul00000000000000========================= Object Storage monitoring ========================= .. note:: This section was excerpted from a `blog post by Darrell Bishop `_ and has since been edited. An OpenStack Object Storage cluster is a collection of many daemons that work together across many nodes. With so many different components, you must be able to tell what is going on inside the cluster. Tracking server-level meters like CPU utilization, load, memory consumption, disk usage and utilization, and so on is necessary, but not sufficient. Swift Recon ~~~~~~~~~~~ The Swift Recon middleware (see :ref:`cluster_telemetry_and_monitoring`) provides general machine statistics, such as load average, socket statistics, ``/proc/meminfo`` contents, as well as Swift-specific meters: - The ``MD5`` sum of each ring file. - The most recent object replication time. - Count of each type of quarantined file: Account, container, or object. - Count of "async_pendings" (deferred container updates) on disk. Swift Recon is middleware that is installed in the object servers pipeline and takes one required option: A local cache directory. To track ``async_pendings``, you must set up an additional cron job for each object server. You access data by either sending HTTP requests directly to the object server or using the ``swift-recon`` command-line client. There are Object Storage cluster statistics but the typical server meters overlap with existing server monitoring systems. To get the Swift-specific meters into a monitoring system, they must be polled. Swift Recon acts as a middleware meters collector. The process that feeds meters to your statistics system, such as ``collectd`` and ``gmond``, should already run on the storage node. You can choose to either talk to Swift Recon or collect the meters directly. Swift-Informant ~~~~~~~~~~~~~~~ Swift-Informant middleware (see `swift-informant `_) has real-time visibility into Object Storage client requests. It sits in the pipeline for the proxy server, and after each request to the proxy server it sends three meters to a ``StatsD`` server: - A counter increment for a meter like ``obj.GET.200`` or ``cont.PUT.404``. - Timing data for a meter like ``acct.GET.200`` or ``obj.GET.200``. [The README says the meters look like ``duration.acct.GET.200``, but I do not see the ``duration`` in the code. I am not sure what the Etsy server does but our StatsD server turns timing meters into five derivative meters with new segments appended, so it probably works as coded. The first meter turns into ``acct.GET.200.lower``, ``acct.GET.200.upper``, ``acct.GET.200.mean``, ``acct.GET.200.upper_90``, and ``acct.GET.200.count``]. - A counter increase by the bytes transferred for a meter like ``tfer.obj.PUT.201``. 
This gives a picture of the quality of service that clients are
experiencing (from the timing meters), as well as the volume of the
various request types, broken down by server type, command, and response
code. Swift-Informant requires no change to core Object Storage code
because it is implemented as middleware. However, it gives no insight
into the workings of the cluster past the proxy server. If the
responsiveness of one storage node degrades, you can only see that some
of the requests are bad, either as high latency or error status codes.

Statsdlog
~~~~~~~~~

The `Statsdlog `_ project increments StatsD counters based on logged
events. Like Swift-Informant, it is also non-intrusive; however,
statsdlog can track events from all Object Storage daemons, not just
proxy-server. The daemon listens to a UDP stream of syslog messages, and
StatsD counters are incremented when a log line matches a regular
expression. Meter names are mapped to regex match patterns in a JSON
file, allowing flexible configuration of what meters are extracted from
the log stream.

Currently, only the first matching regex triggers a StatsD counter
increment, and the counter is always incremented by one. There is no way
to increment a counter by more than one or send timing data to StatsD
based on the log line content.

The tool could be extended to handle more meters for each line and data
extraction, including timing data. But a coupling would still exist
between the log textual format and the log parsing regexes, which would
themselves be more complex in order to support multiple matches for each
line and data extraction. Also, log processing introduces a delay between
the triggering event and sending the data to StatsD. It would be
preferable to increment error counters where they occur and send timing
data as soon as it is known, to avoid coupling between a log string and a
parsing regex, and to prevent a time delay between events and sending
data to StatsD.

The next section describes another method for gathering Object Storage
operational meters.

Swift StatsD logging
~~~~~~~~~~~~~~~~~~~~

StatsD (see `Measure Anything, Measure Everything `_) was designed for
application code to be deeply instrumented. Meters are sent in real-time
by the code that just noticed or did something. The overhead of sending a
meter is extremely low: a ``sendto`` of one UDP packet. If that overhead
is still too high, the StatsD client library can send only a random
portion of samples and StatsD approximates the actual number when
flushing meters upstream.

To avoid the problems inherent with middleware-based monitoring and
after-the-fact log processing, the sending of StatsD meters is integrated
into Object Storage itself. Details of the meters tracked are in the
:doc:`/admin_guide`.

The sending of meters is integrated with the logging framework. To
enable, configure ``log_statsd_host`` in the relevant config file. You
can also specify the port and a default sample rate. The specified
default sample rate is used unless a specific call to a statsd logging
method (see the list below) overrides it. Currently, no logging calls
override the sample rate, but it is conceivable that some meters may
require accuracy (``sample_rate=1``) while others may not.

.. code-block:: ini

   [DEFAULT]
   # ...
   log_statsd_host = 127.0.0.1
   log_statsd_port = 8125
   log_statsd_default_sample_rate = 1

Then the LogAdapter object returned by ``get_logger()``, usually stored
in ``self.logger``, has these new methods:

- ``set_statsd_prefix(self, prefix)`` Sets the client library stat prefix
  value which gets prefixed to every meter. The default prefix is the
  ``name`` of the logger such as ``object-server``, ``container-auditor``,
  and so on. This is currently used to turn ``proxy-server`` into one of
  ``proxy-server.Account``, ``proxy-server.Container``, or
  ``proxy-server.Object`` as soon as the Controller object is determined
  and instantiated for the request.

- ``update_stats(self, metric, amount, sample_rate=1)`` Increments the
  supplied meter by the given amount. This is used when you need to add
  or subtract more than one from a counter, like incrementing
  ``suffix.hashes`` by the number of computed hashes in the object
  replicator.

- ``increment(self, metric, sample_rate=1)`` Increments the given counter
  meter by one.

- ``decrement(self, metric, sample_rate=1)`` Lowers the given counter
  meter by one.

- ``timing(self, metric, timing_ms, sample_rate=1)`` Records that the
  given meter took the supplied number of milliseconds.

- ``timing_since(self, metric, orig_time, sample_rate=1)`` Convenience
  method to record a timing meter whose value is "now" minus an existing
  timestamp.

.. note::

   These logging methods may safely be called anywhere you have a logger
   object. If StatsD logging has not been configured, the methods are
   no-ops. This avoids messy conditional logic each place a meter is
   recorded.

These example usages show the new logging methods:

.. code-block:: python

   # swift/obj/replicator.py
   def update(self, job):
       # ...
       begin = time.time()
       try:
           hashed, local_hash = tpool.execute(tpooled_get_hashes, job['path'],
                   do_listdir=(self.replication_count % 10) == 0,
                   reclaim_age=self.reclaim_age)
           # See tpooled_get_hashes "Hack".
           if isinstance(hashed, BaseException):
               raise hashed
           self.suffix_hash += hashed
           self.logger.update_stats('suffix.hashes', hashed)
           # ...
       finally:
           self.partition_times.append(time.time() - begin)
           self.logger.timing_since('partition.update.timing', begin)

.. code-block:: python

   # swift/container/updater.py
   def process_container(self, dbfile):
       # ...
       start_time = time.time()
       # ...
           for event in events:
               if 200 <= event.wait() < 300:
                   successes += 1
               else:
                   failures += 1
           if successes > failures:
               self.logger.increment('successes')
               # ...
           else:
               self.logger.increment('failures')
               # ...
           # Only track timing data for attempted updates:
           self.logger.timing_since('timing', start_time)
       else:
           self.logger.increment('no_changes')
           self.no_changes += 1
swift-2.17.0/doc/source/admin/objectstorage-EC.rst0000666000175100017510000171613236061617022004 0ustar zuulzuul00000000000000==============
Erasure coding
==============

Erasure coding is a set of algorithms that allows the reconstruction of
missing data from a set of original data. In theory, erasure coding uses
less capacity than replicas while providing similar durability
characteristics. From an application perspective, erasure coding support
is transparent. Object Storage (swift) implements erasure coding as a
Storage Policy. See :doc:`/overview_policies` for more details.

There is no external API related to erasure coding. Create a container
using a Storage Policy; the interaction with the cluster is the same as
with any other durability policy.
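As a concrete illustration, creating an erasure-coded container is just a
normal container PUT that names a policy. The sketch below uses
python-swiftclient with placeholder credentials, and assumes the cluster
defines an erasure-coding policy named ``ec42``:

.. code-block:: python

   from swiftclient import client

   # Placeholder pre-authenticated endpoint and token (assumptions).
   storage_url = 'http://127.0.0.1:8080/v1/AUTH_test'
   token = 'AUTH_tk_placeholder'

   conn = client.Connection(preauthurl=storage_url, preauthtoken=token)

   # The policy is chosen once, at container creation time; all later
   # reads and writes look exactly like those to a replicated container.
   conn.put_container('backups', headers={'X-Storage-Policy': 'ec42'})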
Because support is implemented as a Storage Policy, you can isolate all
of the storage devices associated with your cluster's erasure coding
capability. It is entirely possible to share devices between storage
policies, but for erasure coding it may make more sense to use not only
separate devices but possibly even entire nodes dedicated to erasure
coding.
swift-2.17.0/doc/source/admin/objectstorage-components.rst0000666000175100017510002205213236061617023676 0ustar zuulzuul00000000000000==========
Components
==========

Object Storage uses the following components to deliver high
availability, high durability, and high concurrency:

- **Proxy servers** - Handle all of the incoming API requests.
- **Rings** - Map logical names of data to locations on particular disks.
- **Zones** - Isolate data from other zones. A failure in one zone does
  not impact the rest of the cluster as data replicates across zones.
- **Accounts and containers** - Each account and container is an
  individual database that is distributed across the cluster. An account
  database contains the list of containers in that account. A container
  database contains the list of objects in that container.
- **Objects** - The data itself.
- **Partitions** - A partition stores objects, account databases, and
  container databases and helps manage locations where data lives in the
  cluster.

.. _objectstorage-building-blocks-figure:

**Object Storage building blocks**

.. figure:: figures/objectstorage-buildingblocks.png

Proxy servers
-------------

Proxy servers are the public face of Object Storage and handle all of the
incoming API requests. Once a proxy server receives a request, it
determines the storage node based on the object's URL, for example:
``https://swift.example.com/v1/account/container/object``. Proxy servers
also coordinate responses, handle failures, and coordinate timestamps.

Proxy servers use a shared-nothing architecture and can be scaled as
needed based on projected workloads. A minimum of two proxy servers
should be deployed behind a separately-managed load balancer. If one
proxy server fails, the others take over.

Rings
-----

A ring represents a mapping between the names of entities stored in the
cluster and their physical locations on disks. There are separate rings
for accounts, containers, and objects. When components of the system need
to perform an operation on an object, container, or account, they need to
interact with the corresponding ring to determine the appropriate
location in the cluster.

The ring maintains this mapping using zones, devices, partitions, and
replicas. Each partition in the ring is replicated, by default, three
times across the cluster, and partition locations are stored in the
mapping maintained by the ring. The ring is also responsible for
determining which devices are used as handoffs in failure scenarios.

Data can be isolated into zones in the ring. Each partition replica will
try to reside in a different zone. A zone could represent a drive, a
server, a cabinet, a switch, or even a data center.

The partitions of the ring are distributed among all of the devices in
the Object Storage installation. When partitions need to be moved around
(for example, if a device is added to the cluster), the ring ensures that
a minimum number of partitions are moved at a time, and only one replica
of a partition is moved at a time.

You can use weights to balance the distribution of partitions on drives
across the cluster. This can be useful, for example, when differently
sized drives are used in a cluster.
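To make the name-to-location mapping concrete, the following sketch shows
how a partition index can be derived from a hashed path, in the spirit of
the ring code described below. The partition power and hash suffix here
are illustrative values, and the real implementation in
``swift/common/ring/`` differs in detail:

.. code-block:: python

   import hashlib
   import struct

   PART_POWER = 10           # illustrative; chosen when the ring is built
   HASH_SUFFIX = 'changeme'  # illustrative cluster-wide secret

   def partition_for(account, container, obj):
       # Hash the object path plus the cluster's secret suffix and keep
       # the top PART_POWER bits of the first four digest bytes as the
       # partition index.
       path = '/%s/%s/%s%s' % (account, container, obj, HASH_SUFFIX)
       digest = hashlib.md5(path.encode('utf-8')).digest()
       return struct.unpack('>I', digest[:4])[0] >> (32 - PART_POWER)

   # Example: one object name always maps to the same partition.
   print(partition_for('AUTH_test', 'www', 'index.html'))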
The ring is used by the proxy server and several background processes (like replication). .. _objectstorage-ring-figure: **The ring** .. figure:: figures/objectstorage-ring.png These rings are externally managed. The server processes themselves do not modify the rings, they are instead given new rings modified by other tools. The ring uses a configurable number of bits from an ``MD5`` hash for a path as a partition index that designates a device. The number of bits kept from the hash is known as the partition power, and 2 to the partition power indicates the partition count. Partitioning the full ``MD5`` hash ring allows other parts of the cluster to work in batches of items at once which ends up either more efficient or at least less complex than working with each item separately or the entire cluster all at once. Another configurable value is the replica count, which indicates how many of the partition-device assignments make up a single ring. For a given partition index, each replica's device will not be in the same zone as any other replica's device. Zones can be used to group devices based on physical locations, power separations, network separations, or any other attribute that would improve the availability of multiple replicas at the same time. Zones ----- Object Storage allows configuring zones in order to isolate failure boundaries. If possible, each data replica resides in a separate zone. At the smallest level, a zone could be a single drive or a grouping of a few drives. If there were five object storage servers, then each server would represent its own zone. Larger deployments would have an entire rack (or multiple racks) of object servers, each representing a zone. The goal of zones is to allow the cluster to tolerate significant outages of storage servers without losing all replicas of the data. .. _objectstorage-zones-figure: **Zones** .. figure:: figures/objectstorage-zones.png Accounts and containers ----------------------- Each account and container is an individual SQLite database that is distributed across the cluster. An account database contains the list of containers in that account. A container database contains the list of objects in that container. .. _objectstorage-accountscontainers-figure: **Accounts and containers** .. figure:: figures/objectstorage-accountscontainers.png To keep track of object data locations, each account in the system has a database that references all of its containers, and each container database references each object. Partitions ---------- A partition is a collection of stored data. This includes account databases, container databases, and objects. Partitions are core to the replication system. Think of a partition as a bin moving throughout a fulfillment center warehouse. Individual orders get thrown into the bin. The system treats that bin as a cohesive entity as it moves throughout the system. A bin is easier to deal with than many little things. It makes for fewer moving parts throughout the system. System replicators and object uploads/downloads operate on partitions. As the system scales up, its behavior continues to be predictable because the number of partitions is a fixed number. Implementing a partition is conceptually simple: a partition is just a directory sitting on a disk with a corresponding hash table of what it contains. .. _objectstorage-partitions-figure: **Partitions** .. 
figure:: figures/objectstorage-partitions.png

Replicators
-----------

In order to ensure that there are three copies of the data everywhere,
replicators continuously examine each partition. For each local
partition, the replicator compares it against the replicated copies in
the other zones to see if there are any differences.

The replicator knows if replication needs to take place by examining
hashes. A hash file is created for each partition, which contains hashes
of each directory in the partition. For a given partition, the hash files
for each of the partition's copies are compared. If the hashes are
different, then it is time to replicate, and the directory that needs to
be replicated is copied over.

This is where partitions come in handy. With fewer things in the system,
larger chunks of data are transferred around (rather than lots of little
TCP connections, which is inefficient) and there is a consistent number
of hashes to compare.

The cluster has an eventually-consistent behavior where old data may be
served from partitions that missed updates, but replication will cause
all partitions to converge toward the newest data.

.. _objectstorage-replication-figure:

**Replication**

.. figure:: figures/objectstorage-replication.png

If a zone goes down, one of the nodes containing a replica notices and
proactively copies data to a handoff location.

Use cases
---------

The following sections show use cases for object uploads and downloads
and introduce the components.

Upload
~~~~~~

A client uses the REST API to make an HTTP request to PUT an object into
an existing container. The cluster receives the request. First, the
system must figure out where the data is going to go. To do this, the
account name, container name, and object name are all used to determine
the partition where this object should live.

Then a lookup in the ring figures out which storage nodes contain the
partitions in question.

The data is then sent to each storage node where it is placed in the
appropriate partition. At least two of the three writes must be
successful before the client is notified that the upload was successful.

Next, the container database is updated asynchronously to reflect that
there is a new object in it.

.. _objectstorage-usecase-figure:

**Object Storage in use**

.. figure:: figures/objectstorage-usecase.png

Download
~~~~~~~~

A request comes in for an account/container/object. Using the same
consistent hashing, the partition index is determined. A lookup in the
ring reveals which storage nodes contain that partition. A request is
made to one of the storage nodes to fetch the object and, if that fails,
requests are made to the other nodes.
swift-2.17.0/doc/source/admin/figures/0000775000175100017510000000000013236061751017566 5ustar zuulzuul00000000000000swift-2.17.0/doc/source/admin/figures/objectstorage-replication.png0000666000175100017510000013132413236061617025445 0ustar zuulzuul00000000000000[binary PNG image data]
ºCÿ‰'â_8CÿrÒéý_½›uTRB =ýÿ8ŽÿÝbcRþÔJR´— i º »"™οñ“ É–ž¿ñËQÉÞÀ€ D¦ØûüöÇS¢},@YˆŽwò *ê꼨,#É*ð'ª—éí÷;Æ9Œimܨ?+ÞóO|dÒz¯?xFF¤•ëŸøutGwà@ è,€_Ð2fŒ'¬@O'$»{ÿ®Õãú»ôm8ËÖó·?ÇL²ñúS+9ÒÛþ?ÆÌòöüʼn‰‡GÙ:üÒ€iD²ìÿàæ 1îοr1oVЧÈ`áÌ8ŸßœXAÝÚé×L°r°:°€ ¡‹@g°ëßO0!hÀ€xˆx`Ѹþ¬Þ^† OÿF[ý‰ƒ(ƒø¿8ãŸp/È€LHúS ÅMqcÜ7ÅÍqS\ 7À ÿ¬õN¶LþUõKk0Aõ7bù[ýúVµ…õo9¡3þS“-¼Dü‰Ð¨Ó˜ÐXø“ÿމ6Dk¢=Ñ–¸ÛŽ]Àº°Ømì2Ö4ìÖŠõ`W°–ÿ˜ý÷TXÀ„$p‚`B °€ qÿ­¢”¿¿QN%N]ð&ÄA4Ä@Ôß ¾ðXõ,)@ƒPˆ‡uN{ü3iy\ ×Å-qÜ7.ˆ‹‚*®ƒà¸nŒëâ†ÿ´‹ÿÚ*„X°˜ÑðX›ÌLK°ŠOHgEED&Ó,b˜*4‡8†š MKCS üi¿^íiO@¼ÿŒ1 `¸€t÷Xl:@€T÷?09€Ø €ºƒŒÖú_@2p?ˆ€$È€"¨‚è1˜ƒ 8‚xC¬DB,° 2!ò vC „ ¨‚8ç¡.à 脻ðÁ Ã(|€)øó‚*‡ˆ Rˆ¢Œh!ˆ)bƒ8#žH‚D qH ’‰ä …H1r9†œBÎ!‘Èmä!òA&¯ÈŠ¡”•@åQuÔµ@Po4@Ñ 4Ý…–¡•èi´½ÞE¡ÃètŒĤ1U̳Âܰ@,ca›°¬«ÄÎbmXÖ c“ØOœˆóá4\7Æíqœ'â›ðøA¼oÆ;ð~|ŸÂ—T‚8A™`Dp ø"©„¡Ya1asa¦pp½ð#á9šˆH´È‘‘¢¸¨’¨‡hªèÑ[¢“bübÆb ±±óbÏÄQq%qOñ âUâ=â3’v $nJLJ JšK®“Ü'yUrBŠOÊT*JjŸÔ5©÷4š-†VFë MI‹KÛK§H“_¡°ÂgÅ–õ+^Èe dÂeöÉ´ËLÉJɺÈfÊÖÉ>“c—3‹”Û/×%7+¯ ï'¿M¾E~\AXÁA!C¡Ná¹"UÑL1Q±Rq`%q¥ÁÊ蕇W>PB•t•"•)ÝWF•õ”£”+?T!¨ªÄ©Tª ªRT-T׫֩ލ ª9«mQkQû¤.«¨¾G½K}ICW#Fã¸Æ&¯¦£æÍ6ͯZJZ ­CZÚTm[íÍÚ­Ú_t”u˜:GtžèòéºènÓm×]ÔÓ×cé՛ЗÕÑ/×4à7p7ØaÐmH0´4ÜlxÙð§‘žQ²Ñy£ÏƪÆÑƵÆã«V1W_õÆd… Ýä˜É°)Í4Äô¨é°™´ݬÒìµ¹Œy˜ùIó1‹•ë,N[|²Ô°dY6YÎZYm´ºnYÛYX÷ÚðÚøØ´yi»Â6¶ÎvÊN×nƒÝu{‚½“ýûA †Ã)‡)G}ÇŽN'/§ƒN¯•œYÎm.¨‹£Ë^—ç®r®q®-nàæà¶×í…»‚{¢û%¢‡»Ç!wžšž™ž]^|^Á^µ^ß½-½‹¼‡|}R|Ú}¹|×øžòõ³ö+ööW÷ßè7@4 * 5èx2pfµÍê’Õ£kt×ä­y¤”t{­èÚ˜µW‚¹‚éÁB!~!µ! t7z%}&Ô!´[S;†ýN‡&Ç'NcΣ.c®ncîï=¦ EMK-íDºUzWôŸLQŒeWš¸™F˜m6ßkq²ÍêžõMŸm›Ý1û‡G?'kg5!WÌõ³Û{·G“g•×>ï\Ÿ_¦Ÿ¯¿}€q Újé5AAËkGƒo…œ ç††2LÃÄÂ~0Û"öE® \g-Cùû.n0¾'áfâUV[RKrkʹõGS ÓÒÒ£268gšlÔÞ¤¸Y<‹?›¼Ý2Ÿ3;¹õuÞ“m·wå_-¸PX³£rgÝ®ËEwv?ß3µß'V¢Yê¼?º¬ðÀ¹ƒÏʉ‡õŽDWœ8úºR¦*êxã‰Åj›šÄSÛk«ê®Ÿ~rfºž»AåœÓù˜ »››ž·Z5/†´í¾ÔyyáªÆµðëGo ¶/uHÞ2ë éÊí>u»÷Η‰{¶½ ÷=èëCúõâÕ<~ûDæ)óYõÐÐó//æ^!ÃÄòkÎ7Ô·”Qò;üÝâØÌø‡‰—ïû>ÜœlþxòSÙçü©´/N_ѯÍÓÑߤ¿=™ÙùÝòûÙ†á?ÅöÍÌ[ÌÿX8·µD[\Ž_^È *`Ѱî DÄ©@æÐhtKÇ…ñ{„}Ä$R,[&{9yˆ¢LÝÃ9ËÉ3ÄgÅ_%0#d,/R"Z-V)^$‘$é%¥Gã£}–¾¿â¬ÌnÙd9/y…iÅÁ•­J%ÊÉ*~ª†jj³êý4K´Rµt t¥ô@ï…þ ƒ`CAÃ;FU_­Úebl2fº×ÌÌlÔ|‡…¶ÅSËÍV²VÝÖq6ü6m¶t;v»óöAT‡NÇ<'ggªs¯ËNWg76·›î›<ô<¾xÖy1½%½‡|Ê|}üýžø`ªþ\ݱ¦ È:hamspbˆJÈ{ú©ÐH†"c"¬Ž®þ5¢)2#Ê(jaÝÕèM1f1‹±—ã2â âgZ7°Ì“HI÷“ËRÂÖk§â©ÓêÒ³3ü7heR3'6vm:±9/‹‘m½E.‡˜3™;¸õF^ý¶£Û‹óó r ³wdïÌÞ•[´mwÑž=Å÷ÝWUr²´vÿ…²ß ”ë<²¹âøÑ®cÓUâÇmO¤ž¬©‰ÄjâI…Oj"-²Ù±ía{ήĞÁ~—,IN"ßæ q¤sôS4)»(©nÔFNÎ .~®"n ÷vÏV^o_)?ÿŒ€ÀmÁÕ‚_„ „„;DÂE)¢bÁâTñ+I’Š’/¤JiîÒTé»+ ee9eÊËû)H(¼V<¹2NIOiQù–ÊNU5šÚ¤z³ÆvÍ-'m-Q]T÷³Þþ]ƒK† F'Œ®*1ÙeZ`–g¾Õ"ß²Ðj·õ>›rÛ“vçí¯9ÜwrúàüÃsãróóÔö2ñ¶ñq÷õ÷ ñ H Ì]]´¦<¨ní¥à{!¯è_arL«ðôˆ‹‘ßÖ©E'Æ\ˆýo–°-ñA’hrXÊùT<Í'½&c.Ósã©MKY«³/åˆçnÜúrÛªíÕÜ…Ûv,ìJ-šÝ“µ—k_E©áþ¡‡V”÷É:ªul¬ªü„{5VÓZ[púäÙ·çÔ/ä6½k j¹’{Ýø&O'v[¨Çæþ¶¾·Ÿö¿áx{kÈ:d²©A®"È{A…QMÔ e¢Ùèaô ú]¤1{,;ŒÝÇqÜOÛñomB¡‘ðhLÌ&v‘xH¤ã¤¯ll»ÙFØuÙ ØGÈÆäRòWwŽ ?%ò’jGmäÔælá2æêàvã~ÊÁóƒ7ŸO’¯‘ß…\ GPZð†P˜0»p½ˆŸ(*Z/$N¿!±]ÒCJRê3­Cº|ÅzOY-9¹)ù~…fÅÒ•Jk”MT$U–T_ªµ«ÓØ®¹Y+S{ƒNªnš^ª~¦A¦aŽQ¡ñžUGLjL[Í:ÍŸ[|·¢Z+Û8ØÆÙ•Ø·9œ \µÜ!<†ÛíÎËËs8%7CÖ>zôh•ÎÆÏ{D„Vªñ ÔYûÍØ–©´?±%²¶Ýng³ÙQQQLæ3©•>‡ƒ®­>cÄMgm±Xèîø*• Y›$IƒQBKdmx¢-噵Kµe2Yå{{¹Ýîüü|‡ÃQòÊ’ÎÚ»ÿ÷j’¬RM •Î:î»}™J{´€‘k*ItÖŽ‰‰)Ù¬ôÌÁápäçç“$I͈gé¥TéY—ü‡Ãår•Ð’b툈ˆ@÷ÚÇ@Ö6›Í`±XÜn÷ãšUzÖÎÍ͵Z­$IÚíöÇqw¥gí^Ÿl9wG-`ü6>¡„–•žµår¹Ëår»Ýv{ÁÊã™!î燵Ng -é¬]É,E¬ý¸·×óÆÚ{{UnÖ:kÇI»;ý²6<+ÄdmDµ ÈÚð°¶Jg ²¶kÓ^ÏqYdmldíçµ{}²%ÈÚcm¨ø›“AÖFY $AÖ~XûÜÝ–«Y»XÖ† ®qYdm ™¶ƒ¬ý<°v´€díDZ6Tdâ²6"ÈÚðè†dµŸÖ.Á$ÈÚPa‰;ÈÚˆ kC7’@wÜÇð’µ-ËsÅÚS·™Lfµ¡bÚ¸ƒ¬0íý<³6hµZ‹ÅR)MÛ^²6ôéÓ'ÈÚ2™Ìl6“$Y)Y¼dm¨€wµ)Y›’ÃsÎÚ‹%ÈÚ2™låÊ••˜µ1òÎÖ†ŠFÜ•›µÀKÖ¦Ò,<ç¬MÉáyfm«Õ:yòä眵ÜØ•+WÆÇÇWbÖ–Ëå^²6T(SI¥gm•J^°6AHXÏ9ks¹\LËó<³öÔ©S=š¾làóÌÚ!iŸÇ £+7k»\./Y*ŽÆý<°¶^¯÷†µ1Ë]µ ‚€ kOŠF’眵™Ïk»ÝnïSUWâ~NX‚¬ AÖ.=kïþß«î¸dmÄãR‘<'î燵KN¬dmDµ¡kW2u;ÈÚïÿŠ"ÀÄý\±v êvµAÖ† k"ÈÚ% ÄdmD%fmBµƒ¬ðžµÜØ k—€@wµ¡²³6I’AÖ²6¢T¬’öyµK@Àˆ[­VCµ+;k£dµƒ¬]ZÖf £ƒ¬]CÜh$ ²v¥gm³Ùdí k?k³Ùì k—€7eÚ²6TvÖ€ kYû)X;&&&Ð÷1|ÈÚàâ¦oHúù§ý€ kC7’ZY‚¬]ˆÊíüç[Ö?wåv# ²6”Æù¯³6Y‚¬]Ÿ³6ø“¸ƒ¬ AÖ.Dåfm·Ûdí k#ʃµÁoÄdmDµá9`íÜÜÜ 
kYʵÁ?Ädm²Uµ+=k[­Ö kY»üXü@ÜAÖF ‘$ÈÚÏk“$dí k—kCywµ”i;ÈÚð°v iÞ‚¬²vQŽÄdm}C2нö1‚¬ðÒù/ÈÚˆ k—åEÜAÖFTb7’ k#‚¬²6ø‹µ¡œˆ;ÈÚˆ kCµà9`m•Îdm¿±6”qYdmJdíÊÎÚ½>Ùdm¿±6ø¼XpµAÖF YY»Ò³ö¹;º-V ²v¹[H˜L`0¾Ô¸ƒ¬²6$AÖ~X;ZÀ²vy³6E,aaa>#î k#‚¬ LÛAÖ~Xû·ñ kdmŸ€"©Tåâ²6"ÈÚú†d ;îcY,ø bm™LŒ²ÿXµAÖFTb7¾÷Þ{A֮ܬ ™™™am(;qYdmDµÀjµ¶lÙ2ÈÚ•›µGVV–ßX› §@Ô"‘¨L^%•›µ ÈÚAÖF”¶0Bµ+=kËår¿±68NŠ…,ËÓw¥gm,tdí k?E9›ôeƒ¬]¹YÛårù‡µy<žP($IR£Ñà$Iò)‰ûy`m½^dí k?]² kWzÖv»Ý~`m±XN’d~~>}>WÉsÂÚdm²öS! tÇ}Œ k#<ÜHÊéWŠem“ÉDOŸè.ûJ¥ò‰ú\YP®¬ ¥*ìr¹, šJ¹\.Ç£›½H’¤ôÖÊ |àS áñxÏÉ{Àáp˜Íf‡Ãv7>ŸO—Ãó3.e˜\ÑæéüâåÄ-ëˆfÁÛn»te]Ÿ]h4—ËÅb±*ë‰÷(oÖ†R•.ËÎÎÖjµn·ÛívkµÚììlJ1 •Õ>î«ÕªP(ð¸\.­V›››K½ºu:Á`tý£Ñ˜““ƒrp:*• 7èñ[¥Ri2™ÝG`þ¶ìŸÞþç¼Þâp_Ì0tÿæÞëßß4Û\f›kè7¯<(łR©D•Îjµæææ*•J<ît:éãy€XJ¥q³Ùl™L†jI’ …B©TÆÅÅ€Ùl®|›ÎÅB£Ñðx¼°°0üÓívçååiµZ4/X,–J¹[à’$ÕjµX,¦,KN§3''G§Ó¡dL&Óó sÝϳ~²A¹xtÜàVÆ¥+™æÓo­?ª|¥‘tóYËû=ÝE¿ÀjµšL¦ÄÄDüÓl6çççóù|>Ÿït:+ëV±ðkC©ˆ›ËåR‹_ƒ!‘HP»´Z­.—‹$I£ÑˆÎd”1Åb ät|„Ç`0p8@`6›qK—Íf êâv»Ýb±¸Ýn@@’$I’  ;“ÉäñxΓvWÊn·ÝqL&S$áʽƒ‡ÕjÅ;N¼‚ Ð’àp8Ün7“É´X,‡Ëå’$i±XPgÇfÔÅÑ6Í`0x<žËåb0Ô-S6 ‘Hä!àã¦;MŠãý/m6A\.—zšô§ŒÃ{çóù!!!hˆs8x›t9 ŽÅbñù|»ÝŽ_aòb‡ÃAc~Æ•ZÕ}øêWáOî{/iàÈ5Ád4LÀ¥ Ó‘ë:›Ã]?IØ®~(šSNÜÔ‡ ‰*Ç• c&aÕe<¹Æ~àŠ>Se å3Sª š×*p!ßy^sý)2”óêKa®é[Ô‡‹Pœ;Ωs4ޤHn÷Æ¡B^)f´¯àa,FÊv»ÝøL¡ðEŽ£×jµâAºEÅl6s8«Õêt:E"‹ÅBG^&“Éår©G=t6›ÍçóÍf3åÁìt:Íf³Ëåò Âo¬ ¥2•èõz£ÑH­zx<^TT`Ÿ\.®•l6[vv¶ÑhD)geeá+×jµªÕj´3 ’$5 ž¨V«år9^Öb±ÈårÜý—Ëåùùù: u|¬fk·Ûår9„ŸÁf³õz½Ùl¦6£ª™6› =X(§b=ŒF£B¡p:V«U«Õ*•J¬îˆñ F£å ÑhðÀd2áþ‰ÓéÌÍÍÕh4h|ÀSÐ=‹òÎö3X,A*•Šî2Š 8vÉápàsǧI’¤V«•Ëå8| †}™Íf| QcÆápäççS–7F“——‡Ž1¸È§Ó™B0›ÍÔó3êÄóDlÆ„ÅwÎÝ5ÚÓrÆ€„í£ÏÞÖÀ¥ ó½< ÌÛ’•öÙík™V­Ù5raæÀy7°ýÛäSVf ùþþ¯{TîšÎÜ6Ôz/}ï%Íî>}ÓØáË{+ì"ŽXø@epí2ëÆÀdä[àJ¦¹î{×ÿ:©€ß÷ç5™r=KåíL‘‘‘B¡Ÿ&àt¥R‰OÓårååååççc{µZŸŸ¯V« RvNN¾× º  i d¼¬ÅbÉÎÎÆ§Óé¨aæOø“µ¡T7ÇCZAUQ à‹Å‹…Åb¡¹@©Tr¹\ätÈÏÏÏÏÏG‹ŠËå C%Ñl6Ëd2T”ã\.“ÉÌÏϧÖà‰$''¯c0ìv{ll,ÆõF•JE…ùù aaajµZ£Ñh46›Íãñx<ö!44Ôb±ðx<¼AµZÍçó)cZTBBBœNgxx8ê¸d¡dÅáp´Z- J«ÕJ$4¼ðù|Êh¨ÓéH’”Édø§\.÷¹?ƒ™ŸŸŸ——Ç`0BBBp¾¥¤R©Ýn×ëõÑÑÑ(.|èØžÅbQ7®T*•Jell¬Ÿå."6~XuôÏ÷Z͸#b3º¼ÈëØPÚ5U*á³>ê[eé¡ô ÝbW^Ê0}¶Qµyrb§†ÓÉÖ|Ú+æî(€“7×~x!\DØî1‹î m!úel5¼¾hÉíç4o´‰ÚsI{lvÍê2,ü'ç|fßþénÿüGՀɽbλñÅú¿¯ág9p8œððp¹\Ž«C‡Š0AHaaa,Ëd2™L¦˜˜Šèår¹ÉdÂAîr¹ªT©‚ÌÉÉ¡H@.—›Íf>Ÿo0œNgll,’†R©Ä·ÐI‡™N§ó§‡˜ŸYJ¥qGEEÅÅÅá3Ðëõr¹¼è†$jˆô¸y‘HD·sQëk>ŸÀápà@]•$IjùkmüŒ«-ƒ¯k<î}“ ˆ¨¨¨¨¨(±XÌd2õz=ªÌÍ|à-Q½§+È999HÙ83Ñ?Á£ “ɤèÌn·geeÑû¨Ýj6›Íf³)9èt:çHìýF¨ÏÖX½^6nzô&ºUÑ›QºÝnÔ\. u¹\*•J©TRJÕ7ºµ‘ÍfS&&ºp\. p_„ºY—Ëå±¢¢ÆƒV«ÅÕ §ÓéçåWÁ0MjˆšÔMêY*Û› ïŽùåÞæiµ¨y:Gí¸G:V=š{äZÁ¬þp«&Keûy§|ýQ½ÂDÖ‰f†òYN µƒjZ0H V7 üáGòuŽ˜'×”¸\.Nd»Ýž——§V«©¥$ Ðô#l6›½ôgg·Ûu:Ùlƃ|âr¹è;T{¼îg„µÁ{â¶X,l6›ƒÁJ¥z½ÞápxlÖ¦¡8*Gèõz±XŒ¶\4A!=Ñg u)&“) c4u:š}(9ˆD"Šq<äPìСÕj-‹D"áp8,Ëjµ¢)ô–n·›º~hhh`³Ê )#>>ž:Âb±pQìÁž{Dh +zA•Jåp8¤R)—ËÅu .ÑX,–Çz‚KdddÀÃ\Ç-º«59ÖNªM‰ß%rÈ‚lz3~‘¥|äiæêÑÒbä0è»[b>cÕIõøB1ëϬ¿Oi@"xÈwZsÁ¥8ÎήU'!À©uär9=n‹Ãá…¢ËТ£úqãA¡Pp¹\tSÉËË×=îXÒÏÅx…„„„b/U®kƒ÷¦.—«ÑhèᡸŠÁ~3 ü ˆ¾ge6›™LfQ«Õ* q’S f¡ÔIôÊ :@W3ív;Ý‘Ü?@…7K)`ðF˜L&%xÔ’ƒo> Úl6z(u;¸l¤î²&@HHÝ"¡P(ü¯qã²Ô#…Åba0:/Ç£÷Öl6ë&ˆn”|>ç}44Ôl6Ëår4Pèt:‹EWöýÜlÑëõ6›-$$„Éd¢Ç:0“ÉÄ$‹Åb1n$¢Úh±X"""<œÐÆG©ÛHX$I¢I ·7©kRrÈËËS*•häÅsý)|v"‘õnÜx°X,v»zÐè{ãv»ÅbqNNŽT*Å» ·¡ƒÇãF6›Íd2F#¾¥Ð T ( |èÔ›,,,,77…‡N§‹Åþ÷ëÝ,¼ÿ9MÏo3ú¤òR« `ÛÝé ÇžéU%|‡Å˜·%{hk{÷Ôð>©¼W¿½õék±|.,Þ›ëp’o¶‹ò¸ZT(»~,kÁö<ír“+æ)´N‡“€.¤µÇ_Ý9€T*µZ­yyy}É\.nZààT«ÕB¡P$ †ÜÜ\Œ¡×ét8Î=®Æáp˜L¦V« …ƒÁ` ŠÅb“É”““#(Ÿ%(œ•¸h£øÄÃJS@% ¬ ¬·ß~$É›â^&ÏCS •Ä€Ãá8N—Ë…\. 
°’$F9Þ’$I-oCBBt:Õj%Y¨I¡ËŠÛí&IR"‘8N&“‰ü%ÐéÐétòùü'¦Ô IR¯×cˆcÉ-QUd2™O4Aà º\.§Óép8ЙÚED›Ûíæñx!!!蚊×R©‡5ƒÁ ´ Ô¬m6›Ýng³ÙáááhÕ%OG±D"±Z­hPFwf¸n·»XôºÕS[%Ýf½±Ã .%\)SûKøÎv»Ý|>Ÿ ¤u6›Iíyà=b{ÊŸÝ´¥R)õNâóùl6‡ADD„^¯—H$Aðx<»ÝNigÞäÓ <ÊKžWW¯^=~üx³Üö $OƒÑ½IXjÕl•õZ–%SikZSôÛ˜*õª IuîY.›Ù"Yܽ±TÌc칬½’an™,úqTÕH1 WXnr\Á¤x¥Q¨\cû÷²îŽÜÚ¿eÄ{Ý¢M6GJU¡ËŠ ceäY#C>z5î·ýªmÃb¥œø­ëNýgÁnþÔ|A:e×.v˜•(Óv@X›ÏçÇÇÇ`¾ ÅÀn·K$ƒa0\.Wå+ïðD ™Èår!aFÌÝè~ùl6üQSÓét — 8"Dļ-Y­ê†*´ŽÏ×ei.ˆßH`Á`0T*†.;NLºàû}C2 ¬Û°‹¸@&“¡§I’‡ò·Þe0p×:ÏI²=:X,–L&ÓëõjµMLÞØô*& æoËùó¸6\ÌÙ1bd»è²_ó™&À0›ÍƒþÏ÷àáF’——Ö†Òúqû,Ë»m¥G±‡!!!”%ôyƲwjº ‡?í!EQÔùÏŸ¬m±X.\¸€ûö 6¬pÄDAQÑP¬Ë¶ßXûúõëo¼ñåòñ¼dy"ˆ ‚x:<.ÐÆ?¬§È8wä·ñÃ%Åx¯)•ʲ·N§»ÿ¾G””Ëåºÿ¾÷‰DJÛÞç(»ùX£Ñ ÂD‰ÞÈ”¶}”Cnnnff&ÝýÃh4Þ¿¿TyŠS*” úØËTˆ#â—ÝòÔÉWJu‘§8ŇÈ7–5÷Ëýû÷‹F)•ÊR…§—¶½ÏQBx¤X›ÇÖ´ª{sö'¼—‘õ0ÞÅ7·Z­HŠŸ h¼ßSã™.DÏ÷]àÈôÝ<%Ðï¸ì×Q˜ÈÏÖÜ ôÝ<=NÞ2^˶”ý:‡Ã#¥Ì³…’ƒÚËéGé¬(>Ã`8þXöwÖÀ;Ó ÚøägX,–Z­.¹YQ_÷Bc¼‰šñ ­|’å€ ôC/Y¥:Žyºý#¢‘O 6›éX×oªhàC Ý›õeG±¹ž-kp–2yDÃ{@epÊ5ö¢Éü´f—\S )äéÅ÷9NÞ2¾»\á“ ‡Ê<ó8`ì^±Ç‹ma_~C@R‘”ÀÚ_Omܯ[AÎ^( #<<³›»ç‹ùÑñ3=\*++Ëívcž&z{ªb!ƒÁ‹ÅåçF±6‹áp‘ì2^Åb …B­Vû¸ÈµZAÛ€º)L\…YO©Æn·[­V£íåP~ûékûÄó’Ãáðx<”ƒº\.ª8!ƒÁ ¥"5 –× çq§Ó™ŸŸO圢q}jÂM)óèÓL’gzë§û§ÿ÷fm¥ãŽÂ2î׌c·íÆe|?"ƒkì÷Ìõ™ öèÜ$4ªÂJ­ñðN›M½z%ÇõcY?Ij\­¼²¶›„x¶Þaæ®T*cbbŠª˜U YÝÉ0X)ÓÀÒǤÅb¡Š'`ŽOV±¨€¬Ýë•jùÊ‚eoL%˜S‚’)jµÚd2Éd²ÄÄĘ˜›Í†ëh‡Ã!‹«T©KOJ…;666111::Z¯×— »•kL[´Ð7©y0y :¡{|…yßãââ"""Ìf3’µÍfÃŒïqqq‘‘‘‰rÜn·L&Ã蘠œä€¬Íf³}õŽÄ¨tªš…ÜÜ\·ÛŸ˜˜¡ÕjqwÄh4 †ÈÈÈÄÄÄððpjË‹ž°X¬„„„ÄÄD‰DRNFjÂp¹\o¢ç½Áƒ«ÀGdx7ZœÝ¿º•ÉÎþµžnEƒÿ ‹öKöÑt,ø'gýQýÑ/j˜þh8©—lÉuGep¶«/Ì_ROó{ý¡­ÃºÍº[Nª7ÅÚÃE’T½ ###NgÑ•(–vÂ,€UªTyyyø†ÆÉ’˜˜Èçóé©Öòòò„Ba•*U18«<„KûW2kÓ[úÌ«$<<œ ¤\Xµ+44ƒV9NXXÆâq´/SaâN§Ód2………¡Ô0#Ay7µCO±™>ãD̳á1L±62Þr“Éd AS`{›Í†iNQÝàr¹|>¿_$¡  ƒP(Dm‹ËåRrÀtEáááØ1¡PXN‘“kGGGû*ÐIÂgýövâê¦mg±"¸¦ÏԒ߯*á³&c`Zxÿ¦üŸþÉ€ùÛÕÓ_‹Æ•½›FŒhU°Ûp<æ€*ü‡ÍûJLT(ãïÓªÒ÷è  ³öwIµ|îE„D"Ñëõš‡ÉdÂÅ:¦‰ÆÜdeƒA ¡¡¡Ô×ét¨[àÂÄ>å1)ÅÚL&ÓKÖàÐ &Tp6š2é“ ?Ó“{AavD(´jét:Ê´‚ÆMÌÍäîÒY›Ã2¤ß6r|ôÃb±$‰V«Å´8ôû¢n…€¥!omÜemƒá%kƒM%4˜P&(ºrŸ1}+uª€*'AÂBáCM³XÖnÕ{¡Éì(ûÅé@ƒ e(Àû¢n?°X,‡C/êá!6›Í/„o«{ËÚyyy¾L(KÞ}0`¶OxœØl6憥ƃo{HMе‹ÚåËÊ`²æpÁ•#¹é¹n£å¡ªx9Üœ#åÈŒ[9÷9nÉ ^{Õ¢y]I{¤†á?ƒÙÍ!|6Šeí¯Ôy¾¥Ê`BÏ£íám‚YøQ §Æ¥Sc3! 
…ñÊÞÃÀ²6­ñ’µÁçÄú$ …ÔÓr: ¾…Bä5aÝAït Ÿõg.ÖÊ9šnX°§àÅ߯EÄçë3í7¬<˜7zq¶ÛGK`íº¾6͉ÅbúxÀâT]-Nçp8D"ºW!'P£ÛÓwM4V«õ•BSX;Q|ÆKÖ†òH2…ª¨Mxx¸R©”Ëå, Ë}¢Ñ7"°F >ª½J¥ÊÉÉÁ?©öeD ¬ýZ·*C{×ð¹Ð`B½uÂÂÂÔjµ\.w»Ý”;`HHˆD"Á!ˆU&(®W«Õ …‚’ƒOr¡•ÀÚ‡£<ªaQü•ŸŸŸ••…5Þ°N`upJåçóù”?httt~~~ff&ºEú¤WtÖFç%dm4¤ú\h0ÁÏB±ý“š#ÞIw]ÈX<:®u]1Lè›§s4ýä¦$„!2†¶¹m€øðÇußý£±°b\\“> %°öÿ"Ê¡Lð3AQQQJ¥ßÓ,+22âyyy™™™¨Éá° ÁIA¹ÏFEEù$Zª‚°6ƒá𒵡ìÄMwÅ¥Aσj8V™¡ŽÇÇÇc],ZY,VTTÆhàWePɬýÙû/1Xe]p›É.,à‚B ¯ï@ @9àÞzÁƒ!J§”E%°¶G½ã§µ½A!66–úLDLL n8{ø‡`Ñ;§Ó‰Sˆ^y6..ë ùÊ éÁÚL&“bm_mvëCÿSÂgÝYÔ€ú³Nïä7õä»ÙæJá° 9ÁdÌš4åU—Jo§€ô îçY FÊ¡j”ÌÚM}!¬öKAUªT¡þär¹Hðh•wˆŠŠÂÊJôƒh!q8>Œv®8¬ýÇú³àkƒßÒºN]B°“Éô¡kZɬÍbߪÌ>î~7 }+‡’YÛ_2‹U좄ñð¸Sžú×鬟Ÿ…+† á³$übŠf$FùlßÊ›µ½Çãÿãƒ7$)U²"°öï¬÷’µ¡æãö9 ƒÁ(µŸà 8k¸v¡³6Ö™}®„„µ‹ ÅÚà%kEÜYYY–¡ïA™¼dm—ËEÙ”+(˵7¬m³Ù*å` ËÁƒµ¡¸´¿ÿ«ßs1`™ùʹ:À[Ö¦UT&P;‡µ¿žÚØûë¸gÀT¢å oX›áÌ'ÁÍf%–ƒ7¬M’¤ßÒ{ EY›îh„58Èô\¿™Ð€'²¶ëî=ÂéÞ‚ ÅÚÞ«ÛÀ XûÒ¥Ko¾ù¦ÉdjÝ<éÇY½¸¼òÊÞâg°vÃñd CL.@H-RÔ‘ _9´T ¹ü‰\<²X,´<ŠD¢JVæ ÜtÖ.*ÊgqÕªU«V­€9‘±½#+U¥Ç‚óD BØ\á’ÿ1›¤@xxx9%ù 0Läbí|¥¥õkÛ€Àù|ôèÑ.]ºÆjýôUßà’þWvmÒ.’¢ `‚ÈÈÈJVŸ×»¶Ûí&Yõ‘@÷Ú÷(™µ}œ/^Œ¬½(6qP„,нö1¼±k;@’ß"kGEE•k«ÿñl±ö#:k¯ÿm8_Itm„÷»‘dH Hz1€ùܲ6ñŽ t¯}'²6bñâÅ‹-‚JÊÚàån¤dѯ¬V/CµËŸ³6k'Å >|»Ýµ›•jwN©2ö½¢T¬Yú|˜n©"Àn·{验¬QT,&WN°Z­’ç%kIjsùçå’X8€Ø­S—еÅb1‹ÅªdãA¯×?£¬ Le Ñù OöücJÉÈw¾NPÑðDÖf2™ Û*%J–ñcÇÞ}÷Ý@÷±Ü1ïI¬-|ç½qcÝÍòÅ3ÇÚmÜÈÚukVªEÂîpÝʰÆD°Ÿè¯M‘ÈÚ¾Šò¨hÀÈÃ'º*£Ï»oZU`u×'¾Âðz!@\ ;\NÈÐxá¯ÍªV*ëxÀÍg§Óù ±6~ÜW¼îW±ùò\Slã?¸|¦—lÌb±èù‚+ ¬V«÷µBBBBІ­?£8sæÌçŸþã?V«V ŒF£J¥ò2Ëë3Ýÿr€O¼n\lN‹J¬XÉyåÊÚPÚì€Y9ƃÇ9™•ãûÂχÙl¶Z­Ï[¸ìv;Ê¡üÔ–²Àår8p`íÚµôƒƒaË–-ô*qeý€ë{.ø¸ºÁ3›Íf2™l6[ù±á³‚òfm(UÈû€±ÿlØžIýÙ¿{•_¾n&åÀÕtÕ…kù¯÷K´ÄÊ.—‹ªŒ‡TF$¤òò«ê[q`·Ûóóóéï-±XLeÈ2™L,ˇ5žÍš5KNN^´hч~ˆ©rìv{×®]ÓÒÒzõê哟8 0 »ðO6À,€á.€M jVþ®fð3ƒÁ ÃY@’¤N§ÃÄúàÖ†RiܧÏçžý§™5–Ì{j[åŸÌ9_͘{òrºÓðWLäçç“$“˜˜˜˜˜ˆ~©|âJ¥²‡™Ñ‘——Çb±âââ°Òkxx8½vŒR©¬ k‘ &¨ÕêÝ»wãŸû÷ï×ét¯½öšO.nÐà@.@À§SNä¼óÜ(à‡C¥Ra ß„„,ƒ…Ê –îdžµ%À?¬ ¥"î o6hÜ ÀúÙôEÙ¬)Í­¾iµ:Õ‹Ùf7ò\Õøþý¹Ë¹÷<ô£²ZùJ‹Ûí¾x5_­±€Éä8w9÷Üå\ú‰àv»Óo©.^ÍwØÝ:½ #ð”«éª€¬ÍI’´Ùl˜`šl$,§ÓÉ`0<öív»Õj¥ØˆI’´Z­ÔŠÒårY­Vz3ª±ÝnÇ €»‹ÔWxŠG¿“Vc)w`0Xê“Zà]à6`±BÀoÝn÷ãäPt­m³Ù¨ZBðX÷@¯^½˜LæÒ¥KñÏ•+W6jÔ¨~ýú>‘Ã#Àd\hðƤlÀZô*ªX²à:ÀEº‘Q`Ð\ ¤“ pàú£¼o¸pô{ÆöÙ0 ï,²3“É g0hAÆAëQ1Õf³y¸ÛâcÅ*¬ÔAáƒ:Õžþ³j ~cm(•©dÛÞ»zÖŽ-°ŒXwÄÀº0äíÝ»æä.Z}“Ì«ÓÛÞš²ã?êTç§ß1÷뚀•=‡î<çxÊ ‘«·dÔ­)¸zàuqßëTçktöœ<ÇÌ÷SfLj÷è{Üzý¶)BBHC9ÕCÙ,Ö¦¥Ý`Éš+oO?Á1ZѼu?w­Wǯ" ƒÅb™Íf,⃥R)š …ËåBçPÌ2œŸŸïp8˜L¦ËåB‹Š^¯·X,n·Ûáp`Ò Ü$a±XHdR©kØl¶üü|,΄E›8Úd4^¯'ÂétÆÅÅùŠ¿h4CBB(e*** ?`Ž*Fc±X¢££³³³N'ö–2§¨Õj| R’ÉË˳Z­(+¬£„áf³‹“$Éãñìv;n ÓåA=‘ôôt‹Å’6bĈ¥K—Þ½{777÷Ò¥K ,ð•0,gÀ8*hm ‚3`ÀÌB‹Š€ð5:¼°Àð@_€ÑâîV`æ¡d€05Àp€Å»R4ü PàÀ€/x^÷ß·ãÌ÷bµZóóó@¡PH$’ÐÐP£Ñ¨V«qÌ$IYT ÇC(<<œËåæææ¢–CU`G>¦WÂj-ñññ,Ëc®Ñ­v~ƒ?YJEÜ×njš®z¥Mt‡´*/¿—Ú0 ý„6-íÖwäŽjI¢ÿ}Ú ¦}uìæ]uÖ™¡1Ñ‚|¥¥ÓàM|~xùüÎpí–©Y£˜§‡š-Ž#'³OléÕôE,ZyyÜÇÇßy³a˜”7øíÝÕ¤'¶är‰ [o ¿ÿÕÎ pädöè)Çö®éÚ¡U·ÛýÉœ¯ŽÞvíß7Ø¿ú*………)•ʬ¬,n!(Ççøøø¬¬,‘H„»ð8dããã™L¦ÃáP(†ÉdÚl6‘H„4g4Ífsll,†½¨Õj­V‹ÄŸŸÏåréLc×h4 ™L†¿+—Ëcbbžî^žhÄT«Õf³™’µ ILL¼ÿ~xx¸P(Ä÷M\\X­V4°à šÍf‰D" †F£±Ûí±±±TîN‡õ%”J¥H$ÂyHY¢ð­€/-·Û››«V«###>ƒÁHHH`0v»]¡P°Ùln5ù™µ¡T¦’ÿ¿þûÜ—ù!œ¿=ûR¿“[­Ÿ'‘HÜn·V«U(999EMhâ £²`‡††R¥ÃÂÂ0Žl*1<—ËÅÕ¥Ùlv¹\TĹT*¥ˆF¯× …BêmAÙRü ‘H+‰\.—F£‘Ëår¹¼è*Õb±Pº—Ë …”œÉd†††Áb±U† K Äl6Ó«Q›Àz½Ï¥®c6›7oÞÜ¥K—Zµjíß¿ÿþý;vœ3gΙ3g¨-ÊÕ«W‡††Î;wüøñT­2â]€c®¼Ð`€G%ó0Wí^X]øíË]ªHÞX€|#h €½Ü€{©<€ OÔ¬˜€B‘ŒøÙÿC¡ðÑðù|«ÕªR©²³³q‘ä“É„aÉøghh(›Í¦Æ’>’¸T*Åt1,‹ÍfãÛÚh4òù|ÜñÆš®x"5×P‘çp8B¡ÐŸ©hýÏÚP*;LÊCóˆÃî>y^þÅ÷§Úôß~ÿäÊx·3´P-ñaaÀÕ$p¯ÐØ]5¡à«˜hÁ¼Eç®ÜPž½”—~Ç f‹3[n€ª ýFSêEÜÍ0ÀÅëù™Ù–zmÿ wéÆmM› åöDЋŢœ[qgR¡PàÛžjC³§ŽP )}]IŸÃá°Ûí8@I’ÄEý‚ÔéõG÷fs8E« ùl6)ß4Z­677•kzßèÉä85£è}æp8ŒŽ“²æcÙoᣔt:õ"‹Åòî»ï¦¥¥-[¶ m&ëÖ­ËÉÉAîÑ£GOš4‰Ífÿý÷ß7nüé§Ÿúôéóé§ŸvïÞ½ìrxà€) €ÃXæÐÜH¦ÙR ÀÆÂÏõhÇkXpà€'ýuZ›ê…îÀ‡…DZ €²Êý Ô3œN§Éd°>zé>(îiÒë‘ÒÇCHHˆÉd2 v»Á` šb·ÛéÉR¨ö8×Ðk ÕÑ?wÖï‰;ý–ªJ¬X 
`›Ã|¹YÜß¿÷Ö^zú¢<>¶&ÕŒÅd€‹¦xÙí.aÔ}â‡gåWn(Û§%¾ófŠHÀ©Ýz=„pX`¶8Pã£ñᵆô©1jH]z¯b£ýêxgµZõz=eÌ>ŸO„\.·Ùltï7ÊŠG¡Èˆ~¨ŸÏG-#e †ÇÈs»Ý” W$ñx-™>¬ää%L&“Åb¡¦%“É …,+//ÍÙ;Ñívë` Ñh –ß”J¥Tf¬)Lo‰vÏ[·n©TªÚµk''x æää¨ÕêaÆQRâr¹ÈÚзoß?ü0555::z̘17þôÓOg̘qàÀiÓ¦EFF>ÖhÞ.üSÐÞy”¸Y+ëc,ÑS¶¼ 0`6À¶B~§7öðTùÀ#fÌÿÑ2† ,fMDhh(¾Ë=š}ô$I; …Ûí–J¥G£ÑàÜñ î Iˆ(ÖïM%|.{ö›ô#f‹Ä ØÒO®!e2×o©©fWÒÕL&£jÏâÜ?-»´|~ç×û%7n™] …5x!.§¬³Ün÷‘ÓÆfb2²uD㿪 âÛo›­%y”,‹ÇˆÄÁ„/$j¢Ž@wx°ÙlEíªz½žÏçGDD…B‡C=8º”P?AÿÊårQ–e³Ùìÿx’$M&“‡‰†š]ôƒ!!!>6žf ƒH$B³8‡Ã¡kaN§“rÀåV­ZõêÕëÍ7ßlÙ²e¯^½ðU‡åqaaa}A§/(ÞŸyxÊØz¨ªÓQ51Ã//^ÍŸ8㌉L_È„=[wß9qVþÆ;{®Ý2á[á­¡õvÌýjþiÞ&Ï5M™ulÉÚk²¿¦XÁa¡T*5 :®FÜEÄÑC„ÅbÁ%!Ç£¼YÍf³Á` ¬{p\"1Y,jôs¹\———‡q‰XP ß "‘W‘hQ±X,þ÷*l6[¡Pèt:”ƒ^¯W*•€ªÃ€ràr¹§C’¤Ñh4™LE…@Éáܹsyyy¸ûJýAèpb6›sss?^µjÕÝ»w8p`Á‚Çïܹó½{÷ptâÄ +oÚ´)!!!++køðá°eË<.‰f̘±nݺ?þøã©CºDtØpàÀ¯|Tèp²  =@ÀGJ À¯†¹š@pÀàX°¡ð«×ÆœØ 0ºð `$À§',çÞ)tCô3BCC].—ÑhÄÁ ÑhŒF#*àÈËF£Ñår‰D"§Ó©R©ÐT¥R9NlF!‹ÅB’$Zñ¸D"±ÛíJ¥çïÃápBBBT*R¹ÙlFŸ¥ò¾k4fú™µî‚û*ÅœÿuN‹ù‹/Îùù p8ŒñC“?}¿):u¼Ú¥æò 7R»nÎ9÷ú§ï7årYl5˜Ý">sÆßëEàóØôTVïN ¯¿:´Œúý»ŽŸÌ9~'C߸AôWÓšKB93怡¯ÖÑ›ìUâ„ÐôEÙÞ5]'Î84}îy<ëÀ†¾EßåèèhFc2™(+Ý÷H ¨Õj¹\ž˜˜¥T* I’L&S"‘ˆÅbƒÁ@çÙ°°°ÜÜ\ôŸãñxxŠÃá`±XQQQ*•J­V£!ý¢÷ʵZ­Z­›²À`0d2™V«ÕétÔ‚#44”Ú/‰DZ­Öf³ÅÇÇ‘ ADDD ™Ò#™Wxxø¶mÛ† öñÇ¿õÖ[(»ÝÎáp¢££ÕjµR©DNŸ>ýæÍ›,K«ÕÊd²¯¿þzêÔ©‹-úî»ïÒÒÒV­ZõÕW_á¾B.—«Õj‚ š5kVµjÕ5kÖŒ5Šú¶fÍšO¾ÛÇC ðÀ|€ ê$€ &æ\Xð7ÀhæKØ2ª¯,€ßÆ,`ŒXð9€ € 0 `À[…§|  `,—í &™ªZ%dÇŠž%·$9µÒ!¥J2E9™>±¥Ëå*mÒA—Ëåp8è^ÒÿâÅ‹¸yòäÉ‹/2ŒW_}U&“@zzzjjªÉdN·eË–ß~ûíÂ… õë×?vìØ?ÿü3pàÀ1cÆ|÷Ýw”ˆúöí{åÊäú­[·Úíö† –ݺvíÚ¹sçŽ+e’) óIöe €«ˆ!¥(tÂBÄÚžä€ÇiG ”’˜dêÈ“š…~?ŸèÜ ý¯½¼²ÓéÄÊp%7{œi»dØl6ƒAF,K^^^•*Uè—ò~R`åP*P¦íäð=þdíf:tJ%“É8PêU6—KÄp½:«dÖ6‡éÁ¿,&óµ1;'ŽL™2>•ÍanÝ}gß±¼9Ÿ´¤· Wˆ„Ñ ÃKÅS¤Š%I277=^I’Ôëõn·›¾!ùt—-xãÙbñ32ò¾}ûnܸ!‘HÚ´iÓ§Otæ?~ügŸ}vòäÉ:uê¼ñÆÀSfΜyòäÉjÕª¡Éèüùó«V­Z¶l™Óé1bÄܹs›5k]»ví۷ﯿþJİaÃÌfóï¿ÿ¾wïÞ5kÖ ¬Úµk‡ËóòHKàM¼‡—¡1Eiò€ >€`!@íGYäFR^Έ§‹€·Z­:ýN§V«år¹—òä oHе“““+Væ6‡ùç¯]FL܇öØ(öšm¨8ûçÔjƒ¸ö¬izÒÓÓßzë­óçÏ@rrò7V®\9{öì­[·&&&öë×ï³Ï>[¾|9ÇS*•;wî¬U«Ö† >úè£I“&ÍŸ? 
[binary PNG image data omitted]
swift-2.17.0/doc/source/admin/figures/objectstorage-buildingblocks.png
[binary PNG image data omitted]
swift-2.17.0/doc/source/admin/figures/objectstorage-nodes.png
[binary PNG image data omitted]
½F){0Xv0JÊç§+¥r„ÐJÚ†üx|þŠðÈ©RÞ}žwe倔ÿ½Ñ¡×cÁMÓŒ­RšÇ[–hgm¥²‡€×ÞáaÕ½}»ÊÀ¶CC ¬+¥ä0Óòý<Þ6J©¬..D”Êhô{²Ý“ZbWe¦÷g¦À«_­¹÷<°æÏ–í¿bÙú¿|>cÂ/?í¬ëúÆíž5[Ÿ~AJëXRQ ¡°omá“ ósçÖu^´JPJ%Ð,Ë—ß•‡9ªà̲ҊE>@¥íª\œšŸ3»Ý³E+k×€ÚÎxãOÔXU·673}üŽx-l*)­9RZsⵑÒ΀–@u~¸Q¯]w™vÄÏX¼aû‹®ª[û/¥äè»ÿw×Ç]×Ù¶–BÓãþ"„^ ˜†‡ |Æj‡¶—¼?°´bÁ[‘ëë™Bh5¶ y•Û®¹e÷€ÈÚý¶ªÓ„ŸweO•–oj¼6JÙ]û{!"¢CªÇ‚›¡y·Zðô[¾  ÝÉ^ª”=²3¦nîJßRÚé i® p»’Ë}~@Ó\Ÿ7ýdé]:ä6kòo,\qûÑ–¸(ªŸ àSˆÈ¶JѶ±è•ˤ´ŽÕ4×§CóNºnè “k`Éš?]Ö¶šl.„V¦”VZ¾ #Ú®=Bhé)#~Z߸ýy°öÏKÖüI?zòM/vö4¡m“ÊžRZ>ÿ›ù9³_o¯]SKééáöƦÎúŒŠÌŸ³ó²gínÿ®rÀ·+åÇL½í‘ŽúKð (kò•@)™ÝQ;@D~8¨Ö?ò¢íË7üíÆ&_é«RšÇĬR@ü¾ Ã[2¡düУ” wj§Ó`wÕ’[”’à #ñɱÃν7{@ø‰…+î¸Ç²ý?Œ¶6è”’Šš¦R² +—õÐuw9LÀ0þÑæ7DDÔÇôØ·!y'¼-„¨”Ò:zÙú¿œÔ^»ªÚ5¿àÒuÏkúIJíÀ±àv%o€¬“ ØRZÓ*jVôØÅQ…К@©ðYÝ®”zPúl˲S Ñ›ýLlûœù)„¾ Ê«—Ø•Z¦Œ½jeæ€ ç ˆÚ@°öÞeëÿwò|,gÀ“Õýª½¹\…¥䚦ï§TrbÞSñÚ´ž{¨Ò…Ð ;ú80—‘´ ¬Èϳ#ù¹s‹˜RÙ K?Èm¯¦ •= +µvÄнÀÞŸo˜¨ JÊç§·mŸœ8hØ2x¬iùZý­•W/M‘ÒšÀÊËžÕ¥‘e)Í©Ÿ3û‘hh — Z=§ Þ,KÆj@¥,[÷ÐèD‚;c+ØvhnÛ:‰ˆ¨oé±éüœÙ žì[ ¹¥ì©¯ÖÜû½Ø3ÿ*kW'-XqÛc¶ ˆº¡y'Þ¯Ÿ–@e«C9KVßs‰TÖL!ô-“Ç\±0º/C÷¾¨´-;þý`ÛðV^½4hݶŽ]±áá¹m—oÞñFiù/—‘° 2ÓÆU0¥´§–~”m Eš0J ¬=½³ý&' |¡º›c/O„ç=ÅÛfâ¨K7§¦ û!ÙÜRþèÆÂ—Gt´Y“~óïÈ%<†®ÞòÔ{ _»~õæ'§–”ÏPIºî}5ú±OÙTôê°†æ€×“ñ—ŽÚNsù§Bè[¤4ûrÕÝWÇ ;ÑïÓ’‡… #ñYîÒŠEµ½¬Fô9qGN†±ìÀɱÏS{ÁÔ´|ÚWkþôýªºµ‰±Ë«êÖ&66_šfì9¬=‘fWå—{~† Í;ܶ ‰ñ#.,Ô5×ÇJÉ_¯}à†èzŸ¿Üµµø{•f ÏædLñuô¼D ¡—@EõŠN_R’ÿ€òkï[´òÎÛ–­ÿó©KÖÜsÑúíÿúg¤·=£p“Ç^¹FÓŒ/•²øzíý·øüå­~Ÿæï…ˆˆºW~rÂQ“nx÷ËUwÿ.d6þƬùë‚å·ýNÓŒÕJÉt¾xªKmgjRÁ/Ú›pÖ>0oÙM× ¡o _¤TŽ„/9qЭ± 2ðø{wìþdš-Cgn,|õ¸Í;ÞØ š¥’ùJÉ!ÙSÆÈ™‡õÛ~lÙ‹¾Xzcƒ&´Z¥RöJÉÁ iÆ’‰£ø&>£rËη޶íàùÅeŸQZ±`R2 €~üŒ{NLOñzUݺË-;pñ¼e7ÍÖ4×*–”ÖĶû6þÚ… WÜñ¬eûTÛ°š¡‘ IDATùÃyËnÞ¬ ½D){ qÜŒ{N‹Wï‘ã®^ºxõï†êÿXY»æÉ‚¼OOJØî¼´Á¹Ç^WZ±àA)ÍS*jV|YY»ªH½P){\džtÝóöÈ!ߺ#ÞöJÙcç/ÿí›Z½R2[*k2®{^›5é7ÿîè¹õ¸Óì´ä‚šv<2oÿrÕï¯Bß @WÊ.B/˜5óâhûaƒNyh{ÉûÇHiž´­øÝe…%l€JÙ# =áÝ9GÞñ§Éc¯\ºpÅ[”²'¬ØøÈ¢Èc¬i®•ñ>?vÍ–§çúƒµ^¿í…{…Ðv ¡+%s”²ÇÐÑœ™6îÎhûoÖ¿š[vŸÖÞ?ùoχ‚.•=.3}Ü9“FÿhCvÆä;+jVN0-ßõó–Ýt–z‘Röx¥džúúƒÏx m íIðd>ïó—ÕýqÞ²›/Ö4×z@Rš³Ú¶=òˆk¾þzíýWûÕw›VË5¦Õr ¡•ïÛ3™vÄÍÕõë_6­–k–­ÿó¹"|ÜVʈàÀ¬™ž…MDD‡ž@ŒÉ9ñ @¸¥&–ÚºÞp(w8dàqË¡º·ÁÚ$• ”5@Š&Œµ†‘øêø‘?øÙˆ!gìl»]qÙgg+e2Œ„@É4©ìIBˆ®¹æegLúé”1W®ŽmŸž2Â? uÔ+5 % ”´ÇI% „>Cw–š4dAôSʪ¾Ê·e(ÓãJ—Ÿ3»ÕuãJÊçMBÔÊšõº×“a×5n­²¬¥ (%GG>‹TBßæq§>”sôžkªäøiUÝÚõ¶ ¦rˆTÖDÒ5÷²‚¼ÿ§”-J+æ£iÆÖ¡y'í¹’ÿ œY«wW~™¨ÄúÆÂšüœÙÛÛûù Hå:èä·*kWm³¥éTžRöd!D£®‹“r7{Êoÿ–’4¸Õá\%-Uݰ! 
š‚,Êš—¦K½Ù˜=å–V×o«¬Y™Ó¨ü‘zÑÐA'¿]>0kæn©Ì7}þòd(™"•=JB«4ŒÄO† ÅÌDoÖÊäÄürÓòy›wNókn “½ÒÛ5ÑþëõàFýG“¯ÔUVõõ"D~ï4ÍõÉ´ñ×¾ÖËe–š[v}#d6ÿ:ªG]cìÑáó¸Óîœ:î§û&&"¢¾Ï1ÁÍãN{Û´ýë“Uõv-t`L˧{Ü©w+¥ÜnW꺮û¼·k:\*8ë‰â²ÏÖ†Lß(¥ì !´&C÷çfN[ÐÙu‰ˆ¨ïêõ9nDDDD_Û9n¼Ø&‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9„Ñ]¹ƒf†fÛ#»«?"""¢Ã‰ÐEµßã):˜>º-¸PP¸@ˆî꓈ˆˆèp ”’X  o7PB ¨ëÎ~‰ˆˆˆœJ@%CA­n n  šü ÞǺ»_""""'Jð¾%€ÉÝÑON """r7""""‡`p#"""r7""""‡`p#"""r7""""‡`p#"""r7""""‡`p#"""r7""""‡`p#"""r7""""‡`p#"""r7""""‡`p#"""r7""""‡`p#"""r7""""‡`p#"""r£· hO]ãÖ”æ–]i¦å÷$'ªË0±N]õv]Ý©¾©0©²vÕ°@°>ÇíJ®KK¾+/kfUo×EDDD}SŸ n¥ ó¶—¼÷šR2µõaºguZò°&޾lž“Cœeûµå¾&¨¹XA%D——W/ÃÖoo”sôÍ£†|gsoÖHDDD}OŸ n¶ ¹”’©BhuWêGZP);Ù²£,;0£¦aÓŒ¯ÖÜ÷âÑSn¾¯·k=PË7<ü3 úJ!´ÚOæ n#¹Ä²™¦åaZ¾9YéJ{»F"""ê{ú\p‹Ò„±ûè)¿ýSì²ME¯M­¨Yñp TwѺ­Ï9qôe {«¾¥”-ÁšÃzÙÐA'Ç®™†Û•jõRyDDDÔ‡õÙàϸáß[UßTøV X{™Ï_>ÀBؼãÉÍ;gZ–ˆ­Ì,¡ #qˬI¿y8º­Ï_áÙTøÊwƒfã$©¬T—‘´1'cʇÃó¿Ym³~Û ³›[v•èÍ^=iÌå_Äî{ÅÆGÏ6Íæ¡³f¼Õ¨ÊjlÞylbBΪI£4/¶]eíꌢÒ.1ŒÄÝÓÇ_ûzÛÇP^³"K)•$„Vß6´@{¡mÍ–§OòùËŽµíP¾®{J’ó¿˜4ú‡ bÛTׯO/-_pL T7Á¶Í¥¬TMsUg¦{M*ÛhlÞ971!wEÛíüÁ÷šÍO]-4½å¨‰7ü#º¼´baÞ®ŠEç›vËXÍïv%o7ü‚S’û£m”²Å¦¢×§5ùJgXv`Tf¶€0Ý®”Õ3'þê(*ýhDyÍòØvh  MsU&% übÊØÿ7Þc%""¢øÜÀÐÊÀ–¡Üè²²ª¯Ÿ BèµJÉd)­Àö’÷Gïª\ô€”Ö0¶"`Yþcwîþì²Ú†-÷Mí›5mcõ¶ wûƒµîÜýéùÑ`µaûK3›wÞièÞ¯ òNøÛ–o¥ûƒ5—‡¬æÕZ·Ý•‹ök.÷Bý3^ýÙ&Öl.z#¨”L_½ù©S¦Œ½ò“ޝiùô¯×>pŸiùN` ¡×[v`V0ÔpÞ’5úçÑ“oz0Ú¶¨ô£ó|þŠk@áDвGͦO¼žôÊHÍS´ nE¥Oñk.wI{jY½ù§Ö7m»+2õJ¥ì,ÓòºrÓcg.8늼ìY•PßT˜\Q³â™Èf¶zR2E)åð̆í/UY»úqºz¥Âo™ÁÉ@… €Áˆˆh?8.¸CõÓÀíJÞ»\­fÆ„ëOOJÈ @MýÆ4°eHÛ]¹øn)­a‰ÞœG'޾ìùOFhcá«3ªêÖÜßä+¹e{É{kFùö–¬ô õ™icאַßðHqùÈÏ=æ²æ–²„꺵¿Bk9ä[· ¡«Qg®+¯^VcÛÁI5õÓ2ÓhˆÖáÖL€Ô¤Áqãz‚ôz¼Öþ°®qËý‹WÿáÍ™Ó_>ø´ÂxíWmzâRÓòâ2’>™4æG·¥$æû×n}þ„ºÆ­·‚µ—l/yïÝ‘C¾½%v›äļfL¸þhn)ójš¡½ÙÁòª¥e¶œÜм#)-y˜/Ú¾ÉW2½Ùó ¬ziv}Ó¶»ÊËšqõØáç¯.)ÿbpqÙ¼ëLË÷»?¹*/{ÖݱûÔ4WáQ“~ý=¯;ÝŒ}þk6ÿ€ž–2üÖ#Ç]ý.Bõ®úÆmiü¨‰ˆˆ¨ Ç\Ç­¤|~þ’5÷Ü`Z¾SDKVúÄ­[;Ú ¦Öm}þd[†ÆF‚£&Ýðd¢7;(„®Æ¼piZò°ûhåÕË~Ýnâè.ð¸Ó^·íàä•»|Ãö—n”ÊÎKOqwt”I×ÜÒíJù€¶³ì³c«0-ßLá1äÛ+Ú{,ÓÇ_û·Oæ³ 5œ¿³ì³·®¸ýÉ Û_<ªM_z‹¿â'BhF]r{jRA‹ºš<æòϽîô÷ˆªºµgµí_®–è÷ɉyDovÜîÔO;výwnlû Ùx"{ðÀãæÀÎÝŸüP)•”’˜ÿÈØá節!O(:èä‡Â57žiËP«ßaFC[ìó/•=ò²f.Ž®óºÓÍY3ªÛ{~ˆˆˆ(¾>;âfKsÌüe7¤=|iå‡3ÓÇÿzXþ©;ºÒ?X3 ’~ØvÝÄÑ—}´pÅw[v`R«å£.{på¦Çfùü×ÐÝ®”÷§Œ½êãØ6RG½W^½ü¼å7¼ åÕ˲¤´†ºŒÄy±!¦-—‘dÏš|ã_J+¾º»rñ9þ`íY–˜UY»zVCsÑ¿šô›ßëš[–V,ª tá*,­X0µ´boVÕ4£l;˜ß•çÒSF~R\vIsKÙ)>€¢]Òá2çg˜X–åBè¡uÛžŸÛGä°iNeíêÌ®\sÎíJY֎ܲó­'*kW=6nø÷æñä ""¢Ógƒ›,¡Õ°4a¬Ó4£Îë°jôÐsþ›œ˜èj?¶nwZEÛu†ž 5¡WJeçÅòLIìOJÈ{¾ÉWr HýfÛmÇ ¿`eeÍê]–í?º¢fefnæ‘5»+—^OF—Îvœ;·lpîÜÇ,ÛÿøšÍOŸÓè+¹1jüîŠ —Μø«§›|%ÃÀ–¡ Õuë‹×Gd._—Œvîꊚ•U¦Õr\£¯815© ¥ªvÍiœ8èÝh;[šÃ ¡yÇïÛë«©¹8·+ÁmÊØ?²jÓã‰ÁPùµ [Z¼úžêDoÖËGÿ🠞ÌPWk'""¢>Ü4ÍUxì´ß_|°ý¡×€m“â­WPɬ”¤ü=s¾jê7¦5·ìþ‰Z½R2µªní-C'ÿ zÈ1*Á›õšÏ_~}Iù¼Ór3|±%Xu29({ÖûS£¡'ÈiãþæŠ ‹F_Ém`í7<­ë^_x½wYFú¸¿ÇÛÖe$ùâ-G]yÜi?,,yÿÄ©ã®~?ª;]­~ôÐsæím'|J!+#mìõ†‘Я¯Ü¬éE]Ùg‚'34{Ê-wïªXôtIÅü‹Áúó}þŠk—¯ÿëìÙSoý±®¹eWë'""êïúlpë..#©0d6Á¨6g€—}>D)™¢i®í±‡ï6½v§Rvvö€IW7û˦ùÕW­ÙüÔum/ú;zèÙ¯¯ÞüÄU-ÊïÕ5n}DzGzÂWѹpû+-eÄêF_ ¤²@ZòÐâªÚ5ÊÊ9bøË»ãÓ"òsŽym{É{—6·”}wSÑk»¤´ <™ÏƆRMsKi Õ4W`üˆ —ì> ?wNY~îœû˪¾z~Ëη^·ìÀŒmÅïN;ìܵÝÑ?Qà˜“T^öQ"èÖ|WåâÑå–í×JÊç_‰Þ¬·£ËWnúû·MËw’ÇþÊ„Q—,žvÄÏ×5÷†@¨îÂME¯ÛwzÊŸ×ñš”Ö°…¯Ü (WrbÞ»èDIùƒã-¯ª]ó]Ð5÷6œ{ìn]÷¬Ò*X¾áá ìhmÈÀãv¹ŒÄ–˜QU·ö:rPÎìV×›KNôÔ5l¹¶ÑWœØûÊËžUiè Ë d6¥wgßDDD‡»Ã~ÄmpîܲݕKžn T^³­ø?/•”Ï{O×\Pýq¶œ¢i®-“Ç\ñþ`ûÆæ¿B¯œ0êâ¿á òs¹­¸lÞ+•µkn1øôïÅŽÎ :éÅÍEo\2›¾%„ð*8ëÓŽêÙZüŸ#vU,zyçîÏVèºg»¡'ì²e0Ë4}GÚ24€JKþ|´ýÀ¬÷í®øòÙæ–Ý¿Y¸âŽcÝ®”•š¦û-+gÙþs§ýî'ûûœ¤§Œ|¹ªníñ¶œf‰ † ¸ÀQ“nxb妿ïnlÞù«@°ö²ðRaº]©oO}é½Ñ ¶©èõŸ+%3¤ŽúMjRÁžKjŒ|ƶÊÚÕ¯‚u—¬ÞüÔ%3'þß³ÑuyY3«ŠJ?~?d6ží6Rºtâ„¡{—[v`še¦±çpÐ4×ö´äaý4†ÑgmtI””ϻӲýGY¶vLûB°Æ½¿“üǼpÉ‚·íÒ–ž<ü•vž³kVnzüê@°ö‚@°î .ü¬ á³eð½®ìG)[ØÒÌ3­–メúV3+}Â=¹™ÓxI""¢ý ˆoO¸ëa-Ù2ÄcA·{ŸaêŠÄ`hª°åÅR àOðþ­›ëì6»*åBu)ù¹s‹:ºdÇþX²ú7Bõ?”3ûÂ1CÏYß•m|þ Ïîª%#‚¡ú C÷¶$'æ—ççÌ.ïh[ TïÚ]ùåP©l}@ʨòØ ÿJeÕK³›‹&' ¬ÎÉœZé2’ìýÙ¾¾©0©¶aó@ËjIHN̯”sô>gø®üo 
…ÉJˆ%þÏ«û³­hItKýN¡d‹f©[ûň[¬üÜ9eʺ£/Ëökë·½pj Tÿ}—‘ôß®†6HJÈ Ž.8k¿zÝéæˆÁglÛÿJN^Ö̪®\ú£=é)#|é)#¶wgMDDDýQ¿ nÝaÉê{n4-ßT©¬ÁJÉTM3ŠÆ ÿÞÝoIDDDtàÜ€®»«M»ÅÒ5Ï+uÅØáç¿;'ŽˆˆˆèP`p;3'þêiO÷vDDDÔ¿ö×q#""":\0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C0¸9ƒ‘C½]€&•Û2ç %GBì6_R‰ðmì2@Ä.kÛ&ºÞV"vl%ölÙFÄl/öôig+!d=!DDD´¿„PÊÁJÀ¾·áïa­îë\m–íù>¼ZÀëþ—Âê•G£×ƒ›ÔD(àu晥ºmŸ.€,a*Mlà‚‚á‘A €& 4zø¾Ò bÖ©èòh[m+"_*N û.SqïID jc¦h,[‡O„Cd›p¹78¢ƒ€>cC¥Œw?ú}l„ÏxŸˆˆè DpŠ|ßnpŠ PªÕ2Î Âïû"\Hë²Ú|¯X*2°À`CDn#딀- ¼B©!¤vkRym]4w÷ó¶¿z=¸E…Ü®-B…ž yŒ¦äqȲ\Æ–ËØÕM»Ð„RZÌ­M@…ÞÚ õ˜0¨‹½ápo(ŒÝ1ëTÛ`¹g«UÀ o+ •jo_"¦]´ŸH~uGøì € ‰ð/ò~¶áÄÞð¹OhÊØ gô3\#”ᓈú!”ÒÃï mo#ï á÷Ž˜÷=ï%‘õб÷=iϺ˜÷™è Hìv1ÛîYuŠŽL¹°78…CT¸=ˆ‚S”­ ¢AiŸð¤ö¾_D—™ü ‚ØÓ>¾bWø=‚ˆô/ö¶QBD—uzMHåu‡B3…T ¨šþYÐãZ>òþÓg‚(!¬€×=_·í5îõM—i~W·ì­¦Çµ@jšï »—ÑCžJÄû=r†=˜**c£Š½Vá3ò‡ªâ…ÌV#”ˆ„Y î:w›>cCª@Ì>÷}QØ'|ÈèçÞïö@Kí ˜*òû}Ì}»®m›xÛ¶×V*Ñávmî‹v÷駃m÷ô!cÚwº¢ƒu‘šZ­ßÛ·ˆóE«û½¥C)Py-ûÞ‡ˆ!Ôž#*{Ú¶n»B(µÏ²6ß‹hH'E_Ç: ;|'Lµ~}Žþs=·í¿mÅ[#=г÷Hlóe#üwý‡ØŽi/•hõ¾  Cˆ [‘C‡mƒ“ Y{n#í"mú$¡”Ë2§k¶<P>[ÓÞ¹Ý+UÂ^OêSÁ-ÊÖõz‚þª;dŽÔmû w t©Ôµ¯C÷J o==-:ï¦êà/¹ëîÑ϶/Òm¿Â'á´y1PñÚEŸÔÖ}ªöûÓOÌ—jw{ÑAŸ±5E^3Ú«3üȺ¦ëa+~ËxKÃ!.N˜Uí‡ä8yŸ€ÛÅíömy‘í( ÇÓÞSw,?d}‹ö_ö·¾¸­ÿي׮U°j§ýÞдÏv­þ6â,·o½]ÙëjËh»6¿¿¢uȉ =bŸ$ÕÞùÑ*æŸËØ#¶‚Ø3å¦Í¶v›>£S`ö¶mBXô~t O¤mÌ2ê"¡”î ™S4[Δ%5í“Û½´/Ìg‹§O·¨Ûµ](ã1OМ­ÛöñÞ–ÀøÈáÓ’Þ®Êa1úÙ´Aˆ¾jáó©â„LÕêÍ2ۚѶ‘Ýià„Š´íB8Е`[o{ÁzŸuqú‰Õµ‘ß.¶i“£ºµïxË##¡µí,ä‡ÃJü‘ähÀ،ێØî»]üÑÞpÌì(˜GëmöÑj];uÄ_³¿8Ë8‚Lš+hŽ×m{–€Ò¥Ð…Üî%RÁÞ.¬#}:¸á¦€×½0røô.Ó<[·ìBÓíš'u­×' õ¢èOë¥bŸADD1 Ób˜Ö‰*Q ñUÐí^$5­¥·ëêŠ>Ü¢l]oô'èo¸CærݶO÷ƒ—Úº¾Ìt»–÷åcæDDDÔ7hR&º‚æqš’£Ć Ûõ‘­ë ½]×þpLp‹ ¹]EB»Cæ,ݶOÐüòËe|ÆÃ§DDDÔᆦh¶œ ¨FK×ÿr»¶övQÂqÁ ”2èq/Öm¹Ö2¿é2ͳ5[®yÝ‹ÑÏO^ ""¢½ ËÊ5Lë$¡TºÚ ǽ ¯žxÐŽ nQ¶®5ûi!èq½¨w t¶PÊÓÛ5.vW.<Ù­lÙñïçÊ«—îízºCCSQjCSQjo×ADñyÁã5)Ç[ºþJ¨Ÿ]þë°:«´#RÓ‚!·ûEw(t…'üNÐëy‹ŸqJ)P]âþn—š<´.Á“´¥iÊu(j;PRZ¢ªnMn‹¿r€iù”²õoVMzʈªÔä‚/Ð]TúÑ;wö É;ᆑCÎ8l®ÿDt8pBÇjRN°týU§~lÕÁè7Á l]k2]®¹Mó*WÈœò¸—ôvMD½mcÑ«Wù5§ïïv¹™GÞ9~äbyDF IDAT… EMcÕ¦'ϪkÜv  â¾¾ ¡ù<™ŸÎûV~î1Åm××5NVPn¨oÚ~$7¢>Â2GéÒžiÛÜÛõô†~q¨4–eèÕ¶¦ýO·íéºmgôv=DÔ½¤ %Æ„6%„ ¡ù¹·R2©%Puæ–o=³bãcßk»ýàÜ9Ÿ»Œ¤5.#iÍàÜ9ŸôdíDÔ>¡”¡[öqRˆ¥!·kco×Ó[úÕˆ[TÐã^šàNt…ÌSìýµÞ®‡¨7MuÙcA³á™¶Ë×myîI©ìwú¼±ÃÏ}¤íú”ÄA}þ3gLøåY)Iù>°ì€^]·.§¬j霆æ*e§54]µ©ðµâq#¾·gô='cJeNÆ”ë{¯j"ŠÇ4gJ„ÜîÏz»–ÞÔ/ƒ˜.ã]·i^í ™cL·kKo×CÔ[’óZ’‘×²Ï !$ „ÊLWÛ ¥u+C÷Ú³f” ÌšñFIù¼åÛK>xD)é­®_ÿœ6Aԇ鶮K{ª­iïIMÛ÷õª›”V,T^½â„¼¬ŸççÎ);Tû9ý6¸YÆÿ³wßñqTçÞÀŸs¦lQïÅ–,[²-[î6Å$0-„4’Òà†„v 7 å¦nÞ %Ô@h¡ã‚{“d˲$«·ÕÖ™9åýcµòZ–-É–43Òóý|líÎΜyvWÚùí™™3J§f±”‹ù€Á ¡1ÐÓW{êšÍwœÇx¬X‚Ô4Å_¯ëéuå%ç¾™6+p´ekž[ÜÕ³ó WÉSUÅWïõdî\PqÙ=ÃË:K Oß×ÐòÖ6Ó žÀx¬BF(U%@G϶¼ºÆ/ÏwÚÅù'7%–kíÜPÔÖµiEÔèžË¹‘#$K“‚§QªöjjJ]Н`û‚Ù_ym¸õ×Ô?µ<Ú¿Ô´‚œ…”ªAJÕUñµù¼¹µÓòOYŸ1§g,Ÿ3Bn¦0^&†Gß8Úekö?UeZÁLJT‹RÕŠÿÔ,…êUt¦R¥ª>KU|VFêÌž–Žõúž†çž¬kzqŸªúväe-|²¢ôBÇO7eƒWèV•óK©þñLðM‘XÇ…É÷97fÆÌžU[ªÿüÉé½­¼ä¼íƒ—1­>móî?ýW8Úö) Ó…UhZ}'¯Ýöë3ËKλeZþÊÆ±¬USý¦Ãb¡¥†ÙwÊ®ºÇ¿|âÂëÿ¼ &ÝZóà¯UÄ_«TáZ¯'»…±hJÌè.é 7~ulžB“ ‘2WrlgxËøYâDzڃ«çùŒGNèìݹ4´«åÙªò/ÿ%Õ_;Æv‹cƒ[ýWË:{·ŸæóæÕT•_:.ÇŸHB9@¤ÄKa!4ò²<9§ìSÉ_‚Þܱ÷‘í]›"$ËÙ½ïŸg-šóõ6µ½W 7_–2ý¡U×ý5ñXÙ´Wo«ù¿ŽÎÞß³XdaMýÓ˵}ÌvÕ=¶Òb‘*ž¶N¡ºÍò„ÐÈ’Êo>“¦‚Ç‘mÁñ(ݱç᳂‘¦O Á ¥ä¹RŠÌäyL+´zKõŸ“•„ñ ÉZñûÄ%jÇG—ýôŠÑÔ! 
DÈ‚±xN¡ÃQªJUñî7E(—sãÞm“…g¨ª¿&iN8ü3R¡z›,‡ñبÿNZÞ¼çÈui-¥…§ÝQRxھѶ{$9™óêšÛãÛ•˜Ñ3ð\C‘¿, Å_ôÆX­¡©BRÒH™8â»/G}œÛHÍš¾zï¬é«¿?^íÛ‚›ªøDQÞ‰¬–p~Ìè^%¤HÁ;A¤&4Ò—˜(ûÿ“DŽú²=D‚_ír¡‘£Të‚Ò»Íyl€iOÜZýÀGkƒscÔÁ¥~È!y&€GOï¤EÿsËhw‘'#µlàÌY‹E¾ˆvto™‘¸íÕ3[ÇrM\UÎO÷æ 㯷»;Ùº«4?{qw~öâ‡àáÞàÞÔ½/œu~\V,$Ïñê™OfeÌ~I¦JÉ4!¹*ׄ䚔B•ñŸš¡J)4B”c8PP–IJñ’6#B#~K’äéRð´þǃ”(Gþ‚Rõ¨Ã‰ eÙük.K€÷ƒ-?¿Ëb‘…†ÿÚÇà1Æ£4>ÎQý9DΛuɸœñ©2žE2…B·¡©€R­• ³Ô£¥oX¹ä¦ÛÇs]ªâ啳>ÛöÚ¿ýEJžÞØúöÍùÙ‹¯Hõ1øð+ÈYÚX×ô¢3zŠÆ{}MF¦G_ëÆJ5˺XPòWA©awMvpäµJUÅ'æ—iýÊÅ7ß6·ìÓÛÆm=Œ.˜ª¶×:BG¦(žV“…*¥d¸ùWnæü®üœÅwan«ýëuã½N¯'Ë TkE[>ÎxL™ˆõ"4ÉHS×_ éÃüºÂyºÝÙÁ‘Ám"h+¦R.ešúŒãŽ¡#Kõ½ „U´­öÿΙˆuΟõ…÷|žœWbF÷Y5õO-Ÿˆõf¦Í| >(ñ‡;î½Æ´ú´äÇë›ÿ³h"ê@ÈÍ„Bƒ¦W QuÓº\³X¡Ý5M´)܈”ªÊøy‚Ð&¦©cvFBhtÌþÊ¿ÅSШ¾fãÎÿýB$ÚîKž§­ksÁÞÆŒåzç•_òûþ µóÃëL+4î‡,šóç5Õ¿ ë¸`Í–Ÿ?ôþæ;~òþæÛoyûÃþ_sû¿ïš ¥ÃëyBÚ«1v¹7f~”È©“gsŒÛD!¨7f~r™®>nw=Me ÕEiѪ_ìo~í!¬Â@hÿåk·ýúrB”^B¨)Ë” u]KÝX^rÞcµÞŒÔ²`vfåý=;þ‡ sÚŽ=]´tÞ·þ9Ví…*—Í»ê{Ûöü¿Ë#ÑöO Áò ³7/ñ¸Bµf.¬âñ¬¡ÉB Ÿç_ºa.Q8_åÅ0U}ÎÒÔIÌú”I¨ Þ˜q!TXºö,W”QŸ¥†[eÅ«]Quí~oîó‰^0)y¦V¾©B EñŽù *.{UU}»¡ý_ EZF=œÐhù}ùÑ“Þxß¼Y—\Z»ìÇi3ïÏÍœÿ˲â]½lþ5ßĻ©)øÙ„Ðð¤éÑ7ÏÃ’P¦1v…7fœ?Ù}#@ίºõ> 4•©ä÷†®7KC~Ã\B¸¸TˆE}Þ{ǸÎãF¤Ô¼1óBr¡¥iÏ2MÓ V#„ÆFOßžÌÎÞ3é÷åwådTvxôŒaGˆú`w=¹¥k5–®Õj¦5_aü$i®„|`êÚû‚Òqög¢Lúàæ1¬…ŠàçKBºMþ÷Éôæ!„Ü£±õ²ýͯ_í÷ænðz²š=zf—–5ºŠúBûϱXd@FꬦB#BãDZº¶ƒiênÍ´R.Vx óDAèû¦®~ (=†úeÒ7Ý´f*\œE@NãTÙdzõ÷‡ý@Ù¤§¯vžÅBK¡ÐÒ@¨~¨Y¤Ï“óÊâ¹—?5Á¥!4éHB¸éÑ7)·k¦µ„rñoÌ„0»k<W7…‹•±TÈÅä4 ¤©ê»–®í<ñ!„B£´ ucÜÞ˜±œ+Ê¿M]«µ»¾¡8>¸)UÝds©‹ ÈÙˆ%)©eªú>SÕ1M!„BSK€[K…ئ™ÖGUÎ/U¢b—©«/9íGÇ7Íb3ÎQ)€&­çªò¢¥iõ l.!„B“Lÿì_QÛ¦šìLa]ÉTñ”©k5v×–à¨à¦2ž«2¾H±„dJBš™ª¾Ï4µV‚R"„BhÜ1UmáŠò¨n˜§«œ‰Æä1¯þ8à°,Ç7*„_7­«€€ñk¨r¡0¾\a| àñÞ?õOgÈ!Óú.˜7i¹Ä¼Âû—áv<„B ‹) *‘R Äo(¤ T˜ >ÏP÷û—‰ß/sðvü'¥Bœá‹ņG{Êî«/8&¸ J#LU#4HzQIüEUáà‹ªÈøôÇÀ ‰7 ¤Ö?M!ñŸZÿ›@úÿ%þO ƒnKH‡ÉÿH rØ4HG6(<&n„äÛl P’xÐ쟎»‚B9V"0%B‘òðt„ÛG Sý·•éIÓú—SûÕ‡özÉ#Üêï° ú·ÁÄ‚ø}§G%&“ç!À$Iž‡2Ù¿­§B¤cpKbêÚîñj›HIã!0žÐÞîÿ…ƒaâ18bx”çK ûq‡Gƒb|Zü¸Ï@H ·œžô8rúÁÈ“¦%·•¨!q{`݉eûÚÞuŒB“%RÒþ`¤€ ñPCû·%É·ãÅ·atˆûÊþ咧ŧ\.þxbþä^§øöð`xJv¤ð$O`A¼c$yZ¸? 
Pý{ÌÓÀ’„&n3yÈã„IB,ß{–xlÒn—ÜÆ“$DH&€}ÇÊCxLþƒQˆLúC;øÇ§H…ÈCþãa1ÞI~³‘4©½þ?Òx7pR°:\Â!z?ÑK™½MÞ?ïPAô`zz"„ H|0ôÿì‡LëŸWö¯ïðid`~!ÈAaÏ'BîBˆ”âŸK$P’$Ý'Ó’ï‚’ BIŸ¹ýAGìb;, %æ9,(%…$€ÃSbZâs5ÙáAäð¸”ø|dûüdp°3@ÀÁÃŒÌÄ<2²’ö(žµBÖaáé°…Ÿ¡ã`Ê7'pBx!2eP< ûƒ¥ñoƒƒ>Ô†È!¦ÓA%nS">ä!56·7ð$¸ü<äy qkÈç;ÜëƒÃ!äû‡†ÄäédÐ}"@&v‰“!ÛH^§HjóhóÉþöâ‰uuûèÓ@{GZfð®ŠäuÌß.erxû‡-C-3Ô¼C?çÁÈa?åÁûýw÷~þSù9jG^nÈu¥­äWeÈ:†yìÐpspO@"ŒþAÍCáп;2€?>(( nkˆå/3ÔãCµ3xZüÕ9º¡—CÜ‹”þ/›ƒ9‚æéÿgÀÀŒÁË ‡-$ÜårЗդÃkúç' €ÄƒÖÁã³1$MÜP2ïf6|¦qœQàŒÛ•‰{™¸ÝÿO’Cï÷oÝ  ü# yÈüƒæbC3x~yøü‡×qhM‡´!‡œ'égüµ8¸1KÞx'߇¤ ä¡ÓòÁ£á†_‰Ýsìk;Ò’<)l÷÷<öÅ`Ðí/0‡1#„„B!„ ƒB!„K`pC!„r n!„B.Á !„BÈ%0¸!„B¹7„B!—Àà†B!äÜB!„\ƒB!„K`pC!„r n!„B.Á !„BÈ%0¸!„B¹7„B!—Àà†B!äÜB!„\ƒB!„K`pC!„r n!„B.Á !„BÈ%Ô±nHP5Ʀu»!„Bn$A¦ cÒÖ˜7À¯™ìkcÙ.B!„{6-Yp„Òk¬ÚD!„š,¤~¼mŒYp‹ùô­°u¬ÚC!„B‡Â“B!„\ƒB!„K`pC!„r n!„B.Á !„BÈ%0¸!„B¹7„B!—Àà†B!äÜB!„\ƒB!„K`pC!„r n!„B.Á !„BÈ%0¸!„B¹7„B!—Àà†B!äÜB!„\ƒB!„K`pC!„r n!„B.Á !„BÈ%0¸!„B¹7„B!—Àà†B!äÜB!„\ƒB!„K`pC!„r n!„B.Á !„BÈ%0¸!„B¹7„B!—Àà†B!äÜB!„\ƒB!„K`pC!„r n!„B.Á !„BÈ%0¸!„B¹7„B!—Àà†B!äÜB!„\ƒB!„K`pC!„r n!„B.Á !„B®ä1Ì™ºiÙ]ÇDRí.ÀNT]³X!‘²HYD$IB¶F}ž÷í® !„BGG…XH$œ¡2Þ) l”î0u­^"ì®m¼L‰àF$Ͳr©H4YÓ ⽎1IH€,€êcY‡/û ‘P9†e£1 lŽú¼ÏOÞ˜±” yÞXÕ„ÆLOÄïýÝñ6B…TUƲǢ 4¶,Mí’„ðcYV³X‘RëšÐñ”†™ª„Ç´Q½’Òz"Är…‹3}Q#Ûã!N­”Zcº>›Mºà¦2ž¢p^D…,…DÂ4(Ò%)í„ì”´qUiŠð†#Wëz‰„T „ J·ÑSAlj ±„H™r¼í ^HŠòΔ…Æ‘b2k,ÚRËW¿ðЧ’Û-Mí9–…5‹} JƸ&t|¤$â%¦*¯Œi£@†Ïûøb±<…±¹TÈ9Tˆ•Þ˜iÀnAÉv¦ª;™ªDÆrÝvpmp£B¨*ãTˆ¢ÄnNˆ÷¢¥}’AI‹$t³Ph;×Ô.ãU“$¤ÓôyÖŽWûht<‘è 2Vï6 ß[çУà™cØ$åšú¨ 48†m¢cD…ÈQ,vñq6C$¥›™¦®“¢ÐqÓLó3ã½®©\S;à]Êyšj±9„‹¹TÈ/ê¦%tÓÚ# ÙÁTeû±~)°›‚Ñ,–¥p1x7g(`JB:$%’’¥í\UÚ$¥¦½e#„ÜD(JW•^»ë@IJÆdÛ$ D¸¦vŽE[èøi¦É&r}BQ‚¦¢|!<ªÉ*¨àsˆçkû”f±&IÈv®Ðí¦®5OdmÇÃQÁMáܧ2^D…,EDB1€$è–”tHBj$¥ï EiãªÒgsÙ!„r0I©ayõ°ƒHIUÓ*£\Ì%Rœ¦2¾Ze¼KØÚrÃ>'ŸÜà˜àæÄ¾U@@Hé i„씊ÒÎU¥óXRE!„„Ë£×@¼¨šV1å|.b‘ÂÅ*_ÔÀAévSW«vrƒc‚D$!­–Ïó¨ 4fw1nÐÕ»;½¦þ鯯\òƒ»í®½5[~qmEÉyËË^ˆ»ïBhœ0]kКà …±,Åâs‰s¨'yc¦ ‡žÜ0¶gÄÇœE% é }S%´­Ýú«ÿú`Ë/¿•<í½M·ýpÇžGVŒ´p´-ͰúNûêÐñرçáÞùðÇ÷E.=1mãÎߟ·v믯M;†ÙûqÓ zǾBt<¤ä¤³wWFò4‹…•Ξ£:YbÍ–Ÿ_·y÷ŸÏÛêбêÔ¤Y,¬$OëìÙ‘9xÚÑÔîvî»oùÕØW‡& WÕÓçùÀHñýÍôyïªòš¤$… ùyÝ´nõGbßöEÓ4˾!„Ü $d‚”Ž©i4®$Óµ ¤Z`LJŠc‚[ÿY½DÈ)ÜæÍúÜŸ'û¥-»ÿòÄ´–Ž ehpNÙ§väeU½Äxt!4ü2Y IDATçæüE«ÞP¨>0&Že…VàÌ·6Üüp[÷¦ÿa,¶Ü°úÓ£:egÌ æ,ûqsû·I)6îŒÇä,} ªâÒõÍík‹M+BhoKç†ÊœÌÊuÉmB#~oÞ“¸ ÍY*g}vkOßž5áhû=zú„оP¤¹zúö¤J)2S|…}”*}1£g¼œÜ¥jX¡žšÓVÜöM;ž:²U×üîýMw<îkOLãÜ(Iñý;q_U}[£±®‹EóTêiÜFWou)HéßRý—{Ó,+4ä±­hüÍ*Y]ר~dçÞ|?1-i.U¨¾[SS8@ª¯pKW ú”®@Mº”R+Ê;¡mp;\˜3‚¡ÆË6l¿çÓ„à˜£“ ™'|8ÑëuTp“ÚAÊr»ë˜hššÂ§¬¼¥¡å­GJ OkÞwàÕ wþî6¿¯à;PÃiþâÿà÷æ=ÚÕ³ã†;÷”bà€È¬ôŠ'Ú»·Ý±nÛo¸Ï›³W¡žèüòK6ÙõœÐA‹+/ÿýºmw œ=è÷æ?ÞÕ»û;›výᯡHËÙº–òZNfe_næü'š;Öß³vë}~_Þ.)¥`æô³_ÛRýÀÕk6ÿü;)þÂõB0}Iå¯Û÷ŒPBüo÷”Ÿ4¶¾ýWJÔýŠâiŒÝ àߌEù¼93`‚] n#=µ¤µ¥c=_þ…ës³ªð8(XQuÍãï|ø“OpaÍHõ7tj*-V45…‡¢­‹êiÈÍœ „ÕûžZ<øóV¡Z³GÏ\{âÂï>dϳ@ã‰rá iB!­½nG7ÒA@Žx8 7KM™ö®GKïNܯ(½ &nº9=¥t?ÀŠªk¿³½ö¡ÏÅb]‹²ÒËï]<÷ò7NZtÛwÿ¹>k?dzÊôâ»Ü”}O\ߨsf0Ô8=-µäM;žHO-­Â|1qßïÍ3ŠrWüÀ°‚™'.üîC›wÿ©5m_îõd­[4÷Ìù™í”jßîìÙ¹º/Ôð1¿7÷ïie½©þâXåÌÏ\ÒÐòÆÅÁpÓ*]KÝc×sC‡«(=¿¶³gÇ#†ø(@iÑÛ³ÿ¹ÿ{wã-Ùœ›¥ªâÝ9oÖ%›Ò÷7ýìÓo­¿éQ]O[Ãxl¼=-e[Có›l¯}è]O{KJᩜùÙ{r2+±‡Æ&„(rFñª[÷6¾ôO€ò’s÷¶vløÏ{›n\U|;-^9£øÌ+2Rgüª­kã]=}µ¯KÉSmäç,ùCsûÚ»ßÙxË"•zx½Ù;—V~sBbGã‡rž‚+Ê„ï*%@ίºõ> 4•©ä÷†®Ö•?Q¼1s.â[†ß{—¤Ô˜ðõ‡#WJ ïD}ž7F»¬?ûª¤T3üÞgÇ£64zžHôsDÈŽˆßûØñ´ã‹+ Èóc)þ{ƪ6t|ô¨qå|qÄï½ý¸Û2­b•ñ-¯ç#¹V©aõ©u/.˜7ëó›Ó¢F—ÞØòNùœ²‹vôôíImhyk…ªxCóË/ù0qV)&­Þ÷Ï¥†È×ÔÔîÅ«¶¦¥Lìix~v0Ü4K×R»çÎüô‡ªâ›²g–ª–U¨Ö×,Mýé±^܉]/Ú`ú¼odþ½ÿ.OO)éJ>ûsOÿæäg/nJO-ÄÏü5̾ìâü7%ŸUÚÐòæô®@õ|J+;cNuIáiÍña›š;Ö-–R(Ey'l.ÈYÒ}øš§o8r¹²!êóŒY€õEcÙFŠoB{6õ¨q"å|yÄï½uø¹‹øu¡ÜB¤ˆP&è¨7®Ð*„TÏa:uÍ_BS‡GKgÉ¡ ÀçÉ1¡ +½"”•^ñæàeª‹ùå_ò˜˜ŠÒók v¬ëE#S^rîÞÁÓ*J/¨I¾?ø}O(-:£i¨aaò²öæe/QpDîB¤Ì[rŠcÎ*`ªÚ™BC‚ „BÈ]ˆ´çŒR‡7I@@‘Ò¶KI „B•”¹’[‚›£v•ö›C‚س_ÎÈLõ ?'JènSÞÏN9c»Ýu 'È^93-S©°»7 ô˜ Úê—ì®!„†£0–ª tÂÏ(p`p“„´ Kí®c¼-Y^¾âG·þ·ßî:Ü¢¥¹UûÞu÷tŽnÅÞ“îÿëÏgÚ]‡›|û7ç[ÀÁm_÷‹«²æ”Ÿ­¨:·»·èj¨eúÙ?²»Ž# „öûeÖ 
7åû»þ3ši0­i[îC¥ym³»–‰F¹(ÎTå°ñû&‚ƒt!'ý…Ò…Ê”Tÿ”=kl´¼>¯«^+]×&ô(®Gܳ½T¼zÆG.»1?#¿Ä²»·xí÷7™`ÛxÓ‚Ѫ“s2Wy–fw-n±¿º×û?ê’Dˆ|蔲agŽ:Æ @Ưý¥*Œ§Û] B!„P2"e°kýŽ n\Q:@RÎñ„B9 ‘. Óø‚cÁqÁ©JÂDJ¼VB!„F¦Û®lâ¸àÖ¯ ¤Ä]¥!„r )’@ЮÕ;5¸7„B9åÂTìq;„$ ©ÃωB!41¨© %!Øãv( 1¸!„BÈ1Hp㊂=nÉ$>ìqC!„“)SÀà 5ìªÁ¡Á@'BèvׂB!R¦€m½m¼rÀ@p“”‹4Ni—ÝõŒ‡m[j»®¿æ6¥{„c4´º2½vW2¼®Ž`Ëõ×܆ס…Öæîö»‹fD© 9ò‹¯ Ái ÎE¾§#d|J¾VDB `p;WhŸÊ¸¤B¤q€IÜz[‹þ¸¶9¬Ø]‡›x=epAp ÷Ìzzí›=ÜFÒB–’gw#CbÚî×ïûñ›v×á&f$›™ZbwG¤ëéæ–·¢ïíZ»¿L‚"fÙvõûÈ4ØYCƒ›°Ìþ.ÉI©¬2tÃ'?ó‰IMÖ±b&}⯮£ò»kNfAÃ/¹tõ2ªR¼^é=ýÄËV7Üdw#¡pµÏŠEzí®ÃM$—,jté>OŽiw-Cñ{óÌŽ^¥Áà6Ò˜z‡3ÅÇp³5°:2¸õ›Ôc¹åæe‰‹?{.nØG(ŠÀ3ÿ|S‘¶:rª^ôés¤ª«øþŽÐ[¯­mÝvW12 ”ò•ë?—S2;fw-n±å…ÿÇ‚ºßpjp˜³Œ¯>÷«Sò¢éÇ¢·3ª=ú«V€:»k™HdŠû†p|pƒIÛã†r¯”Ì\+½ Ä²»·Ð¼)ÀÙÉ\÷j,·Ðï):: )’Ú{Œ›“.ìË !„BÎ@@ Q;‹plp“úŽå†B! R*’·³Ç7è?{!„BÈV‰àÌÎ:Üú¯žà¦¡B!4YIP@`Ûúá%”s o!„²×@Á·¡J”‹I;$B!„Ü€Tð·#âŠÙAW„B!ûÄw•à1nC”0MæAxB!ä ÿ¬Rbk›“àLÖË^õõÕ×_~{ê].ä¦IãÒ weLˆ7þó¶®(xå„‘ ƒnxkBSÙÁ³J1¸Eïd½zÂöìÞ|ø„c{<ÈïÉ äÚ]ÅðÚ²ûáõO?own¢P—Ù]ÅÈpnZžþSPóá(û#Õݰ—¤Á »Ë8ª=ÛzøC¿Ü±»·0-¡Q¯Ý•L(¤$öžœàèà& @Êiv×1bfÛtà Û]‡›X¬»± wY—Ýu 'j´gõ…÷-µ»W‘Ä,-:£Éî2F¢¢à“ë¡ÖÛ]‡›¤RpúWðåÚëc{í®Â]J³ì®`b€w• ŸÝUŒ‡¨Ùº*33¥Üî:ܤ§ç@ ¬³»Žá„¢U)ip¾ª¨Ø#3B@_ž±»Ž‘hl}§È3Ów¥îOv×âF¨—òFù»âü“Úí®åHÚØýW”ê9v×á ­^ÙRà½Ì·c$ܰÇí(,è?ývŠªÚúæ» âš ¥ª¨ ßß‘#6Žã1}ù9_/­8ùlÓîZÜb÷[Ϫ›xÈÑÇôæ—h…Wܲ0Ïî:Ü‚1A~{íÖn{ϯœpŽèqsú1V8?\"„Bhòëï<¶f' n!„BÄXTÍÎ:Ü$ƒB!„lÇ€)mÝíïèàØã†B!'èïq#6w(9<¸ n!„²$ñS1ˆ”¸«ô(/ŽªŽB!ÛHèqÃ]¥G" ô¿H“vH„B¹A|Ø"€=nGc€›»%B!4µIJƒªcp;’ÄèÄvˆB¡©Í)™Äé(žn‡ØUº«îñÅQ£kšGÏh÷{r;3Òʺ²3æ'¼Âc$¸ wwõöÙ]‡›HICv×0ÔHOOW€Rêš+=ØÍ4M[G"GM]Q£KïèÞ–3zÓ ³7Û´‚yEy'¾„K´1{‡qtpK\V‚ô_f"ÙÌéŸØ¹~Ûo¿è«[ „RÊ4BÀ Dé"D èjʦòÒóÌÏ^Ü3ѵ§|úÅZ,Œ»€GRÍ÷ò’O¾Ž´:þšªNÃ…Iª;>ìú<ÙáO>°aÓ3uô '‘Ìâ^OVØî:ަulýõ·7Ø]‡›Ä‚›Ýv5q‹…•-Õ\Š4€XRŠ4)E€Ô!1"RýÅG»®.›w•::¸%Î&•‡m°½z¦uêò[¿»aÇ}Ã×HUJP¥d)ÜÐTߺÒâU;1´Ôxæ+ºªì®ÃM ƒµfgÜp›Ýu §¾ù•Å?ð-J¼–噦)Tõ³7f¦ÍrôÆ Õ_jlÛªª:±»7¡D•v×p4´7}§Òjµ»7Q­ÓmÁMSSxFjÙö˜Ñ³Í´Bg'Ÿh@ˆÒ]ZtƵ³¦ŸSwÄX¸«ôèâ=mäðà–°¢êš§vÕ=¾·µóÃßHÉsãS¥Çb‘S«÷=õü¾¦—_ÏL›õtUÅ—Ö¢8惃RÐÓ3Òþú;JwW¯+z8(%Ôïõ+7ì#ÔèsÌßæpB‘–ÔÙŸ8ë«'ÿ~G¨}ß.ïº?þáǹYU½v×r$岿S?ª°*ì®ÃMnÝÓ¼;í²»Ž‘èìÙ‘¹ïÀ+«#ÑŽ‹¸0+“’„©ŠgÓŠ×]í÷æÃ4ï8–:,Gð$R­„øG2oÖç¶”MûØê ;î»Ã²Â§Sª6ÁJ¤Ç´Bç´wo=§sýÎ^oö3%…§>;-eÛÄ<„ÐdäK˶rfÌ;êç:(Ò×ëø±85JDeª³»7¡ŽþÂÅx”îÜû‚û/²Xäô¤Þ5©*ވŅQ™žRrÏòªkžI›H@¦cÙÃrtpè?)¡ÿX·£ñyrÌS—Ýrã†í÷|6iùúâ¹ß8«®é¥Õ‘hûÅ\˜•B²i‘hûUÕûžüV]ã‹ï¥¥”<=¯üóoy´tüðE!„&‰º¦—g¶unüdÌì½ààž8JÔf¯'ë¹âü“ŸËÏYܾnë]ÿ[^rEg4¸qBBDÈÌq)|„Ü@r´]¥ƒ­XpÝuM/o„êsOXðßÀc5õÏÌëèÙ~±iõ+¥HµXäÔî@õ©ïoº½Û«gþ« wùÓ³¦Ÿ½oüžB!„Æ[[çÆœú¯>ј®¥¾š•^þìüò/lH2Õ¨I;iñW¶óF€¢1/|œÜâ»JGÐã–lp›SvÑ®9eÝ޶ÞY½ïɇ"-3[.¥ÈŽÝ_©?ðê¥é©¥«¦k3Ʋv„BM ‚Üe]5ûŸyOJ‘’â+x¶¢ô‚W3ÒÊ;é阇#$óŽ·ÎãáèàFdüä„Ñ·#IñËæ_õ<<¿ïÀ«3Z;6|*fö\¨*Þ¹™óŽŒÅjB!d“~÷;^=Ó~ÎÑ“ñà–F¤¤’[†/rtpë?ÆMÂ89sÚÇ÷Ïœöñ»-¾¯«wwÖX·B¡‰7^¡ ` ¸)*ã©–¦Ú2ˆ¾Ó‡WP`ˆ1ÜÆ’¦¦ðÂÜå㹄B¹ŸPh€ ûÎ,òÁ !„Bh$„¢„ˆ”vÕàôà¦7„B9ƒa;ƒ›£q#”Ñ â6±h$ÅîÜDJéèß×d¦aø¤ŽtÔ)³†­!„‚„À¶]¥Nßú`R~ g¦.zÒ°ºß´»7IõzK€ì´ªêî€v«À+•Ž˜Wðys]ó·^·áuhm´åŒ27ŠöuÙ]°Z ÉîØ+\ó;èÝL5t»‹°$"6ãæðà&Ó%Wl¬G+Ý?Çbùv×á&ª’Rµv×1œ`dAÄl8Ëî:\ECJQcw#Q³¤Ëì ÿ7sìU7Gƒ¨(=ßÑ{OhÉuw¾#9^_x”4mŠ~y!Aû®žàèàF$¤K “2¸Y<°$'/}ŽÝu¸IWgoüËî:†޶OËÈð-Ót¯{8B=Ý=Œ±ˆãxÿXiéØ7ÏxùûE¾±_r*B{Ož|£¯PÓxïw§±|»Ëp“M!_­Zvßì®c¢õ_=¡À®õ;:¸@:Òaw!” %§äCêg‹‰£/°í$ou‚÷Ý&îè“á¦{Eê/ç[/î6ŸßbeLÉo§„„Ï*=‚ŒÉº«!„Bî# é–CüœÜÒ$c»žB!„Г”†€¨ŒÛÒCëØà¦2îµÿB!„² 4Ä®±Üܨé@d|_2B!„í„B£À‰´gHÇ7"eÄuÁB!„@H{NPprpK&)ÅaLB!äñ'±ÇíDBÀݤ!„r!ìq;Ìä½jB!„\ŒäرjÇ7ìqC!„IBºÀ–+m8ùÊ ™$`wã…€~ «³Gé ©¶Ø]ÂHèZjo ÐP€—D)!„ TsÍ•ÂL(í^Ör¤ú˜pò¶HÚnHÇ×é$`ÊþJ;)p¯ÊxS• =‰ÒÉ¿¤E’Ç_PüXæ¬|5ë\cwn¢PÍA¨0ç„êí®Ãm(uÇ«Sü…ÁG;óþõD·Tì®ÅMRýÅ=v×p4ulú«_Ûʳ»7 ‘ìý¶]iÝfB¡Ý@Îó0¸€f± H”¶Û]ËxQs6~ꓟÉ]I¼ÞáH}ðR]ÎøÝu §µscÙY_3˜[ä7ì®Å-j·ôð=ogÿOaîòN»kŽG˰8M¡Ù]‹«°>ŸÝ%Êà»ëp*c©v×`¡(!0‰”yP7‘ëvdpS¸(àª:i/0¯¨@—­*äªF1¸ÐŽºsÉÎó™ó3ùœ%¹ÜF(`rÏÛvW12†Ù«2³û̯•P n#´;Ľ7Ô7ì€v×r$ÓHÓéÿ;Ÿ—Û]‡›|ww¨±àM»ë° !ÝýÁmB92¸)‹€@@RbÙ] B ¦S³4âØ“»œ&UqþñždiίÓI(À”îxzˆ˜øùÁC¤,’„LÚÞ6„B¹›$¤l8³Ô‘Á 
Š1¸!„BÈ©$¡ÝCäÄž¨ä¸àÖÿH2yOL@!„»I…t€ªY,{"×ë¸à¦Y,T¡`pC!„3qEíBÅÄž à¸àF…(Á5µËîZB!„†")±€@p¢Ï,u\p#Š€.˜âg« „BÈÙ$n€)ÜðŒR„B¹!=DNì™¥Ž n0M‚Ç·!„BÈÑd|á„7G À«pî€L"e¦ÂxW—Œ“?z̾öå„(dÊ^¤w´]1šbw#T½©[t¶ÄœøÅÈ‘kxhBÈu$%]À!]áÜË%6ëtTp#¨$°†±@‹K4B:%µBUª™¦µÚ]ßX’='>ùòý]/Û]‡›PšÏR ì®bxÓ NÙ»ñ™}ß³»w)€¼ì¹Ž¾y¡ªx³ ‚õQ»+q('šú„lÔŽUÓºoØÅÓí®ÃMšMµW·»› …v)•‰<®(±NG7¦*a¦*O ÿÔMsbåråâÕ´‚’ÐZ¡ÐjK×€aw½Ç#/{a/ôÚ]{©þ¢Xª¿hR}Ñ@e¤ÎˆX©?þA­Ý…¸Ì4» †·ì†ûñ=©Ú¸ªöj` "EL½à– Hã×@=<¯›VÂEb"Ä%ŠÅLIÉ^¡(5LÓöLÆkšJÉ !ÊÀ.$Æ£TU|£ ¬\˜T¡ú°ËpaRJ™¼¾#ÕŽŸ”œ$¿¦Gzýf¤ï­”œÉÉàyñ½E¡ãC¤T€@ˆH˜°3K]q Ž©kmQŸçõˆß{¯¥©·HBž%B‚b±ó=‘èw<‘è%zÌXJ9wË!PG%%'{_¬¨®jA XŸÂ…IÒµ[ýÆ£#~ÏÁú”­ÕœÍ…yÔe¸0é–Ý>o[íß>’ؘô…ü;ë[^»ÿ¹yRrbX}ªÅÂziɨ©í½¢Ýûþ¹¸µóÃ\Æ£” “~¸ã¾/×5½<+ùõ?.Lºy÷Ÿ.ˆ]GýÒ+%'ûü§lýö»¯Hþ=0¬>µvÿ¿*wÕ=¶,iöraÒáÚB##%'\˜” “&ÞO‹…•@°~TŸOÉ˧³wWÆPË'·1Ò¶Ðпž]½»Gµk5ñ»1’yC‘fo$ÖájùDøžN,…±,Í0+õhì4O$úo8r¥'½$¤)û/Žìq;KSû,M]k.¼šeÍ¥B. ‚Ÿ®3~Ò,(­åªRíÖA|ѦÖwïöysžîêÝ}¥ZïÒyWþ‘s³„qCP¨.…ä$ÑK“è±’)9¢€¢èœR=J‰"ûÿà €ªøDÿÏa檉Z;?Ì­©ú—>oÎ7ò2{¶·Ôîîš‚œ%Î(>s¯¦¦ðä6ª‹äu$êK~``c‰:’§)TU“1¥ûš^¾IUý5½}{µÚýÏ–.wå÷1ó2£ªâ‰^²äçä×W¡zDU¼<Ñnbúàù,ösnä'^wÆ£tÝÖ»~­ëi[AJº¿ù bX}ùœÅ²U~ã1–ο—„(2±B ñ߹ߧþu¼·‰eÿŽŒÿ+l/)9©mø×ÜÞ¾½'+ÔÓ›1gsIÑ© Ûjÿv¶–é¼+ŸiøÆ¿»$+}öúò’Õ{Žöwa±°²cÏÃ÷²ä¦¯ij hïÞ’ÕØúÎi–)JO¾~Fñ™Û›ÛוU”žW3ÙÿÆÆC0Üä۽¤”jª¿pgiÑ;F -_pí¯¢±®„Ð×Z:ÖçÖ5½t¥,+'³òÆcþ¾Ðþ ˆY^²úžÁ+ïí«KÂ*ž7ó ’ IDATs7û¼9!Tn­~ð¼H¬ã‚õÛ~³¸¢ô‚_uto›ÛÜ÷Bh´¤ðÔ?L+Xy`íÖ;o‚eù}yoëZZG ¸ï"bÍš~Î=ù9‹:·T?pA$Ú¾ €ˆìŒ9ŸSvѦ-»ÿòé˜Ù³Ü£gn^:ï›vW¿ÛÉIw æ4”yôô¶¦ö÷¯Š™Ý€TJ¡F¢íŸ7ÇPŸHúÉ Õ¥É ç†âód›ÓòOùOfú¬@"4ÇŒÝçÍ5)QdÔèÖ)Õ„¦úãI^äšñ(­©ö»©þ¢7RÓ‹ö´wm-jëÚô•’¢Ó~âÑҀɊ”‚hªŸ+TŒG©i…4Mõ3UñŠäu(Tñžÿ¢º–ÆÓ,QTÅÃ'ó¯¾PCF4ÖuVNÖü?„£ms¶T?xÎòª«J$Úáõûòb Õeâ}L¼g‰÷9ñÞçŸt@Sý/'^;Ó ª„(Ò«gZŒGi4Öåñysú¿@K*% ô–îÚûØ Pý꼬EÆÌî\J5ÑÚùᵩe?Ô²æ÷&¿‡º–j%~·Qà h)¾ƒ “$Ö‘ü»3ztžaij OLcÜPï½/ý¸"R* 㹄ó|*e™G@愈_ âm«¤¤EPÚbij‡´ù{W·d’nxô¨€§=†5 QE¹XH¹8Iµ¬ˆ$tPh5Óµ}’nwÍG%¥ÖÔúîÌP´u†<#Å—!D ­Xpíê_ZÔÒ±îÒ´”éotjV•¯ªÆºÎPïë1£ç£Ë«®¾Z×ÒX XŸf±P¥a´¶®?*-ZucqÁÉM-¥¥L{ˆ±ˆ²i×ý?Úßüzô@ÄyšQ´jÍŽ=¬þpÇ}we¦—ÿ}NÙ§Ö^œwâÝ%E§×ÕÔ?}¢içÏ/ÿâÍ5õO}÷¾^œžZ²…R­}Ùü«îöhiÌdáZÆ"Êæ]º©®ñ¥Ê@¨~9À’ÊÿúŸ-Õ܉v¤Õ5¾|®×“µ«¤ð´5»ê» óUñMÊÿd½Á½%Ý}µe¦ºº±õÕ©36&¯;+cvŸGÏxkÓîûï÷{󞯪øÒ3 }ÞÜW^þP¸Ùߨ¹º¢ôüïtª+ZÞ¼!?gñœ3O\tÕšêçýëøpwÝãŸhl{ç¼`äÀºp¤muå¬Ïü ±õÝS£FWYíþg¥aõÍ]4çë·n«ý74´¼U6kú9u6½äÊëɬ¯˜qᆚú§Ôž¾º³•eŸ¾c׾Ǿrñ÷¯nj}¯¤¹cí¥iþioBû/"„Æ*JÏÿÍþ–7. 
†›^ËH+k«ÙÿÌ­hÌïÍ}';³rÓ¶÷¿)ËIõ=W5ûÒCÎN—R.ŒRUñ†Ë¦¼Z¡šhhy{cÑ¥¶ßý³ìŒ9Z,’Þ¬»€0¿'÷•E•ßxlÃö{n‚ehª¿®(oÅ¿[ß½R–ê/|aÑܯ¿°i×/‰Ä:ϦD ªŠoﲪ«îÛ¼ëO—™V° ¥…§Ý;£øÌ 98Û„(}å%çná<¶mÃö{b,¢pao­yð.¬üœ¥wvõî¾ 'kþs³¦Ÿ½ûƒ-¿º³¼äÜŸïmü÷(UúüÞ¼ÒSKkÛº6:/{á]›wÝÿà œ¢P½iYÕUwüÿöî:<ª+møû^ÍH&îF€ÁµB¡F‹”B»u£T)•­oéö«ûJ½ÔmëÔ¶¥»ÔhâNˆÏ$“™É¸\;ß™tÓ ¡0p~ÏÃó™sÏ=÷ž;3ï=v7—¿r­¢D2 !ܘ¡7ÞÝ}ß*Q0"vd ’–4ª^¯K®B¸ÊºÏî­múoE~ÖiUÔ}úH×&C /ºËî\?ÈåÙyËð-¹SŸij[57¶͸Ò[ooj]‘Ûܶê/ˆ¬WQÅܱCoº°¹muA›kãu€LT¯±­1øš/†à•3£()¨ª©HHrg+$@ZB#\§²h—9ÖþG-ï±¿ŽšÀ­ÕðÐ9Ãã[^’“8YéšÜ0—•d…0X«2L…"ðÕ*Ãq•£Õâì(›ÆqºÖ¢Ü™7k³À±Z%5iT•Û[qy^ÆÉ7—/¾ÂîX›É0B[RâPgSÛª_7”=ÿ³1ïÃŒäq›ÝÞªDDΕv\#Ï”@¨E»¥âÕ»!¼¢D‹D)°Û8Ds±Âð;nk´ÿœÛÔ¶rQUý—2 "£² ¯†#Î"­`]oNÈ ˜rWx|Õ3ÀÖÎ2j”P¤]Ø\þÊ]„¨‚¢D D9`’ä`a¢¹ø3A0IJrp $JÊk?9T—§Â’jáþOõŽAo×i§³§%y2?ëÔJ—·|&"«šòü(éuɆáÜ-ŽÕ9’›1õ9I¾YÕðÕÞ´¾tà¼w QP–à ‹¦?¥áMrgÛ —Š’0!ªãôå=÷Ï2‚:vØ-/9ÝÛ?¬iü檭¯Ïçyc#ª,ë.oyË M)¶á퉖î5[Ÿº5¶)r¬FØXöâE¢äJˆªçX]M0Ôš§6šòý­+Á`ØQ,ËáÁ[+ßXDˆª †ZÓàXÜ0j¿µâµœpÔ}’Í<ðeQòÛôº”/†]ôùƲyüµ©L°Õ¹!Õå-£Ó$n †[Çè4‰«œû¥Vck›þëDæë[~8[§±­(8ïã®Àœe„ç}ÁÆÌoåEª*-ë¾sŽÕª™)²·¯»nÍ–'¯HµxVà‚§Ý2fèÍ÷"2dõæÇþ5¨àO×õiÁuÛÿù¾?ÐøµªÊÖâܳJJÚAˆ Ã?ç 4f¸½—»<+CaÇÙc‡ýùâÖöõ™vçºË®-ÉQÑ3)=yì“íˆ6׿ɹSß=\'ýP"@PU¥ôm•oœ#Jþ§ÛÁqz…eø–Q% î­múO©Ë[9Í O[íñU•Ö>nµP…‚¬éÏ%Y‡¸ì?å¢ ×–äˆè™ayuÃWçm,{á…¬´ã"- (Ñ$£>cSWж±ì…K$9˜Ï±ºfBˆf·}#Kò³N«ÉN?ñŽúæØÛ×þ5;ýÄ{Y†WÞœçg^ek„ì&‚Lì»ecÙ‹Jr`ÇêQµ‘¨ÛÀ0|+Ãð*ÄÖ EÚU¢Ø\žò©€¨è4‰ÕÜ> %`nJ² ù5+í¸FD†á9½¬Ó&;=þZCnÆÔµ[+^¿¸¾åÇ`^——yJu(âü¨ªá«›ÚVýd5n†[“8FS˱Z‘…ûOÙõ-?.Òh,¿U±ÊJD ¿õ„èµÉ☡7¾ÙßÜZùÆ“5 _ïÀ Û9ÆTuÚ¤-«UÎØ,Jþ<ÂÚ° ¯6¶þ’Y×üÝ}e•ªÊ6UyBd‹Ù˜ÛëâFBTT‰œµLF~AHب¨Ò~¯fðGC 6M(ü§X+š :¿«¼ÐÙŠVNüQe»Ìq•Á¸ÿ:&·îbkÅ­€uŒª ‚(3ª:eeà˜£ƒÈžÕíˆùíÃqºrD†p¬Vá8}ƒ,ÉN?ᳪú/^)Ì9sõÙVÓøÍˆLÈdÌ~Gà"Ïé«ôºäH’eð3µKþŠˆrfʤçd}[*^}€ÂiÓR½.å—ÇêËQ–¢`SëŠÒ_õ¨p¬¦q`þÜŸ[ijsnºÆã«ž1zÈo)eðæòWþÎqÚÚAyç¾íöV¥ðœ¾‘…ôä±M­íë›+^}á4‚yYaÎô;ªßÕæÜ8›5A¯±вg¼UYÿù ;ž}‚ãô5ã†ýù¹£¡Y¾/ˆ,pœ¾’çÄ®ãDd‘ç ‚ÈˆzmòÏÙé'îjwo6µ®ÌÎË<¹ÞáÚl«nüæ ª†axWfÊÄ7Ì yîÕï´~û?ÎL=îy‹©pqEýg"0‘¬ÔãŸâ9½Ìqºr†á‰AŸúéŽêwDdD&iUNúIµÁ°ãm‡kóY²ÉÑ –Šâ¼³×mÞ¹xðŽêwEdCŠ.~Èj* Þ8ô€è´ÉÛœ»±k,Sì®àœ ")ΛýÓ†Ïÿ“aXo’µ¤£©mUVzò¸ošÚVXýÁ¦<„Î!6yƒÇ_3³Á¾¼exE”‚…]Ê*‹©pgcëÏ#b{•ÝÞ*sªm„»s<ÔãúŒæpÔ•Â0œÃ Kó(ª”ÚØº"G+X‚«©Ü^õÖY¸à¼U«U»MÖùÝ —N›ÓZ¯¿öª‚ìiöŒ úŒFž3¼ìò켄² HJâðº6×æÜªú¯J5‚ÉŽºm ÃyÓ“Æ,k°ÿT€„AÞ‰º3U¢lf ¦ñ›7_@€Ñi“\ ÷·´¯î57ôië¼þº¹Õ _¹aû™6óÀ7e%Ú5V ¡ÖdD6¶åÅŒª×&ÿÐ`ÿi¡Ã½eªJ9ˆ xãF „M±ûŽaxYˆMx9r¡„´6¸YeÑ®°l«Ì±ÁÃ]²Cg ¹ÿYŒQæð…¨ 4îBH#ˆR>(±5äö‹>¹œ0 Õk¿èòô¶f[÷u»: >Âë¯;çôö‘%×}Ò­eî·Y|]Ûôœ%Øõÿ®¿cùc×>»§ïmÖh÷™©]÷\/¬ûÒ%ˆ,Ô5/+ðøj‡*ª˜ J¾Ò Ãï¼³kðn÷ýôÇùЄÂB•´‡ôÚ&]8:̈ô» ô?½­¿Ö}7Y 3­+²]ž²±‘¨·t|é­÷óœAé«n~?«´k6o÷´²fºï^7=gö63µkV[o³ƒY…šòÚg#²b(ì8sHÑÅ·Ø,ƒ|½Í"îB8:žQ”á!½ö¡ƒÎK”28Y¹]Òj^T8ö Å&DÁ»>¥Ó$¹ò2O®ÇØŒîšÆ¥EŠ* E93vî¬ù`œÅTXŸdâÜTöâ6iëÐÿ´£ú½É¡p{©Àë‡ ¸äëªú/ÆØÌƒjRl¥ÎíUïL G\CµëÎDsq™½}í,BTFc-/-¾üÛò]ŸŒeYMdPþÜ­*QpÓÎçˆR €çôM©¶QË3S'Ú·W½=9i/M4—dÒXÝøï¹DUôYiÇ/IOÛ¶µâõSäÎZnÐ¥F›ÚV¥5·­šCˆ¢Ñ–Šáƒæ/õôöŸGEDO®Jdݘ! 
_ml]‘ÝæÜtE“‘<~Iqã$)‹JWH<÷ÀvcéC‘[U–iuÚå[—¡¶é¿Ç—œ·¬ûg`óÎųF ¾úK·§ÂÔÒ¾vDIáù¿4ØÊoq¬]0aø·µ¹6§4µþ2‘ §Ø†¯×&ûÛœ‡pñŠûòì6çÆ3€)Î;ûí]K§‰r0—A.P”;ó}Qôiìík'|Í— ²¤¢nÉÐ_Í úŒõƒ Î]co_—ÖÔ¶jÏêÚ†š¿¤¬æƒBá¶Q}úº’ÂóW6ÚɉˆnË üs·(ªˆ›ËÏ¥`ƒ\ (gúæ„<ÿ®Æ¥ƒ#¢×æõ×^:qÄÝWKrˆ+«ù×LI æhµ¶í¥ÅW|×_ßËÚ`h>\ÖiþÛù+¸HH/¨ìÿ!QCŒLÑÀ­Ÿôwà¶7]?HŠófo>Ò›²ÚÝÛ,-íkF |BG~Öé;u·è‘¸íMײ¾@C~aö«úŒ#n fOÑÃW×9F% —v¦Cݲv¤n½/lÜý5E™¦Ö_²šÚV] „ðc†Ýü`×,j,]×RWÝßëZö§+ïîKÆt¥ï¾ K× AWÝ—êÚ¾k=…îV~X»ío·¢jTU¶f¥÷L^æÉõç»ÊÕ_³J´À `÷ó°ûÍô†ÏÍ%ÿð´¤ÑÏą̊ØýütåÓ³¾{[:§{þ=¯ÞòèYÇ{».¶T¼>%n=ŽU¯LÛF•,ø {+qÖ) ÜTÏÀí˜ë*=Z ²¤¤ð ‡»û#9q˜'9q؇»G:D–ägžZõ‡»,ûJ+X¤¡.ýõp—ãHÐÛ]÷×XFP3S'5ië‹6ëàŽ®›.D–°ÝÒaÿwí±žûì­…d·JCëöHBâú))«SS“FÅÕáZÁ"i-ôùÊ{dWç‘%±çfSq„nýURÈGÅÂÃ]êwúkáe ¯ê§¼¨þáìÏÌXIÁJr¸?ó¤ Iè—|T’)„£ãû#/ª_Äõ ב‚nýDâ¹Ïow9¨ßS<èña¢Àmfµ¦?ÊCõŸ~\D['£¨Åý”Õ?œp0]v$$exˆ:ha@ 7G‰nýDâ¹£v*ü±Naٰ²ôËæ(% |›(ðîrPý+¤×¾v¸Ë@Q‡³÷$EQEQÔ‘€nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚nEQEQq‚ëú‚Œ¬ÎÑ+áÈá,EQEQC€B°ëÏß7"!@ÃS.Š¢(Š¢(ª'@íŠÏ8B™1ôO‘ÐnSŠ¢(Š¢¨#•_„B›Ø(Š¢(Š¢âme£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠ4p£(Š¢(ŠŠÜ½Ã‘föŒ4 œÜy)ÔeNõyBH}äw´AÄ  “ø‚"î2 ¢vØÄ”—3òŒØùÕîôìªØäú¿þÈëhˆƒ’³t§LÚ$„‰Däºv{d…‘!„¨‡hŸÉÐYÖª²Æ D` Çq'2 3EEQªeY^NYË5 ý˜e%!$ÔùÅ-D´étº), L ® „l9ÔûËsò-¼FÇ|n:šw}å¨-[qðyÅ7Dä`O¿q2!„HÞxà–,À˜F £8P6¯ïœª~™SüÐÀ­D4Á‚O\•Ëå¼Ö {Vu¯àˆÜ€<&)ù¼Kúá‹à©…«ý‘ÏÑ5Iiº…“ÎÈšyÂY9ú”LƒHT€€7Zºá'ûô ?Ú7 â„pï[§ÓÝ`2™&÷G^Á`0æÀ1ûcŽˆ‚ î„qþþÈ«ÝᢿC€ˆ|á€Üû®¾þ⑉6«š“<Üe¢œÑšœ|ÎÃ0κr]Ï÷Ö~ü<ïj¨<à¼éæ!¢BÈ^¿¼@¿¯écÛð`÷¾6§&ð8u´o˜‘ÊfžlcÈMª¹°˜‰ÀAßIQ½CD=°GÐuðcíÎŽU‹ÿo“­¾Üól!„^ËOc9¼ï¼›J¿|Õx&‚À-ÞÅê’#„øö!í!¯KBˆ„ˆÏy<ž"EQ¾"„t "ʲ<›eÙ;Íf³‰DN‡Ã¸Å£#­~{JÏL¹ò‚‹çLñxü¤©©U?aT¿ %¡ºhÐÇþðÒ=m!Ÿ{Y÷·äpH€îõ8¦7Däsµðr’-}¥i⦦°ò`,=“(Àœ\3g²Ó‘maÛš¢ê׎|Ð} ™€8f€‘9[Ë’A£Íh1r3AF[ŸW 5!õBˆ»—2 šÀ,˜hÁ" ÏpNQ 0°¿V‡Ôg!{:=g¥qE ,‰Ü¶S²‡Uppúƒ8EÇDdmiúÅ–$ÁÚWšgd‹Ë¾?–ž1Z„ÙI†¹%ã’Ò±`H¢££-üµÇy¿ûu€ŽÎÎ3Í Ì Â!‹ÖÀ! 
bá0«7ä—7¶Ö^ „¸z)Ó즊GÚÍ<çsG#™y k[êÿì-}Bˆ‚ˆ@¸ûŽ•¿¯Þæ¾ÃhTžK@D#!ä¨ëbFDdYöŽãûJ£(Ê6I’î‹¥gX–ɲì¹Z­6Q§ÓµK’ô¢(ÿ"„ˆÝò)ÂD¬Ñh, Ã0@´Z­_Q”²,¿@qöR¦Áf¡V«-FDNUÕˆF£Y'Šâ?{Kß!d¬êö7AÄ•‘H$l6›DL9óϲ³3^IIµÙúzßÞÚ^ÖÒÔº ózH°$Ì,ÈË>÷¸Çeò<#Ç u:ìŽo[ZÚß%„D»¶aDAQÎ\V;hä˜aVƒ^ϰÂÈÑCý_pSuuÝ „GÏý!â ÁCŠŽ0r -ÑÂ9îèÀë++kÿAiß—caܬ٧]’™•üõéçüif>tŽÇ D<ö¼tº"c nɵ IDATbfŽ·µ±RQ¢Õ„½6•!"f •@TUU­ïh®}fïg`ßÓ`ºóï+æwdk`AÕ0 ^»]Îèla+60ž“ÎL<'Q"p*´ñó¥C]ø‘]€ˆwwÝÍ¥jÙQ7æ33³2¾‰pX jš2¿:ë‰Z2o$„´tí3Cƒsg§±7Ý”Ç tI„+Ô±BUL²† EÄ!m}Œ#ËÞl”NöÈð[„oG›ñ-8Æ>ü­Éš‚óoºÛu Ñ²ªFǪ/.Úàèü²ÈÈOx`Ü©™Ç?=[ñ{¢œªL° Ùë´ß°òß c×ÀbÑ–N»¸hVɘ$_À'raŸÄ :V5Û´BSµoæg¯”Œ]M]û´&if;9ã–WkEE_G”KË6 ­ 3>[\1$vô96¢·H4˜ˆFT&QBpÔvÃpW˜˜˜¸[]2 £"¢âp8¼“dAxÀ`0œ`0TB§ª*²,« …B ü~ÿDD¼«ë†‰eÙ¡f³y¶N§ó*ŠÂ©ªÊ2 £2 £Eq†ÇãéªËÆne™i0n³X,ZEQ8ŽãI’NïèèèúL7ïç!–h4.‰èE9æÆ%§$ÜûÀm&`~ß“ ×jUk¢Y¾úŠÛƒÃF ŠòþoƬSN:ÿâÙj‡Ûˉ’„ÉI‰šeÿùùÚÞû|"ÞÙu­ÓéÏ¿öâÙ'Mäõtø8¯ÇÇêôZ599Q¨©®;ó©Ç_ˆ7uŸ —’’xæ3¦ÞqÃ-WꀸÚ]\N^–¦¹¹õÔ§}±¤çõÐD´Ž7âÞs/˜¥yü¡gëìÍŽÅðè!8uG´´¥w;í¢$Df·ÖJIаe?|\ tËiÅÃï+>nFNrá0-/hˆ³n§\±âß^§{\ ‡ìÚ–eÙŽ›q[æqlBRºFЙ˱¤£¥V©Û¸„ßݾSþíK>ª‚!E £Ÿ*áÙOí*kª¿$kἫ²™'%2Ò­eR 1‚?(*(Z2õá¼5Uƒãß—Àó]y¥iQü´UáßjTU¨1r`¶ðdÒ}Å|Ê\ÎåÊŸàˆ˜sj^ÿØ ï©åÍ^òmkT­``fÜ]ÈÝ’Ïå-ªR.€¿÷u0„6D¼²k°ò Ú¶TWkxÙÛm5v½ ‰ŠÞdÕŽ¹ô®aìêÿ´°nGd5€É¦=礹y“‡OL‘Þ|tK°½5øQ@²&i¦^póP›5Y;fÉ ;/€v啘¬×,k~ø´v¹*“AÚô&nÒ¹ JÒæ\38ûíÇ·^O bVé¤Ô.¾³”ùð;äšmÿñ:ÃÕiy gξfà€ÙWç¼ûÔ¶‹à©ý9@KªîüãÎÌ6®[ÖŒñëÍEYær¹~«KBˆžeÙÑ6›…Bœ¢(¿°,;×h4NÑëõ’Û퉢øDY–j³Ù’X–ÕÑÑ1žîÊ‹eY1~¿ÿ¨&„$°,;Ñjµ¦[­Ö,§Óy<Ð9¹@«ÕÞh³Ù·Û-G"‘ÿBªX–=Ãjµ´Z­Y.—ë’®ô{‚ˆÓY–µ²,;P¯×OÑjµZ¯×ë“$é¿ý|úŽx-ÍŽïî½ó‘ßf/G%Yg±˜G?ôÄÂKþÃ;Z¿˜¬¦³/¼äìS¦9UºçŽÇÂu?D%9’žž<åžûÿœœ‘‘2âáž™Ot啚š$ýwéOšW_|÷gQV«õz>Ád2O¸í/×eÜuÏÂÌ;þüÀ%ð"¦8yüÍ>µˆ}ôþÊëÖmYfon+/(ÊvËmóÝz×µY¹ã±ËºÒ÷%77kþW—þþ»K‚›6nŽÉ™Ô’Ûkw„ÿ7˜Ÿay!gøq†”ü’PÙ²#ˆX?jÊs§Þø„¾­r3»ó‡½b$Lr†g˜u÷ËÆŸß|ì!^§û‹/ïÌ…Å!SÏ5hŒfqû÷vÝä5H+®=ó¶ç`ÍGÏÌ_·ä¥6ø"–?&åº÷øKï<јªnþ÷>okI8RsÊuâº%/^µîóWìðï=ŽA›`%ဇ‹x$6 »u_»Ý÷æ˜ÜbóÃ]#"?ÐÈ<µ0Öy~Ôªþ»5ï ¢í$^xQ¸l³D~t‘ëQwŶùdQ¥´xÉ( ÿu›z."~Ö½MjÃêG„u±ôYÏÔ*ï|0Š'Oï’ÏDÄç!r‘ž¹âÖBÎøv“B–:”Ç‚2ù<–~Å;-øÎû#yå•&y:"¾H‰ìᘎÉýÁˆ³GºþFD.=/á‰ihêwz`å×ßzÛ#o"¢µd|Ò%SæäŸ¿s=l_íX@©ŠmóñÏìxåöç&i6üh?—t¿Ó&„HŽÆà']Mýˆ˜þÍ;ÕïÝþÜ$øòµÊÓñ„)5WùŒ+$üòeÙøsËÓ!¿üq,ýòŸ–Ôÿëöç&*ÿý`×±ë`ŸZÍ´nÆÄÓ³Î+™(¾tÏF¿£9øv¿žÀ#H, ý]] ‚ð¨ÙlÖŠ¢@࿊¢¼†ˆf­V{™Éd ¶··c(ZÐÕ‚ˆ¹ÝîWÓÒÒ´¡Ph"~ÒcÉ!Y’¤Ocݘ€ˆ©^¯÷ýôôtà8îDü;!$ÊqÜ¥‹ÅH8þ»,ËÄÒÿàóù>LOOW|>ßiˆøüÞ¾Ð5Í<³Ù\Àó|TUUÆãñÈápx1!¤¼ÿÏâ‘­µÕñ[k"²EEyÏ¿æÝ®êzxÿ½Ï¿kiq,FÄ„‰Ç¹âò+ÏÞpÍÝÌß­XHÙÛæƒGîûû«ï/yYÿÍWßψÕïoÝeªªÊµµŸBVÄÒ'¿ôì[|øùbLKK›‚ˆ#„„rs³/¾þ¦Ë-Ÿò5YúíÏxÝÞ÷bé¿ï%üåkÊ+/ýk*">Kñöv,ˆ8tö9gÌŠŠ2Y»vóBÈDvhÏà‘ÉU_y‹«¾ò·Ö–eÇO>ûÆÌ’±¥»)Ú¶kû“ˆˆ¶œ7NºäŽ„ÚußúÏíiÙõ:ˆ-e«gt4VÏŸrÕý²«±êfD\×}‰wSl]úî³ðhv­ûáD_{ã-'^~Od×úï/Cį !2ÇqS Æ2Á’ž'/ýÛÍUmÕ[֊͓1zß óEê7¯˜ˆÿí>”¢“њĚ’3Å1sLó4×Líh©S’rŠVºk^'„Ô̹¢M2ݤkáŠóÒ˜IfÉË ÊöºúdìG=w‚…Õm÷«ºúù±û‡œRWƯVv¨ Sl¨€¢=íƒÒÔ.‘f¿lª†a!¶î•‰'ÃG™˜Ð÷N%R€EÄ™ˆ8FTúÕŽ–ru ɇðP`IÔ^:iZæ f›Fýö_5emÇ! 
ä ³éZvùuí-¡_º‚6BHC[cà³ò ΄!ã“ P¸§}Bìo´!ಛ†3€Á( /j o[ÙèvŒn­8Û[BBJ¦€¤}9ƒ™;cÔä´»æ\7Xyÿo;HÍ6ç}û:îæhÀ²ìEz½þ$Žã¯×[.Šâ£]u©Õjõ’$i%IZÕ}Ü !¤I’¤O"‘H‚N§3ÀÞ?Óm„ZY–9–ebuÉ0Ìp­V …B!EQÔnu9N–e‡,ËŽãöá3­ªª×ï÷{|>_TQ611FãŽãÎ:¨çl6ÛÓgŸ657?GyáŸoTíª®{$vCž3bÔP]]]£¦±¾quWÐ@i©®jøhÅò5 SN9NÏqÜÞê·ÝïõÕ´Ù|FfÊoõk±&Œ5vxøûe+B¾ŸÜ­~'ÔÕ6µVUìÒæ0ÐGý""3ppÑÍ^*Šdí~œŽ¸†ˆ#Fã|“Éu8AQu›‘Éó¼VQFQ”í=·U¥R’$Y†çùðýžö¥ªªKUÕÁ ÃtÕ¥O«Õ¦0 £šL&«Édº»ç6„ Çq¹ÀY_ùmkåæ“ó³KJ!¬ìk?„’^<²!ÔÑ–m´¥tÎv07À˜”.]ŽÄôA£ŸÍ<Ò–„Æí«5¦”ì0tÖkCÙï¬Û´ü’ºMË`L°¥ 5&gN1ý²Ü3o{ŽùìþKîCÄK÷69©/4pƒØàP ÞsO‹T˰®ƒ<Ò}À0`y”€ ½.¬È„¢v¯‹ÊQ"ÙÎ?Y4 ƒA˜ZUÅ/ª»}@_€€ J,=u ¢¥h˜uÑ97”0Ÿ/®ÀêÞG7°˜Žc…€¢@oSUU!„å€ Ëìõ:PU"ª’Šˆ€ÐÙúÍó Ѱ̬ü¦I ¥^ºWj!’÷z âð±SÓwÓòñ³;˜?ÛŸúä¯÷V¦£"š4ͽ‹…éèèÀH$òxîNþ·ªyokÏøß2:ûò=)uËQUU ªªª½v•ÅZÿöú™Ž ˆ€ªxžO1g }$)€e{Îá肈 #G ýëÝ÷ÞÄ=óô«¸iýæ'»w?±,Ëò<Šª¢¤J½}g«²¢ª<Ë1,‹{ÿÎVTQ‘Dd:ë—xމF%ü쓯‰×ë÷ô¶]$!ÐGÏVÉÐâsÏš3_ñËZẛ.› s»ÞR2P“’–$]~åù³¶o-Ÿ¹ìÛå `ëÞÊy40%g\9bæåÔpìøá£_CNûkÝÞN¶¤çéB^7„<®Þ‚&·ßiÓŽät–¤=öz(bÔ X^£S@‡ˆúÜ'&h ʨYó%è æ~G•Ű¹·÷ºÄ>¯MÝ^ZˆJáÀÓi…ÃF ™znrã¶_‡Â^nîûrÌnˆˆ…zæÏ7ç±éß»Tø¥ƒ¼+ÅÆ6t +ÐP ‘I‰ —¥eÀ—ÝßOÕ€l²u!UmŠ(U°ÿ:ÚEUNäõIªç—r>ô½b|ŸãÛ¨‡ˆ˜’c¼ùŒK‹2Êֶö5Ž÷¥°´¼{ETíuð )¼-E7¸gz‹¶Ð–¡çÛ‚ª«%TqÅðú<¢¤3p +ek@_cŸ¢}¼ˆh0Üöà…·c¿z½’ݰÜþdÈ/qå‰[ÇÝd2™²#‘ ‡ÃB~葤Y’¤° <˲»Õ%˲ù<Ï ²,I’ö{!įÕj# Ãh !‘Häè5xƒ=Ôe_dYÞ"Ëò\ŽãeÙÔýÝ>Þåæf.œÝŹ×o%?}¿âÓ@ ò»ÀUQ”æŠòšÈŒÙ§r)ÉÉ{no0òrr3´õuMÐP×R¶¿û'„GŽdX4H’Ô±òçµ—ÀnË;ÅôZ¿’$s}ø¥¬Õhw{_VPĨHU=fÆ-#â¨!SϽ hܩҗ_çv7T=Ücö¦Û×ÞÉJÀélé½da6X’ùP‡“=}µ†uCüþô†ÅHPî×ÿîsÕU¼‡Èì6ŽMQûùÄ&BH?oÛµ}¬5«¬…4p;PZ&ŸšÌœ<8¥§¶(Mauq/É×y‰x}#fiÈ©ˆøVW×"ZÇ›qö$+ë_Ø"1°ßƒ !ê¶|½G7-……ùôö(y¯{šØš2,|phpW:!ù´¼Aéù»Öµ¸íáÞ¨m¬ÞÖ!žyÙ)1]?_ïZ–-Jç“ä{ã¡-,Ôío!$k€©¬z{Ǥ‘'¤Ac¹ïtoGøw º]}>2Κ¢?ÿ´ òS·­qÀ¦ŸÛþòËKö·,ñ '%$$œ¡Õj£‡£U–å{IÖFE‹Å"±,{""ftM*е֫Õjý.—‹¨KBÈŽh4z¢^¯Q§É²ü»1L±ºäúªËØû‰¤Çº}ˆˆ‚ LAŠD"ª¢(ÇÔ#ÿAÞ³¦Ÿ8*zÍe·¶74´<ßK²Æ-›Ë¢¾ãZ9#3ýDÌêZrF)=ÿ„Éü÷Ýó«(Ê ÂÛ7¬Ý2õÔÓO‚M›Ê¦9Ûœ¯uŸ­½·ú­ªØõLUÅ®ÝVÕ€ü…·\yG¢Õ¢ÿ½/>u;ÝK¡ï.¹£"š2]tâ•…Uï= ÍêeOÇ\¯ÿ_Ï…OV#„ÅÙzö³/Ú”‹ï(äÕˆ*-Np‰@ƘqÖEœí{§Â¬÷’Ï!uR–²€úÒsuJé߆ðPá‡JØ1Íaum œ"0ÙƒÌÄõopØ,}´BDcÁPË­Ó/ ~ñj…Öeצfç§eÿïyéA4 ‹j|ŸþúmÓeg_5P•"Êb“M·D•¥`ˆeæ¬ùÅIekLõ¶Ž¯!5R–æjÿâ¥oWœÿרRã»6k@ÂHC\‰(!K’•UdšØTíû'ô:V ±p˜õ¬±'§‡ž¼a5Ïñ¬93ßtGÏtîÖðšpÅ£"4Í­‹Eõz½ZEQjyž¿R„ßÒ(Š€W¢ÑèG@àJ‹Å¢ªªº˜ã¸O@Òjµ³ÌfsJ8Æp8ü-Ù‡7{#ŠâbÇ3:99™EñjAF¨ªºFQ” ÏóY‚ LEñYXÝG‚ ¼¤ÑhÊE)S¥…eY£F£9U¯×ày>âr¹B°³í:ˆ¨/QrÛ‚[æÁsÿx][ߨ\“——3/?ÿ·aaàõú£ðZEyÕ½ÿÅÕ·ß}½…_NIIù4ŠÃG•Ì\xË•©×oÅu«·.;Й¹••»^}éù·Ç?÷ò£ÌÎWþºbcib²y¯#HMMÎ*2pRÙŽŠ qV¤ï…_ëZ嶃²ã@ÊoLÉY—š5/Ë^¹ wmøá;¤jDì¹Ð²½­f[uÐi/)œpZ~ÀÝv-"¾ ²ÆdšR0æ”SSòKÂË_{ صì¶×|¶í»ïyÖ•¤i线`ç‚æÐÙrj€Á`ï«þ8þ|E WBç’A>ì\¸$£d̵y#OŒlþú-ÎßÞrÀϼ:¦7|ŠMNP¼(‹.Ê‚)ÝßT ཕ’7…Õ·ž©ƒáWf3C_Æ'UÉB0²²ƒàKõJy}¨×Öº}B)ËÔá»·–Áåw²ªOb&¶ˆd²¤XxT6{‰î¡5 hàv( 6!%Å”¨‘ŽŸ™£?3礞 ÞÿÛ¶`ÀëYÜÑ~ûÛ÷ª‡O™›7üšG%¶Ôú¨*ÌBS´r“—¾[Såh ¼t !„”ÛÒuo¿öà–ys®D¦ó&v8"“eYc‚ ÔUxôŸ¼Xž}n`3Û´:ŽcÔs æ`zÏ •>ýW¯WਠܠX§Ó¥±,+ Á`0ìöÐ{·ÛŒF£¯ªªúž×ëe2™F¤¤¤X$IZ ª* ‚‰D"èóùjdYî­5gŸBª8Ž{Óét^e±XT“É4^Q”TU–e•h4ª÷z½yÐwàV Óé2Fc²¢(SE–e‘ã¸!ÛÛÛ!‰˜˜^T:>½¨ôãîi\ÕAWCåyí»Êž^ñöc/œ²àQNÐ'\R³æ?sdIT³‡NЙ}òó[i5ÛŸ"±EÐ÷—è÷³ã»ÎNÉR2ýÖg²ÖùÚ³î† IŽŠDg2£ÁšjÝñÝ÷AŠQ“9tü¼¬!ôí5ÛÅÌ!ãĬ!ãy[n1?æìkyWc5W±âßë¡ïïð½:Ö7øw»â_æTèc%ù¶hg7!ăˆ×ÿ½®úW³2#WÇH,Ô…TÑ!â²ú°úB÷uµ¼’">X©ú£ ìÖ?ÿª­’è—¿Ïæ0yU@ÜR 7h’§cÂØ£*x%ÜÉ ôù¨£>Êî>}mÔêá€.àcn\Þêßþ«C}\W´ë:ð!â ß¼Usåʯ›ÎJÉÐK ‹ØÞ\%zÜÑïÛ›‚Ïu›¹‘$ò|™_’w_ïÇçŽ^¾wC4V~7žÑe¿ˆ[Zêü7 Z6;%C&@ˆ§=!¿\ÎÜÓµ±µ>øëÅËõ} ƒAi¿ÇUÅ‹P(ä‡Ã}Ö¥¢(]uéGÄ…WÙÇɈˆ^¯WTåGY–Ÿí±¾šÜÑÑáSUu·É ªªÚÛÛ#Ý×eùmDÜ&ŠâBDÌåy> DQ¢(J%"îi6¨+ -F£¥Ç1,Ë2Š¢¨²,ƒ¢([%Iz©û2ÇŠŸ~Xå_ýëF-ôQ¿.G‡ s".|é¹·/ÿbÉÒ¹™9² ðøü?%—³cimmã3ÝgjJRDùÛ“/{eIÞmâ‘Óé þùÆû"áPøwÏ—lkk·ïÚU“N«ËÍÉË #ƒÄÞâŸ×W¥ˆJŸO8Ù“ÿþgyð—Ÿ×(á@¨¯uÂŽ6&Ѭ¯ßô3Ÿ;z² »=~pýg/³®†J „” ‚pç×½ÃN»0ýøËî6²,O¼­õÜÒ¿ßìkÞ¾ö™hÈÿ»‰XŽº2½ò÷ï4n_m zÚ»–Hç3‚oûî…»ï,wêqy#'Fžq‰ a‹ÛµþEkN*ØÃ±dE…¤ÜÚÁ“gsZ£Y#EÃŒ¿½YØüï7Åú-+Ö9ëv>Hº=©a!ùƒP?-™½÷ÓÑ”®×Ù™ûå;§ª?u¸€²¹?ʶ¯bMÙiÐ9[Ⱦ§ñF±Ò¡skǾ.¶Qÿ§…%ŸœwcÉ^gsí‹§®®Zµ´ñúþÈkÄ®ƒTèœØJöü”Ý×Á`x8==}\äåv»Ãç÷ 
„•nuÉAçgúPÕe:t®Œ¿Ïu‰ˆšØvFð€ƒìaî?œs§¿ðÞÇÏè¼>ÿt)wá9×_¹§®§ƒõ×/Îú‹g#böœûß}£ôô‹:ð;íü'‹Î¯nÓòWö³ É0a’~Ûð b˲c-ÙEÃX†×{ì5[ÅpxsÏë@×µº­ûp&Du°†t{\`ìš!èŒù¦ä¬!ÀD}®F¿»½¶’=,šˆÙ¬F?"1-{8«Ñ¥+²ì yÚª‚îö°ŽdàuÌ·¸ˆØ–½&<¸}ˆ°Ÿ³V¨?Vì:8 ;êý؇ô:8äþÀºÜïAæ±µªêú½@Ç#¹~©ÎEŽà«ýÜÆ Kcÿö”®zÇHésèQìšÙû÷é~–­:×[ݯcÚWôÉ EQEQqâ°´¸uH„Õß«<`A….F§ÔëÞ—îÝxÐ×€Óêký$ꦪj}&éM%EQT/þðÀ­1B¾»|‹Ô/ƒåcÄÇÜ£^â]l|·»Tÿ’eù§¶¥XW=IDAT¶¶~{¥(Jz¢uhjlo¿ù~©k{‹ƒÙ9¼¤²ï?nªÛøÓA¯ªÊ2ö¹éoò!öÿø£U†{'IEND®B`‚swift-2.17.0/doc/source/admin/figures/objectstorage-nodes.png0000666000175100017510000016402313236061617024246 0ustar zuulzuul00000000000000‰PNG  IHDR¿€Å ÔiCCPiccxÚYyTá·ÞßpΩÓ9ÍóxÒ¨y5Ïó<’:NƒæSiJ‰ %DJ¡Ð$sR!¡"d(¤"D÷ÃoºwÝuïþã[{=ïÞÏ~ö~×·¾õ~/€h#=!!åˆKfyÚYÒüi¤ç@^`.à¡3’,ÜÝá´ïèW¥'$ÄÀÿÍx˜I ÄBÃ’±H#ÖÊH`%b@659!€°øYþ„ãÀñËoþÐ_~ð³¼=­Ãl:@ÚzFD2'€ÈÀO š2"éa¢!  ºCÿ‰'â_8CÿrÒéý_½›uTRB =ýÿ8ŽÿÝbcRþÔJR´— i º »"™οñ“ É–ž¿ñËQÉÞÀ€ D¦ØûüöÇS¢},@YˆŽwò *ê꼨,#É*ð'ª—éí÷;Æ9Œimܨ?+ÞóO|dÒz¯?xFF¤•ëŸøutGwà@ è,€_Ð2fŒ'¬@O'$»{ÿ®Õãú»ôm8ËÖó·?ÇL²ñúS+9ÒÛþ?ÆÌòöüʼn‰‡GÙ:üÒ€iD²ìÿàæ 1îοr1oVЧÈ`áÌ8ŸßœXAÝÚé×L°r°:°€ ¡‹@g°ëßO0!hÀ€xˆx`Ѹþ¬Þ^† OÿF[ý‰ƒ(ƒø¿8ãŸp/È€LHúS ÅMqcÜ7ÅÍqS\ 7À ÿ¬õN¶LþUõKk0Aõ7bù[ýúVµ…õo9¡3þS“-¼Dü‰Ð¨Ó˜ÐXø“ÿމ6Dk¢=Ñ–¸ÛŽ]Àº°Ømì2Ö4ìÖŠõ`W°–ÿ˜ý÷TXÀ„$p‚`B °€ qÿ­¢”¿¿QN%N]ð&ÄA4Ä@Ôß ¾ðXõ,)@ƒPˆ‡uN{ü3iy\ ×Å-qÜ7.ˆ‹‚*®ƒà¸nŒëâ†ÿ´‹ÿÚ*„X°˜ÑðX›ÌLK°ŠOHgEED&Ó,b˜*4‡8†š MKCS üi¿^íiO@¼ÿŒ1 `¸€t÷Xl:@€T÷?09€Ø €ºƒŒÖú_@2p?ˆ€$È€"¨‚è1˜ƒ 8‚xC¬DB,° 2!ò vC „ ¨‚8ç¡.à 脻ðÁ Ã(|€)øó‚*‡ˆ Rˆ¢Œh!ˆ)bƒ8#žH‚D qH ’‰ä …H1r9†œBÎ!‘Èmä!òA&¯ÈŠ¡”•@åQuÔµ@Po4@Ñ 4Ý…–¡•èi´½ÞE¡ÃètŒĤ1U̳Âܰ@,ca›°¬«ÄÎbmXÖ c“ØOœˆóá4\7Æíqœ'â›ðøA¼oÆ;ð~|ŸÂ—T‚8A™`Dp ø"©„¡Ya1asa¦pp½ð#á9šˆH´È‘‘¢¸¨’¨‡hªèÑ[¢“bübÆb ±±óbÏÄQq%qOñ âUâ=â3’v $nJLJ JšK®“Ü'yUrBŠOÊT*JjŸÔ5©÷4š-†VFë MI‹KÛK§H“_¡°ÂgÅ–õ+^Èe dÂeöÉ´ËLÉJɺÈfÊÖÉ>“c—3‹”Û/×%7+¯ ï'¿M¾E~\AXÁA!C¡Ná¹"UÑL1Q±Rq`%q¥ÁÊ蕇W>PB•t•"•)ÝWF•õ”£”+?T!¨ªÄ©Tª ªRT-T׫֩ލ ª9«mQkQû¤.«¨¾G½K}ICW#Fã¸Æ&¯¦£æÍ6ͯZJZ ­CZÚTm[íÍÚ­Ú_t”u˜:GtžèòéºènÓm×]ÔÓ×cé՛ЗÕÑ/×4à7p7ØaÐmH0´4ÜlxÙð§‘žQ²Ñy£ÏƪÆÑƵÆã«V1W_õÆd… Ýä˜É°)Í4Äô¨é°™´ݬÒìµ¹Œy˜ùIó1‹•ë,N[|²Ô°dY6YÎZYm´ºnYÛYX÷ÚðÚøØ´yi»Â6¶ÎvÊN×nƒÝu{‚½“ýûA †Ã)‡)G}ÇŽN'/§ƒN¯•œYÎm.¨‹£Ë^—ç®r®q®-nàæà¶×í…»‚{¢û%¢‡»Ç!wžšž™ž]^|^Á^µ^ß½-½‹¼‡|}R|Ú}¹|×øžòõ³ö+ööW÷ßè7@4 * 5èx2pfµÍê’Õ£kt×ä­y¤”t{­èÚ˜µW‚¹‚éÁB!~!µ! 
[binary data omitted: tail of the PNG image payload belonging to the preceding tar entry (PNG tEXt metadata: date:create/date:modify 2012-06-17T12:55:23+01:00; stream ends at its IEND chunk)]
swift-2.17.0/doc/source/admin/figures/objectstorage-partitions.png0000666000175100017510000006773613236061617025337 0ustar zuulzuul00000000000000[binary data omitted: PNG image payload for the objectstorage-partitions documentation figure (PNG tEXt metadata: date:create/date:modify 2012-06-17T12:35:47+01:00; stream ends at its IEND chunk)]
swift-2.17.0/doc/source/admin/figures/objectstorage-arch.png0000666000175100017510000016042213236061617024052 0ustar zuulzuul00000000000000[binary data omitted: PNG image payload for the objectstorage-arch documentation figure; the payload continues beyond this section]
ÖírêîšrÏR¡®Žôé}=¸­°æÂEÇ»« ¶Soº·Gª°É›“§ÂâÒÚËÇ\¿×ËšÍ#ýT*¬®á¬Ûù:¶{ëµçð³­ûCWÎ3ýô…Ε0ùòe·å«[õ‹TØÍ`* yžŽT( ´F…Ç/¦þT*dTÔÍ[ó¬© …Òy»ûl˜šš:©°á«ðô>Ǧ{‘ »™Vªp÷F÷ŸJ…‚}¡„«Bè¶·(©©©C‡:K‰×†#v ‚*lZ0ìq*箹ý7‡ŸA…¢<;zïV!t {Š ©P¤ÀT8eŒÎ6|ãž…€TØôzB7Ô”{ “ò‘ E‡–UȨ¨C*ì6~BW— {– Çù?½>©Pø¡ mxŽTØ=ü$*„.-ö8êêH#Š­Qav> ©°øyT]gÞ¨ÂÓû\íB*ì~Z©ÂÈànݤ­`*¬¯¯G*ä#â*„.ª)÷P6*ö@ïS¡( ù*¼ptxÓy0á§T!ˆ¸ ¡+ʆH…ÝÌUø&¶©°Û@*lFEˆ«:½lˆTØÍ´F…‹6EVý©°@*lìñ‰¸ ¡sˆH…ÝL+U8f¸1Ra7€TØ,üæBW!tbÙSaMuùý[¿JJà ‹*„µï’•[æ9åTË*ܽÑÝm¨‘(ç‚Ëå-Ûp÷G*Œ²µT=ºk¢(gäÉ‹Ï |ÂÏ5Á—窪ȊlFJ˪ÇÍ:ýC.žc»`¦ƒÈævzrêrœwß_gZt<5Á7'"®BèÄ9l´´4 ¨ÂÎNk±0•m¶—õ¥ÛŸöŸLvt­‡ûž ‹kܦ‡4ð„bëøž `ÓŸ‘Á¡…°UhªJ|O…ob ½7D ;ÀÖò½)ÛJ£—Èb*ëۤ•[»y›N+PǺixO7ï¶ÐÛÍø…¡ãÜô›ªŠJj 蜻°cü1go¤|J+oV…PZVÛÀƒMËú ²Qv¤-‘žÅX·;Ök´a³*€êjŽ¥üÖÕöÂŽô,Þî6D³Y@M΢¤@l[ºÝKð³¬s·2ºB…{?åR!Fg¾E11 óhi%Šêêú¬ÜJ 5iE)¡æ:´åÝ-ç‚ÇãeåU€¾¶'´e¶^¿+ø”VÞò1ƒlÔZÈKYy-µ¨F_GNFFh­ŠñI4€Ø–ÑV—k!,'+¯‚,GTW•V.`Ó¾7?<ÆÓU¯å sò*ëØ\caå"—Ê„[O§Ù®…Ÿ^NV¾ZI÷­p8 ~߉xg{Íi%¦†Šº+*HEÇr8¼!5…}ZKbrÉÄ õå ñSéS#]kWTÖÝx¶dŽ¥°£k-,Ç{mØ»EýL”C_,Ÿg¾gÓ wáfÊ·>Êáþ­j7î§®ú#bZQq5Ü95ZW[îs:ýkNåh7}aG×rò*', 'à•‰ó§XLgTÏæýu>aía‡Öf¾×ËZØqý˜n²áçtú…[Ÿòb‰·zÛ«“—>nñ±|œ>ľǨ|¶‡Ÿ94ÜÍY^Çü²(„öÁ;#›ú*¯ÙðÎà "‘1‡ÃUW×;M¼5ÜIÛÍYçð©÷ž®zÂŽ®µTTÖ-Þ‘öz&Vàò;›°eß›«'<ŸGæ ;´6³ËïÝòyý½gôÉ>§\kq¸¼ ·>õ8Šþ€“è&ÖÔr ¦¶žH$àp¸Ëí˜ÕuÑq…wÿÉ|] ¬$mm¡¼ÿD\Èó,qaãòã< ¢ã #ÞæGÅÞµãÈÛˆj“5Ö½VŠ ~š¹ûØ;˜3¹oE{‹}Yyí–}Ññij*2nt´è«Ô©`ÖÕ±¹Øç!5ïœÉåÀÚ)_ÊWnyy|ëÓ—Ù{ýc+˜už.úÛs  ^ C† Ò¸t;åî™1)©ô#§ßW0ë´5ä‚/ŽË§V­Ý‘’F9L—()¾f‘•¢‚ÔÙë/ÜL€ùÓL±_N§º†Íár¹ÀI€ŒŒø•c#•‰g¯LÏ©Y°öÙ±.ÒR⾇câ?ikÈa73øi&•V}þfÊ‚iæ#]õ¶Žþð©Dœ€_:Çrþ4swôtÂ…[Ÿ´5IKg[Žó0HþLÿ}TQqµM?Õ=›uEۛͫã4°ÿ}ó§˜ ±×ø’Ypù#%ñ ¦™ÿuþÃàŒzç×YÞ3ú••×?ÿ!+¯¢ °úÑ• GÎÄß É¨çpZkìÛâ(/'ù.¡hÃî×̺ų,SÒèÇ÷¸²XœGÞÆ} ý±Æ¡ë*1,‡U÷-#¯ï—–"¬þãUzN×Â{çF¿K(Ú~8º¨¸z µv3÷ø½SR$ú_øpþˆ{]÷OÿwEÅÕéë ²U¯¨¬Û´'êõ»Ûþ*Fz “Ç(?ÍÜâ=ŒóÐ[¿Ô¦+yz´ ¡Ûlhc©:r˜®ºÍå #´=]tÝuû)À¤±Cì5Ùª¯ß1§ÒJj‡zÝÑT'•Ðkþ8üþmð/'.~àp¹¯xÕÖrGLS ¡JZ·ëõ«{“ä%çú<ãÔ7ÀÔ%gL49¹Øçtú¨™ÁÉ/guEsØ_;]Y¢¯-ë5ÚÐu°¶ó@Mwx»óŸÇâŽïqO¢-ÙøòÍ?ST•¥6î‰ÜvðÍÁmÎOóÛ©Ý:9Vo‹x÷hš2EÊÿB"Ì_ólÑÌ~·FÝNŸºìù²¹–ÒƒŸe=»1›¹ü©Š’t˘ícú“ g™6g§3rwÖ:HK^NÒ{F?¿3‰çPU–²ñ¼¾y…ÃÙÃnïŠøòzN.•yã~úS£ñxܲß_Ì›Ò÷¢ŸGN^e¿á7eø4<÷]"-îñôòŠº{Ò+*ëÆÎ ¾~Œ•…òû©‹Ö½¸wnt§çB™"ux›ƒñÐÎ꣆é sÔ¶±T€%sú€÷Œ~þ£ã ŸßžXÇæNZ"G"qÐð=šz}”ަì?a™ï?–D=˜Š=ˆàgYãFèO\vk‚‰ÂZß×/ßäÀ¶ƒoHÒŽŽ6åî‹Û“´4H‹ºy6­°ûeÑ?—?Žuï³sÝ q Ü® ƒbÞÞ;7ºVí658îñdcÿ ‰S—<½õËû% dÉËã%$ð&C®%„N1Ð%‡Eä®Û<Ù÷PŒ–:)ùåìw EƒÆyÓO¢ý¾ïÍË;“”%×ú¾>zºóëà=]…Ð+ÜæœÿnŽ×(£¨ØB3—ë7î§ î=u%uõÂ8N]UfÙ¼þC³ÀÙNÙÊByíßß}J+~– •Lö‹¨¼¹“ÌÔUeˆD‚·5”•׆E+)ƒŸf¦eÉâÑñ]Ò-cÈ@Mj¼÷á?œ«ª9 ~ ¼èq=û¿n,C³Vÿj¥®*ƒÃá~ûÕæÔ•oyœ0ÒÀØ@Á@—œ3·²º.,"73«¢¬¼6ñSé”qF0eœ‘ž†$†dš+=Ì}ö*ÇØ€ø¸š´›"/'ùøêĨ“ÍM”.ÝN5t¼”ü™Îß›–YÎ`ÔcÙ[« ´VÃnæ¨aººÚrZ¤à‹ã\iEǾˆÊ€šZNðÓÌE3,ˆDVi‰/$“Ås *ƒŸfÊH‹>Í«¨¬ëŠŒ¬˜oUœ8Ï{ºyJÝqÂÃñ‚{oÜO÷ñ¶& òr’+ô~š œ›³Ž±Â/£Œ.ùÈÈ)ÿ',³²š]ÁdÅĺ;kõ5RÂáp|Yüu᳦ºÌû4SCÅЈœ®ÈXôUJ˜s騲¼„ˤ{‚·+ìuÞÜÉFØ«•es-#bKÊÊk`–—‰®¶œºª #Õ[š(þ&ïíû¢ & þ¾šºbAìñ¦C³lû«DÇS†fii}íÜøU8ÌÉxåÆ{=N…Ðm6|S˜\¢L‘š2Îèìa·—wǯÛ%x³†§ðo—y’DU äH’‘ë0ææ©«I ¯- ,6W†ô­Ü'!€šZ®¬4.—ÊÄþ-œn¡£)Ûé¹()­½q?U\7d æ® ƒžÎÌ+¬ŒŠ+àPUÖ'I`ŸåHÌšo¢”&Š@EeݰÉ÷~ßû&5³ÌÄP¡¦–K’ú¯ô*#C€Z—Åâ`¹ÐT—ùe¤aW<Ž÷SKJk –̱¼wnôÞ̓Šãï­©å(Sþë¢@–¬©­yùoY;ï9óÁ£Ù*i%²8¶ŒÀp—:6Wœ€ç?Žã»ðÿMKL.yS /'9ÎÃàø×O/goø3NÐ#L–¬Ì·˜䉵,.¨þûŽ(ù3Ýväà Õ5\y,l)Éo¹Àã¿-NÂf7TT²s©ÌCu,Í(]ñ8X,NÀå$gc©ºq¹¾¶là£ÿþ V0YÄ—@Àáp2¡ŽÍY’Ô³yÓ—=Y°öY|R±Q~Ì’ßN‘" ª†Í©oÀ‡¤$+>wT¸lÓmÿ 1=N…Ðm6Ì/dþq(š_ŒÂãp ÿþjq¸<<@)öC¶%*¶p€… ÿÜí‡Þúïq9¾ÇuúDÓ¬<&ØX¨¼ø·¥+¶P‰uœ† †+æ[­˜oUQÁ––êüj²„nñƈ/™ßúµà ü/%— fFJQ±ßʤ±І:ü_Ûe࣠C=…[£VÌ·’‘–ÐP“€|jöÿçÌè£'««#‡åÂÔ@»9Λ¸Âó·’ù?r8 rDc³¹†zäÔ &V©góž¿Îë£Cæ\]]¿nwìË;“vmäd¯A-eÀ`;µg¯þ+7iª“jj9‹gZ®˜o5c‚IAaµ”¾Õѵ–ZgéæÕÕõü-2D1I <««ãÀÀê )%ß²O56 ž~â⇵‹œ=ì6{’)vŸû›)?y™Ãbq 6ñÛ·ÑÊŒ4ØVcÅ|+‡ïšUD"a¿|XD.‹¼œ„¤®¼‚ ýû*Ç&Ò°í9y•Ì*.E 6.‰–•[ñøêĵKl´5HÕÕðtQ|GÅVä»B0Ð%+%±ï•Ë@íêvgßT…8ª¯›é¦vÉ# <ýjíqÍ}ˆ«Žšu͘(HT$÷ý>xΪg+XfçUfd—ûïqáÿvª}ätBVn嫘|qq —³ÆyœºòÑ}j 
®–\Òçmu9"‘pñðPÏ™÷½gZ$&3™œÍ«¤;=òr’g¹Ú޼óË(]e%©Ç/r܇èØXª–•×>(Ü"võBë³×?­ÜòRO[îÔÕ—ü<O·ë¯¶õ@ÌÙëiŵo‹fO2 Øï:d ž}RŠåÅ`õÂæܥ—±ˆøKw?]Ûcãr»±ó<~‘mg¥š[P•\üâö$07¢lØy`‹ÓŸ›í=g>˜3¹oÈó¬iŒ-ú*…Ç|ûó###>ÔAiëþ7–f”‡¡Ùº’TZ•÷ —Iw²ò*˜LŽ«“¦¥ê{M¯_Žªsã~úäq†]Ñf?ÈV}ä0]½¦3€ÀÇ_Oíw& úÚrkw¾Ö×‘Û°ÌÆcúƒÜüʪšú'áY®L<}°ö¾%éSijF9ÓÕ–[2×Âù—;Öj©ßþæõ:cÙ“ ž}¢â¨³½úvÅã€ËøeQÈ0G-MR-‹;ÖÍ@\Ǭâ®ùãÕaß!Zj²³–?±µV½t;åì!Wq‰ÿn¦¡9+éw6¢ã s‹ØõlÞÁ-Îcçq×KÍ(—‘ÆÀl¯¾®“ï®ÜòÒ@Ÿü÷ÅçuÎø‚®Sa-‹›WX¥¡ÛE÷»y:mdž˜˜ØºÅf·9·pLògú×\†¤~ º¼œ$ðx¼ˆ˜C=- R!­úýÇbE2Ña€*‡++¯--caÍ%Ñq…e Ö€~*’8fU½,IÇQiUx.5£,øYÖÙÃn“WùáS‰¦: kMÿn¨Z¿¯°œåeÒt×þq—îf5ä/iáô²òÚØDZ›kj¨Èï(û%³œ^Îd«Žå¨®Žkg¥Š½HO¢Yš*cßàBZulb?Â/™å òÄ\j¥¹±’”áYƧùòr’ÕÕõXwz /‚Öÿ(ëŸËcšÝû1•>ué‹ sîß{ SÏæ%¤W+ÈPÇ«¨¬KH.¶ìKQTÊÌa¤¤Òú±÷]…´jÀšëÙ¼W1ùX„¥eµÒRâul®,I<+·’¢(‘3š9–ñ‚ªþfʺÚrßËE|ÍvÔý['‡õ3m¾ÀÊ-áò²Ä^Â`ÇÙ[©ñ{JFÇ’å%ú)UW×G¾+”Ä;ÚjŠKàêÙ¼´Ìr~ƒÏéôô¯ŒþfÊE©\j¥Ž†\e»†U_[Ãåpy¿n{÷h:–÷ì¼J[KUA 5ÂÌõŠ}•Ë›Ÿæ#,2oÕ¶jüìz_cϽ¦¶žÿÈò©UÙåXäïŠJèµú©`)|É,×T#a_ì )'+é0@õSZ¹¡¾<­¤†,/™‘Í0Ô#š|jÿ0‹¾Jõl^T\A]×ÒLù{aø_H\¹-¦•ݤ»T…¿ù¾ŽŒ/»uó–——WSk=Ý×û,ú*5êø‚Ãá\kcŸÕUeF«þ×cVQAŠß-c­ºàöÏéôÉ¿>ºzÜ“€‡“W’Íø6¼\W[®…_¼ÎBQAÊ£I§<¾s„!¨fuUA=ýº!láô~ÎW?ŽuÓÀþBÈȈcý»q œ½uãA{òr’üà tɺdÁÈÏåGˆýB^¹›ú0ì럛gåV sÔæg¼å?KB³ÿ…‘‘|Xâ8Áo`_#%ÌõØçŠÊ:K·kÁÆ*)›ñ‹)?ïÝ0Ð¥és×Ò ñ_a7zX‚ãU¿Xîvù½3ê#?iŒÑ£çYL–™‰–÷FßÌŽÐûTÝlÃ΢¯‘ÒɽÃ.Ýþ> ­=zN‡áFÜø{Ôù)‡ ôÉ7NŒv8ígæ/Æàw:,Oܵa°Ãi'òr’¡7&Þ þRUÍ5¬Ï¤1ß·©{8¼}ÈÙëɇþ~¯§#zó—No¦è•*„jC2P³çûêª2[|D}>‚Ö€ÃáfO2=©Œ¾j+ e+ eaGÑQäå$»nK—ªpÍöˆ×qt³¾fþþþþþþžžž7nì¶ûÖSmˆøIèœVmD'Ñå*|W:ÐFC]…ûàé+¯ºÓ†B›‚Ä´°!þ±)-°~WÖ‡¶‹øòµ¼­§¬ßa?ê†`l1­€žŸDëº\@qiM›ŽO¢‰iFÕÊ[máz{ÁÒp¹ü¹mOÄkaÈÒ/óåµ0ä‡gù_Hü*v:©_Ú–‹à§™ÊýN—”Öò·´æ>Òª-\¯t].º¸‚ü*2Žþì¶÷›ÇkXjc*ìf„iCX»ë-¿ûž(ýžÑ޳â“+œ‰olj]D~aÕöCïÚz'6}ÙcÁî{›‡¼¸œÓÖOßHì»'tÞÄž¼šÖÖ³Ê*ÖïŽhëY]GW«ðul™à¾É£uu×í„lÃküº!ŒÇû¿>Æ•u›ÿŒrŸ¸tã‹/™å<ÏÿB¢ûÔÀõ»"Êßz~N§Ïóyê8îοwXwÙŽô䫎F{^nXbñ×¹¤Ää’FÛo§{- 7/øvp:¶%>‰6uÉ#¯…!X‹³ãpŒã¸;ÞkÃ>§ÓÛzÝfÉ/¬šçVÓ¶¢!@_éáŽ:¾G¢mÿ’Y¾tã ÷©ÛDc#=øHp$ÜÓ—Ùãæ»O ì”\ð—d0êCnë¹n0×'´ÑX@‹ãw6Á}jà<Ÿ§ï¾õ¬|”î>5Ð{mXfÖ·©ùó©U+·¼twgóŸQXÿó‚-M£§Õæ‘Q¿N7ŠûPÜ´xþ&oÖò'îSý/$b¿8™9 ïµaîSƒþ=˜ÿ+3uÉ#~f;B÷«ð5]1~©e„lÃ9“Ì”ÈÒ_JÜ8iQˆªŠôƒóc'xöñ˜þàÒíÏ/# îžýËH£ w2 ¬¼vÔÌà• ¬žßžÈáòvyÛñH‚ž|ݼ?~â¨öÌÊ¥©.sr¯Ë¬•O½|ã~ê™kíú÷ŸÃŽM Ë*)­õœ´y…Ý™CÃBžgÀšíðüöÄ…ÓÍÇÏÿ§ãå2L…Ì*ÞY¿ö̬¹o‹ãõûé¯cþkXV^ë8áÎÔñFÎ%ËIÌ^õVÿN–—xp~¬¼œDJz5¼Ž)Ø´7òø.—[#;Þ”!¸:M?“6ÏÝí6DgÆD£M{þoèçší…ÌçÇ®\`5qaHf#üMÞá€Ä GG¬]b}ûaÔ³yæÜuwÖy~{¢žÜ¼5¡m½t#0ÚZªîÜØæ$áæß£–n¬/GÇ®ØþÇo·FÆÄ=PÏæ¹M¾?yŒáÝ3£?¤”b‡=˜\z÷Ìhßµ³V>ÉÉ«ìH.„¢B|ç]ú1B¶!øÿé²ãÈ;~É(ŸZ•ø©tÕ‚þXO1ûª—î|Z·t€¼œä [õù“ äy΀~*ò…îh§qâbGW2ÁT8Öݘ^ÞÎ&°q¶ýUöŸøo´ï݇™—Ûb]ÆþXcwã~ÚË7ùÓÆYY(+*Hm\f[]]píË0Gí”/t œž|仂ö]ýÛ­ûW…Á+ô´Û³f¡¼œä%?÷…ëBù^ŽŽ/=\×e°¶ŒŒøš_­ÃßåS«®fÿ¶ÈFFFÜ{F? 
q¸tçóÃÒòÚ¬¼J~A¸}ªpËÚ_sËÚ‘ˆïoƒžGå Ö—ÏßIß¼ÂNFFÜÆRuÁ4³—QyAO¾®ö¶ÒÒ õ5RúíW+ˆy_(#MÐT'¥|¡[[¨<(ìHñ¯Â'7—Õ°Úów΢¯Òª…–‚õåG/²}Y((*Hmñ±»ø9)µÄÔ˜ìáª'/'¹iÅ·¾ßþç“F ×ÍÈfÔÔrœ´B_·²‰ŸG… ï”ÕUeþÚéð8NGSŠVZÍã5ðGe¨P$ œQWL¯ÁæÚpqlϧF*¼r+.1¥¸V›û ËȈ_=î9qaÈ%?w¨¨¬c³øùU•¥**ÙÔâj¢ä·\ÈH‹s¸¼ò ƒQÏÿ^­šß·ŽÝÎqâ‚*¬¬bmÞõã79ÍòÛ"Ç ·øÅíRz­]ÿoCøÉrÄüšRz­´$ÿKõmäR6µ.ôU‰D²„ŠR;G©þT*Q°!LŸhz÷aæ¾ñó¦˜é“s jËÊk¤x<^Tuˆ½æ×\Ö ,-“1Ä^SGCVMEXRZ{íAj»/-¨Â´ íž'A™"uîÐ𹫟a?´ÖHúTŠ ÆHú\¢£IêßWùÙ«o¥•œüJKSeV/€7¸p3E¡¹…«ZC#N]t12¶ýÅÌ}[Í\®*ÈK ±×4飈M= eåµé95æÆJ*âùÔ*- Ruu}fv ˜™(’å$°>n!aYí»nSÎYyKVºÕ{kµ_gšûleÒGQ^NRCE<ù3ªWôËh—÷õß*dZF¹¾|²¸„Øþ-N8®žÍ;r&žÒ®Ij©ÐuÂ_•ím—À?<Âcæv= õåc?cÙRÒèVfÃ>äi%<‡Ãedký<@iÆD¬Cnà£tÁI7ZÏϦB€ÿŸ.&C®€ŒŒøößl¦.y¼hf¿g¯rôuäWy÷?ÿŸŠJö—¯Œèø"ïéæ#]õŸÿ°tã ÷¡Ú'/}l÷_Tè<ÎŒ›Và“öÏ#?Îà ðqFJz¬òî?jfp›Ëáòþ:—øðÒxC}ùƒ°~W„™‘ÒáSïetöÀбólXjûå+#äyÖ¬_Ú3)@SþúeÙl㿯|i_.äå$¯ùpŽöêJŠ’K7¾puÔþûRâámD"ᨯóÔ%WÌï÷àé×:N,žÕoÈ„; B‘1Ô“oÇE›UáPe€öÏâ³q¹í‡ßªíG}§-{´e•]ÒgzVsÔ0}fU½ë仼XuMýÕûiÛ³·è«äì 5}Ù“©ãŒn§k¨È´00ù{4Uaqiío³ƒílÏÁêË›öÆÀœIfŽn‘d*JÒ{ýc/ùyè’GÓŸ³òÙh7½SW?b§ìÙ4páºÐÍ+ì*ªØ_ü€ÍhÛ&~BB7ÏÚЈ৙#†êÿ­cïd±2`|-éS©¡ÙÑ^‡ÃåS«^¾É7Ô“—"(ŠRZ$lâ€<*ÓÚB¥­ƒ°YdeÄ7ïŸ2Öüâñ™˜ eI8wg­ÎÚЈÄäi)ÜhYymä;*ö×»¬¼öixޤ$ÞeVM«góžEäT×p¬U±ÑµÉŸé±Š4Te†9ê´éw›µáä~—¦*ܻѦž|˳64¢¬¼öýÇÁq²áoò°Ù4°y(²r+íú«ñù&&—|L¥;X«æ0ì5ˆDBYymxt~]wúÄ64\`³6Ü8áø(³© øÙ°ëu˳64":®ÐĀ̯gæ0ŠKj±aË_2Ë£b©Ú²Cja·{@ÊJÒšê2xÎØ@ÇãE½+ÌÈf˜* ŽŽÿ!ج Cì5›ªðò1Wjqõgm$ŸZUZVËÿb׳y_fa¿,ÕÕõO_e×Õq]kc©a(Zí:X+5£ {ˆ™9Œ7±Eò²âÃtZ? <6kÛãDA…Ý¿ž²0m(,Ä´ÌdRÒ«϶ý{ÿ”¤O˜ /ú¹] Lm« …Åú]g®¥ÊH㛪p¼gŸÎa#"`6ÄGSJñ?œÃFD0s½Â«ç~Ée5U¡™±bkæ°0šË]…УW—€´LF—éDšU!ið‘‹´LFEÇkªBþ1ÑñÐ׬KIÏbÀ÷Tˆ“WX)úO¤¼‚]T\ogÕŒ ùÇfCy²ü?aÔ¨ÂÎQ«øž ÈD¿°£ͺYiü÷Tˆ-H°ïï…æiA…x.6©¢G<‘THÀ‰À‚u¯…ãÁáĺB…µ,®ˆ«:±¦œ››ëããS]SyþÈ$£.ŸÛ®}pb–fšÍ– ëë¹éYœú†Ó×?¾xS²x¶­÷ìÁÂŽ÷»ôÑQ’"Š7[*€Ü|fE;éSéŸþI¡·ÉË‹èšñr$IcC•fU•Ìúœ‚Jf5{·_lnAÝ_{Æ9Øê ;äæéo¦QPTÑl©Ë…Œìrv=ïîÃŒ;!¹=M6­vÇuûX‹Ö ¦LÒP%wº ó}µuíÄ6©°×”õôôX,Ö´iSëë˜oCVXYju[ÚAbR~S€¸8ÞÄ@qçÑ·/Þ”`aGÚµ5õßS!èhɾ‰-üóD’mÕáC…lK|O… '+®­NZ´áy.•ýô–·›‹I{/Òådç–5«BÀãÁÄ@áä¥wBrϱý{ßALØñ6ÓÐ*|[&úËŠvš SSS]\†–”–_š£ª"[XT!ì¬}—¬Ü2Ï)§šªúy,žc»`¦ƒ(ç‚Ëå-Ûp÷{*„»zèjÈœó›!ÊyòâóŸ{ͪþíê‘ò¥ú¼Ÿ—¹©šÈf¤´¬zܬÓͪãä¥Ç/¦Ža¼uÍZi‡†Êu);=él¾ÆêÈ"®BèÄš²–¶VA~‡–u'*âMUWï¥ýéŸÔ®$…Ã÷TXX\ã6=¤¡KVÜë|¾§BØôgdph—,ÝéHqßSá›ØBï ‘Â°µt¶ K±æB1•õmJ¹×” ò †:(McÔm¡·›•ÛbæMéÛT…PPĀ㻠;Æs÷az>µæ{=ÏKËjx°pª¡•E—,ÜYääW:õiÙ¼~ͪª«9æF2»þoW°ãлÑîºÍªjê8°{ÝþšÔ¢ÉËÈ‚À§y]¡Â=‡Ÿ‰r©£3{ØØY©®˜oÕÂ,§¼¢NZŠ€-‡$,Vn‹iù€–sÿ¿€œ°Èɯ̧þ` ܸú-ô7¬¨¬«©å(ÈKò;Àw?ñI´C§>µ|ŒIÅžH=›WZ^+)Sl×(ºÎâïË?~w¿`ºyËß™’ÒZ—'ÜïUàÓöÅâÓT…[÷‡¶r)>!Ò}oµüÎ& ¾4}Ù#3—«³–›üê]BQ§Ì¿Öm$&—X¸^5ûþøùÁúö°É¢+*ë.Üìè<:Ý ‹ÅYºñ…™ËÕI‹iÛßã÷mjØ+wS;eR¿nãvpºÞÀ “=²ñ¸é>5û+õ%³\¤f{m 9y•.^wìGÝœ´èQà£t¨gó.÷¤F>ͪpå¼°fN7ÙðKfù±Ó‰ŸÂg…ß›œ7ŸH$œ¸øî<üRTÜUóÈw>ÛÃ÷oqJx6ëÝ£éüÜ&y?âñxÙŒ‡¡Ù­ Üy˜Á¬bçÅÍ žœ3ÿFP6§áþ±ížµ¥û©¨¬ó^÷òMÐä¨àÉYïæ» ÒÚv žEä¤f¶g0!²ËïÝ„‘†YïæGOž¾âEYymiy­ÿ…ÂŽ«Í|O…"^GÆè¦*£²NF†€Mf‹Ãá|פҪ¢ã ïþ“ù*º@YIÚÚBùع„à§ÙÒRøËmÝœu¢ã ßÄQ_ÅÜu8à}гÌzw‚‡á¶5ö8îuLÁΣokj¹³'™ÐËX[|ìËÊk÷úÇ>~‘ce¦¼ÅÇŽ¿NnçRZÆ’‘þvÓ\kÛåÌåÀÚ)_ÊWnyy|kXDî_çó¨•ÓÆ›¬^hM$¼†¸:i^¾óùΩѩe~çò ªLú(Þ;7º¬¼vû¡˜—oò§7ápy+ôWTºq?ÕÿÂGðža†-ÓÞéT×°eIß°°É¯deÅÏ^ÿ˜žS³`í³c;\dINÄ…Çä™Q°›ü4“J«>3eéKwgÝý'b_¾É——%®ö¶š2ÎÎ^ÿxáfª’¢¤÷tóqŸÓé{üb?•¸Ñٶƾ+ª±\.¤¥¾ Â]:7†¶aHIDAT·_ÒçÒ/™å—?Q¿`šùù›)Wï¥Ö×Êùý¦O4-+¯=~þCV^E£îvÀ¨³7’oÜO¯`²Ü‡èìÞ8XFF<ù3}û¡˜¼ÂÊuKl_¿Í?¾Ç•ÅâxWð‡ÏÀ6\n,Gþß'rq$Ûv *=§ÆkaȽs£“KöúǦ¤ÑǺ÷Y¿t€¢‚Ô¿wJŠÄó7SŽít‘Àýy,.ík™¶&i÷†Á6–ª,gÇ‘·O³œì4 ôÉ£\õŒ Â"r÷Ÿˆ«`ÖMg¼æWëN_Vz¸ ¡Ûʆ¶ýU\k©[_ð^v;8]Roo­6ÈV}ÒXƒ­«íÙªo;ø&ŸZõüöĺ.Z÷<1¹¤„^óû¸U ¬®~d0YQ¦FÞŸò<+"¦ '¯ráºÐº>º:îÑ‹ì÷K`ѺºZrIÏg®ò¶53¸‹Ö÷ðÛ1tܼÇ#gÝ÷;›˜\2ÎÃ@\wx»³³½æñ=®‰É%‹Ö=?ºÃ9:xZ>µ ›”;ði^AaµÿW·r[øùÃ#’_Î2Hæ­ íoNIx:SCMÆ÷hb›–uþæ§.ytuÜõûiíž«e¦Œ5úœA7xiý®ˆ§/³M  tÉÞ3úéJŸ?<ÂØ@aüü`K3Jü“󦚹M}P]]ŸKeú_øpú€Û0GíÕÛ^ ¶UO~9ûú ïu/+*ë¥ß JtuÜÑιTfuuý¨™ÁËç÷Oz>ÓÜTqéæ.y-¨¨ µsƒ–ýå©K]¸™RSËu¬ml °dN¿%súyÏèwéöçàgYAÆÝ=3ÊÿÂǰ¬:6oç±CuÿÜèøìUÎËÈ‚ç·'&ÞÖùïæx2ŠŠ-4pºæw6Apï¥;Ÿ6¯°“—“TW•Ù¸|À‡_@Q^lúDS‹¾Jã< ®ð( 
U½ŠÉ¯eq+˜¬˜øB×AšöÖjØãÃ9q1ÅÉ^ƒYÍl«ÑþI¤[Æ¢¯RvÌüÃ8WUs†M¾'x»Â^çý2²¥ª¸nͯÖBó±¶Ýiã­,”ÕUe2ßÌUU–ŽŽ+üšWYZÆ€¿.|^µÀJ\çæ¬ãé¢ AO3݇èÐJ«S¾ÐjܸߡùÉ›Ò TÝVSNL.!àq}•¼gôóžÑ/:®pÒ¯~õ_wf ¢ø­2¥B‘)*)9’$„¿ÉûuÃsg-G;ulvèŠJ¶Ì¿ÓüJK ¦–+C{—øÍ€Ó'i¨’Ú_ë(+¯Ž/í¦ýÛµ~ð q7£ÞJÿ[w®ªaëh~[ˆ¢(Ŭù&J¬ZZQY7iQA\Ìe–ª²TM-WMå¿þ22¨eqó©U˜}d¤ÅÇÐïŠÇ–eo¥†­¤¾v‰Íá€ø¿Î~8¹¶·¦–£LùorŠTMm=ð{‡œ¸üáï‹Æ{ô±ë¯N’Ç–“ý¯Ÿv<– 8¾k`W¬øó%³œ^Îd«>eœÑ”qFÛ|ì‡Þ˜?ÅŒ@ië¿YÓå‰L&T)ß¾fÉŸé¿,úǾ¿Ú@[5 5i¨csùÓ•ó+àÌ^z6#+¯Â¶¿ª¥Y—tWb±8÷gLŸh:d æšS—< |”á骇í­`²ˆÿv?ÂápjÖ¶‹…Êãñ¼×†}É*=\¿.;ŒÍnàσýUÕ°Kèµü'Ò¹=–z‡ ¡Ûʆ ÉÅÛÅð‹QRD‚Âÿw¼<@)åË·âÉ»„B3V¿ »#O~ö°Ûüiæeå,èo¦ü"ò[?ìS‰uœ¯QF+æ[­˜oU]S¯ ß%“…üòë³ÌöYB'NÀKJâ žË3#¥¤OßVêIùB·³ü¿yOƒŸeéjÉ=¾:qãr;%2QCMº¼‚U¾ iÕŸ3k@G“d /å¢_e|× e½ÿ8óòÝÿúµÈËI%ñ ..Æfsõµå²ò˜X„Çã½~W £)Ç?¸ºº~ïÛ„§3ns=\ZÊ€ý”ß&þW×T'Õ°ê–͵\1ßjþszKJªóGçÓËYË~Î_™KZJ\’ &)'àquu\°í¯ò1õÛ—*>¹ØØ€,xú‰‹6¯°»z“ß}§¿™òË7ùØ·ôço+ šÉ ¶Q_1ßJFJ\ZªKJD"á÷½ÑÑqßT%EÄ%ñ’¸ò 6X˜(Å&~[ç:ŸZŬâ ÎÈõ®ðKVù»GÓ·¯hb@ƦÈötQMú\ ,çÝ{èjÉ)+Iaß+—m^Y¡z ¡ÛʆÓ'˜< Ͷñ¼>ÆÍ€QÁzš}ÁÏ tµäöŸx/'+±gÓÀY+ŸlXf“–Yþþcé¾ßžG~ë$1ÜIçø¹¤J&;4"·´¼†^Îã®òÒG¯…!jÒoŠ´ÕåˆDBÀŸNãç/cù.V@«Z¿Ô¦Ós¡¨ uöÀPk÷Û3'R”¤^Dæ;X«Ù[«••×>(Ü"vÅ<«“—“6ÿe 'çw&ñäÞa‚§[›+oú3êÊÝÔêö½ŒéMÏî9óþÈaºÉitEy1ðYdí>=°º¦^FZüðÉ„GׯuÅãØæcï>=0,2×ÖR-Z÷¡øÉµ‰`nDÙ°'òÀ§MËm~ñ~8w²Ù“ð×ÁZVÊü¥ddÄí,å÷ˆëgJ¹÷(]WC’J«Z:·Ÿó/wÓÒËË*XC4l,Uû›)O_öÄÓE÷úý4OWÝ®h³w êd§Ù×ùʤ±p÷ŸÌc;‰D‚¾¶ÜÚ¯õuä6.·53˜Î¨­«ãœ»‘òâö$ÁÓí­U/Ýù$/'‘\úþc)ÓÕ–ëÞÇcú+ 6·øïq™µò‰×(£ç‘¹SÆvÕpï ~n3þùe”®¶†­´fâHC"‘PÊà¬ùãÕaß!r²’ÞkÃÚ¨ž¸øáÔ~gÁY õÉYy̳×?âq¸‡¡ÙéÙ,·cí ©‹O›`œø©X\B f{õuŸ¸ùÏ(]-Ù#§ßŸ>0¼SÂî:Ö²¸y…Uº]t¿›§[g{O¢åäWJJì­Ô”)RÀbq^EçëëÈ(Òªc‹¤¥Ä±©‰ËÊkKËXØtÄ1•L¶•š¤ŽYUOQ”ªcs¨E5ð5—ø8㢟|N§§e(ȱ³¿ªVÀï+,gy53þÿ‰¸ÎöZH«~ÿ±¸ŽÍé£CæÏNŸD+gÔ¹9ë`“r×ÔÖÛY©aÝhã“h–¦ÊØ7øKfyjF™šŠŒ½µ`CZiµ©"ÉäãÓ|y9Ɋʺ˜øÂ:6×É^£…W±ØÜ×ÿ\ÓìÞÎöZÏæEÅ”1Xr$Ilòj()­Ž§b×MþLÿšËà‡*Øá¼ºº><:Ÿ€s²×,-«åWʲò*) R¡¯s¼gô€w EEÅÕ:šr-LNŽÍözëä°~¦Í÷øál¯ØCçpyöVêØ 3ØF–$ac©Š5n`¡ÊȈ׳yi™åü¼£ã Kè5æ¦Jj™‚¢*MÙšÚzZiM]~ÝöîÑt,ïIŸJZžœ›ûzãrÛf÷¶f¶Wì¹W×ÖOð4À¾½_2ËÓ³#œuñˆzWX^Á27UÂZf¿d–kª‘°I­ó©Uï?Òäd%m5Ó2ËM °îèYy•úÚrcç><µ˜E_%þ7s@?UìF5›íµ•ݤ»T…¿ù¾~õ¶ÄßßùòåL­õtë KUlÕ$>D"ÁãßöuUÁ_]E)Ì8Îe°¶àöäÏôÙ«ß:9O;y%iþ lW_#¥.êX#ˆºªÌhÕÆÍyü|aM×Íîcþšð‹÷?[WÛÛY©]¸â颊 Ñ‘—“äß“®C\âÿî*†2EŠÿ,ú*ñ­ÿ?ðFFF|´›>ÿ3\¹›ú*:ßwí@Ziµ«ã·d1v5MºàFQAŠ*–kÁL ö˜16P¨¨¬ë7üzÈåqE)ßÃ1Ø"ŽXÞ»a|HÓç.ømÁ–|ÜÅÿŒ­R‹}Ær·í@´E_¥©c#ßQKʪM  ¹ofGèúùùu§ AtV‰j}•ömv:z:¡¶®~Ñ Á·1=‹À³cO]ýxévªý•›v8ígæ/ÆÕ5ì­¢de$Žïqv8íD^Nòþ¹Ñçn$—3ê\5tMgÏnàÀVÇ3ד}¶¿2î£ðâö¤v¬uÕ2Ý£ÂÕ«Wwó}ë‘6W½n(@u5Z¤] ;ŠN‡Ã-™c¹dŽ¥°é(öÖjÝS¤íR¤6.·ë¢Ä{« ¡;Ç)#í sZµD/V!׆bZ‚“ø_H\¿+¢åSÖïŠèÒƒ¾|-oë)ëwEغQ/0¼WL+à‡gaÓ=tÅ¥5m:>>‰&¦ U+oµ…ëìKWÀåBòç¶=¯…!‚ߢø$š×žå!ÑÿBbå2¾2Út|ðÓLÍçû`·æ>Òª-\¯t].z· AèeCï ‘_2Û, ®#ú=£gÅ&U9/ìØÿ#¿°jû¡wí8qîêgüî{B›‡¼¸¼Íñ9ó›„BDx[xübj[Ï**ålÚ%ìØÿ£×«„ÞnøÇêþ¿n{qÇK°CLuu}À•¤7±E:š¤µKl´4Hn¦< Í66 WU}ûÝøœNÿëì‡âÒÚ!ƒ4V-èß)ÝÙ‚ž|ÕÑÉ¥¶¹¤³n±Ù¡€„ÑÃú¾¯€°¬;¿pêfO2ÅZ9“?ÓŠãÔ7¸8jb£ëO\üð&¶H…"µeµý÷ú=´‰üªy>a5m+˜É  ¾ÿDÜöµÿ7ÙmN^¥ÿÅįÙLû*Ø<,gÿ‰¸¤Oô‘Ãÿ{G‘{ñÖ§z.ïV@'¼â/É`Ô‡ÜÖsÿÜ8`áºÐ„§3WU¯góÎÝL~ù&_Qž¸|^ìI…„e]¾ûYEIŠ,OTU–€BZõq¹Uf&Š›–Û¶~]öï­Ç §%›•ÇlÓ‰¿N7zý® ,"·Ñ‹àè¸Âs7R˜Õì1n}fþbŒÃárò*ÄÓk'x|›ú—Çã]ºýùah¶¬,ÁÇ{@ =œZÉÏ BzÙpát ))B£1ä“?äpyþº¸:j ™pçÆýÔÛ¿Û5ÔÃEçôt(+¯53xÆD“€ýà ™Û½íx$AO¾nÞ?qT{ÞèjÉýµÓyÁÚg‚õåÛÁéOÆý¾Òþß6í|ú2»¬¼ÖcæƒÅ³,wlx+(Öïz]ZÆ Ø?lêx£!ît|¦ L…Ì*ÞY¿ö̬yxûÓ×RëËÕÕõ.^÷Üœtüÿt©ªæx¯ €5Û#8Üÿ?]XuÜ”ôjˆŽ+\¿;b‹Ý-NŸi‚¯ÂÝÝû™´ù†Ûñ}|D nüãPtJýðÎ3&š¸N¹—O­zS°õ@ÔŸ›/œnpå#Ô³yîÓÙj즩&³`mG—-ÅThk©ºsãȶžK".1×'T°¾œ˜\2cÙ“óûØâtçá—ÓW“ëÙ¼Ñs‚Üuüv }úêÛ0ê¿Îxþ:ÏÿOïS–„tp²‰ŸD… tâñb{‡ÿ¾÷¿¾\H«ŽûP²~© Öýp@?•Ë÷>ÿá3PKƒä2X{þdyžÓÏLIZšK­tsÒ9v¶£Kc*ën\]Ën_ Ó'šj«Ë Ö—ï=JߺÚë2¶cí +wSÃ^çÿ2²Ï [u]ò–Õv,ÇÿRÚ¨az¹ÔJY’„¡¾<iûà«0"x…QŸöäå$/ù¹ Ö—#ß8Ú©{¸ê©«ÊìXçš[H«>}#ýuU™es-5TÄàòÏ< kj9¥åµ7î§u$‚*ܲvDaq{TòýmPÐÓ¯‚õå#g“w¬¨¥A2PsÉì~O³elXjk K¶²PöYhqI4qq1=¹\j¥•êƒgy™û–¯Â'7—µ/KÕ_gš Ö—ï=ÊØâc‹ÍñçFÇó7SR¾Ð)ŠÄqêª2¿¯´Ç;v:qœ‡•VÅåñœ´ž„·ÿïÓÏ£BzMtµåŽítüuCØ„‘†@¥U©R¤ø5_MRèë\Y™oƒšÈPÁdWß úö[·xv‡¦Õå«ðÖ™yr[ÛNÀþaV#®ö­¶òåk 
MuR-‹[L¯&ý›Y’DyE'úíM…•…?oE;T¡¥™¦×‚óíKÇÍYgˆ½æþqØy…Lu5iì3‡Óєʣ2y¼¬ ‡S¡H@qimqimU \5Ûwih¢Â°ð´è÷Ô_F´y\­ŒŒø¹Cî ×…bCЪ«ëÙìþÀUe©ŠJ6µ¸ZFú[EXI‘ÈáòJè5 F=ÿ{µj~ßvÏ}ÛH…Ûö=n_:—ÛÚŽ¼Á/nÒªíú«ðcÎÊcU)‘¥ÿÍ×·ÙÔº—Qy± @–PQ’nßÕ+™õ÷Dý$*Q°!ÌÒ÷öÃ/ÿ~?c¢‘¡9· ¶ººk²‰Š£Z™)É*ÃzÒ2Cì5u4dÕTd°Q€eåµ3Ú}iA¦eÐp8n;“R¦HÜë²`í3ìÇÖ_2˱( ÉÅ:š$ ¥g¯¾ ¾.(¬²4U&`õÂXsáíàtùv.ÓH…Ë6Ýyð¤ÍÍö|ömq4s¹ª©&=Ä^Ӥ⅛ߒ*+¯ýœYcn¬¤¡"žO­ÒÒ Õ³yù…5ÐGOVS]ÖÇÛž¾Ìnßu›ªÐcú9Y©vV_† ÔïÑgÃîHmu9q ñÏétlÔJÜšÇPÝý”¿æ}+xU«*KéhÊÉÈöoqÂápõlÞ©kI”vMRÛH…#&ä¶óµ;‘H¸tl„çÌ ìG£>ò±бñB)it+3Š©‘ÂÇ´‡Ãáò ¾Õˆí,åçLî‹´yú2[SM¶—ÆÖnýœQó“¨DĆ8îÌw3—ë /'¹q™Õ¤Å—ζ ÈÕV—[·ÔfÖÊ'Õ5ÜÜüÊð7EÞÓÍGºêï>önÛh»þ*'¯$y¸èµïºT8d܉1Ã5Ÿ´•œq3b“*`ù¼þcçs¸PÇ®ßã÷èÚ8ßÃ1;Çô5RÚëÿvœ‡Áám³V>^µÐ*=‹q/$#"°=}MUxêrì²ÙÆ_ùÒ¾\`õe÷ÀÑ^]NްæWCjœ¼ôñÀï¶22âû~wœ¿æÙÒ¹ý=Ï©¬n€ó¬\¼î%ðŠd)ív½ jV…Cl)í_™À÷·AÖ×´ÕŽú:Ï]ýlÝÛô¯åŸ¾”Ø3¬²Ší>-(€€+·ÿfoe¡ln¢ôëúãFè>Ε‘hÇ(ަ*LùR²fQ߃í\9ÇÆRuÅy÷=óœËûyÏå9Ÿ'·>5«VKg»˜2¤L¤ÚðëqZ_*<¶oöŠòઠ½ÐÜÂyœ× º}™ð ÂH_ ÿL(©©gÏ›­)¢Bá=ŒRæ[^–gE-0¢{š˜ï@¿b³ù5sÌû¡Bá=¹Ïjê%YE,(¯z’ BxË©ÄȆÊå‹1ecÊÆºDo„¨ɦS—_kߊ Òˆ ×}úçíç¢Tˆ°Ñ}32ÆÚÌ×c*”ÆHÝ}ØðN´È Tˆ‘švãÆÚÆ×CJj‚$¨ÃízË©Ä8S...®ªªª¨¨ðòòb·¿úý'§©#ÏVB˜1MëiamßQ!‡ÛU\Æì—òî=lpßHuÛ8¬Ú$%‚’B_*D(.cr¸]¹Ï¿?{çÊgÊÊâá)ÈÉJOŸ¦Õ/@S3·æ%›Åîô Ȩ¨î8utÕ\*e¬MîFú;;»úvuÁ3FÜXuæB‘ƒÝTï]ˤ$æa„ ªâ(“TÅN…_ú$;¸x›©ÄȆPXXH£-f³š“oì07#uÑCNnU_*Dèµ¹9Ö–N;o *Dîo>ºMkcÃ@TˆÐks¬ÍÍíýR¡áÑEG~~âþ)õŒßÇR˜ cmï€ÞOo#ÕHb›)Sá(ãÍ©pØg!Fï þš÷KHá{K…c]²×ClcC E·¼¼b¬‹ó¦ÐT—éK…víù÷§Åx[rˆ ›š¹‹>ú³{ø¾z£ŠÅsÕû¥B8è÷0*þ-¤X9Ì@T˜–U·…ž<Ö¾)$A…oùAlcÃòòŠÅsÕœìßQþ‡Ò6¯3îK…P]Ç€_ŽX9ÓQGd,£¢†Ó/@ÍKv·¶®Ó77•H`q¡¼ê•ÿo϶»Nï—  •Õib€oü_Ià[ÿÌ•Ëtú¥BhãðÀ—>KùÿÎ}Û˜R_ù~R!ˆ×Ãf޹†04m¿àu [8JxÙ‘ %;¥ ~Ãी†F¨Çr_¢¼êUEMÙà÷¬ú`ò þ†l6ïU['QE^ìq3ÞY¹õþ¿=üž©STi@PßÀQÇ [c…Ó!ù¯½g‹‹Éàþ†Í-œŽNÁØú$FÅÿ,–ï"ÂhjØÏ¥Xþ±Ì9Êpáùmûî!•”œü†·ß•LŒ&‹åá Ö\^°æ²Åòpt~†Íæ…_þ¹àч@ Øy ÑpáùeÎQË?„²ÏQ1F¢Ú2ú¸™PªgºÌ9j-lÇ_Èø’rfÒC1¼Ò£‰†F·®Ï¶Xæ…Œçu D•áß!¼£T£Æ†E%-ÇOgåÜþ$?qcYÚfV[g`ȸx½ ºö­öGí…mÞ÷¼=©E)®E)®wY8l‰€Â’æÈX Æ';.ǽl┥mÎOܘsû“SçrøÕ7?¦ [µeôÁfó\¶ßI¸êŸ¸±2s³á•~©pë^Yþ󦱶nhøîdšÕ¤ÒG›ó7®r½Õúª£±…sâ·Çcm×ñîR!ŒšjCS —@!(É€Œ¬”ß«ÊVjfmäŸ%÷S«ÕÕæÎÒ8š÷BUëå6sU35³öafMv^Ã'?8õ{vBJŸ×mg­»ûó™RRRÉiÕ§Cžp¸üUL®É9àeÁfóü3“Ó«ŒõÕ¼=©º:J’(HM=[sbÏ‚#š„r¹ü=‡<-jÙy ñ—£Ö²ëNçTÕ±V.›‚LuÜzÓÉ^ïüµ‚ cK+ªY¿]È+­d™«ýz|Is çÈÉGùÏ›ì¬uÛØ¼[f¨ªÈGýŹRn.&:W×Ääª*cÑY(%h…‚<&øR£¼}ËžÛ?¦U±?þú85«FX™7âKZY¼KÑÛ®¶rìtFFN½AaÓÇFÈÈ?"ž^ºþ\—¬„¦ç啯üNg7-œK‹ˆt_´sø ¢$RRR»?3œ×PTÒt>°rÒnŸL¿^xþZ FÚõcãµË š[8¿üþ„ÛÑUT¼ú¿/FEß*ib¶/œK>ðÅ,“_М]^õjç–wTürÔZ œ<›WØ(éÐÂì°˜Ù3AŽ ¶ëêú!•QÞî¸õæµs+ M'‚²Ë«^YÎÖD•y4àÑ]¥ß#žõž¯ˆ—9ü$· É`ŠÝjj¬†ÄÉSÕPghhLTXnM1ÔSA¯Ls ×q…þçLÅ"ß ï4¨ çÎÒ01 R,ÿØýÍý› ¥Šx™yTÍ™¦êvÖºÛ>5›iª~ä䣜üÆËAî÷œóÉö¸ü‚¦†¦ö¯Ètq˜z."¿¸´õzðÊó§lâ Ò×WÕ´9o;´kî9ÿe—có`Ëž;jªØ¸‹ëV,s‰âIfŒsÒg1íãŽ[o_Ê+)g®²ÕÃb1û=çPg¨³Û2¿ iû-ú¶ÙQÁöŒ$Ê_™[Ðô—%8o;ê=ÿîÊ$EøŒ~Oo²rôï+åä¤}Nætt âËNË9óý’ߎ/ýå÷' $²M¿nåÔ¬¼ú™„ú!55³ÖÄPÍPOå#{C]-¹cÞV“u”׺Åê’cþXõá]šã56›WQÃúélæ7^–KèlÛŸ8Í@íÞUÇC»->Ú–ÀfónÄ—\ºþüRà‡{±œ­qí/Ùâ|Ðù\Y)eeYeeYe%¹ßÂò¾ñ²ÔÔÀéê(y{RÃcž;a£“Ñl3éFþfßÙ)È-h”‘–jeqS³j­,4Zjãp2‡¼þÑ‘γ^@îàñ-fiÜyP.‰RÀl3ÊŒ-^[gVײ–­‹b³yjªXYiM ÜÍ»åΫ U{·SÃÿ,G±6¬5¶˜IR'ÊçÝýÄÄPíiQS+«³±™‹lþòóY8œÌ ›Év4 ¸öÃz>™Åî,.cÚ.Ö½9"}ò¾øP!ŒÚL9'¿AQQÉâ{n6OzX¹Ù+¡ôÑfá ¬vá­SUcB§†s ·|yÇdªÚ¹ZHæ·õU§ò?7+âe Ó…ÃN¸ÛÃóçh”°b/Es '· ‘6_gírƒµË ü¾¶škñ÷£Z…žjd²¸ÓH=¢4ÔXíQ#¹\þgô{u íK¬Èx¦ÓEš(+œ°¨(ˋŜ×ÐØÜ³•a9["§TÌ1×@‘ÔxY xäæñ¯Ç—ô´»S‹ô¯ûE[¹Ã¡wÈÛ¥GÒÍ%/žG–•‘·KIñß-Ýv¯•ÅŠHÓݧñ»Ä?T/*iiçðÍMÕÝ>™îöÉô’r¦áÂË…746såÿÑ‹SWU`±ø0I»Ç ¢ö•˶[¤‰¸Å–ÚHV®Ã›¨Öó_YÙV» ñï*ŒÌ„IÚøÉ“$²ü‚»VÙêÙZSl­)Ž[oFÝ*±YØ£øÝÊâ %Óed¥&‘d{–r ŽÎHH®°Y8 |!› ÿì°«)Ë@[{gq9SØ"«l)b´ÿ¿A…0jl˜™[wçAeø;ôòUddÿŸ;¾‰îyI‹ÅLä>k41ú×ok÷·É‡#Õ¶˜¸`:U-04)-ç>k¢*¶ƒß½wÛäõt>W¨Š.^¬r½•w×-JÊËKU±Œðºº`ªž ²r ç˜)‹>ý\+ý÷ ê/†I¡¥µ‰H7·påí0I?U_ÅãS3HN«îHd¾UPXÒ,t[Ñ"áêzÄ™»ºº'ë(å¶ ùq@ü¨Ú}ãôŠšžo\.ÿS¯¤¦¼Mª*ò\.ßmï}0›¦–žS'š$MÄ)È÷ˆH£ˆŠxñ¯VT³¾õO¿{ÅqQEƒÀHK±Ûy@1ñE-öeå¿4Ô#ˆ>îæñgTÕ»¿¹S_û=D:ÒÏKšÑmì'£Ùfá× µ4ÄÔ°/°XÌVúÝj ¨µH iÀHK±Ù|0ÒSJ¦WÕ´Õ4vŠ*r§?®¿ŸZ÷‰””TVn}'¯ ÏUË~úÒb&‰×)ø;£v·ûL]² Á½ü‚¦gŒfqÿŸ¡B56ָܰr 
cÉG×V.›ÒÆæ…\.øùÈ"Ð%+ýtö±¶&þÄ7 ·|ygߎ9/Ê™gÖøÐ-ï§V¡gçÏÑ‹*hçðî<¨hlioeñÖ¯šëꯣ¥týVñÔ)ªX,æø×¹Çºo˜þ8ÿeþó¦Ï7˜Š½ª*ò?ùÌ£.ps1ј¨ŸT¦¡Ž›;K£©¹#þAmÐù\—5SφE>‘6E—p<0#àðbÑÇͦ¿ûéÑ• v{ç¥ëÏ×.7>Äxíf±–.¯ ))­<7›[Ï'¯ø4ÆŠªs»ÇIÀÿÐB—í·¶»Î8ò$ã/I4„Ú­Úü§ó*ƒ©ú*óý¾¶Âád*ê:ŸHûz§Å©ßsvHœOÕ<ýG^ ï|QÿP-ÅÒJÖŨ"Œ4DÆ–0ʸàØ~«uî·Ü>1IÏ©ëä `££ñ‚ÕWð =ºë¾{ˆÅìÿÂhª½ ‚ôÇõ…ÅÍX9iÚ|äbÊåòï¦TNÑU26P+)g>H­žHTXj¥ƒÅbªjÚ*kX󨚼NÁ½¿+jêÙ‹æic¤¤[8&†j]]ÝO‹š ª–uãö‹[ÈÉoÈΩ¥[²`Ò ÅHûzƒc?‡^f¾Víµ¼òUzvý«6®™±:ÌÀ£ìº†&Î ›Él6ïnJE+‹·h®BŠJ|ç4e<©›ã|ÓOUUä›[8ñIå°p.yË_ypí¯²›çû—A}­Ú+›ÍKͪ-.cêh)Òæ‘Ñžom=;#§nŽ9IS÷(».· ÁÄ8w–†”” jˆ$¾[_uÄ'UtuuÙÒt+ªÛT”åÐ*GvþK- üƒô*·O¦ ‚iÕÅeLCâ âäHíõò¯K¦©õ{ÃkÕ^³rë‘Hâü9$Q-hÖb&©¶ž\‰S¦Í#£ÁlÊ£¡ÄwÒÃÊÒŠW3M'NÒÆ?}Þl9K³“×Å(e²Ú:ñ²ŸïMxô— •´7/µš4ÈθÑâ0Ë™êûvPûý½¢Ê¯©gou6E½7¿ éEóƒÅºÒRRè˜3ƒ„‰J|—”3fÔ©ä>X¤{?­jÞlÍŽN~G§àyIóT=U§ÏþúíøSc5aÏœ;SC(ÕÞ HíõYâEøQ!Œf\))©yTÍ^/‹Ya3ý­§K.|Y ¸@FV …fGÐÕQÊÉoØñuÒ¥@[8ñÛcÇå=ÇÍMÕ%ç!j@_÷!-âp2½HôÒÔXM4ýÒu×~<¸ÈdªÚÍ»åK橣έª"ïâ0¢(€oNÆfѤ^‘ËQÔVa‰„…‚xAYIîãU=uŽl¾”÷äiã¾s˜­ æh€””m¾mþÃÝ ³Í4„ñ ¤¤þí0š¸NÿV&‹-2m¾íÁ¶…–ÚÍ-œÙ¶]\ETUþùÜãÕè Ë>}ˆ¢• Ú[D_}D_TºîYYh­X:åyIsM]ÛT=è¯gŽˆ S²šƒ‚‚ÜÝÝ%]9£ƒ·"JÔPanª¾ÇÃü€ßC^—ÀÞfŠ“ý[-v?.ÿºâ×ÐÜ_Có õ—‡€üíÁ†µÆÜŽü]‡î+*bÐ8ý]„ªŠüùSÌd±øÖVÚîëÍÆÚ¢aâØ~«3¡¹ž_'MÒÆ'\uûÉK!^ޏìè(‘Åœ1Á;Ɇ€vuÇÚŠ‘ÂPOåäw‹GžÏ˜‹Åxn6íùî· -µZ?ô[M Ü‘½ó$”ù• a4Ï)cãx×ñ¦B[64µ¾p!ò_±ƒ ‘…Ç_Äãx`†„Žg T= øñÀ Ç­7"®0¦Ö^ûT~dÒ²ØCº?¿ ÉÔú‚¨UoXÕ«\o E ¡¨´uH÷ï<(Ú‹ò švH|íS" E»¢ØQYÍÒý *hŽWE]Öߤž9«\oH®ÿm*„±eç ¶ÛÞû啯Ðe+‹+ô:Íä ,!$<|9ÔG›9Qñ•gBÿÕˆ}Êx=¥vðø’+³µã»Ÿ†ªƒÇÊ`oÙs[x¢ñ «úEy«$<«~ Í«¬­×Ôµ{Ë #uðø5uí¯}ª•Åmeq%TŠgEÍ'ÿW0¤GÚ9¼ûéM?¦¼I=ó»/ʇöñxsüç©Æ|Ýp¯‡™Çþ»7ϯõGãrùÁáùi™u:ÚŠ_l5×ÔÀ…_/¼™Pfl ÚÖÖC"U5m'‚²j^²­ç“ÅuýaFíDõ‰/_™éîÓÿôèƒEº½v“V^Žap:x.«§¢=ÁFÓ™\·kÁÍÙfàlXþû•S(Š»¶ÎÄŸæÍÁlípûêÞËæ!k͛পþô¿¬};戦×ֳφåå>kš?‡´Ãu‹árù!OfÔ‰î_=Ê®;úĵ‹‚¤óÉä!G•ðÙm¾i×íÌ[.Xì¿Ý[ „^)¸ŸV¥ˆ“Ýîj†NÆ'–…Ç<לˆÇ`¤4ÔåÉ:ä>k²˜5q×Ö™¢9 ÏŠš7ìJ"ª«×Õ M¹në:ý›÷JWØLîµïŸ“ßp.<¿…ٱ†‚|ªjÚ~>÷¸¶®}튛½2ŠxÙ/Üf ÂŽï˜¯ºo˜¡Wþß—ÓmOB}Çï€Õ4CµÅŽW¯Ü`œ¿Vpx¯åtcµ³á hná,\su…ÍäX=}ÞtìTæÈ-y˜Qû¹÷ßË–-Ƴºd¥Sß-ú|o‚è|9>±l·Ïý/Üfx¹Íò>–ŸXÖÜÂY¾þ†ã ý=3C¯>€=>É%åÌŸ,¦ÍÓ±[i>ŽÌÖ·=÷Ê«?üðÃ0ÿñà¢SçrEçËl6oþÊ+Æj?ù,llæºíI€¯Ž$76sòYXQÃBá¬Üú-_Þñr›µo5>±läͨÐËËK__¨ÏÚ/›¼ÐBûðO颉ßú§ßO«:¼gž½Íd›uÑU5m©™µ{¾K¦»S—/¡]È@`·>zŠ.áô÷4@… êLš¼ÿ×C}V… {ÎÙ&¯;èT2BN~ƒÃ–ØNƼ朿V|)×)Xé=ËtâQïù—o0ÐmÁ—ò¢ã_õžÿéGÆ«7ÿ9B±‰÷„ aÌÙPZzBб¥{þ-œ/74râT¦Ï%ká7:MŸªþÇ•ü¯=-ôt «lõ6®¥@BrÕ‚9š&SÕde¥Ý7˜?“3B3Λ?_Ff˜È\ŒÔ ¢óå ‘…¾{¨™›ªÞ3/<æyRjÕÒ…:´ù:Æj>{,¹\þ©BOͤ¥'Ìš>qŠ®rÊ£š‘”¢‡ kqññÓ§GÌ](ÿë1šè|9åQµÅ,Wèê(}GŸwõVym=ûlxÑÁ]º:J_m›Ä&B.?s[oª¡®@P†njT@H…'Ož^~œ,D¢?åþxp‘®Ž’­5åS'£¸¤Ò·_ìÛ1ÇÔXm¡¥¶×VsH\è€ðzã«·ÊE™h¨RáƒûÉÊÊÊÃÈa¡¥ö'ÞGÿ¦\û«x϶™3IÆjǼ­Î†å?-jRV”sq0ÒÕQ:òUÏ>òÑ€LOײ²Ò¥K&Ç%•»ï˜ϔ@WGé‡ <ößµ¥Q ¢æ•Q^8óBQ¼u¯\E¹G‚A]M^6±K+Y?ÍB‰îGä«,¤Â¸[q;wîv>§¿§™Ù\ü`‘.º|Êh$Mœ‰þÖÖijXüšz¶ú?¢ŠxÙ–Ö:ßC SõDÕᇥB++«¤¤¤áå³ÊV/êVñ÷¿–"#+uük‹O¿ˆOxPq<0㻓i òÃù8‰— @(ÎiFn+, j·sºŸH‹O,Û²;áë3p8™Z~¶÷N|bÙ‡’^±»ÀãS³¿fÿñôfBé°uwÄH…~TÿC‡é[¿¼ŸXœü¨zùÒÉ®OûùlNøõÂ?"ž¢uÃÙfStTöø$'<¨Ø±?)åQÍ0Nqˆ‘ Ð|¹æ%>ušvê\n𥼛 ¥_|“H÷˜¥§K0Ÿ¦¾ïh Z§F|ï=ß}_ÂÍ„Òðë…ë=o+â†<¼}©Ævlsn™Ê?2…º:J£×HKIÀQïywîW0ʘsÌ'~GŸ'#+ºúöƒrƒÉ„ÄÈÕD'“xÕéÆí’Â’æ}Û©Ã; +.*\ï`,ÊbAÇ—lú¸f›i$\u¸s¿ƒ™tÍm¾¾&êVIíË6tÏËmælÓ‰O ´4Óc‡!—/.*œ¬£$ª¸³ÊV/î‚´ÉT¢Œ¬ÔÝ+×o3ʘ;·š¡ªöøÔÌH_%ÿyÓfçiŽ+ôU”å45pÉÑÅ%•–V¶ïDЏ¨¾mÖäN‘++É%\^Ãlí·O¦›³òêÕòÇ|¬¬$§¬$—ýQl eEÙÄ+޲²Rñë‡&”–4/[¬³ÒfÈ'>ÅE…³¦kLÒþ÷,¼Ï—óZh«(Ëa±˜Ì[Î7ï¾(­l½ø!Ú,þãä×ãJJ+_ò]ŒfWÙêMÒVJɨÆÊI§Ç®ª¯ÂûI…0š6o&ƒ¬f«>ÌnéK…›7o \Ãæ-ÁWGüÁ åú¥Â¤¤$kkëA4lÞ ›ÅsÕî§7õK…ÖÖÖªr%ƒhؼ%0Z†S˜ðŒÑÞ/†††ººº®aó6iØ,ž«þR!ˆwlX[×þ®„MÉj^`µ` Qá;QŠÚºöæV^OfQ!£”ù–—‰q D…MÌw _µ·óŸ—p õæ>k¨©«#D–W½€÷“ ºÅþ­þèõ«,«o)<==ÇÚ´!§ œœÜosäååµuC€——×@ýÊÆÆf¬­{Sê×ÖÖö[а°°±¶îM!‘ŽŒŒ-¼[ÛL¹°°°®®n¬›òMA¥Rû2™Ìœœœ±¶îMA&“qNNKKãr%uÚLŒÀ`0ƒ¬x–•••••µoSSS"‘Øï¿¸\nZZÚXøF ‰¦¦â×' 66Ç8Æ1Žwo…‡Í8Æ1ŽqŒ9ÆÙpãÇ8ÆÙpãÇ8ÆÙpãÇ8ÆÙpãÇ8ÆÙpãÇ8ÆÙpãÇ8FIµa÷îÝ}½šI$Rxx¸ä~´°°ððáÃbü‰øøx??¿¾é'Ož477»ý'NœHKKÃb±4mÏž=ŒxÚËÚÚºo¢Ý¾}ûÄ^  
ŽŽf2™vvv»víÂb±bÉöøñãqqq}Ó_j$8|ø0‹c]åääìÞ½»W¢···­­D"S?~\xùã?ä4þ¾a”ØÐÜÜœ@ /ÛÚÚÄ"?5ÊÊÊÄ{CCCƒF£‰¦DDD‹E ¬¸\îÂ… ñx>>®®®bÌ“@ ˆV~\\\ZZÚÁƒ%T„û÷ïÇÆÆÚÛÛK°šÞQŒþa@gooO"‘*++%ôAAAB¡P(É$22‚ƒƒ%‘yDD‹miiA—h$‰«­­Õ××§Ñh<Oì™óx<"‘èïï.“““ ´´T5æéé‰Çã³³³%‘¹°®H$’©©©«««ä~Ç{{{K.OOO'''Éåÿîb ØN§c0˜ÔÔTtÉb±èt:ú¤S©Ô„„”îìì|úôé5kÖ‰Dáãþþþ ŒŒŒ:^®¯¯"96ÌÎÎÆãñ”ÈÈHtÀ“D"{{{Ô|ÎÎÎýV5•Jõ÷÷oii‘ÐgéÝÅh³!óOmذL&ÇÅÅ1 ooo!QÒh4àïïˆ%ýýý Bdddiiipp0‹²§(Pg’644P(ÑñTBBøúú2ŒØØX‰äéé‰lÀãñööö©©©©©© eÆ ÝÝÝ¥¥¥ ###ƒJ¥:;;þ‹ÎÎÎúúúb/H¯ñ‡Ã122B¤–——gooO&“Ñø‘`DDƒÁpuuÅb±èÍ\³f‘‘Qjj*j>Ðïååå¡O‚$F=©©© F4gQZZ€Á`"""º»»]]]ÑÈ+///22ƒÁuwwÇÆÆb0˜Ó§O—––ÆÆÆ‰D”.ŠÓ§O›››óx<ɱ!‡Ã¡R©Â A^^÷ôô,((HLL422²³³ëîîNLLÄb±VVV‰‰‰ÙÙÙT*ÕÒÒ²»»›ÅbQ({{û‚‚‚¼¼<;;;”Þ x<žJ¥¢5háWmݣ̆}ÇS•••'L¡Ñhˆh4Úš5kP"‰DB½“L&ûùù ovssCý£_Hˆ y<žèxª»»ÛÎÎN”ÑÂÂÂ0LKKKHHû÷Áƒ©T*úCx3š?2 öõõÅ`0‰‰‰â-²M8nêî3=çp8áôéÓÝÝÝ œð2 ÈÎÎFˆ6eØØØ °x§ähö*:žâñxx<^”Î<<Ÿ¿{÷îÄÄDñn:effzxxx{{¯_¿^´ Eh-‹555:› +˜LæóçÏàСCÂÍnTº¾Øµk×®]» çÌ™C¡PvíÚ%–Rðù|<.´¡ªªª­­mÆŒÂÛ¨TjPPP¯R ‚´µµ@NNNUU•°EêêêÊÊÊø|¾0O—ƒJTê*000$$$66V´9¨Tªh) ++KWW·Ws .”™™ÉçóW®\)ZÀçÏŸ‹nÑèëë744 ¿Ñ„úÚµkò%xç0JlÈçó]\\ø|¾h¯èN¡F¯½Z>Ÿvvv¢Rû¹ƒ 444 ,,ìM\jmýîròù|Ñ}I´hÕ·ÒRRRnݺ%^*¬««spp ÑhGŽüNÑæèµˆ««ë mÊd2ñx<ºMÓ’’Äņ۶mËÏÏÏÈÈxíV²°Ÿ ÔVVVƒ(Ëæää0™ÌÓ§OÃ?ŒŸ””TZ:ü8Ž””äåååãã³bÅkb›£_·‰$Ú¯\]]E¿ }A$ß ÌÑÁ(±áþýûûާ-gee ]«233úS("‘ˆÇã7mÚ„RÎ;7Ð`Dèw<… ¯¯/êP™žžŽÅbõõõŸ++ íù@ffæàuˆ¾mÂ~uóæÍ´´4ÑÊõaDÎzbOUUU¹¸¸¬Y³æÛo¿M755MII^fff€‰‰É@}ÞÔÔ4$$dõêÕ¨ù˜LæþýûçÎ+zÏÅ‹7lØÐÐЀȴªªjÁà÷ £Á†wïÞõ÷÷_³fÍ“'Oz±Ã¦M›6lØàææ¬««š’’‚ÖÑúN÷õõ%“ÉK—.ÍÊÊòòòò÷÷µÊrpp úúú¡¡¡¢é ,ðöö¶±±155]·nÝÓ§O½½½ÑÆå@Y¹»»öÙg{öìáóù;wîär¹½>õ/^ŒŒŒôööNOOOOOG‰¶¶¶#JyôèѤ¤$ooÑt‰´zõj2™ìààðã?b±Øýû÷„Aâc˜ššÚÛÛoÞ¼988XOO/44ÔßßÿرcÂÈdòš5kètº††@ ,++swwKsxxxXYY566öjGGG//¯ƒ‰Ä¹sçÆÄÄ®ÅO§ÓœœLMMËËËÝÜÜœEoepôsbtÏܸqc[[F-ÈŒ3ÜÝÝCBBvîܹcÇŽºººmÛ¶¡‰QRRR¿ù¬[·Î××÷£>:vìGC^ÑfV¬XA&“]\\~ùå—¶¶6##£÷1þÉ@…µÉÈÈHÊèîîæp8t:D" ·ÉÐSÎÎÎBG+++áßþþþFFF ÆÈÈ-ðò»VVVb,È@¥@®0ÑÑÑh«N__ß××Wèa#jò9GÛØØ`±X4µéë áææÖ÷‡„ ç#Nï·h¨¶¶ÖÉÉ ­u:99 0D½²²RxÉb±¼¼¼H$·´´ì»ÅÏb±<<<RüKo´åç积¯Á`¨TªÐW‰N§ÓétaÎÎÎÂˈˆ´0§¯¯ïãã3ÈVO¯LF++«¾¥@9Ó`±X2™L§Ó…6¢›<èýB3 '''<O$׬YÓïIAA‹Åãñ’ðvzwñššw| ú$%tEXtdate:create2012-06-17T12:51:31+01:00@(V%tEXtdate:modify2012-06-17T12:51:31+01:001uî¥tEXttiff:endianlsbU·CtEXttiff:photometricRGB³ IßtEXttiff:rows-per-strip6ãV@IEND®B`‚swift-2.17.0/doc/source/admin/figures/objectstorage-zones.png0000666000175100017510000002504713236061617024276 0ustar zuulzuul00000000000000‰PNG  IHDRµ½P6¸iCCPiccxÚY çoµ×^{­µ÷^Ë€·ž3Ks27"{xz‘ñ# X`,jL”¡ƒƒ ø_ÛA­½ûe×dÿ¬±øÄP€PìçC Gq=H5Š vMžh|lÔ>„bvª ŠÏ®á ß¸i ûýÆ]ë<.NÆ(€À@¡Ð‚ M£tòj*‡‘k„HÊJF±5˜â¯/Ê#¹†÷£XÒïŸäý7™~eR(Añﵬ7‚IHLT%ñ?4ÇÿÝÂÃâþÌ!ˆv†˜Pgkô͉Ú-J1uF17ŠXÚlÐ+¢bœ6è7Cb-]Öl„âà8 × <êjˆb~/…FZ¯ñ£v‚¹#üììQÌŠbQjŒ±×o™°ZR°‹û€‰)ŠÑ(‚=h‘Nøƒcv8ÿ¡'%ÛýáßN±Zó7#Šs(´õµ :ÀÅaækónBñ…¨X—¹º#Âì6Ö¿ ¤™9mà…€˜õõ®Ïìbñ[>‹Ào™`ˆ™åo…`šÅºATØzL£cZœÓšDQáº!Éñ§˜Xÿ¶ RÌÐ@ð` 0&O2J@ŸT ÂÐN#3ýù‚}}‚}ƒ}ŠÅ>ÿËmü‡„ôý‡Ný'º3HŸP© æÏl^ŒFcƒ> Ю„ÑÀhþùÖ=}mú¯V¿u BÇÊnPŒ6´ßñÏÚû„ì¦ýË¿¿#þ]'3ðn]ê‡B”ÂÒŸñÿX1Îg‚³À™á6#û«H;ryˆÜD®2riBº[kø_f¡lX…¶¾^ktÆ·þWÄÿ¨QÜ_Ž *££*pZçE¿…üÁm]듇v?TÒvô›õß5þ±´8j]UŒFµ3jc '†ÈbTP‹bôQ¨¢Tãµñ”ë¶Ü±¾–PðÅá± ±kn•H Ž%¢§e€ Ù2‚*'CVRPTkgïï­=ë´~¦Bœ=ÿ QÑ3FÝøÎÐÂѦF ¡ŽÐÄÐÅw¥§ÆÑvü¦aÖX@DOtvÀƒž"@ÕS ¨m`L°.ÀlC­ ÂQãA2HY …à8(å  \WÀ5pÜm ô‚§`Œ‚qðÌ€`‚ Âa‘Ž™NŒN‹ÎžÎŸ.‘.î,]3]Ý8Ý"‘…(AÔ%º·Ó‰ÅÄKÄÄâ,==ý&zMzGúú4úbú:úú1ú_ ¬ R Æ [â2T2ÜexÎ0K"‘ÄI$/R,é é<é>éé'#££%£?c*ã ÆFÆ>ÆÏLtLbL†LÛ˜’˜Š˜®2õ0M3Ó1‹33S˜w1Ÿ`¾Î<Ä<ÇÂÆ¢ÈbÏβŸ¥šå!Ë$+žUœÕ”ÕŸ5“µœõ>ë[6„M„͘ʖÁv–íÛ8;Ž]‚Ý’};{.ûEönöV7ŽŽ·8F9NqNKÎ0Î<Î+œƒœ \\†\\Ù\—¸ú¸æ¹ù¸ ¸¸s¸k¹Ÿr/ðyLyByó\ãyÉ‹á•âuäç=Åû€wšO›Ê—Ãw…ï?Ì/ÅïÄ¿“¿œ¿‹N@PÀ\ Jà˜À}iANAÁí‚‚·§„Ø„ô„B„ „î} s Éaäbr+yF˜_ØB8NøŒp·ðâ&‰M®›voªÝôR„(¢!(R Ò"2#*$j+š,Z#úBŒNLC,Xì¨X»Ø¼¸„¸»ø^ñkâ“Ü–I5#’$I}ÉhÉ2É͸Í›C7ŸÜÜ+K©JKꑆ¥Õ¤C¤OJ?‘ÁÊhÊDÈ”É É2ÈÊî­‘“㔳‘Û-wMî³¼¨¼—üaùvùU…0…³ ʬŠVŠ»›¿)I)Q•N( 
(“”Í”S•›”¿ªH«¨œRy¦Ê¦j«ºWµEuYM]¦vImJ]TÝW½D}Hƒ]ÃAc¿F‡&VÓH3Uó¦æ/-5­X­+Z_´eµCµ«µ'u$ttÎê¼ÕݤKÑ=£;ªGÖóÕ;­7ª/¬OÑ/Óc bàoPa0a¸Ùp»áÃÏF F4££yc-ãã»&ˆ‰¹IŽI·)«©«éqÓWf›Ì‚ÌjÌfÌUÍwšßµÀZX[¶²°¤Zž·œ±R·J±jµf°v¶>nýÆFʆfÓl ÛZÙ±±³‹°»fì-íØ¿tpˆv¸áˆstp<áøÞIÑ)Ù©Ý™ÍÙǹÚù‡‹‘KžË°«¤kœk‹“ÛV·ónóî&îùî£ò)ž¼ž!žM^x/7¯ ¯¹-¦[ ·ŒoUÝšµuÐ[Â;Áûá6ÞmaÛnù0ùP|®úb}Ý}«}—(ö”2ÊœŸ¥_‰ß Õ˜z”úÑßÀ¿À*@7 ?`"P70?p2H7èHÐT°~pQðtˆqÈñ¯Û-¶—nŸµ­ ] s« '„û†_`hŒLˆ|%•5­]=C³¦UÄ@1Þ1M±ìh’Û'·'nl‡ÞŽ;~Æ»Å_M`IˆHèJ”JÌNœH2K:·³“º³%Y89=y,Å0åÌ.h—ß®–T‘ÔÌÔñ4ó´ªtbzhúãÝ »ówÏpÏhÎÈLË|»Ç|OMc-kh¯öÞÒ}˜}!ûº³•³e¯äøç<ÊUÈ-Ê]ÚOÝÿè€ââ«vç©å:„;qhð°þáª|–ü¤ü·Gl4 r ¾ú>,R)*=J<wt´Ø¦¸é˜è±CÇ–ŽzÂèDm IvÉüIÿ“}§ N]*(Í-]8rúÙó3eâeEå¸òåïϺm?§qî|oEnÅreDåh•SUëyõóç«ù«ójàš¸š© [/ô^4¹ØtIöÒ™ZÎÚÜ:PW÷á²ïåÁ+ÖWZ®j\½T/V_ÒÀÖÓ5&6Î\ ¾6ÚäÙôäºÕõ–fíæ†r7*o ßµ{údÐuðÙÐÖ¡ÑgþÏ&Ÿ‡=ÿúbÇ‹Åá´ìHÎKæ—E¯ø_•½ÞüºvTmôÖ˜ÉX×ç7Ão©o?¾‹y·4žùžô¾hBhâü¤ÒäÍ)³©Þ[>ŒŒú¸8õ‰åSÉgÉÏõ_ ¾tÍxÌŒ¥}]ý¶–g¶ò»Ê÷–9‡¹W?Â,ÎçüäùYõKãWû‚ûÂÄbü~©xyóróŠõÊÈjøêj…FYO´Ã|«DëOØz 2þ®6‚&ðzÆÀt@4hBïtw¨æ„!|È L4V Ç„§'ðÓYsè'H[ß1g°ª³MsÔr%ð˜ò!ü÷È|µ"ò¢•â >’'6?‘ÆÈ(Ën“;$ߪ° $§ì§rDõÚ Í­ZÙÚ:“zœú†a†…F·§LÙÌôÌ©,¬žYOØŒÛNØMÙO8|pœqšs^pnxwFvOn/á-â[¥½å¶ÉùÈúÊR¤üĨBþ<,Ä ô#x&äãöñÐ×a/‡"žFDõEÒFbÆb§âæâá¦Dr’òNËd¿”¤]GRëÒî§?ÚÝ“1ùbÏXÖÄÞ/ûæ³Wr±û™p$çIR;l”osÄ£À¿0¦(íèÁâ’c5ǯŸx\2zòW)Çiå3.eqå%gÛÎ}«ÜTåx~wu}ÍÔE¡K.µÙuw/Ï_•®÷nHm,¼VÛÔuýmóòM®[ ·­îÞ͸WÚrýþ`ëlk»|‡ýÈG9•]¿ï!ôŠ=1èsëØý4°t賆ç7^´ ·t¼ì|õøuïhÿØó7co§Þ½o~Ÿ=á:)8ù~ªæCØGéSÓ¥Ÿœ>c>×}ñø²:SúUÿëÈ·øY–ÙªïúßçBæ–~äÍ Ï×ýÔÿÙýËû×ôBâ"~ñè’ÄRÓ²õòèJÜ*a5cuu=md2hV: vȺóÇv¤ã„æF“¸|áQ‰~à c23#Ë ¶P2çkî¼&|SéB€œ"<ŠÆ@ X¦øQ‰2ÉêÍõR-Ò2“r@žMARÑHÉU9Zå€j¹Ú-õ³Z Ú¢:šºözAú©G Ï5?66í3k6?c‘ceånmb#gËe‡Ø}±vèplp*w.pÉts pwó°ðÔö’Û"¼•ÛÞ{uÛ¸Ïßs”L??ªž?ŸÿÏ€¡À† ‚à!^ÛuCEÂHa¿ÂßG EvEݾMk޹ÛwyÇéøÜ„„Ä$Ï6ɺ)Ê»$SùÓØÓ‰»áÝ‹³™Ó{Þd=Ûûd_{ö휫¹UûËÔ¼™÷èÐÈá™#˜¾BÅ"›£¡Å¹Ç.Q‚;©v*´ôÜé7e"å!gëÏ-WšVEŸßW]^s÷³‹³µÌu2—­¯„]=Tߨ0r Û¤xÝ·ùж›K·Ш8}o¨e¥Uð~›o{fÇù‡Ý¾v <6ëŽê9ÓÛ×õ«D>­|÷LäyÀ‹Êáᑯ/^C£¸1âÆ·¤w ãÄ÷˜÷Ës“§^}èûxºñSÅçâ/Ù3 _­¿ÁßgC¿ 6wà‡ÑŸhþâÿÕ·³h¸øséòrÈ yeh5rÝÿ0Z1p¡`V1GÁ#ÙA¥hv O!‰nÌcl.NH¦+!3H“3Î3³ ³³—sÌqisGòòVò•ñç Ä: ©‘ÙÈ_„{6]9$+æ,®"Á!1+9´¹IªP:VÆ]VSŽCn^¾_áªb¡R¼²§Š†ªP{©~NÃG“Só‘V šŸ¼Ö9¨«­;¡wD__Ü`¿¡²ás£TcQã“SvÓf3Š9ù oK’e›U–µ ɦÛö€=Áþ¾Ã.G5ǯN5Î.‚h®PìæêÎéþÌã¤g€—¬×¯-­[s¼M¼—¶5úDûÊø~ œ÷ ¦JR§ükÂå¿5'…h…,m¿º+L?l9üfDR¤Fä|TSôNšA >¦'¶8·r<&~0¡&1=Éc§R2)y*¥}×¹Ô¬4jºÉn± \ÆtæÐž{Yµ{OïËÏÎÎÉÌMߟ~ ý`fÞÞCy‡ç?rº ¼°¢¨úèÕâcßOp”¨ô:•Zzötû™Ùrþ³fçâ+ª*‡Î“ªMkv]h¾ø«V».åò«„z—†òÆM×ko°ÜLº5ugëݧûý\Û^vPNwÆ>Ý{{™ž÷KÜÜ2´ø¼j8à¥âkìè§7Ç¡ é©ÄcŸ#¿J~‡çÙ¼—{Öüÿûdk §ÀQ7ÜWp~@®âðÀ€‹&€G$ì’ £Ä¿÷3ZeêW´ÆNhõxô€7`"@|d¹ õàN(*‡š ÇÐè'ÌKÀú°LƒÀÕp<Ð!ÒˆœF#Ë9 S„Æ«ƒÃÖb?â$qA¸JÜ^‰oÀ/Ì ‡ #tRtItDAb ñ!=™>‘¾ŸA­…>‘ìIõŒŒ¥LìLyÌ ÌûX°,{Ðú%‡‰­ˆÌ~‘Cƒã!çί\9ÜÜ­<¼ ¼õ|>ü$þ[hK ¾*";“„;7åŠX‰2Š>Ëw—x#Y±9BJMjYúÌYW9²Ü´|£Â>Å %ke%^UXõ‹Ú°z§Æ Í:­sÚÇu uêåègì1Ì6Ê5>dR`ZbVÆíË«aë6?í{&>G1'eg]SW7w_0Ïx¯Ì-y[K¼k¶ÝðyìûšòJï/`˜t=øûv¹Ðè°«á?"õ£öF÷ÆðÆúÇ]‰Ç$¸&V%-$;¥œßµ’¶%ýFfÊžW{uöUæ0çîÝ¿t0>oþpÚ¦‚Ò"Í£ÃÇ’Nl*é;•vZéÌDyÉ9‡J¤ª©:çBÅ¥w—å¯f6¼oòn»•yWû>Kò«Ë´goß»A«çý/}Çèß=˜Ìšöœ™ý<a1týüàJÀ¡Þ? .àøa nHõ½+´Úª ÛÐô†`nX­óàtø$| ~ ¯ µ¼œDz0Œ.&­Í¿c•±1Øz´öÖÆ¥ãÚñ,x/üYü7‚!áaŒN•.‡nŒ¨M,"~£w ¯c`gH`xE2G}®ÌxI›©•Ùžù9KËOÖl6A¶zv[öIŽ NaÎ{\þÜtܵ<î¼0o-Ÿ7?ÿ=}‚ŽB‚B_È­Â%›vˆ8‰*‰±ˆÍˆ÷K4JmN’Ú*­+#(³"ûJ®Eþ êéT¥då*ñª jñêÉÉšZ¹Ú‡uNéVé5é·Œþ0&™H›ZšE˜Z4[öZ½²žµÅÙñØË9;z8Ñœ÷¸»^t»åÞíñÊó“×ÂV¬7Ó6n²¯8EÚOŽªæ¯`¹îåÙí²¡‘auá3‘ªQIÑ7i«±†q;:èm“òvö¤°írHÍKëßÍ•á•Y²çÍ^í}e9˜ÜèýCõóª³æg™*t)j)V8Vp|¥$ìd©Îé³e å1gŸWèUž9OWuAòâ›Ú²Ë!WUëWŸ554¹™r;ü®oË–VŸ6jíQJWawUo[ßäÓ†gùѯlÆÔ߉L? 
ÓK_¾û9÷ýçÊëºÿ7@ù <“ ñCjè ¥BÅШ zð¬{ÃiðY¸žC6!®ÈA¤ ÄqÆœÀ¼Ç*aÓ±½8Q\®/‡ÏÆ Ø®ÐñÐí¡ûJ¤è-èo3h2\!)’]—˜ª™}Y¸Y†YËØ¢Ø-8Ä8 è^ãîçyÄÛÊw—¿I Lp¯P,9H˜²‰"$'–!^,qY²có¸4á“U‘³—VÈVlWfPqT=¦6ª!¥¯Õ¦Ã¥¦÷À@Ð0ÃhÒÄÍ´ÝÜÐ⎕±u›­¥Ý#Çng{—.7+÷6Oc¯{[u¼¯û¨û6ú©Poè¶Û‡< µ ëŒ0lÖ¡ÕÅŠÄÆÓ'¤$~Ûé—ür—SjgºíîÞL·=/þ¾¢Õ ^Ô¸t¬ö×e+ õt > M„ëîÍe7>ÝÒ¼vçþ=b‹íýüÖ§mœí>g¾í”ì¢=®ï^éµzRÔ÷v@îiìàõgÐs£{†Û^¯Ô_Žæ]Cë’•qž÷òF“ÖSŽ?ÚM[}Òü,þ…ôen¦ÿkÝ·ÌYw4ƒø0W÷#f^eþÓÏŠ_î ¸…úEÏÅ•¥òeÓåÉU‘5ÿÇ*+­_ƒZL¼Z]ŸÀòáÕÕŲÕÕår´ØànØïß]ÖïfJ¶¬¡».­iÿúûÇh‰Ñ´Þî>ÜPLTEÿÿÿ”””ßßßûûûHHH×××   XXXPPP¿¿¿TTT``` ¤¤¤g^4æÒ|ãÏzMF&´¤`Å´j 5/ÜÈw„wE` Ø*1Õ(0H ¬¬¬ýæŠÿê‹ßÌy  íØ¢“W î-6ñ.7»ªdœŠpÛÄÄfI7qh·$m—¤Ð\ÐBK ÖÁX°{ì`Ù}oüûýtù'ÇŠeüÌ"úÞ烪åù=%ý$W²JJ²ÇËN”8õ4!éé89áåb¥¯x%NÒ5gˆYÏ\S„ö_…Ôÿ«-ÐÎDÊð¡ê×b%Ñ^‡•ôõÅè¿(í¿!;ÚÎzSÁñæ·˜ÊfU¿µð¤o{»I¡½ã…g}—Y¡ýîBã=ïÍl_Bê?»Ú9kà¬sÏ+4Î= Î_ºl^uÁ9Ï»àB(Ô8Úû Ïú€µ‚G»èâ㢳aÝÍí_‚ÒÿÚìjxh—^f”S5+Úõ¡´Úú§ÕÐÐ>r9rûJÿWdUCDsjxh½ÒPCD»êjC mÃå¸í«ý,›&š¡†ˆæ6Ô0Ñ܆"𡆉æÎ¦†Š¦«a¢j¨h†&š®†Š–M MSCEÓÕpÑt5T4M -‹2šª†‹¦©!¡]"©hš.šª†Œ¶\ MQCC+UŠVÕ°ÑT5d4E m™:WÃCÛè6ÔÐÑ5l4®†Ž–©†ÆÔ®ÁFãjøh\ ©á´/¢e¨©}«á£1µO £15”öMhfµb 15t4®ŸÄFcjŸBGcjøh&µ¢ ]åÁGsà °ÑÜŸÁßÒ6|ösE@ÕŠƒ¶¾h?_ ´K¿P´MÅ@Ô„VZ´óƒ–V#4ç j„æ 4]М„¦©ÚÅÅB+ÍDSÕ í¢b¡mtgSch^ûÅ‚ãºL4¸®àœ×.Cƒ/^é——¡}å«_+8¾ž†Ð~fÿ¢Îu`Bs_”ÔTô7°.LѾy>RRm÷°*͆¦¨”y \/&¾üIýP.&½|IË®’^ P7‚€¶¡¥ýÌþÍj¹ð¨ÊTó"$õ fµ*„¤ejA„Øœ¡†Ñ~fÿ¤Fj©CצƢnh˜Ô§&«×Ý’šÓÔXÔ¶ šãÔÌAjé 5RË^5©‘©‘©YUMj¤Fj¤FjVU“©‘©‘šUÕ¤Fj¤Fj¤fU5©‘©‘©YUMj¤Fj¤FjVU“©‘©‘šUÕ¤Fj'©šxA/–ZÂô7$µ†—¸Z²Ü„bƒ`W­UKš«“/€kÔ¦š~Zk®:«j àVRÛ­÷¿;·Óž;·ÙS³Û¾Ü¨Ü©Õ˜·ZB¹›²>Käzy0e[mDÉ:c+gl‹E¶W‡a§=µq%iŒç¨sÊ;ýÝöÔö(÷ˆîÝ·Onµï€M5µýèÎQ©®áßð;’·šñ&Ïö\/ÐÖ-Em«)1‹æXú`’M£öÔ”hϽ©5•±ÉTÊžšîÚwwn´­o´©¦DuÏpC®R§g2~Z¹òæÔ–4=+ËýÉp ʪæ\Ô iËÔF«òRKÌ÷°—‰ÇÀ?Çð\£Óà ±™‰ "Ú:PvŽ‘iûjÛ%N<ÙŸ‚.ö»‘.¨á‹4ÎC*¤¯#uì¿îXj»à6½÷¬»ëp0xßýGöÁ!6s`×QØÿ€¾Ð±ƒVä¡–ðÅØa@¼“÷ß ¸j½à‹³þXÿnm!ÿP<`¢ÍC­QÙ”ÝÒ̬{Ú½þ¦Ú±aYõŒúRIu¡î”Z¿Ä¿ ´uh¢1ÆÞið?Ôœ„‡äºX¤y6”¾qrDJÚV›ŒùMƒÏ7^=£r•Ç·8Ø!Xs³µ¾ˆ±\s«§Ï¾Ú=Àïì½mݶ‡9tðX°bߣßÝzüP0ø½ã÷ìÞ»î1m©Ç¿ÌG­Wâ«f8UXŒ5±z’Í0Ä:PúW`Úþ!Ù²ÿÔfSÞ8™y6Mô„eo›I‚\­¼eåþxÚ)µ$Ôó5ÎÃ&;`»¬ qÃ3rkª­]éoÒm°¬:C-^æ¯V^y–¯5l¦™í‰ QöõòíµåüÒ¼°ËÏ¡öľM¸ÊØôÄÑ'ƒÇÙÌ!xžâÛáqu©½ûOä£Ö ílÚ&u²é"¸ʆ ÉS~Þ¿KñvÀÌìÈŒZöo­–èrñÑEv)ïdGDözU¡*³¡²&^ˆÚ RVzþ+ ¶Ž_÷YæeüŸ×—«vÍOZ¯kj-’r$2¦ìýÚa  ØÎ0Uu0ͳB§±dÃŒd”CíÄw>Íÿiº|AjÏ[ €GË*—µöJ}²¡_ð´L¹bj-ã õÛRë„5眖Bå¾XuZ­:Ö5×âéUœˆù¦z!, åX«Uèßóȉ÷?ù“}0Ô‚{aSeź§‚ùª e´ï‰´v°·ÐP‹O§xÿê,IÍõˆc„Mµ*ý<#rC_¨e‘­ûUüFøf$šs§SŽ˜NpWPk3ìÃ6´Ð7•xæøhK(Ygzñ*˪EµZ='½vŽ…ªÓ â©';CÝ‹úv;Y>3&|Þ²¢ÚfýÁB»ƒ?}lÛÏøÞr3ÆÐ|¼ûȶ]OˆP[m©‰íO¨ý·™û×9Þš©OØè3[j/<ó™¿)HÔ²WMj¤Fj¤FjVU“©‘©‘šUÕ¤Fj¤Fj¤fUõKEmÒn©¤fÄÿ_­µfj–Ô§Ñ¡:Rsœ€Ô±˜ë IRCU;|of< ñsøE­u„ôëÒS-µ+–Jj¨jÛ Žþ,–òŽMH UmײgÚ„Ë…8j\t‘-~™F‹…GV(•ÔPÕ0Æ5ÏÌx|ÅRImÕ©yÛsÞ¸´ÊÕf‡²þø¥«æë°QêjVãnrLjõa·½RW­Zâò´“JÍv¬RµÚ–”:0“šSÔ”'£é盡lƒh‡¥Úm…Å­ë…¤ëA»í"O“šeÔ§ÐNU‘ÃÆ­ô'­šœ–ô7Êßš-\póÕBÜM­…G`‹t @eFœ µc ¥qÍqj,FæjHÍqj²ŸñšÓÔlWMj¤Fj¤FjVU“©‘©‘šUÕ¤Fj¤Fj¤fU5©‘©‘©YUMj¤Fj¤FjVU“©‘©‘šUÕ¤Fj¤Fj¤fU5©‘šc՞¯HÍaj í׿!5g©©h¤æ(5 Ôœ¤¦£‘šƒÔ 4RsŽZÍAjÑ“\M@+ŽÚBÔžMCí·ÅP»«j"ZQÔž³ñLï|«fEAmñwEP[ÂiYÿi´b¨=øj¬èßã«m,|µ%¤ö3ûЊ ÆÐþ€­ÆŠ^óGt5†ö'tµ%¬ö3úÑðÕÚŸÏ@Vãhçü[¡ýõoØjKhí›û7¡¡«q´d5­[£ýý4dµ%¼öMý›Ñ°Õ4d5 [MA+AV[Bl_ì? 
YMEÃUÓÐÕT4dµ%Ìö…þ3ÑpÕ44T5 WMCÃU[Bm?Ýÿ24T5 SÍ@CUÓÑPÕ–pÛ7ú_ކ©f !ª¥Ñ0Õ 4Lµ%äöõþ³ !ª¥ÑðÔ4Dµ4¢ÚvûZÿÙÐðÔ44µhxjÿL£á©-¡·¯öŸ Mm­P5šÚZ Omm O ¿}¥ÿìhL­9Px$MU³²Ë’–ˆÆÔ’I›Y¥i4®¶!î/BûJÿ×”X¨!…P5+'D4¦†W[½í›ûã_H/ðo1éNAÉyŠ©èÿ"Uú¼€VrúóHY‹Ð>œj…ö?]o×Gû*%tEXtdate:create2012-06-17T12:34:06+01:00œns%tEXtdate:modify2012-06-17T12:34:06+01:00tÁÖÏtEXttiff:endianlsbU·CtEXttiff:photometricRGB³ IßtEXttiff:rows-per-strip6ãV@IEND®B`‚swift-2.17.0/doc/source/admin/figures/objectstorage-usecase.png0000666000175100017510000017122513236061617024570 0ustar zuulzuul00000000000000‰PNG  IHDR[½4°B%ÔiCCPiccxÚYyTá·ÞßpΩÓ9ÍóxÒ¨y5Ïó<’:NƒæSiJ‰ %DJ¡Ð$sR!¡"d(¤"D÷ÃoºwÝuïþã[{=ïÞÏ~ö~×·¾õ~/€h#=!!åˆKfyÚYÒüi¤ç@^`.à¡3’,ÜÝá´ïèW¥'$ÄÀÿÍx˜I ÄBÃ’±H#ÖÊH`%b@659!€°øYþ„ãÀñËoþÐ_~ð³¼=­Ãl:@ÚzFD2'€ÈÀO š2"éa¢!  ºCÿ‰'â_8CÿrÒéý_½›uTRB =ýÿ8ŽÿÝbcRþÔJR´— i º »"™οñ“ É–ž¿ñËQÉÞÀ€ D¦ØûüöÇS¢},@YˆŽwò *ê꼨,#É*ð'ª—éí÷;Æ9Œimܨ?+ÞóO|dÒz¯?xFF¤•ëŸøutGwà@ è,€_Ð2fŒ'¬@O'$»{ÿ®Õãú»ôm8ËÖó·?ÇL²ñúS+9ÒÛþ?ÆÌòöüʼn‰‡GÙ:üÒ€iD²ìÿàæ 1îοr1oVЧÈ`áÌ8ŸßœXAÝÚé×L°r°:°€ ¡‹@g°ëßO0!hÀ€xˆx`Ѹþ¬Þ^† OÿF[ý‰ƒ(ƒø¿8ãŸp/È€LHúS ÅMqcÜ7ÅÍqS\ 7À ÿ¬õN¶LþUõKk0Aõ7bù[ýúVµ…õo9¡3þS“-¼Dü‰Ð¨Ó˜ÐXø“ÿމ6Dk¢=Ñ–¸ÛŽ]Àº°Ømì2Ö4ìÖŠõ`W°–ÿ˜ý÷TXÀ„$p‚`B °€ qÿ­¢”¿¿QN%N]ð&ÄA4Ä@Ôß ¾ðXõ,)@ƒPˆ‡uN{ü3iy\ ×Å-qÜ7.ˆ‹‚*®ƒà¸nŒëâ†ÿ´‹ÿÚ*„X°˜ÑðX›ÌLK°ŠOHgEED&Ó,b˜*4‡8†š MKCS üi¿^íiO@¼ÿŒ1 `¸€t÷Xl:@€T÷?09€Ø €ºƒŒÖú_@2p?ˆ€$È€"¨‚è1˜ƒ 8‚xC¬DB,° 2!ò vC „ ¨‚8ç¡.à 脻ðÁ Ã(|€)øó‚*‡ˆ Rˆ¢Œh!ˆ)bƒ8#žH‚D qH ’‰ä …H1r9†œBÎ!‘Èmä!òA&¯ÈŠ¡”•@åQuÔµ@Po4@Ñ 4Ý…–¡•èi´½ÞE¡ÃètŒĤ1U̳Âܰ@,ca›°¬«ÄÎbmXÖ c“ØOœˆóá4\7Æíqœ'â›ðøA¼oÆ;ð~|ŸÂ—T‚8A™`Dp ø"©„¡Ya1asa¦pp½ð#á9šˆH´È‘‘¢¸¨’¨‡hªèÑ[¢“bübÆb ±±óbÏÄQq%qOñ âUâ=â3’v $nJLJ JšK®“Ü'yUrBŠOÊT*JjŸÔ5©÷4š-†VFë MI‹KÛK§H“_¡°ÂgÅ–õ+^Èe dÂeöÉ´ËLÉJɺÈfÊÖÉ>“c—3‹”Û/×%7+¯ ï'¿M¾E~\AXÁA!C¡Ná¹"UÑL1Q±Rq`%q¥ÁÊ蕇W>PB•t•"•)ÝWF•õ”£”+?T!¨ªÄ©Tª ªRT-T׫֩ލ ª9«mQkQû¤.«¨¾G½K}ICW#Fã¸Æ&¯¦£æÍ6ͯZJZ ­CZÚTm[íÍÚ­Ú_t”u˜:GtžèòéºènÓm×]ÔÓ×cé՛ЗÕÑ/×4à7p7ØaÐmH0´4ÜlxÙð§‘žQ²Ñy£ÏƪÆÑƵÆã«V1W_õÆd… Ýä˜É°)Í4Äô¨é°™´ݬÒìµ¹Œy˜ùIó1‹•ë,N[|²Ô°dY6YÎZYm´ºnYÛYX÷ÚðÚøØ´yi»Â6¶ÎvÊN×nƒÝu{‚½“ýûA †Ã)‡)G}ÇŽN'/§ƒN¯•œYÎm.¨‹£Ë^—ç®r®q®-nàæà¶×í…»‚{¢û%¢‡»Ç!wžšž™ž]^|^Á^µ^ß½-½‹¼‡|}R|Ú}¹|×øžòõ³ö+ööW÷ßè7@4 * 5èx2pfµÍê’Õ£kt×ä­y¤”t{­èÚ˜µW‚¹‚éÁB!~!µ! t7z%}&Ô!´[S;†ýN‡&Ç'NcΣ.c®ncîï=¦ EMK-íDºUzWôŸLQŒeWš¸™F˜m6ßkq²ÍêžõMŸm›Ý1û‡G?'kg5!WÌõ³Û{·G“g•×>ï\Ÿ_¦Ÿ¯¿}€q Újé5AAËkGƒo…œ ç††2LÃÄÂ~0Û"öE® \g-Cùû.n0¾'áfâUV[RKrkʹõGS ÓÒÒ£268gšlÔÞ¤¸Y<‹?›¼Ý2Ÿ3;¹õuÞ“m·wå_-¸PX³£rgÝ®ËEwv?ß3µß'V¢Yê¼?º¬ðÀ¹ƒÏʉ‡õŽDWœ8úºR¦*êxã‰Åj›šÄSÛk«ê®Ÿ~rfºž»AåœÓù˜ »››ž·Z5/†´í¾ÔyyáªÆµðëGo ¶/uHÞ2ë éÊí>u»÷Η‰{¶½ ÷=èëCúõâÕ<~ûDæ)óYõÐÐó//æ^!ÃÄòkÎ7Ô·”Qò;üÝâØÌø‡‰—ïû>ÜœlþxòSÙçü©´/N_ѯÍÓÑߤ¿=™ÙùÝòûÙ†á?ÅöÍÌ[ÌÿX8·µD[\Ž_^È *`Ѱî DÄ©@æÐhtKÇ…ñ{„}Ä$R,[&{9yˆ¢LÝÃ9ËÉ3ÄgÅ_%0#d,/R"Z-V)^$‘$é%¥Gã£}–¾¿â¬ÌnÙd9/y…iÅÁ•­J%ÊÉ*~ª†jj³êý4K´Rµt t¥ô@ï…þ ƒ`CAÃ;FU_­Úebl2fº×ÌÌlÔ|‡…¶ÅSËÍV²VÝÖq6ü6m¶t;v»óöAT‡NÇ<'ggªs¯ËNWg76·›î›<ô<¾xÖy1½%½‡|Ê|}üýžø`ªþ\ݱ¦ È:hamspbˆJÈ{ú©ÐH†"c"¬Ž®þ5¢)2#Ê(jaÝÕèM1f1‹±—ã2â âgZ7°Ì“HI÷“ËRÂÖk§â©ÓêÒ³3ü7heR3'6vm:±9/‹‘m½E.‡˜3™;¸õF^ý¶£Û‹óó r ³wdïÌÞ•[´mwÑž=Å÷ÝWUr²´vÿ…²ß ”ë<²¹âøÑ®cÓUâÇmO¤ž¬©‰ÄjâI…Oj"-²Ù±ía{ήĞÁ~—,IN"ßæ q¤sôS4)»(©nÔFNÎ .~®"n ÷vÏV^o_)?ÿŒ€ÀmÁÕ‚_„ „„;DÂE)¢bÁâTñ+I’Š’/¤JiîÒTé»+ ee9eÊËû)H(¼V<¹2NIOiQù–ÊNU5šÚ¤z³ÆvÍ-'m-Q]T÷³Þþ]ƒK† F'Œ®*1ÙeZ`–g¾Õ"ß²Ðj·õ>›rÛ“vçí¯9ÜwrúàüÃsãróóÔö2ñ¶ñq÷õ÷ ñ H Ì]]´¦<¨ní¥à{!¯è_arL«ðôˆ‹‘ßÖ©E'Æ\ˆýo–°-ñA’hrXÊùT<Í'½&c.Ósã©MKY«³/åˆçnÜúrÛªíÕÜ…Ûv,ìJ-šÝ“µ—k_E©áþ¡‡V”÷É:ªul¬ªü„{5VÓZ[púäÙ·çÔ/ä6½k j¹’{Ýø&O'v[¨Çæþ¶¾·Ÿö¿áx{kÈ:d²©A®"È{A…QMÔ e¢Ùèaô ú]¤1{,;ŒÝÇqÜOÛñomB¡‘ðhLÌ&v‘xH¤ã¤¯ll»ÙFØuÙ ØGÈÆäRòWwŽ ?%ò’jGmäÔælá2æêàvã~ÊÁóƒ7ŸO’¯‘ß…\ GPZð†P˜0»p½ˆŸ(*Z/$N¿!±]ÒCJRê3­Cº|ÅzOY-9¹)ù~…fÅÒ•Jk”MT$U–T_ªµ«ÓØ®¹Y+S{ƒNªnš^ª~¦A¦aŽQ¡ñžUGLjL[Í:ÍŸ[|·¢Z+Û8ØÆÙ•Ø·9œ \µÜ!<ÂNsÏ]éΜÅçóýŸd/Û'JݸqÓÕÕUÔÏ eàÓëF`0£cß½»vÂiúý®ž^]ÃZ¾%ìÞóÂ+¾WfÍš%²ç„‚òc€ëÕÔ †»ûèœoO27•ïF 2Ò¤›»ï9önöìÙ€J¥Wé]#ÌšõÓ‡„¸÷ϧt²¦Ð. 
fç:˜ç3OII ­>  ô½Xk8qâÄš5k¢N´µTîyj|>æÏAaï˜))ŸÐ†F”^¢·ŒPXXhhd°ã“M+† *Íê–žÝÍ)Óæ\¸pAxOåGÓó$ÚeûöírR°záÐv÷ÖÖ5vp.§‰Ïfs¿Þ.#M:{ÐáâÅ‹iiiBP((?½b„ÂÂÂk~×ön´&Û¶SddטºN3¹<|Ì­ÂbÆ×ç^¾•BÔ;OÒ»°ý7_ïõ§;ÄXòÈ‘#¢~n((“^1Âõë׉xÌ´qz_ïúeGçŽhhhÚz¨íw>¯ nÁúˆe³ô÷¬º÷¯¤°¨‚¶ÙÅ`VÎ7óó»Æ`0EÐôŠîßó_ Ê‹à`jj*))ñ.±´—rÌçóo=ʵ··ï݃‚òCÒ+=cÇzú?þÜK9ŽûPÞÀ⸻»·ÞÈãqymæñÔ»tp ™@¦7ÔaÅšŸn •—”?üxo«• J'éu#€‰‰ÉÄ ïÜ Ùy8´žÁ2Ñ—‘” t| ŸÏüâóÌe/|ïçü¶å·K—.+jšà”˜›Ûô‡ÓT :8×LmðOö>qª…޵Y • JçÞÚÐ\.÷âÅ‹;vn//«?Juš§¾õPEmu©Ö+DW×°>¦V†GõûTZÑ4aÂøC‡þh½`üÓÃÓ >{Ž•RÖëäuÿ 9¹íöÆ¥n¿ì™~Hˆ¥_"<# °ÙìG]»v54,”ÙÀ]MŠ8Ó,Ö\ZÞTSךêjÓfxÏ›7ÏÔÔ´Íé6ãæÚ¡x"ÕkžØÙ&CT ((DØFhÍf'&&¦§§çææ"[¨TªžžÞ!C´´´:8±:?ùî&[Ý^n«.wþr¨PP:ƒÈŒÐÒï¿<>Ûåç &£vþ,T ((ßE-‹GNk0»®*Ö¯Æ0ªŒJ'ÏBQP¾K¿4¨›Îÿ,5䲡ãl¼xg{4[¤À‡f{CÇ®^”ËåæççÓét:Îf³;+Õ=Ä{’raa!@Àáþe›N§ÇÄİÙltYm”ïÐÜo©¯,¸è£ôh·ËéÒ‰ç‚ÿ’_L<üWW¯˜““ÓúÑáp8//¯’’ÞÔ®]»œœœz’‚––VhhèT}½’’’žžÞ®]»˜U”I/ÆPïm¨²j£×ú~ Ž»»¯K'.q]µwÆÛnoü'äd7®›““ƒ<»„„„ÒÒÒiÓ¦‰úI|‡´´46›ššºsçNQ祯#ø\„‰š™«Í̽Q~›l4ÍÝ;â×U°íöÆ–¿»©©éÍ›7µµµCBB\]]Ùlö¹sç©TêÒ¥KMMMq8îéÞ½{\.Y™.((zGZZZ\\œ’’ÒºuëÚé###¯_¿Îf³---—.]ŠT"##oß¾Í`0h4ÚÒ¥K‘Á¹¹¹'Nœ Óé³gÏn“ÉÜÜÜÓ§OÀ¾}ûæÍ›wåÊ{{{???77·Y³f=|øÍf#ÿ€°°0HOO‰‰ÑÓÓÛ´iÓõë×ÃÂÂôôôV¯^Ý%”>E?.# ›ºIÛzÒó#Þuå¹]:±‡%555KK˘˜3fŒ¿¿ÿ¸qãäää†YYYÙÒL³yóæÍ›7#ïÚµ‹Íf?|øÐËË+++ËÉÉ),,lñâÅ­S ?~¼––Ö¸qã|}}çÌ™‘‘‘ãÇ722òòò*--uvv€ÊÊJ[[[7nÜ®]»JKÿá’H$*))áp8---"‘¸k×®•+W"[®\¹âíímnnîææ¶k×®åË—@xxøÌ™3“““ííí/\¸0tèÐÈÈH{{{__ߣGŠô£F ¢®¶€FFÍ•åz7~5ç6±ºz.Ò¦ÐɃ‘v„–Z‚““ÓÊ•+CCC©Tj}}=²qýúõ8®¦¦&''ÇÈȈF£åää”””‰D‹åãããææ†ÿüùs%%¥æVí–––GŽAö–””àp¸¤¤¤ÐÐPdcEE”””9rÄÞÞ¾õ‘mÚBCCµ´´¿À××ù[MMÍÏÏù;!!‡Ã•””ìÚµËÜÜÙ¸mÛ6SSSäï]»v7NÈŸ,Šðé÷e Phc7Ü­+É ?ßåò?RRèI1Íf+))¥¤¤˜šš¶ªíííÓÒÒääälll‚‚‚‚‚‚œœœœœœÂÂ<<<ˆD"¨©©!Ç‹‹‹·ùmOLL´°°@þVRRÒÒÒŠwrrÂáp«V­rpp066F®žœœliiÙrdKšßBSSètzaa¡µµ5²ÑÜÜ©Â@ËPQ׺"Ã`0zããCéS #€œ¶¹ãâSŸ^^H{u¥«ç.q]ÕíºNONN677'‰l6»e{Ë—gÒ¤IÁÁÁÁÁÁnnnÁÁÁOž<™4iÒwS&‰<¯å¿H£Ã¾}û–-[fddtìØ±„„d‡k}éN‚(©Mž‘¦Š6=—(?Ä`ä2Ïtô²Ð³Ë*s»zn÷ ÆâÅ‹ÕÔÔÜÝÝíìì’““³²²]Ož²óç@XX2œ¹Çe!êA“½KjˆïÉIâÛ½Ó;?oúáÇT*ÕÞÞÞÛÛ[KKKKK ì¼k×.Ÿ.]Ô××·“¢}||p8\BBB7Îmnn†¯Fd£  ðß#—yÅi‘áçWÊi‘Ó6ïêé§°råÊãÇ/\¸¸\î˜1cvïÞ}ùòe!L@ž9sfBBBKg' JO°í-¸ü|~扤‡»F=Ù7¾§/q]Uþ –:hVX¶lÙ²eËÆŒsúôéÜÜÜ—/_^¾|vïÞ=þ|˜?¾´´ôõë×ÓÒÒÆŒƒDU¸xñ¢ººzXXX\\ÜðáÃÛ,i$--}ïÞ½¬¬¬U«V :”ËmÛ&2{öì3gÎHHH´(·oß–——¿wï^ZZÚòåËõõõ‘P·ׯ_ONNžÿ£;ƒÚ•‚—Ë]µj2‘Íf=zÔÔÔô»“޼½½O:Å`0¸\î¾}m£Ezyy={I399YZZ™ Ñ.NNNË–-;{ö,ò_$òrndddrr²‡‡Ç­[·*++‰×®®®111ˆJØlöðáÿΠÊÃïkhIRÎcãÝû["/þ:rI7G}ÝA£Ñž={6gÎyyy%%¥ÊÊJ==½»wï~7©M›6¥¥¥ÉËËS©T6{W¯^©¯¯odd”˜˜xüøñ¯k­9pàÀ“'O¿·nÝ:mÚ4ccc--­äää .èééééé#u¤Ö`nn~üøñQ£F™››#³¤V¯^-ê Etˆº!C¤¼¸pr¤…ùõ$‘v333CCC‘6E„šššŠŠŠæææŠŠŠššd#‹Å*((hnnŽŽŽ®¨¨¨¯¯g±XÑÑÑT*µ¹¹¹¾¾¾uÄ÷ÌÌÌèèè–s[SQQ$Þúr­ÏMMMŽŽn‰õ†““Íáp 8Nˉ¡¡¡èð”8÷±3ŸœŸå?íP´Œ†i·éùDÉ-[¶ÄÄÄœ9s†ËånÙ²…F£]»vMÔÏ凿5þ³É–ÃfÌü3¡ókLM¥À`0¶oß nnn[·nEc– ˆ–ÔPWž{síP5S'ÏÍz’ÎÉçG÷Ü߆†T@ Ø™NßEœB“×úöú6ž¤lbßít¬õFÔ±ê?ދΞFü¸Fš’^3{÷÷áÓwô$—A£P)  ~¬Þǯî½³43¦^J¡ujñ·Ø3ýôxÙ8‘óC—@ ƒÑ6öÙ¡©FNsÅ0=¯…–P?º/N;³´±¡Vs˜G“B¥€ÒßA@’R|{c›´š±l†'  R@é×ô­y 'Nœ˜9sfë-÷îÝsppøÖñ111ìí 3gμwïž©ûR#ŸW§Uç `Úߞ釖ºý‚†iDéô-#Ðéô6S Faá7§*²Ùìöv†ÒÒRd:ãâ“’ÊzÏþ˜Æa `½ST (ý”~Óו•UZZJ$MMM'NœØ&d(›Í¾}ûvnn®»»{ËÆG¥¥¥Ñh´3f s{¸\n```rrrëõñDêØ womþòäü±¾?Ié» ½(ý‘¾UFè€7oÞÌŸ?ùòåD"ñàÁƒHT²*++‡êççÇår׬Y3mÚ4.—Ë`0†zëÖ-633C&Ï™3gÛ¶m\.wþüù­£ƒH)ëúÅ÷s”ÂãÉ3ZR@éwô›2TVVÆÆÆÒh´¥K—jkk‡„„`±Xd×Ñ£Gµ´´ž={ëÖ­ÓÖÖ RTT´··?þ<p¹\eeedÕÓ'OžÐh4äÈ֗бšh1ys´ßfECkãîdl-) ô/ú“lll$4ÍÒÒ2&&ÆÎÎÙ3nÜ—Eh4š““S\\ÜÎ;µ´´nß¾—˜˜ˆ„ zûö­¥¥%’•Jµ´´lsëY{Ê?Ç=ÿcÚŒc =¶„€J¥Ñ·j T*içk¡õ %m:þ/ÄÅÅikkûûû«©©íÙ³§¥¡ÍÛ>,ÎmÍ5 ÷âèÌn‡ZjZ}@é/ô-#XXX$''çææ¶l kù‹‹C±ÙìäääÖ?簾¦qqqÈß\.7..NOOïÊ•+wïÞ]½zµžž^eeeUU•……E\\R^@Žü:š’ÇÆ»%©‘Ñ×¶êÖP) ô úV­ÁÉÉÉÍÍmøðá³gÏÆáp‘‘‘¹¹¹HCTVVNž<ÙËËËÏÏÏÔÔÔÕÕ5,, 
ÙµnÝ:[[ÛÅ‹ÛØØøùù©©©Í˜1ƒËå®_¿þÊ•+8îìÙ³H0Ò©S§:99?ÞÇÇçáÇmŠ$-(ØØ/8þúüJ%#]›©¹;´ú€Ò÷ésc§OŸ®­­]XXÈd2ÝÜÜÎ;‡”ö?|øP]]½hÑ¢¨¨(77·£Gb0––FÚfÏž™™™‘‘áááqôèQ`nn®©©C§ÓwïÞíàà@£Ñôôô¦OŸÎãñÞ¿?a„)S¦¶»P¢¢¾Umivü½ƒº¶SH’]X¸±Ð(}œ~1åÊ•+¾¾¾BÎa3ü7ـס螄ZjÃö;›Îÿ…YAéƒô­v„¾žH»ùA]ynèéÅLmS@é³ô¹Z÷ÐÓÓ›0a‚ðÃ%d¤Õ ßÞÜ!N‘V2´T²hõ¥oÒoŒ@ D•TFÍ„Ãnˆ½û»ºùhª¬š ’E¥€Òq;Baaaëjjj_+èl6»²²²e©ÅÂçqíU[’5íH¬@†-µ€¶) ô-D»\„–––œœœ–––––•JÅáp^^^-ËŠô„ÐÐPdýeAÁ¨)¹è£ôp§+€ìµfÛí¯:‚"4Do__ß–ÿÆÆÆÊÉÉ={¶ç)×ÔÔDGG 6·EŸ"NOÅE]Ù,ðç€J¥Ð·F(YZZÚØØ$&&"ÿ |øð!Lš4©eæ2²‘J¥.\¸0$$dõêÕaaayyyȺÉt:ýĉ;wî¤Óéoß¾ERËËËËÊÊÊÊÊÚ³g‘H<}útZZšœœÜŠ+´´´YÄÕÈÈhÅŠ´V¨ÛÛÍ;qi¢‘ŽÕDÞ8:x ¥Ð·zÃÂÂ"##œœàâÅ‹‹-²´´trrZ¹rå‰'àÞ½{‹-²··777?~üñãÇ <<Ü××IN§#m¥¹¹¹ÈÞ>øøø„……±Ùl"‘8f̘°°°I“&áp8‡ÜÜ\dáV//¯°°°ùóçwœÃ!ãWØ{¿<6»¶$K°÷ŽvI¢ô D[DA~¢[ÐÒÒ:xð ²KIIéáÇÈßÏŸ?—““knn¶´´:::))©¹¹¹¤¤$88ø»ynbÕßøÅôÆ/¦M¬úïÜUÐêŠh}­áÈ‘#S§N¥Óé«V­ÒÒÒZ·n––– ˆÑék€˜™™mØ°áØ±cHäáÇ[w"}“<¯ûw‹Ã]¾|¹e ‘H€’’’ÀÀ@??¿S§N|wàƒŒ†©ËŠ AG½•íMÝ— ö R@!}¨ANNîÂ… Ç SRR222zóæ Ò1ùáÇ9sæàp8¤­‘?ÔÔÔÙÍÞÁ%q8’ìöíÛoß¾)''·páÂÇÓéôN†rÕ·Ÿ1dÜš×çW–eÇuæø.¶) ˆ Ñ—Zãéééååµxñ⤤¤S§NMš4)11QNNÎÏÏ© 8pÀÁÁ Á†%<==׬Y3jÔ(999$’â·°··Ÿ={¶­­­——WrrrVVÖáÇi4š³³³½½}XX˜‡‡‡‘‘Q'skçs¸<;îÙÉ3þLÔäÈÐ’ŠHñ˜Å˜˜--­Ö“‘étzbb¢©©©œœ\iiiHH—Ëuttliƒ¤ÓéAAA8ŽF£-Z´iJDŽ$‰žžž111NNNt:=--ÍÆÆ¦´´´´´ÔÜܼõEÓÓÓ©Tª§§'RkxôèNWSSsuuíRþ襷*«i:~û3 VðzEG4¢Q7mvJì…I¡§§âbü¶õRúhïŠ0é[µ†.A$5m¡'¨š:ÙÎ>øæÊzE#- Ïž'Ø´ú€"LúMÄ”>ÎÓÃÓ >{Ž•RÖëôÑêŠp@ 8lÆÍµCñDª×þF[j *!Їâ#p¹ÜgÏž½|ù233SEE…L&‹:G]‹#¨›¹$<8\W‘§c=©7.ÆS@}ÅÈr,‘‘‘ðêÕ«uëÖIKK[YY‰:_]€$¥ !¯ùîæª¬º¼î°Þ¸*”^GÔM›ÍÍÍÍrrr-3š››CCC‰D¢¿¿¿¨³ÖeÂÏ­ü{±4+¶÷.ö> ô}Â{÷îµ±±i³qóæÍæææÍÍ;¾¾-²HHHh™Ôìä䤧§çíí™™‰ì]³fͶmÛLMM#""œœœƒ<ˆbX³f¿¿ÿ¤I“ŒŒŒ6oÞ\PPàååµy³ÀB𸜻m|—h1k+zR@é%úÄ(fä»Ýf£»»{bb"ƒÁÈÍÍMKKC6ÒétdÕ–ÈÈÈI“&Íž=ûñãÇZZZt:N§ûúúæææ®\¹RKK+,,¬etsZZ²TTbbâš5k–-[vêÔ©ãÇ5ÊÇÇçìÙ³AAA¹ ç±ñ.¯‰ýòøA-÷5è0g”^¢OŒG ÓéÈØÁÖ ƒ¿5*ùôéÓ>>> .€<|øðúõëƒ ¢Óé'OžDzýË–-swwOOOssóääddcϡʪ^ë°Û#îî>+ï½ôÐÐq (½AŸ0‚’’RËy ééé8NN®ýù¥¥¥­‹–––ˆ;äää:Ö´×Ô2‹á[˽u53W›™{£ü6+Ùhš F4_ƒJEàô‰Zƒ‡‡Ç“'O Ì ™ÔøäÉ''§oML&‰­‹•••Ȭ§ÖÇ·.w´YZ ›ºIÛzÒó#Þuå¹½w´ú€"Xú„–.]J$gΜI§Ó+++92|øð³gÏ8pˆDbZZò•¾wïrŠ››Û­[·’EZZZXXØ×“”´´´òòò ²²288Xø÷5jÕe’¤\àÁÉ<»÷®‚JE€ô #‰Ä—/_²ÙleeåQ£F! 
ŠrrroÞ¼a³ÙóæÍ+,,433333k)¬^½ÚÔÔT__ßÙÙÙÖÖöàÁƒ66m\BZlmm[Ï} m솻u%Yáç{·HJE`ˆº³ã?”””„††¦¦¦677çääøøø”””477³X¬ˆˆˆÌÌL‡“““Ór|fffDDDMM ò_‹UPPÐ:Á‚‚‚ÐÐÐúúúššä°’’’úúú–Ë!—””´$"pRC|ON‚Ôßž'Õ1h—$JÏAç5ƒ°3ËSC}§Š–Ó6ïÕ ¡sPzjaÀçqïm¶eÖUÎü3@¡õêµP) ô„>ÑŽ0àÁ`qc6?à°ÏÎìík¡m (=¡¯ÌtðÈ’ z±w~oæ7«š:õêµÐ Q(Ý­5´´´ý>Ã-—߇²^¿Ñ+a„¯ gó3D}ç}€]»víÜÙ[ãG}bÌâ@ZZšãH{롪Õœ¥NMŠN^ØÞñ<75À)eõ2CYEQ?ÑÐÀlZºÞŸÑÀEfµ |ÔÂàÿ:»ýÏ<œÿÖ¾müìgSÖí¬X¯^w"˜™½>ô&ôÖO³ítEý„ ‹É™±ä Ÿ êJ$Qç¥ß€¶,ö:­u@"ãñ$qÏŸ×Wä7DÜ{ „«¯r¹ÉÎÙû†_dZ¶¨Ÿ„PAtž}õ/G]Qg§ß€¡wi£d£ŒŠ¢›ÏO‚£3ㄇP ­u`b€ê   FèEÚÕ‚¾åP3«W×nT÷~ã&T=5BoѦN‘U¥<ýç‡Õ(„üü R@uÐCP#ô ßÕ`ðXÅ¿66ÀËk¾ÂÉÕ€—ªƒžƒAðtFTi)÷ÅKs²^† 'oX ¨jÓy ¨èÙLôˆºÿ´8ë³pr8 ¥€ê@P F$]Õ‚Å(-3ígçÎ0éõÂÉ瓪‚A`¤¥¥9:Ž´6—í’°bnóæÄáù…“ÀÒ ò#T‚5‚`@t`5XööyŸ®éÄ)¤1ËÖ–åÖD>|$´<) :8¨@‹î\˜Û  È©)»Ìù)1èMVüG¡å¼_KÕAo€¡§D†ÖCM-B®^£—V-ÿýT ¨z Ô=B€:@9}š´þÉÙ?x¡ÝE¿“B—tÀbó ŠjEå~á_BBB ;|IIÉÁƒì,ý/Έ5µ7ß«nbä±x¡0oÿd?™%Ébr¦-ô}™}í/§Îè`Í®×>±·ü¶UYY¹÷r5dÈ‘Äû8¨¾pâĉ5kÖté FÌm¤öCßEÔBaFÖÃ#çfŒâ:R˜¡ïKÅäx-¸ñùÆ)çÎèà—aÑñÕ|~¯gÌÍÍíåË—¢~<ðìÝ4jëºÑ9>7¿ÚyÒ_f&ª·ÿéNÏÂwQ3г™<:Òÿ±¼¦šŠžŽÐžÃ*÷‘à}ïoJÅäÌXìû:ª³:X»+"1¥6þÅjóÁj»B—Éͯvœp2¿˜!üEÃz ´Œ &&Öwtð^óã3§* ò½·í"IP„ù4úfIÑAhD–ï‰Îê .©:2`eoë@G"/'^Ñ *¤¡è½ Ú²'Nœès:¬Øèù‹0Xºð·Ð†-!ôÁ†Æ¾¬ƒ£»ìÈ$¬¨ŸÀøÑ€Ôúœ@œBòüymQFyTÀ!?–>%…>®qàè~p#´4tæ`!ëANMÙeîŒ÷Ï^NLòÃé#R@u d~\#t©5Q$:@0¶µ4u´xyùŠ0‡-!ˆ\ ¨„Ïj„þ¢¯©ÒJø§ÿuØ‚¥€ê@$üˆFè†äe%E¥ÀŠãÇ,ÞÐPÁׯ ÿê"‘ªQñá{:x~{™¨t€ !'í¾dqfLÊǰHdK~ªð–j²PˆËÝÖŒL¯¯¿ô]4Œ ¬&¸FÜ~T˜–ýäô©§gÎó9<¡]]hR@u Z~ #ôk XqWÖ•øël^R— ™™Â¼ú*÷‘Ë,l{U ¨DÎb„ ÈLH,Í®>4óA ÙñÂX¦5[Çšk6¬—¤€ê /ðCa`è º¸,ÔïŸȼf¤¿{/äáŒbX±}SÆõ†Pô¾†@FEqö®ºÃ ÄxPœ“#älô†úªNik’(À€7€Ñ™&1fñâñkRh_>ºÌ¸xágC°Rè³:ÐÕ$ÿ¹Ãþ‡Ò l# 0´ ib4÷÷ýæ£ìYïÞ ¹â€Ð"…_üï7°›ºN_ÖÁ‘?VéaÀÆG膪èì3‡g|ίúœ_%ê좮…©¤Úç×·Cƒã$•D’‡Éºf#µSÓº¿˜í£:èS L#t5ʦßr `ÌÌK¢Î{×ÙËw*»ÿ#-bð8±›§]Pô ºªàr9³&jýºt¨¨óþcqëQúÃç¹}D€êžº¡e%²Å`EQgÿÇ":¾ä»ÇM¹ùÕ¨`€¡Û:èU8Müú†FiR÷N¯¨dÉËý{nIYò‡¢< ƒi¿a¸º†%AÇú}³±0uà8áÔƒ‹£~pÀ@êkè›:€i‹×¿êöéÎÓü[ÿWÅâÚÊ߬{a4òê†=¯Û=eñúWÓ„LAàYºšdT0`Ê}Vß‚Óį¬a)+þRµº†ÅãAKqàëZ¸wÑø|¾Í¸Û©™UÆú²PQÉ"0R’âm.)k¤(|ë-rÒ¤–Du ZŠ0µuÐ:6…á |Ùa'ä{ì› #ô;DÄ­ûýµ¥Ê»Äòs‡\L ¥¬}‰Ãb +·æåÍ©e•Ì)‹ÛYªÔ7|3DJ=ƒƒÇáä¤ÉÕ5¬Y«žéÈ—7ÈHÏrAÈ+¨›·&h¸¹brz•“­Ú¦ÃM¯9X©Àóм໓u5iË7½ª®e€•¹âºeÛÿˆÎø\#A!àq˜Óœþ¾ò1âmqN~í?¸™›Ê íùˆDh¡ß¡ßé®ùꎗš 59µê·Co|rwÔœ9Ùf¯xþ1­âÖ£ô=lݵ“+bÞ·m~3u¾)™ gèÉʈ§g׬[bá6RT‡]Ý®ˆ+wRgN2|’{æJ/VÏø7b‚ž–ÔÉKŠoýº««H€¬4±ÅÉÊ¥››Ê!Û·®±ºÿ43%½jêÂ@¨¢3m-UÀÌXV˜¥?ê *¶äÆ£¼Ÿ'ÌÕ{ôc#ô_40yl6—HÄUT²´Õ%"bŠn>ʸyz …‚Ÿà¸â2†š µ¢’õ­D0Ì #™ü¢ú 7“ÙlÞ­3c°8 éçÿI³Gb†™)lÿÕªªº±Mó$‚Š"%îC9ò÷Ù«U)c\4o É©U²2Ä{O3qXáµ=÷S,Ùüfò”)ÇŽÚƒêUú«ú—Ò?WŸºœˆü=z¤æ–U+·†M£{éæ§õË,%¨„Òò†©•oK“>U55ñW-0_·ûõ†å–žµ3I*¯°îQÐçg×'EÅ–Æ}(‹ûXöôU® WVñE"r2¤O™Uoß—Ý È(kÏ,öVª¿‹ÕÑ”¬­o *¼r|ô¡Óï5ì<ýôÚda>"!ë@‚ ÔÁ­›·p¸þúUjC¿\åMà:˜ês^G‡ü< œêVä»â–ÿ3STS¡Æ,Ë+¬3Ж15–€ÄäŠÏùô!ƒ¾Ôt5iÙ5‰)•Cäsòê6„€ /ŽPU¦éÊ aQÕt¶ƒ•Z~q†ŠdV.ÝP—F“žßØÄs´Uûø©r¸¹bä»b{+"WXÌ`²8ºÒ œ°èBVÌÅNOÀ44p¢ãKêN¶j2Ò¤Œì2 TpzƒS—O\øàë)|œÿÙ&%Þ“¤¤ ?¡7J½j”oáöYT}‡~6B©UP¾K›‹ê OÑŸî§WuPRÊŒÿØýY½(Ý ¯°®¤¬©®ÕA¢ßÜR¯ê‡Ã_”qýQ®¨ï²Ë8j€º4\ÿ¢ªü‰a YZ ªºyy 2ö5ªƒ¾DÿhGèíʃÑXÏ` *µ»>¬ÞþxË S'­–Ù5«vDÙ[iýst‘$˜—©²  üâ]ssÁw†‡GeÏZqËÃYyýâ¡XŒ²‘^߸ñ÷ˆz&\[¢¢,‰l\÷ @†Dœj„{÷î-]ºÕA |/åõV-ÜÚ4ë}šçŠ¥4%OFì¶@ +¶ÊѾå¿CåUÆ™ôcî¨zH_4½{÷f̘¾{½+ªƒrSR9,€fê«è‚½‹žèaÂP3¢$léZXGÿÙïncW°™üþSBuÐc°»víuþªƒvysïNmE-ü¿_('1…"MSÐPHâ=×`±bâ€b:x¾£“ŽÞ±×ar‹= m®ªзzQ´ “^iãï_þ#d*ØzÍ0¶²A4æ Dͼæ–þ…”‚’é—}mÔ´þž=MœÐë_*T‚¢ÕÁ·ˆ}ÿ9`ƒ«ñ£‡8Ä“ÐW ÁaéÞ§ŒÑØñ«Uuð5B“ªÒWŒ€ê ®nÝPWƒF³7ŽL“T²Áa飽/NõPß¹ÖZà:@‚P–>aDÃL·®%ê¼|ŸÌìŠ ¿?kWXŒØÙ£Ó(d‚/×X]Vûé…ôàqxIi&[^Q¿lãécÔwüÚVÉ ‡wŒÑ×L_FA}Íñä×T…Ŧ61Yâqù¿n$€©«ÿ®è#FÐÔTËÏ/u.ºÀú%& fþ§wmÍÎ×/^÷ÊD) Ô÷N›½§³òÁ­öØVACn>ÌØsâƒÀ/„Q1 ï#¿7îBEïÿϘžë *¶äÙ;Å\ÐGF1‹5óOî±Y9ß\Ôé\nÕÎj¨·{Ããò×/5éG9ú $ýo !¯ÙH—œ>WÔ¹ë,ö¼ŽO¬ˆ–l~ó# Rþ.?úý£ôw°¸žFUŠ-Y¼)RIYùèÑ£ZZZªªªZZZjj½8³/Ópètleõ—ðÃLfL4øÖªg}– 7’Ò³k¿5Õ$çOÔz©•þBXTA`Hò7M‚8oºIïb8Q±%K6Ešé*5TùóàžrzCË.M5UM MDzzz†††ÞýØ×üÓŽîp“%ñxÍ]H¬­oZ6w°¨3Õ5ž…äO£kb Ÿ^±õ•ïqwQgªË$§W€÷DCÈÈ®q™îŸòjn¿Xu2*¶dñæ7 
'ZŸØ4IÅbsʪëJ*è¥Uu¥5ùÅUyù|Y5Oœ@ÐÓÑÑ72222B¡§§§¤¤$ê[ýØ0ØDYmé³S¾Æ8kÅ'•EÄ{ºi2”½t3%§ n´£æô úA¡¹\^³§›vjfUxt‘ df,ƒ¬vèt,²º‰H01FÖ¤ÖP‘4}£¶®ñæÃôªj¶¢iöã 7“SÒª‡ ’[èmš•WY°r¾ym]ã©KFÚ¨2Ywg-¸i1D^W“&ª»ÐT“DîÂb°â¾¿b+kXWý?i¨H¼O.?¼}äÍi¯cŠÕ”©Ëç™a±˜#gßïXcÅÁá3ñÎ#Ô?¦V,úÉ Þ%”66òl3ó»|ÑÁ«‰ˆ×R‘ÕRig•Š‚ÒšÂ²êÜ’ª¬¼ò¬üÜ@ÿ˜¤¬Rd…LÖÕÕ504422255544455í§Mý2Ó-”U0€Éæ\¼•ìAÿïý®–cîžÞ?Ò@WZ[]jÕö°ß×Û>Êž7ÝH„·pàT¬,L"bõeþÚëØØÄ×P"L«×d>à„Á`~]2T׿ÊßìNíwrœz×ÝQ9@Ižœ‘]S\Æç¦+ÚRúÍGéï“*ÀÄPæþ…qHƒÎ’Ùfòr¤½'ÞÙî %)îî¬uäìûŠJÖÚÅcç<äpy/oNÅ`0óg˜…å;P«c4 §˜Ó ´ ‹A Ãô[6²Øœ¬‚ò´Ü’´Ï¥3S>y@g4€¤¤„©©é°aFFFæææDb/êþM‰:=âÒÑÑmVO–¦‹Áˆ_nM«¡7@cW‚Š-¯l<3c‚axLáûi¯ïOëò…ÇÑ#‘ò6BIY…ò%çT,òÕÂ`0 x¼f[^õ¥=uÞt£»3K+–Îqʲ¹f_w·¬(-NøÒ€Ã‹56ñx|>‡Ëc²¸Mž€ñòÔ_µ=¬±‰7wª±²*(| o¦¯j¦ÿoݧ¼º>=·ôsaEBjABä“3ÿÍãóq8œ‰±ñHGGKKK SSS!Ü{g¥ ß¾}SYU  )À”5T%Þ½/Ë+¨ÓT—¼âŸ2ÊQ½º†õë®ð¸gÞs rµ×07•ÿi²ÁòͯmU‰Ä>ªE{+å°¨§êAÙž®š|>ñÆ—WNŒ¾õ(ýšÚ/£ÑŽšþóÚ wuÆ.áb¯~+ }ÓŠáÙ5E %òžcïæM3!Šc7ï<¹ÏYS]’Ãå]½ûÉÿ¼gog¦·uÐ. 2 2ÃôçMàòøŸ *>d$¦D†œ:u $%%,-‡1ÂÒÒÒÎÎNNNN8yûarssƒ‚‚ÂÂÂ^…—WT€¡º¼8ž×¤œG¨‰ÿ·¨,A!êJ‘ˆ{|eÂÖƒQUµ,'[µ%³M/ÝJ9¸Å^Y‘rz¿óùIƒMdõe™,.ÒB.*†™ÉKPþ3äYœ€qñ¥Âyj¯ó–oý§§IûsçÈ×1Ež®ÚƒteVïŸ0Z[JR|¸¹"rË"DC¥©-w±l¶Ù¾¿b§. $±÷/Œ+(bÐk·ÿj…Á`Þ'—§fVëËÎjü:¦XJR0Ó·¾…Htð58,Æ@KÑ@KqÚ(K`±9I™EIY…o“s}Ïýµwohª«9»º999¹»» ¹#C£˜KKKÃÃß?œWPˆÅ`\­ôFÙ˜ ÖW57R§I‡zïZ>ß@Èc«kX3–={y{J—™ÚÙ¿öظ٫·Þ¸jkØP3y!Yäóùžs]?é¬.Ý%9úM£3{ê„èw/ýþ³,áYÜþGôX-[K实¸aÏëÄäê¿ö:|÷È>¢ƒïR^]Ÿ”QŸ–÷<êÓ›Ä\00Ðsqqóððptt¤Ñh½Þ*#0Œààà/žgdd€¹–ÏSçáSÍ 5HDÉŽ+Y¼!øÌÑf£'TT²ì&Ý^»dX7tЧpšzwè …nè óô€‚Œ„«±«ñFzf㻟_¿Ï|öäìÙ³0ØÌl¬§§“““££c/5L Ø•••÷ïßzþ¼±©ÉLGqÊH³?»Z›iÓ$ÈBx ÄÖR99tލsÑ#äåH‘>¢Î…»×»-»ýHm ‹#vØýó„òêú·IŸ#²î\;wðàA ™çŸ{ûtfK'bu kù–P.§¹ŠÎܶÚÚm¤FËGƒÃ‹9X«À¾ãï"ãŠÀÍ^cÝ2 AÝŧŒê•¿ p´†&Ažì2t²ËÐ*:ãÉë—F¹¹ÝÒTW[µzÍÂ… »ÔCÑY#$&&:tÈßߟJÄ®þÉù§±ÖíÎ>k[ØMº=n”6ò_6›»x}HbðOR’â—o¥œ¹’dl ­®*qà7»ŠJ–šÕUØy4ú÷ ¶¶–ÊÙ5«w†=ó›ÜÃ<ðxœZ3̬›aÈ<œµÞ%”]¹“:ßûK\¦µ»_Ÿ9èb¬/›šYµv÷ë³\>Ëzuw*¸L»gý’l,•VÎ7g³¹ƒÝü5×ðCJ§©›rÓՒܲrøæ}oÎúÒƒsì|¢‡“æ|ïAµuæn7’CgÿüÛ«„ Y þ×áW’™[ƒ<ÿ© Ǫ1ÄØŠOÕsW…þ4vø¢ƒÖÈÒ¨ó&Œ˜7aħÏÅ×ßþ¶yÓÖß~›¿`Áºuëôôô:“Â÷˜˜¸sç΀€3ŶNŸà3Œ@JR|˜™Bt|‰¦ª$ÂÙN-¿¸þÃ§Š¸åHëORzEZVuÏïâGÖAkLtTö­š¼vÎ(ÿà÷‡}o\¸paÖ¬YÛ¶mû®:2Bnnî† üýýÍtý/coÖ7±©±¬÷DÃ퇣didEyriÙ^VÉ”À)Ê“«j¾¬ŸWTòrDßc£d¤I|>ÿ㧪ž\ÑÁ½gÖ&=šÙ&%)þ÷~—e[¾ÔýTä  ßÐÀi)b ½ ÙÅhhiIâìÕÆHe!9µJ[½§¸žÞr(Þ{´ùÇìÏÝNƒÁœ=àê4õž¼¤iâUÕl]M€*:SU™‚<¨­ç€”aÑ,Ó5‹†@^Aµ§£TP´A–F]ê5Òg¢ÿ‹Ø]gï_¿~ÝÇÇçÀôJ´ÿÔØlööíÛ ¢Cü/ŒñÛ2ÞqH_~Äk[0Y\—#©©PN]NŒŽ+Ùy4ÚgºÉ87¿.%„E¿PZÞKf›þ²-ü]BéÎ#oodtû¢-:xr|™¢lOW=u©¡¥ú勽z‰ùúß#â?–­ÿ=bëêá6Ô³rkîdç\¹› 3'î<ò6:®ä惴¥›^a{Ö‰ŒèàÒΟ¬Ìtz”€¦ºä޵V±kÀ{¢ÁÖƒQÑq%§.'*ÊSŒõe=]´_H‹*8t:Ü4¯ÜI Î ‹*˜´0 ‡—Fuð-ÄñØYž6É÷ÿgëtÿÛ~ºº:'NœàrÛçÛÎ(昘Ÿ¹ssr>\=qÑq|OãØ}—îbf³¹‘ïŠ[Õò êŠËl-•9Müg¡9ùÅõ®öêHÙ;¯ îYh®žBÆ#cã¢ãJâ“ʴեƸhv52ŠÙÙV½E®6ÆÓןÓ7nîÆ(æà×ùöV*Èl«ÚºÆðè îºUœ^e¤+ƒÜ`m]ãÝ'Þ@WZ[]RFš”‘]óúm!…„Ÿ0Z·±ØZF1#:ØóóØ>§o‡]xœÞåP ‰ÉªTdô$ŸÏö*ÏÕ^HÄ%§VEÆ)ȑƻéâ >Ÿïÿ$»ŽÁ¶ªB ` t¥KÊ‚# ›8NÚ݈ÅÖ2ŠÕA'©g6¿öòÀå` K‹+W®µ ÐÖ‡Ú¶m›‹¥ÎÉÍ3…Öv(’y ÝFLíì±]ÖQ±%-:€nAT F à·Šß¹Äã·Ec ÛFˆ–Í„ê K$e-ýÝ/­ æôßÏ›7¯õ®‹›\.wþüù~~~®›²Ôk$úd;àSï˪¹-:è§< ÊNÉlhÑA?%ós ªƒ®b¦¯ziýïg|||ÒÒÒ8в닸\î¤ ^…_ê>bP7¯ÓfóþB»:ˆM,ø8<1>šÅ0 èÕJ+™Õµü¯uP^ÉÂhBA›X–WÒ¸h’5ªƒ®"ŽÇî[5ÙDGyñž?jjj™TÐb„9sæ< Šº²ÎÜP½ûé.#†è¿IÈLMï~#Ÿ0¬'ð¯6:°¢sõqÉß| Ó ªeÉ[ ñå=]‚± ŠÒ´ síÖÎùϲzƒt”å¥i¿ àK`jž0¬ùÉqߪɨºÇ,O9i‰ kÎIKK#%±æææ+W®øøøˆªt€ò->Ɔ½ ¹ßÌ"~Zùg?í‹PÉd¸Ý9zÚù';Mýž§†"X.>ˆüùÀÐÐP'''Leeå/¿¬Z3Ë ÕAŸ¢¤àóÛûƒm݀͂ô¤·¢ÎQ#SÅ1„/Î_ù%ê¼ ´eádû ŽƒæÍ›Ëår1W®\a3™[xˆ:W(ÿÂdÖ¿zx\MGw¸Ã8PÓÕŽ »ÝÔÈu¾zÄEذùÕ=6·w·Eé.™’Ÿ_ðèÑ#leeÅ8­É.CE%”/ðùü—÷O³kÇÎØˆ'ˆ'Å=Ód_œ“#”Õ{:‚H„äÖVÆ–çsøÜTzIT^–³†!…Ї†ÃÿàÈHQÞ%egæW`ÒÓÒ­LµD”yÿ&¨87Ïmò:"‰â$Ì0‡ ï#˜ÌzQç®ûË)³xÀiæ'Ó‹Æß?TV(êL¡ü‹í`ÔädL“)Iíßú9éï#žpŸ¡¨ü%\=EB±¾¶|Ð0{,>ļu»¶ä¿céyµÜú÷¥ù=HEÀHIsóó1P^ÝyŒºÚ‡§µMÌ µkÙˆÇSy\.Aœ8ÜiFò»°:z¨³ÙM4¤e¥±$,4€žâµaÞ!­é†ÒÊ«ê˜L 3#¯ZfC¯9t›ÛÝpZ(=';-1å]„ã8Ÿ¯¿êââNÓ—p,:FæRrñ@Ôùí&fŠj,–XŽäð9çãÃD#›óó›Èߨ8N&$.óËÏE±zUExà%£a6z&þÞK•®¯þò7ƒ±u[ž›ö©¤(GÔ¹î’⤶ãO} Ϭêsq¥4¸<þúcw‹JjV»Kb„aº”³ Uvÿóüz`Œ¨³÷ÃÁå4?Ú/%KáêÕî9 
swift-2.17.0/doc/source/admin/figures/objectstorage-ring.png0000666000175100017510000005516313236061617024075 0ustar zuulzuul00000000000000[binary PNG image data]
swift-2.17.0/doc/source/admin/figures/objectstorage-accountscontainers.png0000666000175100017510000010061313236061617027036 0ustar zuulzuul00000000000000[binary PNG image data]
swift-2.17.0/doc/source/admin/index.rst0000666000175100017510000000116213236061617017766 0ustar zuulzuul00000000000000===================================
OpenStack Swift Administrator Guide
===================================

.. toctree::
   :maxdepth: 2

   objectstorage-intro.rst
   objectstorage-features.rst
   objectstorage-characteristics.rst
   objectstorage-components.rst
   objectstorage-ringbuilder.rst
   objectstorage-arch.rst
   objectstorage-replication.rst
   objectstorage-large-objects.rst
   objectstorage-auditors.rst
   objectstorage-EC.rst
   objectstorage-account-reaper.rst
   objectstorage-tenant-specific-image-storage.rst
   objectstorage-monitoring.rst
   objectstorage-admin.rst
   objectstorage-troubleshoot.rst
swift-2.17.0/doc/source/admin/objectstorage-ringbuilder.rst0000666000175100017510000002332213236061617024020 0ustar zuulzuul00000000000000============
Ring-builder
============

Use the swift-ring-builder utility to build and manage rings. This
utility assigns partitions to devices and writes an optimized Python
structure to a gzipped, serialized file on disk for transmission to the
servers. The server processes occasionally check the modification time
of the file and reload in-memory copies of the ring structure as
needed. If you use a slightly older version of the ring, one of the
three replicas for a partition subset will be incorrect because of the
way the ring-builder manages changes to the ring. You can work around
this issue.

The ring-builder also keeps its own builder file with the ring
information and additional data required to build future rings. It is
very important to keep multiple backup copies of these builder files.
One option is to copy the builder files out to every server while
copying the ring files themselves. Another is to upload the builder
files into the cluster itself. If you lose the builder file, you have
to create a new ring from scratch. Nearly all partitions would be
assigned to different devices and, therefore, nearly all of the stored
data would have to be replicated to new locations. So, recovery from a
builder file loss is possible, but data would be unreachable for an
extended time.

Ring data structure
~~~~~~~~~~~~~~~~~~~

The ring data structure consists of three top level fields: a list of
devices in the cluster, a list of lists of device ids indicating
partition to device assignments, and an integer indicating the number
of bits to shift an MD5 hash to calculate the partition for the hash.
Partition assignment list
~~~~~~~~~~~~~~~~~~~~~~~~~

This is a list of ``array('H')`` of device ids. The outermost list contains
an ``array('H')`` for each replica. Each ``array('H')`` has a length equal
to the partition count for the ring. Each integer in the ``array('H')`` is
an index into the above list of devices.

The partition list is known internally to the Ring class as
``_replica2part2dev_id``. So, to create a list of device dictionaries
assigned to a partition, the Python code would look like:

.. code-block:: python

   devices = [self.devs[part2dev_id[partition]]
              for part2dev_id in self._replica2part2dev_id]

That code is a little simplistic because it does not account for the
removal of duplicate devices. If a ring has more replicas than devices, a
partition will have more than one replica on a device.

``array('H')`` is used for memory conservation as there may be millions of
partitions.

Overload
~~~~~~~~

The ring builder tries to keep replicas as far apart as possible while
still respecting device weights. When it cannot do both, the overload
factor determines what happens. Each device takes an extra fraction of its
desired partitions to allow for replica dispersion; after that extra
fraction is exhausted, replicas are placed closer together than optimal.

The overload factor lets the operator trade off replica dispersion
(durability) against data dispersion (uniform disk usage).

The default overload factor is 0, so device weights are strictly followed.

With an overload factor of 0.1, each device accepts 10% more partitions
than it otherwise would, but only if it needs to maintain partition
dispersion.

For example, consider a 3-node cluster of machines with equal-size disks;
node A has 12 disks, node B has 12 disks, and node C has 11 disks. The ring
has an overload factor of 0.1 (10%). Without the overload, some partitions
would end up with replicas only on nodes A and B. However, with the
overload, every device can accept up to 10% more partitions for the sake of
dispersion. The missing disk in C means there is one disk's worth of
partitions to spread across the remaining 11 disks, which gives each disk
in C an extra 9.09% load. Since this is less than the 10% overload, there
is one replica of each partition on each node. However, this does mean that
the disks in node C have more data than the disks in nodes A and B. Since
each disk in C holds 12/11 of what a disk in A or B holds, if 80% full is
the warning threshold for the cluster, node C's disks reach 80% full while
A and B's disks are only 73.3% full.
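As a sketch of how an operator would apply the 10% overload from the
example, the ``set_overload`` and ``rebalance`` subcommands of
swift-ring-builder are used (``set_overload`` accepts either a decimal or a
percentage):

.. code-block:: console

   $ swift-ring-builder object.builder set_overload 0.1
   $ swift-ring-builder object.builder rebalance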
Replica counts
~~~~~~~~~~~~~~

To support the gradual change in replica counts, a ring can have a real
number of replicas and is not restricted to an integer number of replicas.

A fractional replica count is for the whole ring and not for individual
partitions. It indicates the average number of replicas for each partition.
For example, a replica count of 3.2 means that 20 percent of partitions
have four replicas and 80 percent have three replicas.

The replica count is adjustable. For example:

.. code-block:: console

   $ swift-ring-builder account.builder set_replicas 4
   $ swift-ring-builder account.builder rebalance

You must rebalance the replica ring in globally distributed clusters.
Operators of these clusters generally want an equal number of replicas and
regions. Therefore, when an operator adds or removes a region, the operator
adds or removes a replica. Removing unneeded replicas saves on the cost of
disks.

You can gradually increase the replica count at a rate that does not
adversely affect cluster performance. For example:

.. code-block:: console

   $ swift-ring-builder object.builder set_replicas 3.01
   $ swift-ring-builder object.builder rebalance
   ...

   $ swift-ring-builder object.builder set_replicas 3.02
   $ swift-ring-builder object.builder rebalance
   ...

Changes take effect after the ring is rebalanced. Therefore, if you intend
to change from 3 replicas to 3.01 but you accidentally type 2.01, no data
is lost.

Additionally, the :command:`swift-ring-builder X.builder create` command
can now take a decimal argument for the number of replicas.

Partition shift value
~~~~~~~~~~~~~~~~~~~~~

The partition shift value is known internally to the Ring class as
``_part_shift``. This value is used to shift an MD5 hash to calculate the
partition where the data for that hash should reside. Only the top four
bytes of the hash are used in this process. For example, to compute the
partition for the ``/account/container/object`` path using Python:

.. code-block:: python

   from hashlib import md5
   from struct import unpack_from

   partition = unpack_from(
       '>I', md5('/account/container/object').digest())[0] >> self._part_shift

For a ring generated with part\_power P, the partition shift value is
``32 - P``.

Build the ring
~~~~~~~~~~~~~~

The ring builder process includes these high-level steps:

#. The utility calculates the number of partitions to assign to each
   device based on the weight of the device. For example, for a partition
   at the power of 20, the ring has 1,048,576 partitions. One thousand
   devices of equal weight each want 1,048.576 partitions. The devices are
   sorted by the number of partitions they desire and kept in order
   throughout the initialization process.

   .. note::

      Each device is also assigned a random tiebreaker value that is used
      when two devices desire the same number of partitions. This
      tiebreaker is not stored on disk anywhere, and so two different
      rings created with the same parameters will have different partition
      assignments.

      For repeatable partition assignments, ``RingBuilder.rebalance()``
      takes an optional seed value that seeds the Python pseudo-random
      number generator.

#. The ring builder assigns each partition replica to the device that
   requires most partitions at that point while keeping it as far away as
   possible from other replicas. The ring builder prefers to assign a
   replica to a device in a region that does not already have a replica.
   If no such region is available, the ring builder searches for a device
   in a different zone, or on a different server. If it does not find one,
   it looks for a device with no replicas. Finally, if all options are
   exhausted, the ring builder assigns the replica to the device that has
   the fewest replicas already assigned.

   .. note::

      The ring builder assigns multiple replicas to one device only if the
      ring has fewer devices than it has replicas.

#. When building a new ring from an old ring, the ring builder
   recalculates the desired number of partitions that each device wants.

#. The ring builder unassigns partitions and gathers these partitions for
   reassignment, as follows:

   - The ring builder unassigns any assigned partitions from any removed
     devices and adds these partitions to the gathered list.

   - The ring builder unassigns any partition replicas that can be spread
     out for better durability and adds these partitions to the gathered
     list.

   - The ring builder unassigns random partitions from any devices that
     have more partitions than they need and adds these partitions to the
     gathered list.

#. The ring builder reassigns the gathered partitions to devices by using
   a similar method to the one described previously.

#.
When the ring builder reassigns a replica to a partition, the ring builder records the time of the reassignment. The ring builder uses this value when it gathers partitions for reassignment so that no partition is moved twice in a configurable amount of time. The RingBuilder class knows this configurable amount of time as ``min_part_hours``. The ring builder ignores this restriction for replicas of partitions on removed devices because removal of a device happens on device failure only, and reassignment is the only choice. These steps do not always perfectly rebalance a ring due to the random nature of gathering partitions for reassignment. To help reach a more balanced ring, the rebalance process is repeated until near perfect (less than 1 percent off) or when the balance does not improve by at least 1 percent (indicating we probably cannot get perfect balance due to wildly imbalanced zones or too many partitions recently moved). swift-2.17.0/doc/source/admin/objectstorage-replication.rst0000666000175100017510000001075113236061617024025 0ustar zuulzuul00000000000000=========== Replication =========== Because each replica in Object Storage functions independently and clients generally require only a simple majority of nodes to respond to consider an operation successful, transient failures like network partitions can quickly cause replicas to diverge. These differences are eventually reconciled by asynchronous, peer-to-peer replicator processes. The replicator processes traverse their local file systems and concurrently perform operations in a manner that balances load across physical disks. Replication uses a push model, with records and files generally only being copied from local to remote replicas. This is important because data on the node might not belong there (as in the case of hand offs and ring changes), and a replicator cannot know which data it should pull in from elsewhere in the cluster. Any node that contains data must ensure that data gets to where it belongs. The ring handles replica placement. To replicate deletions in addition to creations, every deleted record or file in the system is marked by a tombstone. The replication process cleans up tombstones after a time period known as the ``consistency window``. This window defines the duration of the replication and how long transient failure can remove a node from the cluster. Tombstone cleanup must be tied to replication to reach replica convergence. If a replicator detects that a remote drive has failed, the replicator uses the ``get_more_nodes`` interface for the ring to choose an alternate node with which to synchronize. The replicator can maintain desired levels of replication during disk failures, though some replicas might not be in an immediately usable location. .. note:: The replicator does not maintain desired levels of replication when failures such as entire node failures occur; most failures are transient. The main replication types are: - Database replication Replicates containers and objects. - Object replication Replicates object data. Database replication ~~~~~~~~~~~~~~~~~~~~ Database replication completes a low-cost hash comparison to determine whether two replicas already match. Normally, this check can quickly verify that most databases in the system are already synchronized. If the hashes differ, the replicator synchronizes the databases by sharing records added since the last synchronization point. 
This synchronization point is a high water mark that notes the last record
at which two databases were known to be synchronized, and is stored in each
database as a tuple of the remote database ID and record ID. Database IDs
are unique across all replicas of the database, and record IDs are
monotonically increasing integers. After all new records are pushed to the
remote database, the entire synchronization table of the local database is
pushed, so the remote database can guarantee that it is synchronized with
everything with which the local database was previously synchronized.

If a replica is missing, the whole local database file is transmitted to
the peer by using rsync(1) and is assigned a new unique ID.

In practice, database replication can process hundreds of databases per
concurrency setting per second (up to the number of available CPUs or
disks) and is bound by the number of database transactions that must be
performed.

Object replication
~~~~~~~~~~~~~~~~~~

The initial implementation of object replication performed an rsync to push
data from a local partition to all remote servers where it was expected to
reside. While this worked at small scale, replication times skyrocketed
once directory structures could no longer be held in RAM. This scheme was
modified to save a hash of the contents for each suffix directory to a
per-partition hashes file. The hash for a suffix directory is no longer
valid when the contents of that suffix directory are modified.

The object replication process reads in hash files and calculates any
invalidated hashes. Then, it transmits the hashes to each remote server
that should hold the partition, and only suffix directories with differing
hashes on the remote server are rsynced. After pushing files to the remote
server, the replication process notifies it to recalculate hashes for the
rsynced suffix directories.

The number of uncached directories that object replication must traverse,
usually as a result of invalidated suffix directory hashes, impedes
performance. To provide acceptable replication speeds, object replication
is designed to invalidate around 2 percent of the hash space on a normal
node each day.
swift-2.17.0/doc/source/admin/objectstorage-large-objects.rst0000666000175100017510000000252113236061617024231 0ustar zuulzuul00000000000000
====================
Large object support
====================

Object Storage (swift) uses segmentation to support the upload of large
objects. By default, Object Storage limits the size of a single uploaded
object to 5 GB. With segmentation, the size of a single object is
virtually unlimited. The segmentation process works by fragmenting the
object and automatically creating a manifest that ties the segments
together as a single object. This option offers greater upload speed with
the possibility of parallel uploads.

Large objects
~~~~~~~~~~~~~

The large object consists of two types of objects:

- **Segment objects** store the object content. You can divide your
  content into segments, and upload each segment into its own segment
  object. Segment objects do not have any special features. You create,
  update, download, and delete segment objects just as you would normal
  objects.

- A **manifest object** links the segment objects into one logical large
  object. When you download a manifest object, Object Storage concatenates
  and returns the contents of the segment objects in the response body of
  the request.
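For a concrete sense of how segments and manifests fit together, a
segmented upload can be driven entirely from the python-swiftclient CLI (a
sketch; the container and file names are illustrative, and
``--segment-size`` is what triggers a segmented upload):

.. code-block:: console

   $ swift upload mycontainer big.iso --segment-size 1073741824

This uploads ``big.iso`` in 1 GB segments (by default into a
``mycontainer_segments`` container) and creates a manifest object named
``big.iso`` in ``mycontainer``.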
The manifest object types are: - **Static large objects** - **Dynamic large objects** To find out more information on large object support, see :doc:`/overview_large_objects` in the developer documentation. swift-2.17.0/doc/source/admin/objectstorage-arch.rst0000666000175100017510000000676513236061617022443 0ustar zuulzuul00000000000000==================== Cluster architecture ==================== Access tier ~~~~~~~~~~~ Large-scale deployments segment off an access tier, which is considered the Object Storage system's central hub. The access tier fields the incoming API requests from clients and moves data in and out of the system. This tier consists of front-end load balancers, ssl-terminators, and authentication services. It runs the (distributed) brain of the Object Storage system: the proxy server processes. .. note:: If you want to use OpenStack Identity API v3 for authentication, you have the following options available in ``/etc/swift/dispersion.conf``: ``auth_version``, ``user_domain_name``, ``project_domain_name``, and ``project_name``. **Object Storage architecture** .. figure:: figures/objectstorage-arch.png Because access servers are collocated in their own tier, you can scale out read/write access regardless of the storage capacity. For example, if a cluster is on the public Internet, requires SSL termination, and has a high demand for data access, you can provision many access servers. However, if the cluster is on a private network and used primarily for archival purposes, you need fewer access servers. Since this is an HTTP addressable storage service, you may incorporate a load balancer into the access tier. Typically, the tier consists of a collection of 1U servers. These machines use a moderate amount of RAM and are network I/O intensive. Since these systems field each incoming API request, you should provision them with two high-throughput (10GbE) interfaces - one for the incoming front-end requests and the other for the back-end access to the object storage nodes to put and fetch data. Factors to consider ------------------- For most publicly facing deployments as well as private deployments available across a wide-reaching corporate network, you use SSL to encrypt traffic to the client. SSL adds significant processing load to establish sessions between clients, which is why you have to provision more capacity in the access layer. SSL may not be required for private deployments on trusted networks. Storage nodes ~~~~~~~~~~~~~ In most configurations, each of the five zones should have an equal amount of storage capacity. Storage nodes use a reasonable amount of memory and CPU. Metadata needs to be readily available to return objects quickly. The object stores run services not only to field incoming requests from the access tier, but to also run replicators, auditors, and reapers. You can provision storage nodes with single gigabit or 10 gigabit network interface depending on the expected workload and desired performance, although it may be desirable to isolate replication traffic with a second interface. **Object Storage (swift)** .. figure:: figures/objectstorage-nodes.png Currently, a 2 TB or 3 TB SATA disk delivers good performance for the price. You can use desktop-grade drives if you have responsive remote hands in the datacenter and enterprise-grade drives if you don't. Factors to consider ------------------- You should keep in mind the desired I/O performance for single-threaded requests. 
This system does not use RAID, so a single disk handles each request for
an object. Disk performance impacts single-threaded response rates.

To achieve apparent higher throughput, the object storage system is
designed to handle concurrent uploads/downloads. The network I/O capacity
(1GbE, bonded 1GbE pair, or 10GbE) should match your desired concurrent
throughput needs for reads and writes.
swift-2.17.0/doc/source/first_contribution_swift.rst0000666000175100017510000001660013236061617022734 0ustar zuulzuul00000000000000
===========================
First Contribution to Swift
===========================

-------------
Getting Swift
-------------

.. highlight:: none

Swift's source code is hosted on GitHub and managed with git. The current
trunk can be checked out like this::

    git clone https://github.com/openstack/swift.git

This will clone the Swift repository under your account.

A source tarball for the latest release of Swift is available on the
`launchpad project page `_.

Prebuilt packages for Ubuntu and RHEL variants are available.

* `Swift Ubuntu Packages `_
* `Swift RDO Packages `_

--------------------
Source Control Setup
--------------------

Swift uses ``git`` for source control. The OpenStack `Developer's Guide `_
describes the steps for setting up Git and all the necessary accounts for
contributing code to Swift.

----------------
Changes to Swift
----------------

Once you have the source code and source control set up, you can make your
changes to Swift.

-------
Testing
-------

The :doc:`Development Guidelines <development_guidelines>` describe the
testing requirements before submitting Swift code. In summary, you can
execute tox from the swift home directory (where you checked out the
source code)::

    tox

Tox will present the test results. Notice that in the beginning, it is
very common to break many coding style guidelines.

--------------------------
Proposing changes to Swift
--------------------------

The OpenStack `Developer's Guide `_ describes the most common ``git``
commands that you will need.

Following is a list of the commands that you need to know for your first
contribution to Swift:

To clone a copy of Swift::

    git clone https://github.com/openstack/swift.git

Under the swift directory, set up the Gerrit repository. The following
command configures the repository to know about Gerrit and installs the
``Change-Id`` commit hook. You only need to do this once::

    git review -s

To create your development branch (substitute branch_name for a name of
your choice)::

    git checkout -b <branch_name>

To check the files that have been updated in your branch::

    git status

To check the differences between your branch and the repository::

    git diff

Assuming you have not added new files, you commit all your changes using::

    git commit -a

Read the `Summary of Git commit message structure `_ for best practices on
writing the commit message. When you are ready to send your changes for
review use::

    git review

If successful, the Git response message will contain a URL you can use to
track your changes.

If you need to make further changes to the same review, you can commit
them using::

    git commit -a --amend

This will commit the changes under the same set of changes you issued
earlier. Notice that in order to send your latest version for review, you
will still need to call::

    git review

---------------------
Tracking your changes
---------------------

After proposing changes to Swift, you can track them at
https://review.openstack.org. After logging in, you will see a dashboard
of "Outgoing reviews" for changes you have proposed, "Incoming reviews"
for changes you are reviewing, and "Recently closed" changes for which you
were either a reviewer or owner.
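If you later want to pick one of those reviews back up on a fresh
checkout, ``git-review`` can also download a change by number (a sketch;
``12345`` is a placeholder for a real change number from your dashboard)::

    git review -d 12345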
.. _post-rebase-instructions:

------------------------
Post rebase instructions
------------------------

After rebasing, the following steps should be performed to rebuild the
swift installation. Note that these commands should be performed from the
root of the swift repo directory (e.g. ``$HOME/swift/``)::

    sudo python setup.py develop
    sudo pip install -r test-requirements.txt

If using TOX, depending on the changes made during the rebase, you may
need to rebuild the TOX environment (generally this will be the case if
test-requirements.txt was updated such that a new version of a package is
required), this can be accomplished using the ``-r`` argument to the TOX
cli::

    tox -r

You can include any of the other TOX arguments as well, for example, to
run the pep8 suite and rebuild the TOX environment the following can be
used::

    tox -r -e pep8

The rebuild option only needs to be specified once for a particular build
(e.g. pep8), that is further invocations of the same build will not
require this until the next rebase.

---------------
Troubleshooting
---------------

You may run into the following errors when starting Swift if you rebase
your commit using::

    git rebase

.. code-block:: python

   Traceback (most recent call last):
     File "/usr/local/bin/swift-init", line 5, in <module>
       from pkg_resources import require
     File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 2749, in <module>
       working_set = WorkingSet._build_master()
     File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 446, in _build_master
       return cls._build_from_requirements(__requires__)
     File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 459, in _build_from_requirements
       dists = ws.resolve(reqs, Environment())
     File "/usr/lib/python2.7/dist-packages/pkg_resources.py", line 628, in resolve
       raise DistributionNotFound(req)
   pkg_resources.DistributionNotFound: swift==2.3.1.devXXX

(where XXX represents a dev version of Swift).
.. code-block:: python

   Traceback (most recent call last):
     File "/usr/local/bin/swift-proxy-server", line 10, in <module>
       execfile(__file__)
     File "/home/swift/swift/bin/swift-proxy-server", line 23, in <module>
       sys.exit(run_wsgi(conf_file, 'proxy-server', **options))
     File "/home/swift/swift/swift/common/wsgi.py", line 888, in run_wsgi
       loadapp(conf_path, global_conf=global_conf)
     File "/home/swift/swift/swift/common/wsgi.py", line 390, in loadapp
       func(PipelineWrapper(ctx))
     File "/home/swift/swift/swift/proxy/server.py", line 602, in modify_wsgi_pipeline
       ctx = pipe.create_filter(filter_name)
     File "/home/swift/swift/swift/common/wsgi.py", line 329, in create_filter
       global_conf=self.context.global_conf)
     File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 296, in loadcontext
       global_conf=global_conf)
     File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 328, in _loadegg
       return loader.get_context(object_type, name, global_conf)
     File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 620, in get_context
       object_type, name=name)
     File "/usr/lib/python2.7/dist-packages/paste/deploy/loadwsgi.py", line 659, in find_egg_entry_point
       for prot in protocol_options] or '(no entry points)'))))
   LookupError: Entry point 'versioned_writes' not found in egg 'swift' (dir: /home/swift/swift; protocols: paste.filter_factory, paste.filter_app_factory; entry_points: )

This happens because ``git rebase`` will retrieve code for a different
version of Swift in the development stream, but the start scripts under
``/usr/local/bin`` have not been updated. The solution is to follow the
steps described in the :ref:`post-rebase-instructions` section.
swift-2.17.0/doc/source/object.rst0000666000175100017510000000215013236061617017033 0ustar zuulzuul00000000000000
.. _object:

******
Object
******

.. _object-auditor:

Object Auditor
==============

.. automodule:: swift.obj.auditor
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-diskfile:

Object Backend
==============

.. automodule:: swift.obj.diskfile
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-replicator:

Object Replicator
=================

.. automodule:: swift.obj.replicator
    :members:
    :undoc-members:
    :show-inheritance:

.. automodule:: swift.obj.ssync_sender
    :members:
    :undoc-members:
    :show-inheritance:

.. automodule:: swift.obj.ssync_receiver
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-reconstructor:

Object Reconstructor
====================

.. automodule:: swift.obj.reconstructor
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-server:

Object Server
=============

.. automodule:: swift.obj.server
    :members:
    :undoc-members:
    :show-inheritance:

.. _object-updater:

Object Updater
==============

.. automodule:: swift.obj.updater
    :members:
    :undoc-members:
    :show-inheritance:
swift-2.17.0/doc/source/development_saio.rst0000666000175100017510000005771413236061617021126 0ustar zuulzuul00000000000000
=======================
SAIO - Swift All In One
=======================

---------------------------------------------
Instructions for setting up a development VM
---------------------------------------------

This section documents setting up a virtual machine for doing Swift
development. The virtual machine will emulate running a four node Swift
cluster. To begin:

* Get a linux system server image, this guide will cover:

  * Ubuntu 14.04, 16.04 LTS
  * Fedora/CentOS
  * OpenSuse

- Create guest virtual machine from the image.
----------------------------
What's in a <your-user-name>
----------------------------

Much of the configuration described in this guide requires escalated
administrator (``root``) privileges; however, we assume that administrator
logs in as an unprivileged user and can use ``sudo`` to run privileged
commands.

Swift processes also run under a separate user and group, set by
configuration option, and referenced as
``<your-user-name>:<your-group-name>``. The default user is ``swift``,
which may not exist on your system. These instructions are intended to
allow a developer to use his/her username for
``<your-user-name>:<your-group-name>``.

-----------------------
Installing dependencies
-----------------------

* On ``apt`` based systems::

    sudo apt-get update
    sudo apt-get install curl gcc memcached rsync sqlite3 xfsprogs \
                         git-core libffi-dev python-setuptools \
                         liberasurecode-dev libssl-dev
    sudo apt-get install python-coverage python-dev python-nose \
                         python-xattr python-eventlet \
                         python-greenlet python-pastedeploy \
                         python-netifaces python-pip python-dnspython \
                         python-mock

* On ``yum`` based systems::

    sudo yum update
    sudo yum install curl gcc memcached rsync sqlite xfsprogs git-core \
                     libffi-devel xinetd liberasurecode-devel \
                     openssl-devel python-setuptools \
                     python-coverage python-devel python-nose \
                     pyxattr python-eventlet \
                     python-greenlet python-paste-deploy \
                     python-netifaces python-pip python-dns \
                     python-mock

* On ``OpenSuse``::

    sudo zypper install curl gcc memcached rsync sqlite3 xfsprogs git-core \
                        libffi-devel liberasurecode-devel python2-setuptools \
                        libopenssl-devel
    sudo zypper install python2-coverage python-devel python2-nose \
                        python-xattr python-eventlet python2-greenlet \
                        python2-netifaces python2-pip python2-dnspython \
                        python2-mock

Note: This installs necessary system dependencies and *most* of the python
dependencies. Later in the process setuptools/distribute or pip will
install and/or upgrade packages.

Next, choose either :ref:`partition-section` or :ref:`loopback-section`.

.. _partition-section:

Using a partition for storage
=============================

If you are going to use a separate partition for Swift data, be sure to
add another device when creating the VM, and follow these instructions:

#. Set up a single partition::

      sudo fdisk /dev/sdb
      sudo mkfs.xfs /dev/sdb1

#. Edit ``/etc/fstab`` and add::

      /dev/sdb1 /mnt/sdb1 xfs noatime,nodiratime,nobarrier,logbufs=8 0 0

#. Create the mount point and the individualized links::

      sudo mkdir /mnt/sdb1
      sudo mount /mnt/sdb1
      sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
      sudo chown ${USER}:${USER} /mnt/sdb1/*
      sudo mkdir /srv
      for x in {1..4}; do sudo ln -s /mnt/sdb1/$x /srv/$x; done
      sudo mkdir -p /srv/1/node/sdb1 /srv/1/node/sdb5 \
                    /srv/2/node/sdb2 /srv/2/node/sdb6 \
                    /srv/3/node/sdb3 /srv/3/node/sdb7 \
                    /srv/4/node/sdb4 /srv/4/node/sdb8 \
                    /var/run/swift
      sudo chown -R ${USER}:${USER} /var/run/swift
      # **Make sure to include the trailing slash after /srv/$x/**
      for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done

   Note: For OpenSuse users, a user's primary group is `users`, so you have
   2 options:

   * Change `${USER}:${USER}` to `${USER}:users` in all references of this
     guide; or
   * Create a group for your username and add yourself to it::

       sudo groupadd ${USER} && sudo gpasswd -a ${USER} ${USER}

   Note: We create the mount points and mount the storage disk under
   /mnt/sdb1. This disk will contain one directory per simulated swift
   node, each owned by the current swift user. We then create symlinks to
   these directories under /srv.
   If the disk sdb is unmounted, files will not be written under /srv/\*,
   because the symbolic link destination /mnt/sdb1/* will not exist. This
   prevents disk sync operations from writing to the root partition in the
   event a drive is unmounted.

#. Next, skip to :ref:`common-dev-section`.

.. _loopback-section:

Using a loopback device for storage
===================================

If you want to use a loopback device instead of another partition, follow
these instructions:

#. Create the file for the loopback device::

      sudo mkdir /srv
      sudo truncate -s 1GB /srv/swift-disk
      sudo mkfs.xfs /srv/swift-disk

   Modify size specified in the ``truncate`` command to make a larger or
   smaller partition as needed.

#. Edit `/etc/fstab` and add::

      /srv/swift-disk /mnt/sdb1 xfs loop,noatime,nodiratime,nobarrier,logbufs=8 0 0

#. Create the mount point and the individualized links::

      sudo mkdir /mnt/sdb1
      sudo mount /mnt/sdb1
      sudo mkdir /mnt/sdb1/1 /mnt/sdb1/2 /mnt/sdb1/3 /mnt/sdb1/4
      sudo chown ${USER}:${USER} /mnt/sdb1/*
      for x in {1..4}; do sudo ln -s /mnt/sdb1/$x /srv/$x; done
      sudo mkdir -p /srv/1/node/sdb1 /srv/1/node/sdb5 \
                    /srv/2/node/sdb2 /srv/2/node/sdb6 \
                    /srv/3/node/sdb3 /srv/3/node/sdb7 \
                    /srv/4/node/sdb4 /srv/4/node/sdb8 \
                    /var/run/swift
      sudo chown -R ${USER}:${USER} /var/run/swift
      # **Make sure to include the trailing slash after /srv/$x/**
      for x in {1..4}; do sudo chown -R ${USER}:${USER} /srv/$x/; done

   Note: For OpenSuse users, a user's primary group is `users`, so you have
   2 options:

   * Change `${USER}:${USER}` to `${USER}:users` in all references of this
     guide; or
   * Create a group for your username and add yourself to it::

       sudo groupadd ${USER} && sudo gpasswd -a ${USER} ${USER}

   Note: We create the mount points and mount the loopback file under
   /mnt/sdb1. This file will contain one directory per simulated swift
   node, each owned by the current swift user. We then create symlinks to
   these directories under /srv. If the loopback file is unmounted, files
   will not be written under /srv/\*, because the symbolic link destination
   /mnt/sdb1/* will not exist. This prevents disk sync operations from
   writing to the root partition in the event a drive is unmounted.

.. _common-dev-section:

Common Post-Device Setup
========================

Add the following lines to ``/etc/rc.local`` (before the ``exit 0``)::

    mkdir -p /var/cache/swift /var/cache/swift2 /var/cache/swift3 /var/cache/swift4
    chown <your-user-name>:<your-group-name> /var/cache/swift*
    mkdir -p /var/run/swift
    chown <your-user-name>:<your-group-name> /var/run/swift

Note that on some systems you might have to create ``/etc/rc.local``.

On Fedora 19 or later, you need to place these in ``/etc/rc.d/rc.local``.

On OpenSuse you need to place these in ``/etc/init.d/boot.local``.

Creating an XFS tmp dir
-----------------------

Tests require having an XFS directory available in ``/tmp`` or in the
``TMPDIR`` environment variable. To set up ``/tmp`` with an XFS filesystem,
do the following::

    cd ~
    truncate -s 1GB xfs_file  # create 1GB file for XFS in your home directory
    mkfs.xfs xfs_file
    sudo mount -o loop,noatime,nodiratime xfs_file /tmp
    sudo chmod -R 1777 /tmp

To persist this, edit and add the following to ``/etc/fstab``::

    /home/<your-user-name>/xfs_file /tmp xfs rw,noatime,nodiratime,attr2,inode64,noquota 0 0
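To double-check the result before running tests (a quick sanity check;
``findmnt`` is part of util-linux on all of the distributions above)::

    findmnt -no FSTYPE /tmp

This should print ``xfs``.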
----------------
Getting the code
----------------

#. Check out the python-swiftclient repo::

      cd $HOME; git clone https://github.com/openstack/python-swiftclient.git

#. Build a development installation of python-swiftclient::

      cd $HOME/python-swiftclient; sudo python setup.py develop; cd -

   Ubuntu 12.04 users need to install python-swiftclient's dependencies
   before the installation of python-swiftclient. This is due to a bug in
   an older version of setup tools::

      cd $HOME/python-swiftclient; sudo pip install -r requirements.txt; sudo python setup.py develop; cd -

#. Check out the swift repo::

      git clone https://github.com/openstack/swift.git

#. Build a development installation of swift::

      cd $HOME/swift; sudo pip install --no-binary cryptography -r requirements.txt; sudo python setup.py develop; cd -

   Note: Due to a difference in libssl.so naming between OpenSuse and other
   Linux distros, the wheel/binary won't work, so the cryptography package
   must be built, thus the ``--no-binary cryptography``.

   Fedora 19 or later users might have to perform the following if
   development installation of swift fails::

      sudo pip install -U xattr

#. Install swift's test dependencies::

      cd $HOME/swift; sudo pip install -r test-requirements.txt

----------------
Setting up rsync
----------------

#. Create ``/etc/rsyncd.conf``::

      sudo cp $HOME/swift/doc/saio/rsyncd.conf /etc/
      sudo sed -i "s/<your-user-name>/${USER}/" /etc/rsyncd.conf

   Here is the default ``rsyncd.conf`` file contents maintained in the repo
   that is copied and fixed up above:

   .. literalinclude:: /../saio/rsyncd.conf

#. On Ubuntu, edit the following line in ``/etc/default/rsync``::

      RSYNC_ENABLE=true

   On Fedora, edit the following line in ``/etc/xinetd.d/rsync``::

      disable = no

   One might have to create the above files to perform the edits.

   On OpenSuse, nothing needs to happen here.

#. On platforms with SELinux in ``Enforcing`` mode, either set to
   ``Permissive``::

      sudo setenforce Permissive

   Or just allow rsync full access::

      sudo setsebool -P rsync_full_access 1

#. Start the rsync daemon

   * On Ubuntu 14.04, run::

        sudo service rsync restart

   * On Ubuntu 16.04, run::

        sudo systemctl enable rsync
        sudo systemctl start rsync

   * On Fedora, run::

        sudo systemctl restart xinetd.service
        sudo systemctl enable rsyncd.service
        sudo systemctl start rsyncd.service

   * On OpenSuse, run::

        sudo systemctl enable rsyncd.service
        sudo systemctl start rsyncd.service

   * On other xinetd based systems simply run::

        sudo service xinetd restart

#. Verify rsync is accepting connections for all servers::

      rsync rsync://pub@localhost/

   You should see the following output from the above command::

      account6012
      account6022
      account6032
      account6042
      container6011
      container6021
      container6031
      container6041
      object6010
      object6020
      object6030
      object6040

------------------
Starting memcached
------------------

On non-Ubuntu distros you need to ensure memcached is running::

    sudo service memcached start
    sudo chkconfig memcached on

or::

    sudo systemctl enable memcached.service
    sudo systemctl start memcached.service

The tempauth middleware stores tokens in memcached. If memcached is not
running, tokens cannot be validated, and accessing Swift becomes
impossible.
---------------------------------------------------
Optional: Setting up rsyslog for individual logging
---------------------------------------------------

#. Install the swift rsyslogd configuration::

      sudo cp $HOME/swift/doc/saio/rsyslog.d/10-swift.conf /etc/rsyslog.d/

   Note: OpenSuse may have the systemd logger installed, so if you want
   this to work, you need to install rsyslog::

      sudo zypper install rsyslog
      sudo systemctl start rsyslog.service
      sudo systemctl enable rsyslog.service

   Be sure to review that conf file to determine if you want all the logs
   in one file vs. all the logs separated out, and if you want hourly logs
   for stats processing. For convenience, we provide its default contents
   below:

   .. literalinclude:: /../saio/rsyslog.d/10-swift.conf

#. Edit ``/etc/rsyslog.conf`` and make the following change (usually in
   the "GLOBAL DIRECTIVES" section)::

      $PrivDropToGroup adm

#. If using hourly logs (see above) perform::

      sudo mkdir -p /var/log/swift/hourly

   Otherwise perform::

      sudo mkdir -p /var/log/swift

#. Setup the logging directory and start syslog:

   * On Ubuntu::

        sudo chown -R syslog.adm /var/log/swift
        sudo chmod -R g+w /var/log/swift
        sudo service rsyslog restart

   * On Fedora and OpenSuse::

        sudo chown -R root:adm /var/log/swift
        sudo chmod -R g+w /var/log/swift
        sudo systemctl restart rsyslog.service

---------------------
Configuring each node
---------------------

After performing the following steps, be sure to verify that Swift has
access to resulting configuration files (sample configuration files are
provided with all defaults in line-by-line comments).

#. Optionally remove an existing swift directory::

      sudo rm -rf /etc/swift

#. Populate the ``/etc/swift`` directory itself::

      cd $HOME/swift/doc; sudo cp -r saio/swift /etc/swift; cd -
      sudo chown -R ${USER}:${USER} /etc/swift

#. Update ``<your-user-name>`` references in the Swift config files::

      find /etc/swift/ -name \*.conf | xargs sudo sed -i "s/<your-user-name>/${USER}/"

The contents of the configuration files provided by executing the above
commands are as follows:

#. ``/etc/swift/swift.conf``

   .. literalinclude:: /../saio/swift/swift.conf

#. ``/etc/swift/proxy-server.conf``

   .. literalinclude:: /../saio/swift/proxy-server.conf

#. ``/etc/swift/object-expirer.conf``

   .. literalinclude:: /../saio/swift/object-expirer.conf

#. ``/etc/swift/container-reconciler.conf``

   .. literalinclude:: /../saio/swift/container-reconciler.conf

#. ``/etc/swift/container-sync-realms.conf``

   .. literalinclude:: /../saio/swift/container-sync-realms.conf

#. ``/etc/swift/account-server/1.conf``

   .. literalinclude:: /../saio/swift/account-server/1.conf

#. ``/etc/swift/container-server/1.conf``

   .. literalinclude:: /../saio/swift/container-server/1.conf

#. ``/etc/swift/object-server/1.conf``

   .. literalinclude:: /../saio/swift/object-server/1.conf

#. ``/etc/swift/account-server/2.conf``

   .. literalinclude:: /../saio/swift/account-server/2.conf

#. ``/etc/swift/container-server/2.conf``

   .. literalinclude:: /../saio/swift/container-server/2.conf

#. ``/etc/swift/object-server/2.conf``

   .. literalinclude:: /../saio/swift/object-server/2.conf

#. ``/etc/swift/account-server/3.conf``

   .. literalinclude:: /../saio/swift/account-server/3.conf

#. ``/etc/swift/container-server/3.conf``

   .. literalinclude:: /../saio/swift/container-server/3.conf

#. ``/etc/swift/object-server/3.conf``

   .. literalinclude:: /../saio/swift/object-server/3.conf

#. ``/etc/swift/account-server/4.conf``

   .. literalinclude:: /../saio/swift/account-server/4.conf

#. ``/etc/swift/container-server/4.conf``

   .. literalinclude:: /../saio/swift/container-server/4.conf

#. ``/etc/swift/object-server/4.conf``

   .. literalinclude:: /../saio/swift/object-server/4.conf

..
_setup_scripts: ------------------------------------ Setting up scripts for running Swift ------------------------------------ #. Copy the SAIO scripts for resetting the environment:: mkdir -p $HOME/bin cd $HOME/swift/doc; cp saio/bin/* $HOME/bin; cd - chmod +x $HOME/bin/* #. Edit the ``$HOME/bin/resetswift`` script The template ``resetswift`` script looks like the following: .. literalinclude:: /../saio/bin/resetswift If you are using a loopback device add an environment var to substitute ``/dev/sdb1`` with ``/srv/swift-disk``:: echo "export SAIO_BLOCK_DEVICE=/srv/swift-disk" >> $HOME/.bashrc If you did not set up rsyslog for individual logging, remove the ``find /var/log/swift...`` line:: sed -i "/find \/var\/log\/swift/d" $HOME/bin/resetswift #. Install the sample configuration file for running tests:: cp $HOME/swift/test/sample.conf /etc/swift/test.conf The template ``test.conf`` looks like the following: .. literalinclude:: /../../test/sample.conf #. Add an environment variable for running tests below:: echo "export SWIFT_TEST_CONFIG_FILE=/etc/swift/test.conf" >> $HOME/.bashrc #. Be sure that your ``PATH`` includes the ``bin`` directory:: echo "export PATH=${PATH}:$HOME/bin" >> $HOME/.bashrc #. Source the above environment variables into your current environment:: . $HOME/.bashrc #. Construct the initial rings using the provided script:: remakerings The ``remakerings`` script looks like the following: .. literalinclude:: /../saio/bin/remakerings You can expect the output from this command to produce the following. Note that 3 object rings are created in order to test storage policies and EC in the SAIO environment. The EC ring is the only one with all 8 devices. There are also two replication rings, one for 3x replication and another for 2x replication, but those rings only use 4 devices:: Device d0r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb1_"" with 1.0 weight got id 0 Device d1r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb2_"" with 1.0 weight got id 1 Device d2r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb3_"" with 1.0 weight got id 2 Device d3r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb4_"" with 1.0 weight got id 3 Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 Device d0r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb1_"" with 1.0 weight got id 0 Device d1r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb2_"" with 1.0 weight got id 1 Device d2r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb3_"" with 1.0 weight got id 2 Device d3r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb4_"" with 1.0 weight got id 3 Reassigned 2048 (200.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 Device d0r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb1_"" with 1.0 weight got id 0 Device d1r1z1-127.0.0.1:6010R127.0.0.1:6010/sdb5_"" with 1.0 weight got id 1 Device d2r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb2_"" with 1.0 weight got id 2 Device d3r1z2-127.0.0.2:6020R127.0.0.2:6020/sdb6_"" with 1.0 weight got id 3 Device d4r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb3_"" with 1.0 weight got id 4 Device d5r1z3-127.0.0.3:6030R127.0.0.3:6030/sdb7_"" with 1.0 weight got id 5 Device d6r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb4_"" with 1.0 weight got id 6 Device d7r1z4-127.0.0.4:6040R127.0.0.4:6040/sdb8_"" with 1.0 weight got id 7 Reassigned 6144 (600.00%) partitions. Balance is now 0.00. 
Dispersion is now 0.00
      Device d0r1z1-127.0.0.1:6011R127.0.0.1:6011/sdb1_"" with 1.0 weight got id 0
      Device d1r1z2-127.0.0.2:6021R127.0.0.2:6021/sdb2_"" with 1.0 weight got id 1
      Device d2r1z3-127.0.0.3:6031R127.0.0.3:6031/sdb3_"" with 1.0 weight got id 2
      Device d3r1z4-127.0.0.4:6041R127.0.0.4:6041/sdb4_"" with 1.0 weight got id 3
      Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00
      Device d0r1z1-127.0.0.1:6012R127.0.0.1:6012/sdb1_"" with 1.0 weight got id 0
      Device d1r1z2-127.0.0.2:6022R127.0.0.2:6022/sdb2_"" with 1.0 weight got id 1
      Device d2r1z3-127.0.0.3:6032R127.0.0.3:6032/sdb3_"" with 1.0 weight got id 2
      Device d3r1z4-127.0.0.4:6042R127.0.0.4:6042/sdb4_"" with 1.0 weight got id 3
      Reassigned 3072 (300.00%) partitions. Balance is now 0.00. Dispersion is now 0.00

#. Read more about Storage Policies and your SAIO :doc:`policies_saio`

#. Verify the unit tests run::

      $HOME/swift/.unittests

   Note that the unit tests do not require any swift daemons running.

#. Start the "main" Swift daemon processes (proxy, account, container, and
   object)::

      startmain

   (The "``Unable to increase file descriptor limit. Running as non-root?``"
   warnings are expected and ok.)

   The ``startmain`` script looks like the following:

   .. literalinclude:: /../saio/bin/startmain

#. Get an ``X-Storage-Url`` and ``X-Auth-Token``::

      curl -v -H 'X-Storage-User: test:tester' -H 'X-Storage-Pass: testing' http://127.0.0.1:8080/auth/v1.0

#. Check that you can ``GET`` account::

      curl -v -H 'X-Auth-Token: <token-from-x-auth-token-above>' <url-from-x-storage-url-above>

#. Check that ``swift`` command provided by the python-swiftclient package
   works::

      swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing stat

#. Verify the functional tests run::

      $HOME/swift/.functests

   (Note: functional tests will first delete everything in the configured
   accounts.)

#. Verify the probe tests run::

      $HOME/swift/.probetests

   (Note: probe tests will reset your environment as they call
   ``resetswift`` for each test.)

----------------
Debugging Issues
----------------

If all doesn't go as planned, and tests fail, or you can't auth, or
something doesn't work, here are some good starting places to look for
issues:

#. Everything is logged using system facilities -- usually in
   ``/var/log/syslog``, but possibly in ``/var/log/messages`` on e.g.
   Fedora -- so that is a good first place to look for errors (most likely
   python tracebacks).

#. Make sure all of the server processes are running. For the base
   functionality, the Proxy, Account, Container, and Object servers should
   be running.

#. If one of the servers is not running, and no errors are logged to
   syslog, it may be useful to try to start the server manually, for
   example: ``swift-object-server /etc/swift/object-server/1.conf`` will
   start the object server. If there are problems not showing up in
   syslog, then you will likely see the traceback on startup.

#. If you need to, you can turn off syslog for unit tests. This can be
   useful for environments where ``/dev/log`` is unavailable, or which
   cannot rate limit (unit tests generate a lot of logs very quickly).
   Open the file ``SWIFT_TEST_CONFIG_FILE`` points to, and change the
   value of ``fake_syslog`` to ``True``.

#. If you encounter a ``401 Unauthorized`` when following Step 12 where
   you check that you can ``GET`` account, use ``sudo service memcached
   status`` and check if memcache is running. If memcache is not running,
   start it using ``sudo service memcached start``. Once memcache is
   running, rerun ``GET`` account.
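If you want to confirm memcache connectivity directly rather than through
the init system, a quick probe is possible with just the Python standard
library (a sketch; it assumes memcached is on its default address,
127.0.0.1:11211):

.. code-block:: python

   import socket

   # Open a connection to memcached and request its stats; a healthy
   # daemon replies with a series of STAT lines terminated by END.
   conn = socket.create_connection(('127.0.0.1', 11211), timeout=2)
   conn.sendall(b'stats\r\n')
   print(conn.recv(65536).decode('ascii'))
   conn.close()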
------------
Known Issues
------------

Listed here are some "gotcha's" that you may run into when using or
testing your SAIO:

#. fallocate_reserve - in most cases a SAIO doesn't have a very large XFS
   partition so having fallocate enabled and fallocate_reserve set can
   cause issues, specifically when trying to run the functional tests. For
   this reason fallocate has been turned off on the object-servers in the
   SAIO. If you want to play with the fallocate_reserve settings then know
   that functional tests will fail unless you change the max_file_size
   constraint to something more reasonable than the default (5G). Ideally
   you'd make it 1/4 of your XFS file system size so the tests can pass.
swift-2.17.0/doc/source/ratelimit.rst0000666000175100017510000001177313236061617017564 0ustar zuulzuul00000000000000
.. _ratelimit:

=============
Rate Limiting
=============

Rate limiting in Swift is implemented as a pluggable middleware. Rate
limiting is performed on requests that result in database writes to the
account and container sqlite dbs. It uses memcached and is dependent on
the proxy servers having highly synchronized time. The rate limits are
limited by the accuracy of the proxy server clocks.

--------------
Configuration
--------------

All configuration is optional. If no account or container limits are
provided there will be no rate limiting. Configuration available:

================================ ======= ======================================
Option                           Default Description
-------------------------------- ------- --------------------------------------
clock_accuracy                   1000    Represents how accurate the proxy
                                         servers' system clocks are with each
                                         other. 1000 means that all the
                                         proxies' clock are accurate to each
                                         other within 1 millisecond. No
                                         ratelimit should be higher than the
                                         clock accuracy.
max_sleep_time_seconds           60      App will immediately return a 498
                                         response if the necessary sleep time
                                         ever exceeds the given
                                         max_sleep_time_seconds.
log_sleep_time_seconds           0       To allow visibility into rate limiting
                                         set this value > 0 and all sleeps
                                         greater than the number will be
                                         logged.
rate_buffer_seconds              5       Number of seconds the rate counter can
                                         drop and be allowed to catch up (at a
                                         faster than listed rate). A larger
                                         number will result in larger spikes in
                                         rate but better average accuracy.
account_ratelimit                0       If set, will limit PUT and DELETE
                                         requests to
                                         /account_name/container_name. Number
                                         is in requests per second.
container_ratelimit_size         ''      When set with container_ratelimit_x =
                                         r: for containers of size x, limit
                                         requests per second to r. Will limit
                                         PUT, DELETE, and POST requests to
                                         /a/c/o.
container_listing_ratelimit_size ''      When set with
                                         container_listing_ratelimit_x = r: for
                                         containers of size x, limit listing
                                         requests per second to r. Will limit
                                         GET requests to /a/c.
================================ ======= ======================================

The container rate limits are linearly interpolated from the values given.
A sample container rate limiting could be:

container_ratelimit_100 = 100

container_ratelimit_200 = 50

container_ratelimit_500 = 20

This would result in

================ ============
Container Size   Rate Limit
---------------- ------------
0-99             No limiting
100              100
150              75
500              20
1000             20
================ ============
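The interpolation rule can be illustrated with a few lines of Python (a
sketch of the linear behavior described above using the sample limits;
this is not the middleware's actual code):

.. code-block:: python

   SAMPLE_LIMITS = [(100, 100), (200, 50), (500, 20)]

   def container_ratelimit(size, limits=SAMPLE_LIMITS):
       # Containers smaller than the first configured size are not limited.
       if size < limits[0][0]:
           return None
       # Walk consecutive pairs of points and interpolate between them.
       for (lo, lo_rate), (hi, hi_rate) in zip(limits, limits[1:]):
           if size <= hi:
               frac = (size - lo) / float(hi - lo)
               return lo_rate + (hi_rate - lo_rate) * frac
       # At or beyond the largest configured size, use its rate.
       return limits[-1][1]

   assert container_ratelimit(150) == 75    # matches the table above
   assert container_ratelimit(1000) == 20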
-----------------------------
Account Specific Ratelimiting
-----------------------------

The above ratelimiting is to prevent the "many writes to a single
container" bottleneck from causing a problem. There could also be a
problem where a single account is just using too much of the cluster's
resources. In this case, the container ratelimits may not help because the
customer could be doing thousands of reqs/sec to distributed containers
each getting a small fraction of the total so those limits would never
trigger. If a system administrator notices this, he/she can set the
X-Account-Sysmeta-Global-Write-Ratelimit on an account and that will limit
the total number of write requests (PUT, POST, DELETE, COPY) that account
can do for the whole account. This limit will be in addition to the
applicable account/container limits from above. This header will be hidden
from the user, because of the gatekeeper middleware, and can only be set
using a direct client to the account nodes. It accepts a float value and
will only limit requests if the value is > 0.

-------------------
Black/White-listing
-------------------

To blacklist or whitelist an account set:

X-Account-Sysmeta-Global-Write-Ratelimit: BLACKLIST

or

X-Account-Sysmeta-Global-Write-Ratelimit: WHITELIST

in the account headers.
swift-2.17.0/doc/source/install/0000775000175100017510000000000013236061751016500 5ustar zuulzuul00000000000000swift-2.17.0/doc/source/install/get_started.rst0000666000175100017510000000364513236061617021550 0ustar zuulzuul00000000000000
===============================
Object Storage service overview
===============================

The OpenStack Object Storage is a multi-tenant object storage system. It
is highly scalable and can manage large amounts of unstructured data at
low cost through a RESTful HTTP API.

It includes the following components:

Proxy servers (swift-proxy-server)
  Accepts OpenStack Object Storage API and raw HTTP requests to upload
  files, modify metadata, and create containers. It also serves file or
  container listings to web browsers. To improve performance, the proxy
  server can use an optional cache that is usually deployed with memcache.

Account servers (swift-account-server)
  Manages accounts defined with Object Storage.

Container servers (swift-container-server)
  Manages the mapping of containers or folders, within Object Storage.

Object servers (swift-object-server)
  Manages actual objects, such as files, on the storage nodes.

Various periodic processes
  Performs housekeeping tasks on the large data store. The replication
  services ensure consistency and availability through the cluster. Other
  periodic processes include auditors, updaters, and reapers.

WSGI middleware
  Handles authentication and is usually OpenStack Identity.

swift client
  Enables users to submit commands to the REST API through a command-line
  client authorized as either an admin user, reseller user, or swift user.

swift-init
  Script that initializes the building of the ring file, takes daemon
  names as parameter and offers commands. Documented in
  https://docs.openstack.org/swift/latest/admin_guide.html#managing-services.

swift-recon
  A cli tool used to retrieve various metrics and telemetry information
  about a cluster that has been collected by the swift-recon middleware.

swift-ring-builder
  Storage ring build and rebalance utility. Documented in
  https://docs.openstack.org/swift/latest/admin_guide.html#managing-the-rings.
swift-2.17.0/doc/source/install/storage-install.rst0000666000175100017510000000062513236061617022350 0ustar zuulzuul00000000000000
..
_storage: Install and configure the storage nodes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure storage nodes that operate the account, container, and object services. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 1 storage-install-obs.rst storage-install-rdo.rst storage-install-ubuntu-debian.rst swift-2.17.0/doc/source/install/storage-include1.txt0000666000175100017510000000213013236061617022406 0ustar zuulzuul00000000000000Edit the ``/etc/swift/account-server.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the bind IP address, bind port, user, configuration directory, and mount point directory: .. code-block:: none [DEFAULT] ... bind_ip = MANAGEMENT_INTERFACE_IP_ADDRESS bind_port = 6202 user = swift swift_dir = /etc/swift devices = /srv/node mount_check = True Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. * In the ``[pipeline:main]`` section, enable the appropriate modules: .. code-block:: none [pipeline:main] pipeline = healthcheck recon account-server .. note:: For more information on other modules that enable additional features, see the `Deployment Guide `__. * In the ``[filter:recon]`` section, configure the recon (meters) cache directory: .. code-block:: none [filter:recon] use = egg:swift#recon ... recon_cache_path = /var/cache/swift swift-2.17.0/doc/source/install/finalize-installation-ubuntu-debian.rst0000666000175100017510000000442013236061617026275 0ustar zuulzuul00000000000000.. _finalize-ubuntu-debian: Finalize installation for Ubuntu and Debian ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. This section applies to Ubuntu 14.04 (LTS) and Debian. #. Obtain the ``/etc/swift/swift.conf`` file from the Object Storage source repository: .. code-block:: console # curl -o /etc/swift/swift.conf \ https://git.openstack.org/cgit/openstack/swift/plain/etc/swift.conf-sample?h=stable/ocata #. Edit the ``/etc/swift/swift.conf`` file and complete the following actions: * In the ``[swift-hash]`` section, configure the hash path prefix and suffix for your environment. .. code-block:: none [swift-hash] ... swift_hash_path_suffix = HASH_PATH_SUFFIX swift_hash_path_prefix = HASH_PATH_PREFIX Replace HASH_PATH_PREFIX and HASH_PATH_SUFFIX with unique values. .. warning:: Keep these values secret and do not change or lose them. * In the ``[storage-policy:0]`` section, configure the default storage policy: .. code-block:: none [storage-policy:0] ... name = Policy-0 default = yes #. Copy the ``swift.conf`` file to the ``/etc/swift`` directory on each storage node and any additional nodes running the proxy service. 4. On all nodes, ensure proper ownership of the configuration directory: .. code-block:: console # chown -R root:swift /etc/swift 5. On the controller node and any other nodes running the proxy service, restart the Object Storage proxy service including its dependencies: .. code-block:: console # service memcached restart # service swift-proxy restart 6. On the storage nodes, start the Object Storage services: .. code-block:: console # swift-init all start .. 
note:: The storage node runs many Object Storage services and the :command:`swift-init` command makes them easier to manage. You can ignore errors from services not running on the storage node. swift-2.17.0/doc/source/install/storage-include3.txt0000666000175100017510000000220213236061617022410 0ustar zuulzuul00000000000000Edit the ``/etc/swift/object-server.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the bind IP address, bind port, user, configuration directory, and mount point directory: .. code-block:: none [DEFAULT] ... bind_ip = MANAGEMENT_INTERFACE_IP_ADDRESS bind_port = 6200 user = swift swift_dir = /etc/swift devices = /srv/node mount_check = True Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. * In the ``[pipeline:main]`` section, enable the appropriate modules: .. code-block:: none [pipeline:main] pipeline = healthcheck recon object-server .. note:: For more information on other modules that enable additional features, see the `Deployment Guide `__. * In the ``[filter:recon]`` section, configure the recon (meters) cache and lock directories: .. code-block:: none [filter:recon] use = egg:swift#recon ... recon_cache_path = /var/cache/swift recon_lock_path = /var/lock swift-2.17.0/doc/source/install/controller-install-ubuntu.rst0000666000175100017510000000350513236061617024407 0ustar zuulzuul00000000000000.. _controller-ubuntu: Install and configure the controller node for Ubuntu ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the proxy service that handles requests for the account, container, and object services operating on the storage nodes. For simplicity, this guide installs and configures the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the `Deployment Guide `__. This section applies to Ubuntu 14.04 (LTS). .. include:: controller-common_prerequisites.txt Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. #. Install the packages: .. code-block:: console # apt-get install swift swift-proxy python-swiftclient \ python-keystoneclient python-keystonemiddleware \ memcached .. note:: Complete OpenStack environments already include some of these packages. 2. Create the ``/etc/swift`` directory. 3. Obtain the proxy service configuration file from the Object Storage source repository: .. code-block:: console # curl -o /etc/swift/proxy-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/proxy-server.conf-sample?h=stable/ocata 4. .. include:: controller-include.txt swift-2.17.0/doc/source/install/finalize-installation.rst0000666000175100017510000000037013236061617023535 0ustar zuulzuul00000000000000.. _finalize: Finalize installation ~~~~~~~~~~~~~~~~~~~~~ Finalizing installation varies by distribution. .. 
toctree:: :maxdepth: 1 finalize-installation-obs.rst finalize-installation-rdo.rst finalize-installation-ubuntu-debian.rst swift-2.17.0/doc/source/install/controller-install.rst0000666000175100017510000000075713236061617023075 0ustar zuulzuul00000000000000.. _controller: Install and configure the controller node ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the proxy service that handles requests for the account, container, and object services operating on the storage nodes. Note that installation and configuration vary by distribution. .. toctree:: :maxdepth: 1 controller-install-obs.rst controller-install-rdo.rst controller-install-ubuntu.rst controller-install-debian.rst swift-2.17.0/doc/source/install/environment-networking.rst0000666000175100017510000000273313236061617023773 0ustar zuulzuul00000000000000.. _networking: Configure networking ~~~~~~~~~~~~~~~~~~~~ Before you start deploying the Object Storage service in your OpenStack environment, configure networking for two additional storage nodes. First node ---------- Configure network interfaces ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Configure the management interface: * IP address: ``10.0.0.51`` * Network mask: ``255.255.255.0`` (or ``/24``) * Default gateway: ``10.0.0.1`` Configure name resolution ^^^^^^^^^^^^^^^^^^^^^^^^^ #. Set the hostname of the node to ``object1``. #. .. include:: edit_hosts_file.txt #. Reboot the system to activate the changes. Second node ----------- Configure network interfaces ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * Configure the management interface: * IP address: ``10.0.0.52`` * Network mask: ``255.255.255.0`` (or ``/24``) * Default gateway: ``10.0.0.1`` Configure name resolution ^^^^^^^^^^^^^^^^^^^^^^^^^ #. Set the hostname of the node to ``object2``. #. .. include:: edit_hosts_file.txt #. Reboot the system to activate the changes. .. warning:: Some distributions add an extraneous entry in the ``/etc/hosts`` file that resolves the actual hostname to another loopback IP address such as ``127.0.1.1``. You must comment out or remove this entry to prevent name resolution problems. **Do not remove the 127.0.0.1 entry.** .. note:: To reduce complexity of this guide, we add host entries for optional services regardless of whether you choose to deploy them. swift-2.17.0/doc/source/install/controller-install-obs.rst0000666000175100017510000000316013236061617023645 0ustar zuulzuul00000000000000.. _controller-obs: Install and configure the controller node for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the proxy service that handles requests for the account, container, and object services operating on the storage nodes. For simplicity, this guide installs and configures the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the `Deployment Guide `__. This section applies to openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. .. include:: controller-common_prerequisites.txt Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. 
Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. #. Install the packages: .. code-block:: console # zypper install openstack-swift-proxy python-swiftclient \ python-keystoneclient python-keystonemiddleware \ python-xml memcached .. note:: Complete OpenStack environments already include some of these packages. 2. .. include:: controller-include.txt swift-2.17.0/doc/source/install/controller-install-rdo.rst0000666000175100017510000000354513236061617023655 0ustar zuulzuul00000000000000.. _controller-rdo: Install and configure the controller node for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the proxy service that handles requests for the account, container, and object services operating on the storage nodes. For simplicity, this guide installs and configures the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the `Deployment Guide `__. This section applies to Red Hat Enterprise Linux 7 and CentOS 7. .. include:: controller-common_prerequisites.txt Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. #. Install the packages: .. code-block:: console # yum install openstack-swift-proxy python-swiftclient \ python-keystoneclient python-keystonemiddleware \ memcached .. note:: Complete OpenStack environments already include some of these packages. 2. Obtain the proxy service configuration file from the Object Storage source repository: .. code-block:: console # curl -o /etc/swift/proxy-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/proxy-server.conf-sample?h=stable/ocata 3. .. include:: controller-include.txt swift-2.17.0/doc/source/install/storage-install-obs.rst0000666000175100017510000000743313236061617023135 0ustar zuulzuul00000000000000.. _storage-obs: Install and configure the storage nodes for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure storage nodes that operate the account, container, and object services. For simplicity, this configuration references two storage nodes, each containing two empty local block storage devices. The instructions use ``/dev/sdb`` and ``/dev/sdc``, but you can substitute different values for your particular nodes. Although Object Storage supports any file system with extended attributes (xattr), testing and benchmarking indicate the best performance and reliability on XFS. For more information on horizontally scaling your environment, see the `Deployment Guide `_. This section applies to openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. Prerequisites ------------- Before you install and configure the Object Storage service on the storage nodes, you must prepare the storage devices. .. note:: Perform these steps on each storage node. #. 
Install the supporting utility packages: .. code-block:: console # zypper install xfsprogs rsync #. Format the ``/dev/sdb`` and ``/dev/sdc`` devices as XFS: .. code-block:: console # mkfs.xfs /dev/sdb # mkfs.xfs /dev/sdc #. Create the mount point directory structure: .. code-block:: console # mkdir -p /srv/node/sdb # mkdir -p /srv/node/sdc #. Edit the ``/etc/fstab`` file and add the following to it: .. code-block:: none /dev/sdb /srv/node/sdb xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 /dev/sdc /srv/node/sdc xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 #. Mount the devices: .. code-block:: console # mount /srv/node/sdb # mount /srv/node/sdc #. Create or edit the ``/etc/rsyncd.conf`` file to contain the following: .. code-block:: none uid = swift gid = swift log file = /var/log/rsyncd.log pid file = /var/run/rsyncd.pid address = MANAGEMENT_INTERFACE_IP_ADDRESS [account] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/account.lock [container] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/container.lock [object] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/object.lock Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. .. note:: The ``rsync`` service requires no authentication, so consider running it on a private network in production environments. 7. Start the ``rsyncd`` service and configure it to start when the system boots: .. code-block:: console # systemctl enable rsyncd.service # systemctl start rsyncd.service Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. .. note:: Perform these steps on each storage node. #. Install the packages: .. code-block:: console # zypper install openstack-swift-account \ openstack-swift-container openstack-swift-object python-xml 2. .. include:: storage-include1.txt 3. .. include:: storage-include2.txt 4. .. include:: storage-include3.txt 5. Ensure proper ownership of the mount point directory structure: .. code-block:: console # chown -R swift:swift /srv/node swift-2.17.0/doc/source/install/initial-rings.rst0000666000175100017510000002553713236061617022022 0ustar zuulzuul00000000000000Create and distribute initial rings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Before starting the Object Storage services, you must create the initial account, container, and object rings. The ring builder creates configuration files that each node uses to determine and deploy the storage architecture. For simplicity, this guide uses one region and two zones with 2^10 (1024) maximum partitions, 3 replicas of each object, and 1 hour minimum time between moving a partition more than once. For Object Storage, a partition indicates a directory on a storage device rather than a conventional partition table. For more information, see the `Deployment Guide `__. .. note:: Perform these steps on the controller node. Create account ring ------------------- The account server uses the account ring to maintain lists of containers. #. Change to the ``/etc/swift`` directory. #. Create the base ``account.builder`` file: .. code-block:: console # swift-ring-builder account.builder create 10 3 1 .. note:: This command provides no output. 
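   The three values passed to ``create`` are the partition power, the replica count, and the minimum number of hours between moving a partition more than once. They correspond to the 2^10 (1024) maximum partitions, 3 replicas, and 1 hour minimum described at the start of this section. As a purely illustrative sanity check of the partition arithmetic (any Python interpreter will do; this one-liner is not part of the official procedure):

   .. code-block:: console

      $ python -c "print(2 ** 10)"
      1024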
#. Add each storage node to the ring: .. code-block:: console # swift-ring-builder account.builder \ add --region 1 --zone 1 --ip STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS --port 6202 \ --device DEVICE_NAME --weight DEVICE_WEIGHT Replace ``STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. Replace ``DEVICE_NAME`` with a storage device name on the same storage node. For example, using the first storage node in :ref:`storage` with the ``/dev/sdb`` storage device and weight of 100: .. code-block:: console # swift-ring-builder account.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6202 --device sdb --weight 100 Repeat this command for each storage device on each storage node. In the example architecture, use the command in four variations: .. code-block:: console # swift-ring-builder account.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6202 --device sdb --weight 100 Device d0r1z1-10.0.0.51:6202R10.0.0.51:6202/sdb_"" with 100.0 weight got id 0 # swift-ring-builder account.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6202 --device sdc --weight 100 Device d1r1z2-10.0.0.51:6202R10.0.0.51:6202/sdc_"" with 100.0 weight got id 1 # swift-ring-builder account.builder add \ --region 1 --zone 2 --ip 10.0.0.52 --port 6202 --device sdb --weight 100 Device d2r1z3-10.0.0.52:6202R10.0.0.52:6202/sdb_"" with 100.0 weight got id 2 # swift-ring-builder account.builder add \ --region 1 --zone 2 --ip 10.0.0.52 --port 6202 --device sdc --weight 100 Device d3r1z4-10.0.0.52:6202R10.0.0.52:6202/sdc_"" with 100.0 weight got id 3 #. Verify the ring contents: .. code-block:: console # swift-ring-builder account.builder account.builder, build version 4 1024 partitions, 3.000000 replicas, 1 regions, 2 zones, 4 devices, 100.00 balance, 0.00 dispersion The minimum number of hours before a partition can be reassigned is 1 The overload factor is 0.00% (0.000000) Devices: id region zone ip address port replication ip replication port name weight partitions balance meta 0 1 1 10.0.0.51 6202 10.0.0.51 6202 sdb 100.00 0 -100.00 1 1 1 10.0.0.51 6202 10.0.0.51 6202 sdc 100.00 0 -100.00 2 1 2 10.0.0.52 6202 10.0.0.52 6202 sdb 100.00 0 -100.00 3 1 2 10.0.0.52 6202 10.0.0.52 6202 sdc 100.00 0 -100.00 #. Rebalance the ring: .. code-block:: console # swift-ring-builder account.builder rebalance Reassigned 1024 (100.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 Create container ring --------------------- The container server uses the container ring to maintain lists of objects. However, it does not track object locations. #. Change to the ``/etc/swift`` directory. #. Create the base ``container.builder`` file: .. code-block:: console # swift-ring-builder container.builder create 10 3 1 .. note:: This command provides no output. #. Add each storage node to the ring: .. code-block:: console # swift-ring-builder container.builder \ add --region 1 --zone 1 --ip STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS --port 6201 \ --device DEVICE_NAME --weight DEVICE_WEIGHT Replace ``STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. Replace ``DEVICE_NAME`` with a storage device name on the same storage node. For example, using the first storage node in :ref:`storage` with the ``/dev/sdb`` storage device and weight of 100: .. 
code-block:: console # swift-ring-builder container.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6201 --device sdb --weight 100 Repeat this command for each storage device on each storage node. In the example architecture, use the command in four variations: .. code-block:: console # swift-ring-builder container.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6201 --device sdb --weight 100 Device d0r1z1-10.0.0.51:6201R10.0.0.51:6201/sdb_"" with 100.0 weight got id 0 # swift-ring-builder container.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6201 --device sdc --weight 100 Device d1r1z2-10.0.0.51:6201R10.0.0.51:6201/sdc_"" with 100.0 weight got id 1 # swift-ring-builder container.builder add \ --region 1 --zone 2 --ip 10.0.0.52 --port 6201 --device sdb --weight 100 Device d2r1z3-10.0.0.52:6201R10.0.0.52:6201/sdb_"" with 100.0 weight got id 2 # swift-ring-builder container.builder add \ --region 1 --zone 2 --ip 10.0.0.52 --port 6201 --device sdc --weight 100 Device d3r1z4-10.0.0.52:6201R10.0.0.52:6201/sdc_"" with 100.0 weight got id 3 #. Verify the ring contents: .. code-block:: console # swift-ring-builder container.builder container.builder, build version 4 1024 partitions, 3.000000 replicas, 1 regions, 2 zones, 4 devices, 100.00 balance, 0.00 dispersion The minimum number of hours before a partition can be reassigned is 1 The overload factor is 0.00% (0.000000) Devices: id region zone ip address port replication ip replication port name weight partitions balance meta 0 1 1 10.0.0.51 6201 10.0.0.51 6201 sdb 100.00 0 -100.00 1 1 1 10.0.0.51 6201 10.0.0.51 6201 sdc 100.00 0 -100.00 2 1 2 10.0.0.52 6201 10.0.0.52 6201 sdb 100.00 0 -100.00 3 1 2 10.0.0.52 6201 10.0.0.52 6201 sdc 100.00 0 -100.00 #. Rebalance the ring: .. code-block:: console # swift-ring-builder container.builder rebalance Reassigned 1024 (100.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 Create object ring ------------------ The object server uses the object ring to maintain lists of object locations on local devices. #. Change to the ``/etc/swift`` directory. #. Create the base ``object.builder`` file: .. code-block:: console # swift-ring-builder object.builder create 10 3 1 .. note:: This command provides no output. #. Add each storage node to the ring: .. code-block:: console # swift-ring-builder object.builder \ add --region 1 --zone 1 --ip STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS --port 6200 \ --device DEVICE_NAME --weight DEVICE_WEIGHT Replace ``STORAGE_NODE_MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. Replace ``DEVICE_NAME`` with a storage device name on the same storage node. For example, using the first storage node in :ref:`storage` with the ``/dev/sdb`` storage device and weight of 100: .. code-block:: console # swift-ring-builder object.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6200 --device sdb --weight 100 Repeat this command for each storage device on each storage node. In the example architecture, use the command in four variations: .. 
code-block:: console # swift-ring-builder object.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6200 --device sdb --weight 100 Device d0r1z1-10.0.0.51:6200R10.0.0.51:6200/sdb_"" with 100.0 weight got id 0 # swift-ring-builder object.builder add \ --region 1 --zone 1 --ip 10.0.0.51 --port 6200 --device sdc --weight 100 Device d1r1z2-10.0.0.51:6200R10.0.0.51:6200/sdc_"" with 100.0 weight got id 1 # swift-ring-builder object.builder add \ --region 1 --zone 2 --ip 10.0.0.52 --port 6200 --device sdb --weight 100 Device d2r1z3-10.0.0.52:6200R10.0.0.52:6200/sdb_"" with 100.0 weight got id 2 # swift-ring-builder object.builder add \ --region 1 --zone 2 --ip 10.0.0.52 --port 6200 --device sdc --weight 100 Device d3r1z4-10.0.0.52:6200R10.0.0.52:6200/sdc_"" with 100.0 weight got id 3 #. Verify the ring contents: .. code-block:: console # swift-ring-builder object.builder object.builder, build version 4 1024 partitions, 3.000000 replicas, 1 regions, 2 zones, 4 devices, 100.00 balance, 0.00 dispersion The minimum number of hours before a partition can be reassigned is 1 The overload factor is 0.00% (0.000000) Devices: id region zone ip address port replication ip replication port name weight partitions balance meta 0 1 1 10.0.0.51 6200 10.0.0.51 6200 sdb 100.00 0 -100.00 1 1 1 10.0.0.51 6200 10.0.0.51 6200 sdc 100.00 0 -100.00 2 1 2 10.0.0.52 6200 10.0.0.52 6200 sdb 100.00 0 -100.00 3 1 2 10.0.0.52 6200 10.0.0.52 6200 sdc 100.00 0 -100.00 #. Rebalance the ring: .. code-block:: console # swift-ring-builder object.builder rebalance Reassigned 1024 (100.00%) partitions. Balance is now 0.00. Dispersion is now 0.00 Distribute ring configuration files ----------------------------------- * Copy the ``account.ring.gz``, ``container.ring.gz``, and ``object.ring.gz`` files to the ``/etc/swift`` directory on each storage node and any additional nodes running the proxy service. swift-2.17.0/doc/source/install/controller-include.txt0000666000175100017510000000435213236061617023054 0ustar zuulzuul00000000000000Edit the ``/etc/swift/proxy-server.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the bind port, user, and configuration directory: .. code-block:: none [DEFAULT] ... bind_port = 8080 user = swift swift_dir = /etc/swift * In the ``[pipeline:main]`` section, remove the ``tempurl`` and ``tempauth`` modules and add the ``authtoken`` and ``keystoneauth`` modules: .. code-block:: none [pipeline:main] pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk ratelimit authtoken keystoneauth container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server .. note:: Do not change the order of the modules. .. note:: For more information on other modules that enable additional features, see the `Deployment Guide `__. * In the ``[app:proxy-server]`` section, enable automatic account creation: .. code-block:: console [app:proxy-server] use = egg:swift#proxy ... account_autocreate = True * In the ``[filter:keystoneauth]`` section, configure the operator roles: .. code-block:: console [filter:keystoneauth] use = egg:swift#keystoneauth ... operator_roles = admin,user * In the ``[filter:authtoken]`` section, configure Identity service access: .. code-block:: none [filter:authtoken] paste.filter_factory = keystonemiddleware.auth_token:filter_factory ... 
auth_uri = http://controller:5000 auth_url = http://controller:35357 memcached_servers = controller:11211 auth_type = password project_domain_id = default user_domain_id = default project_name = service username = swift password = SWIFT_PASS delay_auth_decision = True Replace ``SWIFT_PASS`` with the password you chose for the ``swift`` user in the Identity service. .. note:: Comment out or remove any other options in the ``[filter:authtoken]`` section. * In the ``[filter:cache]`` section, configure the ``memcached`` location: .. code-block:: none [filter:cache] use = egg:swift#memcache ... memcache_servers = controller:11211 swift-2.17.0/doc/source/install/controller-common_prerequisites.txt0000666000175100017510000001234113236061617025702 0ustar zuulzuul00000000000000Prerequisites ------------- The proxy service relies on an authentication and authorization mechanism such as the Identity service. However, unlike other services, it also offers an internal mechanism that allows it to operate without any other OpenStack services. Before you configure the Object Storage service, you must create service credentials and an API endpoint. .. note:: The Object Storage service does not use an SQL database on the controller node. Instead, it uses distributed SQLite databases on each storage node. #. Source the ``admin`` credentials to gain access to admin-only CLI commands: .. code-block:: console $ . admin-openrc #. To create the Identity service credentials, complete these steps: * Create the ``swift`` user: .. code-block:: console $ openstack user create --domain default --password-prompt swift User Password: Repeat User Password: +-----------+----------------------------------+ | Field | Value | +-----------+----------------------------------+ | domain_id | default | | enabled | True | | id | d535e5cbd2b74ac7bfb97db9cced3ed6 | | name | swift | +-----------+----------------------------------+ * Add the ``admin`` role to the ``swift`` user: .. code-block:: console $ openstack role add --project service --user swift admin .. note:: This command provides no output. * Create the ``swift`` service entity: .. code-block:: console $ openstack service create --name swift \ --description "OpenStack Object Storage" object-store +-------------+----------------------------------+ | Field | Value | +-------------+----------------------------------+ | description | OpenStack Object Storage | | enabled | True | | id | 75ef509da2c340499d454ae96a2c5c34 | | name | swift | | type | object-store | +-------------+----------------------------------+ #. Create the Object Storage service API endpoints: .. 
code-block:: console $ openstack endpoint create --region RegionOne \ object-store public http://controller:8080/v1/AUTH_%\(project_id\)s +--------------+----------------------------------------------+ | Field | Value | +--------------+----------------------------------------------+ | enabled | True | | id | 12bfd36f26694c97813f665707114e0d | | interface | public | | region | RegionOne | | region_id | RegionOne | | service_id | 75ef509da2c340499d454ae96a2c5c34 | | service_name | swift | | service_type | object-store | | url | http://controller:8080/v1/AUTH_%(project_id)s | +--------------+----------------------------------------------+ $ openstack endpoint create --region RegionOne \ object-store internal http://controller:8080/v1/AUTH_%\(project_id\)s +--------------+----------------------------------------------+ | Field | Value | +--------------+----------------------------------------------+ | enabled | True | | id | 7a36bee6733a4b5590d74d3080ee6789 | | interface | internal | | region | RegionOne | | region_id | RegionOne | | service_id | 75ef509da2c340499d454ae96a2c5c34 | | service_name | swift | | service_type | object-store | | url | http://controller:8080/v1/AUTH_%(project_id)s | +--------------+----------------------------------------------+ $ openstack endpoint create --region RegionOne \ object-store admin http://controller:8080/v1 +--------------+----------------------------------+ | Field | Value | +--------------+----------------------------------+ | enabled | True | | id | ebb72cd6851d4defabc0b9d71cdca69b | | interface | admin | | region | RegionOne | | region_id | RegionOne | | service_id | 75ef509da2c340499d454ae96a2c5c34 | | service_name | swift | | service_type | object-store | | url | http://controller:8080/v1 | +--------------+----------------------------------+ swift-2.17.0/doc/source/install/index.rst0000666000175100017510000000122513236061617020344 0ustar zuulzuul00000000000000============================ Object Storage Install Guide ============================ .. toctree:: :maxdepth: 2 get_started.rst environment-networking.rst controller-install.rst storage-install.rst initial-rings.rst finalize-installation.rst verify.rst next-steps.rst The Object Storage services (swift) work together to provide object storage and retrieval through a REST API. This chapter assumes a working setup of OpenStack following the `OpenStack Installation Tutorial `_. Your environment must at least include the Identity service (keystone) prior to deploying Object Storage. swift-2.17.0/doc/source/install/verify.rst0000666000175100017510000000573713236061617020555 0ustar zuulzuul00000000000000.. _verify: Verify operation ~~~~~~~~~~~~~~~~ Verify operation of the Object Storage service. .. note:: Perform these steps on the controller node. .. warning:: If you are using Red Hat Enterprise Linux 7 or CentOS 7 and one or more of these steps do not work, check the ``/var/log/audit/audit.log`` file for SELinux messages indicating denial of actions for the ``swift`` processes. If present, change the security context of the ``/srv/node`` directory to the lowest security level (s0) for the ``swift_data_t`` type, ``object_r`` role and the ``system_u`` user: .. code-block:: console # chcon -R system_u:object_r:swift_data_t:s0 /srv/node #. Source the ``demo`` credentials: .. code-block:: console $ . demo-openrc #. Show the service status: .. 
code-block:: console $ swift stat Account: AUTH_ed0b60bf607743088218b0a533d5943f Containers: 0 Objects: 0 Bytes: 0 X-Account-Project-Domain-Id: default X-Timestamp: 1444143887.71539 X-Trans-Id: tx1396aeaf17254e94beb34-0056143bde X-Openstack-Request-Id: tx1396aeaf17254e94beb34-0056143bde Content-Type: text/plain; charset=utf-8 Accept-Ranges: bytes #. Create ``container1`` container: .. code-block:: console $ openstack container create container1 +---------------------------------------+------------+------------------------------------+ | account | container | x-trans-id | +---------------------------------------+------------+------------------------------------+ | AUTH_ed0b60bf607743088218b0a533d5943f | container1 | tx8c4034dc306c44dd8cd68-0056f00a4a | +---------------------------------------+------------+------------------------------------+ #. Upload a test file to the ``container1`` container: .. code-block:: console $ openstack object create container1 FILE +--------+------------+----------------------------------+ | object | container | etag | +--------+------------+----------------------------------+ | FILE | container1 | ee1eca47dc88f4879d8a229cc70a07c6 | +--------+------------+----------------------------------+ Replace ``FILE`` with the name of a local file to upload to the ``container1`` container. #. List files in the ``container1`` container: .. code-block:: console $ openstack object list container1 +------+ | Name | +------+ | FILE | +------+ #. Download a test file from the ``container1`` container: .. code-block:: console $ openstack object save container1 FILE Replace ``FILE`` with the name of the file uploaded to the ``container1`` container. .. note:: This command provides no output. swift-2.17.0/doc/source/install/storage-install-rdo.rst0000666000175100017510000001104413236061617023127 0ustar zuulzuul00000000000000.. _storage-rdo: Install and configure the storage nodes for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure storage nodes that operate the account, container, and object services. For simplicity, this configuration references two storage nodes, each containing two empty local block storage devices. The instructions use ``/dev/sdb`` and ``/dev/sdc``, but you can substitute different values for your particular nodes. Although Object Storage supports any file system with extended attributes (xattr), testing and benchmarking indicate the best performance and reliability on XFS. For more information on horizontally scaling your environment, see the `Deployment Guide `_. This section applies to Red Hat Enterprise Linux 7 and CentOS 7. Prerequisites ------------- Before you install and configure the Object Storage service on the storage nodes, you must prepare the storage devices. .. note:: Perform these steps on each storage node. #. Install the supporting utility packages: .. code-block:: console # yum install xfsprogs rsync #. Format the ``/dev/sdb`` and ``/dev/sdc`` devices as XFS: .. code-block:: console # mkfs.xfs /dev/sdb # mkfs.xfs /dev/sdc #. Create the mount point directory structure: .. code-block:: console # mkdir -p /srv/node/sdb # mkdir -p /srv/node/sdc #. Edit the ``/etc/fstab`` file and add the following to it: .. code-block:: none /dev/sdb /srv/node/sdb xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 /dev/sdc /srv/node/sdc xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 #. Mount the devices: .. 
code-block:: console # mount /srv/node/sdb # mount /srv/node/sdc #. Create or edit the ``/etc/rsyncd.conf`` file to contain the following: .. code-block:: none uid = swift gid = swift log file = /var/log/rsyncd.log pid file = /var/run/rsyncd.pid address = MANAGEMENT_INTERFACE_IP_ADDRESS [account] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/account.lock [container] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/container.lock [object] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/object.lock Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. .. note:: The ``rsync`` service requires no authentication, so consider running it on a private network in production environments. 7. Start the ``rsyncd`` service and configure it to start when the system boots: .. code-block:: console # systemctl enable rsyncd.service # systemctl start rsyncd.service Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. .. note:: Perform these steps on each storage node. #. Install the packages: .. code-block:: console # yum install openstack-swift-account openstack-swift-container \ openstack-swift-object 2. Obtain the accounting, container, and object service configuration files from the Object Storage source repository: .. code-block:: console # curl -o /etc/swift/account-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/account-server.conf-sample?h=stable/ocata # curl -o /etc/swift/container-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/container-server.conf-sample?h=stable/ocata # curl -o /etc/swift/object-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/object-server.conf-sample?h=stable/ocata 3. .. include:: storage-include1.txt 4. .. include:: storage-include2.txt 5. .. include:: storage-include3.txt 6. Ensure proper ownership of the mount point directory structure: .. code-block:: console # chown -R swift:swift /srv/node 7. Create the ``recon`` directory and ensure proper ownership of it: .. code-block:: console # mkdir -p /var/cache/swift # chown -R root:swift /var/cache/swift # chmod -R 775 /var/cache/swift swift-2.17.0/doc/source/install/storage-include2.txt0000666000175100017510000000213413236061617022413 0ustar zuulzuul00000000000000Edit the ``/etc/swift/container-server.conf`` file and complete the following actions: * In the ``[DEFAULT]`` section, configure the bind IP address, bind port, user, configuration directory, and mount point directory: .. code-block:: none [DEFAULT] ... bind_ip = MANAGEMENT_INTERFACE_IP_ADDRESS bind_port = 6201 user = swift swift_dir = /etc/swift devices = /srv/node mount_check = True Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. * In the ``[pipeline:main]`` section, enable the appropriate modules: .. code-block:: none [pipeline:main] pipeline = healthcheck recon container-server .. note:: For more information on other modules that enable additional features, see the `Deployment Guide `__. * In the ``[filter:recon]`` section, configure the recon (meters) cache directory: .. 
code-block:: none [filter:recon] use = egg:swift#recon ... recon_cache_path = /var/cache/swift swift-2.17.0/doc/source/install/next-steps.rst0000666000175100017510000000035413236061617021351 0ustar zuulzuul00000000000000.. _next-steps: ========== Next steps ========== Your OpenStack environment now includes Object Storage. To add more services, see the `additional documentation on installing OpenStack `_ . swift-2.17.0/doc/source/install/controller-install-debian.rst0000666000175100017510000000341613236061617024310 0ustar zuulzuul00000000000000.. _controller-debian: Install and configure the controller node for Debian ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure the proxy service that handles requests for the account, container, and object services operating on the storage nodes. For simplicity, this guide installs and configures the proxy service on the controller node. However, you can run the proxy service on any node with network connectivity to the storage nodes. Additionally, you can install and configure the proxy service on multiple nodes to increase performance and redundancy. For more information, see the `Deployment Guide `__. This section applies to Debian. .. include:: controller-common_prerequisites.txt Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. #. Install the packages: .. code-block:: console # apt-get install swift swift-proxy python-swiftclient \ python-keystoneclient python-keystonemiddleware \ memcached .. note:: Complete OpenStack environments already include some of these packages. 2. Create the ``/etc/swift`` directory. 3. Obtain the proxy service configuration file from the Object Storage source repository: .. code-block:: console # curl -o /etc/swift/proxy-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/proxy-server.conf-sample?h=stable/ocata swift-2.17.0/doc/source/install/finalize-installation-obs.rst0000666000175100017510000000616313236061617024324 0ustar zuulzuul00000000000000.. _finalize-obs: Finalize installation for openSUSE and SUSE Linux Enterprise ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. This section applies to openSUSE Leap 42.2 and SUSE Linux Enterprise Server 12 SP2. #. Edit the ``/etc/swift/swift.conf`` file and complete the following actions: * In the ``[swift-hash]`` section, configure the hash path prefix and suffix for your environment. .. code-block:: none [swift-hash] ... swift_hash_path_suffix = HASH_PATH_SUFFIX swift_hash_path_prefix = HASH_PATH_PREFIX Replace HASH_PATH_PREFIX and HASH_PATH_SUFFIX with unique values. .. warning:: Keep these values secret and do not change or lose them. * In the ``[storage-policy:0]`` section, configure the default storage policy: .. code-block:: none [storage-policy:0] ... name = Policy-0 default = yes #. 
Copy the ``swift.conf`` file to the ``/etc/swift`` directory on each storage node and any additional nodes running the proxy service. 3. On all nodes, ensure proper ownership of the configuration directory: .. code-block:: console # chown -R root:swift /etc/swift 4. On the controller node and any other nodes running the proxy service, start the Object Storage proxy service including its dependencies and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-swift-proxy.service memcached.service # systemctl start openstack-swift-proxy.service memcached.service 5. On the storage nodes, start the Object Storage services and configure them to start when the system boots: .. code-block:: console # systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service \ openstack-swift-account-reaper.service openstack-swift-account-replicator.service # systemctl start openstack-swift-account.service openstack-swift-account-auditor.service \ openstack-swift-account-reaper.service openstack-swift-account-replicator.service # systemctl enable openstack-swift-container.service openstack-swift-container-auditor.service \ openstack-swift-container-replicator.service openstack-swift-container-updater.service # systemctl start openstack-swift-container.service openstack-swift-container-auditor.service \ openstack-swift-container-replicator.service openstack-swift-container-updater.service # systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service \ openstack-swift-object-replicator.service openstack-swift-object-updater.service # systemctl start openstack-swift-object.service openstack-swift-object-auditor.service \ openstack-swift-object-replicator.service openstack-swift-object-updater.service swift-2.17.0/doc/source/install/storage-install-ubuntu-debian.rst0000666000175100017510000001077713236061617025121 0ustar zuulzuul00000000000000.. _storage-ubuntu-debian: Install and configure the storage nodes for Ubuntu and Debian ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This section describes how to install and configure storage nodes that operate the account, container, and object services. For simplicity, this configuration references two storage nodes, each containing two empty local block storage devices. The instructions use ``/dev/sdb`` and ``/dev/sdc``, but you can substitute different values for your particular nodes. Although Object Storage supports any file system with extended attributes (xattr), testing and benchmarking indicate the best performance and reliability on XFS. For more information on horizontally scaling your environment, see the `Deployment Guide `_. This section applies to Ubuntu 14.04 (LTS) and Debian. Prerequisites ------------- Before you install and configure the Object Storage service on the storage nodes, you must prepare the storage devices. .. note:: Perform these steps on each storage node. #. Install the supporting utility packages: .. code-block:: console # apt-get install xfsprogs rsync #. Format the ``/dev/sdb`` and ``/dev/sdc`` devices as XFS: .. code-block:: console # mkfs.xfs /dev/sdb # mkfs.xfs /dev/sdc #. Create the mount point directory structure: .. code-block:: console # mkdir -p /srv/node/sdb # mkdir -p /srv/node/sdc #. Edit the ``/etc/fstab`` file and add the following to it: .. code-block:: none /dev/sdb /srv/node/sdb xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 /dev/sdc /srv/node/sdc xfs noatime,nodiratime,nobarrier,logbufs=8 0 2 #. 
Mount the devices: .. code-block:: console # mount /srv/node/sdb # mount /srv/node/sdc #. Create or edit the ``/etc/rsyncd.conf`` file to contain the following: .. code-block:: none uid = swift gid = swift log file = /var/log/rsyncd.log pid file = /var/run/rsyncd.pid address = MANAGEMENT_INTERFACE_IP_ADDRESS [account] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/account.lock [container] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/container.lock [object] max connections = 2 path = /srv/node/ read only = False lock file = /var/lock/object.lock Replace ``MANAGEMENT_INTERFACE_IP_ADDRESS`` with the IP address of the management network on the storage node. .. note:: The ``rsync`` service requires no authentication, so consider running it on a private network in production environments. 7. Edit the ``/etc/default/rsync`` file and enable the ``rsync`` service: .. code-block:: none RSYNC_ENABLE=true 8. Start the ``rsync`` service: .. code-block:: console # service rsync start Install and configure components -------------------------------- .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. .. note:: Perform these steps on each storage node. #. Install the packages: .. code-block:: console # apt-get install swift swift-account swift-container swift-object 2. Obtain the accounting, container, and object service configuration files from the Object Storage source repository: .. code-block:: console # curl -o /etc/swift/account-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/account-server.conf-sample?h=stable/ocata # curl -o /etc/swift/container-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/container-server.conf-sample?h=stable/ocata # curl -o /etc/swift/object-server.conf https://git.openstack.org/cgit/openstack/swift/plain/etc/object-server.conf-sample?h=stable/ocata 3. .. include:: storage-include1.txt 4. .. include:: storage-include2.txt 5. .. include:: storage-include3.txt 6. Ensure proper ownership of the mount point directory structure: .. code-block:: console # chown -R swift:swift /srv/node 7. Create the ``recon`` directory and ensure proper ownership of it: .. code-block:: console # mkdir -p /var/cache/swift # chown -R root:swift /var/cache/swift # chmod -R 775 /var/cache/swift swift-2.17.0/doc/source/install/edit_hosts_file.txt0000666000175100017510000000044113236061617022407 0ustar zuulzuul00000000000000Edit the ``/etc/hosts`` file to contain the following: .. code-block:: none # controller 10.0.0.11 controller # compute1 10.0.0.31 compute1 # block1 10.0.0.41 block1 # object1 10.0.0.51 object1 # object2 10.0.0.52 object2 swift-2.17.0/doc/source/install/finalize-installation-rdo.rst0000666000175100017510000000657113236061617024330 0ustar zuulzuul00000000000000.. _finalize-rdo: Finalize installation for Red Hat Enterprise Linux and CentOS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ .. note:: Default configuration files vary by distribution. You might need to add these sections and options rather than modifying existing sections and options. Also, an ellipsis (``...``) in the configuration snippets indicates potential default configuration options that you should retain. 
This section applies to Red Hat Enterprise Linux 7 and CentOS 7.

#. Obtain the ``/etc/swift/swift.conf`` file from the Object Storage source repository:

   .. code-block:: console

      # curl -o /etc/swift/swift.conf \
        https://git.openstack.org/cgit/openstack/swift/plain/etc/swift.conf-sample?h=stable/ocata

#. Edit the ``/etc/swift/swift.conf`` file and complete the following actions:

   * In the ``[swift-hash]`` section, configure the hash path prefix and suffix for your environment.

     .. code-block:: none

        [swift-hash]
        ...
        swift_hash_path_suffix = HASH_PATH_SUFFIX
        swift_hash_path_prefix = HASH_PATH_PREFIX

     Replace HASH_PATH_PREFIX and HASH_PATH_SUFFIX with unique values.

     .. warning::

        Keep these values secret and do not change or lose them.

   * In the ``[storage-policy:0]`` section, configure the default storage policy:

     .. code-block:: none

        [storage-policy:0]
        ...
        name = Policy-0
        default = yes

#. Copy the ``swift.conf`` file to the ``/etc/swift`` directory on each storage node and any additional nodes running the proxy service.

4. On all nodes, ensure proper ownership of the configuration directory:

   .. code-block:: console

      # chown -R root:swift /etc/swift

5. On the controller node and any other nodes running the proxy service, start the Object Storage proxy service including its dependencies and configure them to start when the system boots:

   .. code-block:: console

      # systemctl enable openstack-swift-proxy.service memcached.service
      # systemctl start openstack-swift-proxy.service memcached.service

6. On the storage nodes, start the Object Storage services and configure them to start when the system boots:

   .. code-block:: console

      # systemctl enable openstack-swift-account.service openstack-swift-account-auditor.service \
        openstack-swift-account-reaper.service openstack-swift-account-replicator.service
      # systemctl start openstack-swift-account.service openstack-swift-account-auditor.service \
        openstack-swift-account-reaper.service openstack-swift-account-replicator.service
      # systemctl enable openstack-swift-container.service \
        openstack-swift-container-auditor.service openstack-swift-container-replicator.service \
        openstack-swift-container-updater.service
      # systemctl start openstack-swift-container.service \
        openstack-swift-container-auditor.service openstack-swift-container-replicator.service \
        openstack-swift-container-updater.service
      # systemctl enable openstack-swift-object.service openstack-swift-object-auditor.service \
        openstack-swift-object-replicator.service openstack-swift-object-updater.service
      # systemctl start openstack-swift-object.service openstack-swift-object-auditor.service \
        openstack-swift-object-replicator.service openstack-swift-object-updater.service
swift-2.17.0/doc/source/images/0000775000175100017510000000000013236061751016277 5ustar zuulzuul00000000000000swift-2.17.0/doc/source/images/ec_overview.png0000666000175100017510000044117213236061617021336 0ustar zuulzuul00000000000000[ec_overview.png: binary PNG image data omitted (erasure coding overview diagram)]
‰‚4«WB⇓°”Œ%$¢#“É„CBB":?Tø=;¹ ³vœBþÊРtZÑTBBâ{…B!pa6›…ONøfá÷ׂñhÞqüÄs8gÁìUËÑ¥FÑ82¬:ÊŒÞÒ=vãðôª¢iT|¡–yÂHßž’7RX %$$¾NºÁÁÁpvvƬ‹kᯄB.r ›0“-²×DF·T †“““h#!!Á|“ðë“Ù Sïbol¿†°(4˜>°É8 À 87¶ºp݉qP|ðT°»ÇWÌ¢cÂ’a£ðÈœCÇv„V4ýVºÿ.ì³Àì«tÉ)JHÄS"""àëë ///¸Íø~O…J´•H „ùcSë-¨—±üüüàîî.ZHHH0_-ü|vWͱ$ú*¸W4e^B+û zú¶î™’~ &]dž&A</̆ä˜0©½õ'ÇÚ‰“ñÄœ½µ‚F4…ñZ ¥,ùjôGµ|.¢…  2;‚åŤÕqhíD 2çŸP­ÝäÌ™ £;5¯—ˆ°ðã‚ÍÓÓ)æ•Åó€g$üìDÛO`¦j2Y¿«ì¹Ìú=<Œrõ‡sŸ2m,fò·üLÕÃØêÖŽéž–2£g%ÛTÐóQñs';£Ž_ ]/·>_y¿šÉ-÷oˆpë,_{é…çDqíãð}Êü{æ‡ÍÖ FºÒð÷÷‡›››h!!!Á|µðëUÖ Ó ËšG˜Ý$µhjei‡üøcáETº{FUŒ~™Ò¤ÅÝGÅ«˜L”oÞ¡tˆßd®$ñÊo²97¿1 uZo½L$o¿M¸8±žðýùîÁHQ}œðÝF»oð®Oìð 2ÁB÷ˆ¯|µðÓ½ƒ}Òº˜]¸<ôoý©ØÿšDŒJ†ü™Ú Yð>ìxE‰(Š,’­3RíÆúço¿\ü±X4¸tø¸Âö°“»¡KîÊØpm |ÂÉ?Züñ=e.è»:¶ßX·&^²Ê;ÔÏ× %½¼¡ ?(Iô=}¶ £.!Á¡GµÒCQ'‰7i¼×èwt*|ÃI²ø#Ñçìþ;æ«¥Št¡†Ð0øïù”ÅñÇ•½;Çÿ³…ŒÅÜír7À®«óðÆ,†ÍŽèY 6_˜Œá?(Ü’ð“ø,_½œËãû§{²gYSg>Þ¹"|Ú¸ûÈa¤/Yc–ñæ[ÞEÓ9·èÓÖÑÎP Ÿ7¬¢Ï) ‚ÄëÿȠťIõqQ´·Š>;¬½(Ø[‚ƒ0¥EbloÁ¨^ÂUkóo%Ñ'!‰Þ勯ě*eñÇŽNh¹}8Ú×½ˆ-EÊÁ¾(™Ì’yPÁH׆X[­HçPJÒ™@D8 ºàö6ÂudFæF Æ`x&k >«ÿ×ä@”z%_n´ºaø”ôµ*Ê­kzòS„#Féoû‹.ˆ¼žûD„kEDu#Òßtˆþ?ë—È{Úch‘ÎHn'Þ“[ô,äß[`Ý™Qh¿³Úìì‰QW‘ ±Ç~¯PÓr-7uDÇÓ‡Ñ0cqë½™’&©‚ö‰BÑns´ÝÕ]Ž,¦ª/ C¾Ÿ‰üc Ïò_¤_Åß -‰¡V3> ôÝåpÚÂÌײpãïÜJiƒ¬íE†®ãk"ÃNæ$Ö•Ž¥6òìü^Y”é†x&ªá$òYÔÉ-¾&œÌåŽ\¬7’Ê|­fÂ{"79Ϧ#‚Ýä0ÙnAnØÞ%»Í¿1Ð3±a{ßö¨ïVBBâ«ùjág/Ž“U ]Ñ åÚ"áh]¶œ{†ô,7…Â3…¬aO—ª&ê¥ÉŒ®e&ceõáH§¢w¤pÅ ¢-¡‘'ÁÒZãRKafñ'´|:ˆ~š†Êž”ïê‚‘&euÔO“ ÝÊÏB÷ é©ò„æÅF ï¤]Ú´ôLŒð{ä¿ÕÀ²ºs0%35À샭éYŒÂè;‘Ù.•ʯnj¤>hµ¦-ÎEäEEûÇx¬Â‚ªcQ×ñšm›ˆ…]ïât®Äh¼¢3Š™‹ö)è™ì°«ãIÝæëÇb^«£È¦Õ!UŠ:ØTw"ÖŸêvoãA×]І=Ƙ“Kñ*ôÚ톧z g³ô«g­…‡Ú ÅúÁè[gr9êA·±ñ"ì;Ù 3o?ÄΞϑ'p%šo芛ÊTP[LP;&B#oG´^ÓI²Ã’éñi0¡ì¸_#_–fø#y&’A¨\l8jÊ^Ã!}OÜ­XÍWTò÷Ùq¯YW@[ OšõDëÕuÐþÄ^N–“Ü‘*÷ßÊW ¿?–QfMœžÝÓNÿ#|gŽŽ/ƒ—81¦Ã܆ÑÇþ-ÐNüt¼@ø,\¥,ý¥Ú¬ÿ2Q ªñ/_>A׉Ó0þ|Ìœ25{ÎD*?U¶NÖx²}*¿¾Ò}`Ú§Ó[» ž>‰\dFBB‚øWUI0°p'"ä$T4$^¶ž™‚P³=Ì/Wbî{gtOž–a0‚Œ¡pOYYÕÁ(Sh&Uø‰R£€C (RÝwO&‘ã@ÿ›¸èCBÈŽÜTBi›aYQ‹À_W"ܨÄË·' K G…3)ŽA{ç’hJ„çWæãº6' »åD·ÙÐïÏù€}bœ¿º×|Þ"ÜìŽÞ*à­ÙS®Dý¤n(’¡ ï.à±2ÖÔŠ ›qøÕ «ÀU±þÜ ´/=3ªvƒ‹Ö …\YÌ…ã(¹¦—Ãçý)¼‰HµzÌAÎ&±‘×®/Çù÷o¡+«Väp°„`ô‘áè²»?ZPƒJ ¢µû¯'‰Ba‡/÷¢ÏÎ>h½³6¿z͆öCŸ‹T‘¶×`Û¹%h\|fÖè7µ# »'C¨)Ü l6„0ø†RnI¿{ð®ì A8ÿòfÝÚ¨=òÖ’¦ÜË¢¬§™³÷”êý‘ÄÁ•§†ÉŽk7–À'؃ÿyÜÓ'Cz ½&¥°mœ(¿7EvŸêEf`VÍ!ð²³CQÏ”0‘¨¾}eî„À9qU”t8‡g¯S^‹Ó÷Á Ô@nÔaÒe U6< 1î\hÝ !s<m÷¯C‚å)Z8b@†´hvüO (ÚoB͘Tº¥K i*Awq2ÄÖ›Uà^ìxp‹üe$!!ñõ|µðòàÍÙU·ÞEÒSÞ`]½ô #dâ‚ËÄñzTáäî"u¢'‘×íyÊÝNµ0±ºÙ|~ÖL3î­ïøÜ‚·x½L&GáY[Èòàþ†ô% e‹ö*/Tèem}lÝÈ:dlMO²Ë!|—HðÈUxüö,¥•°S’Ôã,î^Ó&Ei­ûüÞÂŽÄM8õb,Z¸¨Õ”9Dª(åj¼÷=…~;ú¡ß¡aÐ õÂ_ïàDÄ,\ÂÇzpše‘'@iÔ†œÌõ<–'‰È9—0Ó?2‹Õ ¾–>ù— º–Û ­n[±~•Ã^U'&¢Ïžáh±¾"œæö¤ÝGŽq)ÐêâmÜŒ¾™SZÇ ™œp¨ûeèLCÍqÔÏ×êK›_„ÖO%™™aa¿ð=éN‚·…Žü%B«8"+L¦Q†aÓÝ[ø=EºãÒ¸›5ê ^™ ˆ…YÀü;aƯLb2®ú†ÛcG·+Ð<›‹î›ûaÿ{®ÝÚÁUC"Úµ(¦4Z‚Œ¾‹QzÏF2&a%S@«$·èÓŽüÈO޾+é9é]ÊevÐ…^GŸí}Ðçà$™“>³J%¬ug{v~Îü~„0s…wAîUX×é¼ß,B·Í}±ûíÁÛüüt‚ü{Ûû,è“¡q|â–9ö=çð(-§J…a/–ãµG ”ÍÖ™ »ðÀ_Wµ^˜M‚:ìh Ù˜$ýQjâo¨zxõÀü‚ùÈ_?[ ‰/儸`3a€îÖéѳw?ôîÙS7ž$³ä‡Þ1©K5D›Ö­qðþ+¼ú{ºvé‚öS6À²M¼Âš¹s¦aË^34œIîú,¦WíG Íp̈J©3"ñouÐ.×ï0=ÛIÿ¼@ýß2Ó½YÈÊ¡V˜ð,€Âœ¬ Z%K]¸*%ùEÝ/Žv~˜~þÆUj'Ü3sæÆ(蕜‚EüÑs²W©a&U„Ð &35Ÿí ÿ4S06OF è-‰•ôè™·|4ñ§ÛÁ‘Å"#$Jr8ØÙ bMð«ÜŒçä×DÅÐ5UFÍÁ¸ÿæ&"dZ„н<FN7Ag9¨ì­ù'ýV«ÒB-7Ò„6G:xÿ'®›ËbhÞ ô*#S$ÖF@.'ûÈnhº?¹Ãïßd‚V‘Œž1…)ò½ÉèùEà??÷è. f 7 Ø ªTúîÃY}aŒÍKÂ70¹Ò‡†ž½–ÜUŠËÙðìgáysxù~,¸õ¯1èúUl4#ö/¡w)ÇŒÓ;1´ê8 ½[¥jf. 
ú“Èvïg^yG|dÚE\ÙXèh’8ë™üary»ví”3Åm1¹TŠ–vÀTê*‚dFJ5^YºÿÓ¾[Jd➤hõÖêÎcqÀ\<ú7ÞPkcÒIàî,Å äÇ¢9óÔ3ÄÌ›å²&䯣l‘’>+=sr¶³FÑ,vðr4¦±<¯¨B\,âÃÂ,D53vI ÊpÀ/#,f±:tè ÈãÞË@i%œOCÚ;pØŽÄ%ó¨MF'sE‘R¡Œi‘?‚NÒÀß“Ü'#ö-kÎ¥_²d‰ÚæÔ©SjÈ“ò‹ç¼J¤ñ‡DDH‡¡^½zj§$n ؾ}{•#I=U‚‹ÍOª®1~¥GÒêÍOÖmOœõç,`.ÃtÈ.Zψ'Ó¸Þw !@d) Á0uxFuª3)}P†øe„B ÂY‡$-¼èa^–Ô’¥kŸw¡!g‚Æš›ÖkÑ K禧’–Äûià›°3bÁ\·ñãÇ¿R‘?ÈÎ\ç7'0 6L­@0CÃ!σ*ÿ82o³ô^´(˜V`È#…2G’„è¤rÆ''H0êÅe=øÉQ žK9KK¾¤D–Îÿ¯Å¼Þš&iQP¦ Ìä®:“'OVie ³ÀÅÔ™v²hÑ"5¡ˆ¼ ü<ˈè³.–H;jéüÌ’v%-(C?’%†¥§B¥ÀY–{÷îUÑmšü‹,OJ–îñ¼ s”Z·n­–ùáp –¥óÒKI-Yº6¥B£Ì]©!øcB?Á#6ÿå~é¹ÐÀóS‹nq'ò…¤Ó˜ r¦.ø«Hæ<"Hfc””‡Q/2äI“&)ÐÌ<@θeä˸=šqáðôHšœ°¨S´\Hö¡Úµk«!`a: 5Ré&ÅÚPùöíÛïØ¿èà§g~dRú£ ü4…”ž ‰†˜ æ$* *‰—¡n©!*°—á]ù¤V­Z©ON’Ië¨Ã‹*©%K×&WÈFir .¬"Là×"–®ÉÈE“Ñ1"Ú½{w,_¾\E‘ïÞ½«">Û4ÝÒ}3J!´B"?¸cwÈ`ÜôéÓU ùÅý£ ˆø÷–-[Ò$0%²t~Zóú³ðA ëE L>¼þúëjí5.oÅ%“8Ìú3"H8`Àœ;wNõ=‚âô0)äqdéšÌ’6%­(C.àÌYS½Ž€*?¥ã¾ ”E”@êUñ÷áÃ8~ý:A:n¬â @¸x£ 4V\“Æé1÷'ðcnÊ〓âS~ä½)GÊêî]8’÷—/ÃVx¯€P’^žE<üØ1ü†3u뢸(Óè`øb D\Ñ¢ˆ£]¡‚’AFUïT´¯IxÀdþ83‘ÃS4NT8o¿ýö+þ™ádNúØ·oŸ2~Lî'1‘ÿý÷ßW¿ÊÀO#g/ÚßcÜÕ‡“!4ýÀááwß}W94<²—’Yâè_ÅŠÕ½ÍI~X)-°mNêIIPÓ»§i| ±ï°Žu᢮]»†Û·cµè˱?¤"¢Ÿ»uîŒ.RØßcå}"©»å·§´‰”èülSÿ§B÷RüX?FÄSKÊÞ’ïäÿK|yf$2 œòŸºßM?¥D~tÂÙo2ðcǰ³µA\|ì/ θRjˆŠöüùój¨*%àg p –/‡×GÁêáCD ¸Š‘+FYE¢^ 9ÙÙaÞθr%f „ÖÜp_øòÔ %L/|±ecwälÅ Ç”. ÿ±cÑsãyNrÄDnæç< ðÓxï¸q#¼>ø@î¨råÔ{Ä)bìüiHìPVÒž{Åm1n†J=Çôì‰TîO™¦$u§sa-€Šõ§² ~óM~ò ÒæºT¼Sj#+©~T¼Â#¥pMÆïJî{눫G*ð7xØ0вz©”Æ!áý#ÆŠ8Ù׈ÀïìÙ³ øÑ¸\8p@Mb îa¿Ø±c‡JÏ`ž×oÙÆ¶ò¢Ï´"U © Áu;A0Æ=Ä­Ñ ¼(|#qX”|,[¶¬Š’o–œ Fü¸sgZ'ü2Sü8ÒÃb%ºÊaÛ6ØÊõ¶gÎ@'ï¨Ú" Iã…FqòîQR¤ŽaÒßNŠnØ"õZ"¿‰+¨¨Šôñ·³eC9Çž¦Zx-}ú?;_¼‡¼CŒ8&±… !²V- ^Ö X~œóIàG0%àGP›*àG™ ã+mm-6ÅQøosõ*lÏSï“I'íïå…ÑͱÂȦM‘àâ´«ÒC)ñËpÀ‹‘r‹2ŸJ ðzû¾÷q7 èßS§Í0‘2ø1_‚C½É?7–ÜÃV<÷{“'#L<+¾Dð‚YÂ<™uêàÈÞ½8.õqA!ðK’ΦÀ—½=¬à>a<~ú AC† ૯Tô‰Âi‰R ü:8¬“t¸]ñ^€EŽŽá JÿÁÏ?#D@ßåI‡Ÿ„8l/«†Š8ùûÈÉ“êøVžOCä=‹=ûC‡uèP؉ÞÞ²ÑeÊùo:Õqm9ò>5ÀðqÃgê]¨|EYÛ;‡Ã‡a#ÆÏFŸÒ&7Ävž•ó§‹qî'ŸQòž/âxR£Ì)GOŒ^D½zˆË™Sפ¥ ƒ~ÜÊ‹ÀOJ¤Á#™={¶ÊçÒˆKq6çc£¤ì“ òNêùŽô¦H@†"©' MN¬ô÷p)!"Ÿ'/\ÀqÂw™rš¹L  Òft/é§6Ôk ø1:ÍI&Zt1Y= †ÔV@g¶wßUŽp¬8TQÒ§h<<ž9ÿU$E(Fê%%TxB`¼O@Ø:©ËöÍ›Õï$îCÔ¿m[Ôxí55ò!|‰þ=‘ãóä9¬7GYöíC¼»;FBH÷îFð—‚Î °K ð{ìP¯È{‚è@70ž_~©žKJç?FJJ}0]“È•è :ÿöû÷ÆjÕpÿ×_•.Ò “«;S"2ðãkÐ@«XMÚõGTd˜|·Æ¤Qýüõð7Ýtfò¤Eü’ê5uö‚`(dwV­R/5žÎó"*:z»Ü5€³Á´ äŸHQF_„I/J4¿€ŽpQ´÷D ´Ä?Î^KMÄ9ODüÈ{yV¾R¥'÷¹mRhzQÐÏCÁ’¯? Àåzuj*#õ}ÑCp4öñòÝ¥ÓgýðCÜ^»QŒ¼ŠL&GŒø¥øq¨—0%à— ÆƒC žß|·éÒ¿”‡ P‰+XPAF$i–"¥}®Ê;u™3wþ!NI,Jê%QOF"ô°mİÚ= »'T}üðƒê”I5c˜|OàÇÜ4s`ÁÈ*uõ“øõ£¢æÌhÎ*×rØþÅ/@iûƒá>u*ìPýOÀW€bDVCÅñ¸+rwJtò2áÃÒâè ŸÈ?ΠîÕ«—Š4‘‡ÔÚP/×ÉKø<% üLºÈsôhUèô¾ÿ¾ÿ´ªíŸ³\³Ÿ±$ȳ Š£BC!…:tÏž=Ø´u+Žþý·:W/ણô¿ª¨>0Áy“Úþ¨¢ûtÄåoÑÕÙúõCœè_y>hã‰IˆÀŽù¾)?¶SJÀN/u\ioÊú=iïð-Œv@úHr}/ÃÈŸ|±¾sž¢wÜ„þê‚Eõ´›ÚQ~t$3ðã+ôùT¬‡FoôEDX°2Öîµ·wH5øÓ€×Qúð“ûÅs˜L„-^x{Ó&XQ1¼DBÆ:3¢ÆÙ§L|æ5· z’ýŸH:"AH¡lÙàÿý÷% †:’<“ÀáæÇ?®eõð#ïàäìÔ Ö¾¾¸)^&‡V¤bÆßŸ1iÀï„xÎðíÒ¥‹š­Ê…U_¸ø“7^^p›=ÙD¡^ÃŶHŽ7~Tº©‰ø¥üèÙ=Š#­ôþsôè»ãÇ,ý.L@b,ó@„¨<ØôÈÛÔè{ù¸vÂ'GÑyÖ7âVÇŽ¸ðÖ[Ø$úc¾´ƒ¦Cúôéƒ÷Þ{OÍ„%Øã‰”"~É?á{‚»»êk®òÌë§N!.wnc¤å9é#K¤ú™ñ;å„ ëLȾOLâd/.•Ã4'…’FHS$‚ Ô9¤?;­Z…«âÜ©Q üxjàÇTÑI Bˆ<ïžè½•æ’Qe=%ðMyd P>qúƒÞ~Ä×ü‹ øÉãu}•ê¡aÛ^ PBKÏÚ”!ÊÖ“G½‰þ¦¤þü˜c)âGá¶)wÛ¶¸$LUQ­$JãE;;6ó‚RŽ‹1`G¢xÖÍD#c{ù2òÖ©ƒ+ò©¼¾$ü¡á¢ð¥&âÇe/4¥MyyŨ^¹zÕxï"QiM䫦X9ôÆü&èÓSNj8^ɻŠosµk§r^îN›–lÔ•ÀÛF=ø± 8Ôk)Ç9–{÷ªçÝ,FU)_žká™ïx/&£3ÿŽxªŒf¥kð§‘è>¦zäiÔHÉý­; LÚ´¿ä€ŸF¥J•BçÎUÖ–|5j”Zä—`woÛ¦d™)<$n]É5&¹syœÔ±Ñ€ºµÈ±ÁÉI¾lâX_¹qCE^T;¿D¤õ7ÿfߢ à +öCnÇ™Ášžeôý‘KÅpÖ4u›–'ùØþ)í@DçœÃÀ׃s$“\§?ê ä€ßÑ"ðc?suE’%.íx÷?TŽw†—óTí/ÍBbWo‹ÓY£Æ¿r¾i{Éó´~/vƒúj5‘ ½R®øø¸xĈ°ÇÅÅbГ0uú Øßt¥e2ï4‰$Ç(pÞƒÃ_дÊå£Gó’£RTV}Íš5SÃì°ÏèRÈ¢*VDdµjÈ"F(A$ùöÔDÞ‹À2š(Þ ÿ~ÞžµÆ?Ê•"•! 
²&g/œäý8ÁÈôh¸`¶6å×= iró¯"€C/2FÐw‡Ë1¯‡Rxb0ˆ¤E3tÆø¹>—2áLLF§©ìS|æË^Ä@ê´E/\ß½[é‡l#xö¿dê•"8ÑÖBäŒU.j\Mú×E1Ú^þþ ôÝ“¾voÒ$†iÔšåÇþ‘Êg¤k¢–ºªtáG¬—np¨[€A9 9ŠA«6mT´”Û q†ôÖ­[•óÖ¢E 5:’Yj#ƒôæÐfÐç+À2ýÿ%}$s9Óúuóú˜ãK¹b´“³kÙ;v,Z¶l‰Ëâ°3øA‡ß¹,õí—áâ=’%yGÀîÌž­òÏ\l'È5IÉ_“Rrçð~.óæ©Ñ:·™ ïb <úþù'ruè F%“ÚßÔð>µôB…Ò§R}4lÛñ1Q øð…ÑFdøw„tÖ8ùþÖÈéj¢Ç Lw°LdÐ#EîÉ<6ë»wØ_€cXX²FîE‘’8+Pã…¥sŸEÑI‡ cî´b…Êñ ˜%='µ”xü­Å^€Và Aàò‹à=ùȈ‰ÛpQ¾,ûBŠð;Z¼èh8nØ ”€ÅóLå?VÊ“³³á¡­[#´}{5ÁGnh:!yâ3I\x—๧æà/Clï&€ÏJäóÖÚµpƒjì˜ÊÁû/DY#¯8Q«víÚÊCçb½ŒÔÔ¯U ß–+‡ÐaÃ+“CžÉM¨z•ˆg‚ßÛܦRþöþå¸äÉ£–‘bt›g¸ S5ÈGF˜áÞÒ©‘¢dˆQæRq(=¼aCã2Jé„Ìû>í¤¶[  bN†áJ äåÛ 2C²Ì‘*Mž£×‰§cGŠ7GLS¹~oJü™‡÷íSk¨Qø^Fc]YgQ'M«FN-1ÒÁeml «ärKÊáIIxOÐ+ <þ9Ì”K‰ÈOnGÆYƒ\IŸC$T¢/ ±ã‡¶iÇÝ»ŸzYÍHϪˆÒ¥ÑS$ßÒ9#¶­ù;'÷÷ï#ÞÓÓ8“éyÖǬh¼$µoÏ… à¥Á%Yºæ¹—¸8Ä@æR*T”Ï‘ò_‰9dN›7#RœŠ1ÿeÈ]{> ó|8 §?NêÉ‘?}X‚»w‡½ôE.0®œÅÿHZ›ˆ0ïªT˜/}ûëÚµ¡r²˜ ȼ(âôλ4!ßÑ%J ªLÔ´è7yÃá]‚®eIв[œ$Ê`¹råTÎ)·Dc?gß&àæùÔ­jÔGê¢ÈÊ•ù¬fÄsx¿ôXÈê1SÆ8†Ã½ŒŒrˆ|áÂ…jX˜ý”#‚œýã?ªü\FÉ+ÞK­ä=²B•ƒªfÿ¦)ݳm™»&àO='“þEŒ¸2êgǵ -8ÿZ?xZz®ÀÑ<+=A_4hÓ¡ÁJ!&$”!Q¿›Ùh¨3“Xáäì" I®Q4bº€¿iü曦»ÉShDiL9³T‰›™Ð½,ž–"H4ìȬ‹¥sÓª°îð‹¼6}W|WýC<–â=µsy?±3g0JÄìls…çY•j+'‰Ãoœaé¼çQ¨lÙ)ž‘ÿ¢“ãujÛ€ç%- ¢Hœ¶oWüT0'ñ=R[ؾü¤Qå²Cœ}Îa_®ïż":-š|¥—BJä8gЬØÜ¼i„[Ò3–H;_9Â{ù/_Fó>ÀÒE‹ÔZuÌ[czΜ9J&¨-½cF.æ¼’ø àâ²bÅ#iä#?9:ÂOur«3êN®›ÈE ¹ ó9›šÃŸÚTÑ-9n%üàÈÈ¢z–u/mJDD8"å÷ô\˜©ú¶Q¿º¡HáÂè,<šc"9;7…‘Éø.nîØ¶l®œ?. phbJlTXb¯6«—^ó?4ŠAÎúý÷j)?|+Óoìðô´9yDgœBübHíêâŠ×›4Q º»vî„«vÞgElt½€pWWã‚›šÄz{£„îKgÎw“0{zÚ,)Íê%ï‰AãLFµp­€wXÎëÖáÖÊ•Æe\„6¬_¹sf9ŸJCú´Ä§èÅoß¾Sñ aÃ"w)oõ,ˆÆ©t™26üCc?>«äç5kà9a®I?±fþ]âÐÁ‡o8QÀÌÒËOºœ ŸBU«â¾ô“ðzõÛi ¢åoîð$Í õöÏK—/aР7U“?˜wD¹2JÚËKlv[Ñ5"Æ÷5RœðªHÙ²¸;f "ªW7Nº¢^¢Žéׯ_Š»p˶O>ùuêÔQüQ$×Ò˜æà}õÀDÊ=C¥;v ß‹~âLiŸâÅ1yʤÕ2NÏP¼,D}ïL§^êªÉ+#Dv. Oûö¸(à›yÑ)~øð¡’mV>?9ëõï¿ÿV«"p«A‚ökÚ‘w†Ç›-[¢ ´å%H[@¢ øôÓO0wî<õýU'æRö6 ½gϪTÁácœèlò‘…Žó;ï¼£tPr³z©’ö .Y’«GDIŸzøÞ{*ªN½ÇþÄûÒs{ÍW‰¨ÉòŒ3µ• ’ï\ç¶„;+aü˜9wù:wÆù+W`#ýƒºŽËr <1f ýU£/܇Ë6aÆ×C°{ï‘ë€ÄˆŸ—¿+üRŠøq÷‘ÿüÄ›¼¹j¢Ä8ø©¡ó䈩«ðF·Fˆþý€¼ “.wêï£ø {µ4Óœ­gP¯~ „É=_¨’Iì¥ï­\¼_j‰‹W®)@ARÀÏÇ~cÇ"â)*’~'O"o×®8/ºÈZÀgýÖ¬VMû|ˆ¶Ýâï½[0ýÇÏpþäa,^?ÍYª•s!BH<”ÑˆŽ˜ƒ´AB%ð¿¡Ðæv (3·ŒF¯PíÚ øq¡g˜FÀˆ£SIŸ¶Ž'nùúúª` åÁC‡°hñbœ9uJWSÚ”ëæÍ“¥J—ÆE~·^V)}6ÄÀ €ÑþFE”\0ïÚ·ÛæÌÁIÏ9"Àe°8+û(3¥ƒ‹Es‰9™¿¤k(2mB~*â'AþÛCÞD„]vL^8ᯘßC;+ê¹´£NÉ)#þ¤XÑÑ%(øÙ˜Ž‘˜’VÀϘ\ñ‰Q>{˜ÏG€Ñ=>vjæôqHIV2º²Df‚Fó¢¬ÿ–ßU‘¿•Ð å/’woþ~†V‚Âi?}k©b%åpßÂyiYîÝ6¨V,02…¯Œ_ye^RCÿº†Ÿ®%ùÈÈ…6XÔ"=Â'~OrL;žôÂÉÚÊáaQb#Em”PÚÙÛ%=Â'¡G¦Sÿ<ÊÄíE[Yëàw3WÏ@ƒVDùü£Ÿ5iÁ¼GÌxè_¼Òøõ$”ôóû$Ú×éô¢hÙ¶Lî}žEž)z«iÇ>ê]¶¬\9&Jßòùi_¬øò"‹4¾hã§Æ/óò¤Dð¢sâ7í? 
Ÿž˜Oå ÎK)ü8g­û¿Žík7ÁÅíå5¨Œt›óBýmú®ÈtL;'µ¤µ×#íÇÂûhß…xßxQD<ìä슬9ò yǾ˜²êª×o­«¢~¬]´.î€í“½ÇKOŠ-ÂþiÆ´cIKJd~퉛»;¼³gG=ù틯¿Vqnuvÿ¾?b£#QÎÕAœïÑHˆ7(Yµ¶±Üo3^ù3™VÊ!GÛܽ¼[@s¹|ùлo_•Å‘œ:¨HÞÕ«WÕ.4œ¬-«CâHÉœ÷Z1'­IJöÓD÷¤OÒ2œyeü¢Žñ3ñ¸©¤=šzFŸ¯pí숷™!q²ÃÌ4Œ=-YR—Ö64ø4–:ų/ÒW6þ5G½KëîoŠ’2vDKç¦uI–þ'¢¡uŠs{>ѹòÝ ?ṕ¸8)âO”¯V¶öX³pšÈœ¼‡(}Kç?‹Âå‰b¢£')¥5%UZQ¿%ùïãàè WXÓ=]‘{dóvDùªU1iå>äÈSûüm†—7Orþ .nò¾N.Ƽ2sž°™%ŸIŽ'þ–FÄ‘ ;{GÅ;»,Ym·PV”­R_M^‚o§¯@®|…0ò­ÎèV§üïúÃ#‹¼ÿKÆËÿ\œ™·g“æYÌÛŠŸ´!ò(‘Ooµ½—8™ÃI BîY²bÊ÷ÿC|z|1xî?Pý$i¿Íh…K§…™Ò¢¨—5~Yɧ“Ød¯¬YÕò0\&‡[µq«H¦/0Õ‡»qëB‚7î¤ÂßHÌE£=§Ý~œ³DÙgÄ‘ýТldä"ý×åßk4?zá9~]:uÄiß0ÔlØÊ8¼+}ŸëɉªŽªÍìµµuĤ¯ú‰ÇqÙ²e7]ý(i“;~ýõWc¢¤‰Ô–aÌñóõ…¯x.Ü•9~ aÿyÈY½s)ûE‹›5:T+ûwncÉë*â÷¼fò¹zX£’ßéS§`#;!'w”¾œ?yRåøÉAÓÙÆ½z9tò¸?zÓÜJˆy6ÜÓC”+sü®-[¦&w°]Ñ­X¡<ƒþi§W‘¸Œg¤«Y]âm»®]«rü.ïÚ¥¶qKJÌÉã"ÔËñãz]IgõÆÉµ%säÀµ•+“;ä7æäpìK·P ˆ4wôÑõ:ƒxò qxxÿ.Žïß®7jÛMÚþ%ÙÙDœá}×÷&<í°`á"•ŸDâ¬Þâ¥KÃ÷çŸQ£ôf³z¹o,c~\ŽÐå SóÉÒ×òöès×®©™ªÌsúâóϰmßQ”([It ±½ŒöÒ˜¶£v.:°}šNjغ«ê³œe4Øêpº$[;ìܰ3§NBÅJ•Œ9‘Âgæ7qJú'zh¤MîàÐ#m†9i9~üý¾Y^ =jæT©Y§ÔýJN¸%ûÓ«L}ô‘š´Á<½xwwä “¶pÌ|<òвH{L¹æ¬s.˜M¾q)íÛ}œÄÅ¢{÷î­òÙVÔm1ÎΉ9~þÂoNî`„pñâEøvô8¼Ö°9b™tþŠ‘•8<›ÿœ‰ûó"…ÏÌÅ.%úç¤à³ÉÌñãÚ‹œ€–î'wø»Êuš;±N¯òž¬å3A^GÅéèäŠI£Rü’Nî0~œÜÁI ôø´ Ô_UÊ™3öîÝ—8«—“;J o4à—trÇ“?mr‡9ð³–c̽áBÕ:øräWJ™<÷e+NDÆ.]¼ˆiÓ¦ n½zhÛ–‰åœåa:ç#Ù Ìê­x–8«× ø%7¹#5ÀoÞ¼y_©œ9q•ÀOîAàGåË{ <ØtÖ«Iœ¡Ø½{w•8MâBæ~· ü’,çBàÇD÷Ô?mr×£sàäð³pB9àvZ\‹î¿1b:Æ}ŠœÅAÜ»oŸj¤cÀÉæÀÏ&ÉäŽÔ?m&/IM‘{øi÷#èã³*T¨ @~ ²‹î R2píÚUlܰÁt5P£FMtêÜEµ?Œr^Ò3Ú6Ù¹óç°há¥×É3NÄ ŒFð{ 6ÒÊ4ÓDG…»h³zéÜÓÖò:®£ÈɨpR'B唾”¿gO.Œ»&àÇgSïqßáW™Zµj¥ÖBUùnülÍ&wp%’ ü:àâýXñ aИt@£lÌ©áëøÙ98cÆwo>øqVoRà§ö'†ÙhÀÏ,âwðÐQ”/_11ÿÈçEN޶ø}Ò¼5äM¬Z½5~nÞ»••^ÕÙÍÕ§N0f§”_á˹'`£”t9Ç?.Ÿ`ü< üD‰j?5«WwûŽØ¼y«šÑú"ˆÀ/44eË”DîÜy°ÿÀ!Jò{ö ÔÞÞ+W.Ç·ß|¥vÐfõºø‰ü^Ú¹3YàGÖð#8gîMRà+À¯´¿++V F~#ðãý˜Ðý›Èa·®]“f(Bº¯Æ £¾½{Š1£¹wÿAÔ¨V‘\?Ñtò "[,Y¼CÄù ÔlQ’Šø•)ƒ;cÆ ÌBÄ/5ÀïÃ?T»$$øåxæêU¢âBà Æð†#2*yþ3È>åë{ .]‚ Æ+}1tè;øiÌ/°³µžÇM/Dq”6(R¤(† Œ6mÚ×S4EüŠp>) ÎRļMøˆXŠø^k÷ã}xùOžÝ½çéÿaa¡ ‚ßÝ;X½z•ðz©Û08‰6l8Þ|ó-xx¸+Y~Üd׌yÆxg÷R:ÈÏÏOEü(£‘ÈüÅ!dÄO#¿aÆ)çóÛo¿UÇ(ƒäí[·Ôqî7Ï( u”6»7‡WÚ•ÎU¼8D±õ p½E{',\0ï¥ÐÏ“Ìm0AŸšÕ+‚À¯´èåÒדFüFq"~gn‡¡âk¯C¯|W#Ø£0±Ð»cqrqÇ„Ïz¤ø%êÍ®¿ß~SC½œÁĨ߃‡ÁÊË{l .W®4üïÝÇC‡UòÎÏ‹˜»á`o£¢oì„~ê-#鹿Ÿ…¡Þ'~fîØ‘Åü®þõW"ð#0Ù°q“(Öµê¼Ašs1â‹Ï0oÞ\lß¹…™/ò¬ˆÀoÕª˜0~,vìØ¡‡6Ô›Udô’³4ÔKþ3?)¥uü(WsæÌ±ñ+-m~E”²6ÔË¡cö…›¶ pá"ÏÈ1j‰S§O¢O¯ž ýµl¹Ü: x¿H¢Ü9r}{÷TÀ€‚ĈŸ)ân!âÇ ðSñ3_ÎE‹øiÀÏFžÅ¶ªP¡]‚šp¡Ó äï¸øDFÿ#|#v@KEþyô»–Só¼‰Þæ•+WpC@TËV­T‡ÒÞéy“{Iæ<ÑÞ!é1–'¥–®7/T6T:T¾ü|Q…íТE+õÎ çÏSCM–ÎKëBoŸ$òÕœ7$óïIË“Ð#×jÇ´bºÝ-5tṏ[2: t©2˜9{ŽÞl÷F[ìÙ³KäÞÕt΋#%×bìI¯äãw³c¿RKÚ5¼¿jcS;‹·x\}×éÏÙ>‰-”<ñ|.ûäééŸøqô|óÝ÷Ê( 4M›6V@…|%€IÄ:™óþ ý®ñPûíqdéînþ›ú4ÝËÚú=D¾Q/Ð÷öή"â]»uà·?ÿ<*VŸ.Eµª•мyqŽ(ýíädÜ‚1=Mÿ’ÌùBNñ“2›”ê÷d‹Ž=Á<ʪU«ªI!K׬ÁÄ*UÐ\ŽqRs z6mÚ¤¢‚|îVEG,½ÈmZyJ2ç=‹ù±¤¼O z €Ÿ±‚ìx\³Äˆ =VVVE¡Ä«Ó„3µDáÑJR2?FÞ¼#<¯ÂαlÙŸê:uîjñœg]ØÙÌ)9^%ÇÇÇ‘ºÎô·F<Æö&`T6šC‹/ˆø”¯ò+"{¶ì*ÇaÉÿR×'%>›‘íY–ž©ø—¤< ñ–Šÿ iýˆ¼ C¡ù󣕻¯›5Ã_7*àWQô.#¥Ì±öÊâw‡ÁùóçÔ5ä·eYÉHÅ.Ίgä—éGþ~VôRäø½…Jµ›ŠÄD#!N*.ý/.>N ¢ôH8¸¸á·O{§8Ô;bÄ5l9qâÄG‡zE±çøé'5^~C~Órü8ÌuðБÄ}1Ÿ'qÜJ•Ê!(0{öîîü$ Ÿ§‡«Ú+”,´¡Þr—³Gªœ39h:Û˜ãÇáÞÔ õr÷-Ç/Ëܹp“Î~eéÒÄY½Tô•+W’vzõÂûæ4mÚ´Gfõ2Ç/«ððâöí)õnذá±C½Ì·1òcŽ_ÓPoTÑ¢ÐËošáp˽»÷Q¸haSäéÙÑ®rÝ®xܹs[Mj µkßAíÓÉãÏ›(‹ÜáŽ''%QfI±ÂÃ’eËâÖ˜1>ëMÃÿt¹»Aj†z9$öÈP¯\ëxú´F;Í-Û{6­Dßa#áå[ŒçÙ|ŸÐà üþÍp!`ä€GîX^O?æ[˜2FH)½qãòæÍk:ú(%~ñ,¼ø¹š€—sawö¬™Ø°ç8æo_ƒ ¶ž+1hÂTÏvU #<$›/ù‹ÓÁ! Õ,Ï„÷aӲ͘6j¶îØmœ#FÏM€_¶€å¶„(ÏÔ¿¤?sàLjÏ-SÒ.ý[£>ÏZ£"oÅVâ°€Î{4‘÷ŒÁÚSð)çð$¾‡ °tÎzüøN'œ>÷Ϭ^FüÌ_Òˆß~Zį@¯^8)À?ïrbC»!#ñæð^ˆzŠº³>”WæÐ?¼‹§Ž`êŸâäß{à§~š½ êÍ8åýyðøqľç` Ô*Vï ì‰V­Üa ø™GüR üäò»¸´‡v?u9VKŽÝÆÝ{B=¤ø-…²,¦ ¡A"Ó8}d?V/œŠ“‡ö¨óªÖiŠ!_ŒWB«|Jêõ«‘x/ñ?ÿuüY“¦ç*gÑ)~‘oŒø$ð«X1وߓ?Eìß_­ãwoèP5aD- óîPD;æÀ¸ù“ñ@. 
‘òÏû¸ »6,@ªîQ¿e'å”7nSÜm”š-Fšÿ%ÃÒ©&òžKr¯^NVâŒjÖƒÀ¯¼Øá£"ã–"~œõžþ_çŽ8}3 ¯5zQ‘ajÛ,¾;?½?‚‚[{'LýjÐc߉',?a˜-‡zMÀû2gc ˆ¹¢ð?KrtfŸ†Ÿ>ˆsòÌ׋G`€?6{·,Pûp>ï–ð–çæÞ=rv&O›À¯‚”ÓêµñK-ð3êõš;®7â² ø1Ê4oî¬Üz3×mDpà‹í¶”/½hÞY¾ÅôŸ>Ǥ•P±zU•r/Ú%VÚ+­ÉÎA'à9þò?lÚº=q9Fü²Mš„ó[·&ñK ð£b |$â'òÎm˜.ýõWbÄ} žœ˜¹ý2 .ôLêšEF†ãì‘Ö­‘ú>wÛY/냈0ã{ØÚq+ûƳy/nvúÈQ|Ü­N}tVo© pëÇ–$âÇY½\÷05ÀkÍ%øÑ¨ž¼xQE ØVU+WÀ‡ã– JÚ Ò žDSÂ/îÍÝqöŠýM<æ”V©Ýßý± îžvjÏê¬þ¹ èèX«tm‹ÖmÚ*怟B~G…ÏI#~l'®‘˜ð£Mñ#ðó§ÝÏ9äz€µq&Ú¸ßô&ò\Ì6w›ŠƆ…†àÖ•óX³xv¬YªNñÌêAÿ€ÖÝ{‰Ü‰ì 7>ÓJÙ=©›OH%q'¾CÝüzØ(Ó*â×§"LÀ/iÄïqÀ ÏâPïÀˆðñÁ}~¼'Ï4 òUi‚Æ#œKhJÕãâbT„€/Rž8´[Éð¾­ÆAH¯5nƒ.o~€ÊµkJ;B°ƒ\Ggæ%ç'¡¬Þ:”GyÖfŒ+à'vöˆ3~ŒøqV/±QúŸÕ+ ÅÕü g“Ñóg4J›ÝCðG²±×ü¿’í7D7K×=ËF—ÉD;L’cOÒ©´sù™ô*uŒ€^ÚùEn—§êlzÛµ›´Uï·lÖDØ9½Wѽ»`/ ÍÒuOWøLæ°Èf|å_4!rÿþ‹þKhôȽÍ~3FÖÍ„á9’ƒƒJV¬†1s9V=ê—ÀùçÕ†ù4†û·m…#¼RoþL(!ûä&¹¿þá™K™´sù©ùÇxÌì8Ÿ­vLIý­S&Ó3œ‘%[N4lÝ“WD½æph×&4,lES§ÁÙÕ¬ÍÉÖ^dSøþ\I±Åèy'ò‰«…LÇÌKjè‘ó“\÷ÈoBŒ¾Xî§©,Â2æ©ÚÚÙÀÍÃÞ¹ò¢Lå×ðîÈñ˜ºöotôñ0|?¼7ªe×aâ¨/ſō}œt¸uí*jåÑaßæMJö->ã™c½I‰<1~IäYÒò8²t ‹FüK;¦d?.^þVկܾÏÙÅ ^ÙsÁ;O~ÔmÚïóf¬?Š!Ÿ}Q{6­À[o¼†*^:Œ|k0®]¼GgG®œ‘Íú%PTÚˆÞåÎu;qîØAܸrᙼŸ°_{i‰Gø"ÿùž„_‰¿›Ž?Ž,ž£]o*$žÇþEe«Ò,’Èȳ.(e«ÕÆ/ó7É èüùàâ™+|ðú6ÄÂÉ?CNQýÈÒ=ž¦—²R|0çÿ¢ÎPGÌŽ“RÃ’vOó¢f '=&zŽ‹Ø¦õ~¥b;áée\² TÅrølÜü‹%Ù§}gŽ·«û£²OÙåg–löÈ•ßCts^”ªT Ø9ë1oûy¼õ…€ÀòÕ°jþdt«SMJäR€:80H95éa/km¯^sžhùî3•´¢—"ÇïüÝhT«ßaaªrËæCš4JÌ+àο~Þ'UC½ãÆ{d¨—³zsŽ£rF®Ÿ8«—eËå(1*vj³êgIª“I©_Ø]k°|׫Î] (~š½9ó†µêχD9yz>¶:µ€­ùΕ¤œ:|X õÊAÓÆ¡^®Í”š¡^ÎìMê7›6áââÅÌê­^µ ‚BŒù¯*M™2õêÕKœÕ›8Ô»e ¬’êå0âãfõr`K9~óçÇù¥K·lã¹U*–Ç»£ç ^ó¦xÌúÁÏ„”±¹g?uh>ìÕT%ÙO[{õj,Ù½±h÷1cnT÷{Úµß nƒ¿OÌñãÎe*VÄÑ£Zµê#;w0Ç;ÿûp8Ž=öOÄO€_9áËõ~P¿¤À»¤ø%Üá(À¯0ßùó‰³z›4i"NPW|1â ‰l¯g¯’cÜ…‡àÎm_¬Zµ¿üò³<;uëÖÇŽÛðúëMDÆÖ#RÞçYšö?nƽzi ”µˆ_ÉÆñ·èmK“;4hjà§&wÈ÷Rµûñ><‡’u¼{Ï8±çÙµ½1ÀA°·iãœ=w ÌdûJêFF@Ÿ$ÍéI‰¶D³góJlŒÎø1âw—?3àGG‘ÀKJ=)ð+(À/R£Ÿ?Þ“÷6ì}88º`ÑÂùjŸéÿ"ûl;e»DŸ3О°:p›·nÂŽíÛÕyŒ˜uèØBõj•±oßAÑ)ÕDŸ–VûÕGÅÄ«´‹çEä=ÛÖÝÕIhú$Vt_e±³‡„‡Igõþ,8&c?Î꽊Jµš !.^:£jHÎü¤`¡rpõðÂï#¿€3#(æÀ¿\Â0¢çkãÆ©ˆgõ²„GĨM¹)8Ï–¨à¬1þ\>|o¿=Ù³{ÃÎŽƒü/¦ ¸17yû·€<4*"Füªp[C‡,FürçÎ*àÇáÞĈŸüÍ¡Þ ‹%îÕK`¸EÏêU/n¯^s"° ÁöíÛðÁéMР~Cü1kŽVHën¢íÕ;~ÜÏj˜‰³zµˆ_Ž)SpzãÆd#~¥K—Nðû×P¯\kü¸œ ï—?_>QÛQ¨Ð£@ñy“½½ú÷ë«GR2äm|ñåH„‡`ZåþèÑ£èÕ³«(ÿÓ‰À˹˜?óå\þ+ðãP¯“?£3?OOOiǪøyìÔ®õOtðyÑé‰WïwK€ÒoÇcíÚfN.Yò'jÔ|홿åµ^ÝZèÒ¥‹eà'ïfi¨7%àÇýbµÚ$m¨·´€¼Cülø™›ïÕû¼ˆÑÞÕºd:bµÜÕÆÃÓիׇì[5ó¬ˆm¯Þ¿ŠqwÀ€ õ>øÝþþk¨×Õ…~ø1zÈe‘*WªŠwß_í‹ü´D™¦-gÔ:Bž.ïÎÄîÝ;±aÝ:œ€§×£G/8ŠS?yÒïjf÷ºõ›ä½ï¥¹®O‰˜nƽz5àÇ'ÓA¯,vöK:Ô›VÀOoú|q$L¶¶ðcÅärk“ &¨…T©” ¨lS‹Ä-5¨£üÍô»vCÍ8>ïÙnö„:uêãwÞSÛµPiñ¸åóŸCЛ”ù'Ÿ¦¿)µâ_百›gëuÿÞFîE‚r¶IãÆM¸.P2çS²„ãÞ–®I‹ÂÜ/êSJJ|Né7sâyä±yI|ŽÙïüd¾é‹ÞKJTÔ_}õ þüs9>þäSi‹×áþÿö®0Š¢ é=BïR¤w„ÞKè ¤÷¢‚èOSP¬(ÒAQ**EQ)Ò{ï-ZÒÛÿ¾¹ÛãEÀÜ|ºÜeowvöÍ›7ß¼y3ãã£~[½úgsLÏÓ÷³ÚÉÖ2ãwë#© Sƒ¤i甿Çê<ç2 Æ5Ï |÷—íÓ»'ÖS£Ö>üMÑÇEæÖ`>â“ÔãHú·õù‡!Ñ=üÛtš?XÒ0>I“«ŸÿæÁNVÅŠ•0IHÿŒ™³ñÊUX½f¾[¸HÈÈlé8¼¡H_r÷>탰–—åï$çß„¤×«ÃôC¢ß êwézxª©Óeš´ß´tîäË÷Ú¶m‡)S§cÙ÷? 
ÿ€j4gþüoé£ýÛ¼y“èûPøfð5§ôl@›BXdÂOuæÞ¹¤ÇÓÀó'~G©t$yÖå+…AcÌÆ(&:F}>Ê ß'(ó½&»éwV›s?‹ƒãôìåS1ÿMB‘ªCH ‘œ¬Ô¿VçÕo€dï3ÿÍòåq¡¡!<¥dÁžîó>¨ƒÜÆ®|y?,ÿaš6m®òûoäDS þg–‹ERÆßÉOu¿èœú4§eJ—³zM®çyp§ß (*„»]»xçݱŠÎýjºví¦<ÃööÿN}¥4¬å›è»ù“°>ÿ dÉ$_«C˜ÿ6}¥üŸ=ñà…ÆMš J•ªÊÃT³V-øU¨îì"õaÿ¾½Jÿ“»÷éJ^Iåd‘ù»q¤Ö׫{¬î³>Ï%Xˆ°»¡ÏÜ‘T§—¶ Q@jÕª’%K)/==ïy„˜0 ÀÔ.&ÿÓ8øÎ$Ý„EVøÝ|î¾ßë{Ô}æ{Õ¿VçÙγ- ÝM—îéÞéÒ)fÉ’M=®êï_¯¿>3gÍÆ›o¾//oé|›ÈIàâ%‹„,fO6½§}ð½z %ÓÓßê«• Ÿ"^ˆY½Ü¹Ã¿nsÄDEJ†L,ØIÁR19 —!cf|9úá“;¸ïìĉïêÍ)ç8Ô{F>¹€3]¬¶<~â´1/yæ³ÛQÀa>7W'ìØ±CVB.à\)W.ìÙ¾=Ù¡^ñ>l¨wÁ‚j¸×êÍÌ¡ÞõëqlÑ"µe{×4:5jTÇ­[Ϙ÷y‚Œ8€=ûx77døå5Ô{P>Sê-UªÔC‡z¹aÒ?Îê­ çŽ.]Šù4bü¸Yúç¿@ãÆM,Ã’ÏÖ¤‰ùgìO1,‚ºó4ÁÙìýõúöé™8ÆOäR¦BœåP¯|&ݲmÈ!êe&'€T­ZÕ"S#ƯPïÞØuèzdÃÔ´iS¼*ĶG^¢÷†Õþ}˜†Å8âÁI”7·Ì¤-d£@y§OŸN5ÌÿìÔ_ÍÕбc4ª&š‰Ýg|SÉF°ýÂ…ûbüQ·n]e³¬a õÒFÝ7¹CʶT:–ô¨g>2x0~ûdËèÑ£FŒaŠËBR°W/„•)ƒ«}úÜ7¹ã­·ÞR$*¥¡^³'ãWpÀ„q¨W>™&Gó¸ÃÓŒ3ÕÖ¡ÖÑþ °¼9‹72<+W­4ŸM ÖÇ&M›!úYØAÉ“èåwß-¸'39ǡފҾnýOº€3¹ åôŸñk/ÄïôxT¨Ñq±1ʱbGIOÔU  -— ðô΀/G=<Æñ:Icü¸s‰Ÿ‹ñ”|rrc¸”i˨T±"¾%2fõrr‡¿¿ÝÛ¶!ÁÕ5ÑΜÜÁ=zSCüx¨ýg­ˆßQ!~œÜA2áÂy4oÞ ýF~ 7wOyö¿cùbkW-™û*•*ŒqッB8¹C¿Ù³qpݺ'wøýúë¯)?’CêvrįRÁ‚8¼d‰…øÑønÚ´IÍjµep%€zõêYfV’ø••ºq棒%~Œß{ñ8pàý“;„ð‘øíÈ?ÊíÚµjF±­ã÷ßWºKU1~(Ù°!¶‹½IãGâÇòJ-ñS“;DÞ¥„,ÄÞFÞ_½z5´èöši¯^«å?l\NäôÑظâì;pPÅE2ÆïeÑQNî¸Â?+âÇvóí·ß~ ñ» òMn9¿p!~Wä“i²gÇ£¬<Ç–ÁØÖ÷Þ{Oµ—'wT"ñc,j’?r›4AüŒuüJù×A¼?Vj£WÏŠIÅ !Èœ->Ö!UÄ/%ŸAüèñ£©Vµ2ŠT¨«öê5Íê}®¢x¦àðJhðmÌþd„Ú²o®dMâ'J§ˆ_ŸÚÞ+oÞT?‹ÇO_!~G„Z¶l›ÿ-~ÝvKþ\Ɇ-‰^ÁÕømåï˜1¦'~]¿Q5d†Ç/»¿$~ðø=ŒøÍœ93Yâç/â!)cV/‡è«ÁÑhÓUí’ñœMÂ3 øŸkWÂ>ü:¾]`òRŠøùûã¬4n!)¿­ãg?k÷¾æäEü„’ˆPþ-›7EƒñÚ舲-Þ¡<¹Nbî«å/‚×útE@ã&÷&wˆ|K5j„¿Ïƒs2į~ýú)¿³gÏ&žÜa&~¥…,þ-öëÞäŽB 뫽z¯³ºÙ˜âÎböÕ^½$lÆäŽÂ}ú „¿^½î›ÜAâÇÉK)?¶ÉMîxYê—s¹,LcVïè‘#p;Á_­˜ˆg¤ûÜÞíÖµ(Ø‹N¸¹;¨íó¸`¶ƒ£ÈCô‘kî> æÜ­˜ÙNyü {¢ì´´³_ºtñ#·¡Îs ®'Á 1«÷ð…Pø×jjb°Wqí&ñ‹S/š>C&LÙ)UÄ3_’zür SæPïiùÍØ«7W®\–½zm¨­³ KÓ^½ÿý·ΡœIü*çɃÝr.éÎ$~/½ôÒC‰ßüùó¿¬B4ÒKoþˆœ3ˆß’E ±vó|½æ—ç¾Wïó€i¯ÞUønÂ0¬ùÅ4«—ÄÏ—³zçÎŵk“%~ôø•ƒüXÄOÎW*T‡Iüèñ“ßxmþ|ù°lÇ%,šCmhK.†ýÿÃàæe±gâY½åD¾$~ÁœÕk&oôV“øqqæÔ?Ó4÷<~žGŽ(oÊ?*"Âò¬V¹þ7õT¨^ QÏpŸÖÜ9¤mõ*èÚ:Mš5· õr¿RŠø%êe9qœ¤“’¬‰_RŸ³Ü£ˆŸ9=ÓñMÔ­[ïÉ÷êý‚»á{õÒ®“øqX¶pß¾)]WEW“õ¦†ø%7ÔKâÇ¡Þ+BüŒ½zõï‹üUš¡çëoXöç~V0BJˆçEƒ2g³S›(ø©É‰_e!~[Iü’ê}Äï…˜ÜApݨhîÖ¡¾³ xP1¹ +ϧƽIa¹–ïônÆ^½T:[;îíÕ›XN¦3ÉË1µ°¾GIZäm}އ† ™PF4FV2JîH-î»×tÒ”¾ùw‚îâÀPrz’–..ïÎN&aÈŠ2b³`ÈÌrþ`Üc} [Ÿ3upcä»úÉæÀ÷Vöìð[É…‡é÷ÄçŒãAHîúd·Â2Ol ÷EÅ€ÙÚ!jß^½r(=•ß­ÏÇÃÀ¶5¹ûxX·Á<ø †x=$ÊÇs‚1µÀ:/F~T{Ìù§‚øqVÝîôô±GÍõØKf¼ •ÈòM ’Ï_ÍŸT…)$[)ÒøanëÉËø®ä¨¾=:¬ËÀ’¶<Ïϱ<9«ÍË ðâž…élëàÞ”nî–NŽò]ësÞŸÜ¡~“²0ŒŠÉH‹!à²6XXøIX7X‚ÆÃ|Î8R‹¤×ª¿ŒsæO^ÑÆZy±¹ƒvH>çk‘™•ìî“c’¿“Cr×gŒßøÉç‘áôÂή¶upPÇÚ´ÈEÃ%•crçRë´»UZæv@ ±Zë„-òÒjOÊÉJVÖx\Ù'‡#ÆïJÊUm€è¨Hµ¢<]ð<˜5U1®Æ;½/¦ŒêòСÞÝ»w«mM,o€;wäš<®—/ã„üæ(¿q†*‡-ÿ<O; ó¶%dÈܺe œÅ°á‹¹T¹ìܺUÅ$YïÜ¡†óçèP/wåàp¯±sG–Å‹‘ñ÷ßqPÎ9É9’ûS'O¢CçWQ³I[µFãsUÂçê÷ùÓÇQª@¼÷þx¥¯qÒáñýõWäœ3{׬S2“;¸ƒ¡ûí·õr¶0ËÊz(,ZÎW.T‡¤<Âå7.à¬Ò+]/ü•kU’:h¾ØFà$ ß®Í{ðV‡êj¯^¹1"¿Ê•qŠ1~~~‰p>~üxª†z9a†C½ÆlEµ€ó‘#(Ú·/¶í߯†ã×¼Iš÷ƒŽ}»*bkà†üÍ+TB·6MÔŒJÆøÅKýàPo™&M°åÌ™ûbü8ÔâPïéÓ§ï[À™Cëe6L”ÃN†¾ñÖoØ þ¶UtëÖM ár¸–ñxEúõSC½W’‰ñûßÿþ§ô;¥¡Þ3"ßD3ªL³ÐàÁûsYÒfšº|wÌhD{æÀ3¿ÄÝ{ÑY6izáî ”ô°Ãùsçxíšj9Ô[5_>l–vÖÅÊÆ“;ØNÿ÷cü„øŒFåÚÍq—ã®’VlŽc3k<æâæ‰i©Ø¹ƒÄo„ ÷¿ÜBühHNÈoƬÞÙ³g«íÝl+TÀœ¹sïÍêâWM”î!ƒj˶$1~©%~< â—uÉøŠa=dŽñcãÉÕÝ[¶h!ãQhw¥Ì¶êô‚óÕΣG¶ÌêUÄOÊcÏêÕÉ?Æ„¥†ø±XVIcüª-Šƒß}§ˆcüØðq˸7¤ñ³eŒ;Ví—Ë…Í ÆøUÒvrüxEü¬'wøQ^©%~Ö“;<FQiø¶íÛ'!/ÔƒC‡¡K—.ê[EŸôøå×ß”'DÙ~!~œÜQVˆßf!qɿƧHüN:•ìäŽrBüŒôŒ‘$îÜÑ©SµbA¬•½³p9‘£ÇŽbÍ꟰sç.%WNî ñ -S—{ô¸øqÙ—‡¿ä&w2áBü.’øI=c]b[Ò²eKóU¶ îÄ´bÅ K|q´´³ÕÄ>o"ñK2¹ƒ«¤ âÇ-Û.Ü…¿?;˜fð’гRsØ—“£³[ªÖñ3ˆ_ÒÉyD`dÏ'>ýTMî`Ê _¶l9T¨XIÅØØhô‚ƒC0ñó j?Byü²dAuƒøIÅL:¹ƒRKüŒÉÙ/†Ïï¿ãÐüù–Éœü±}Ç?øý÷?Ô¼¶æñã^½?ý¸ Öä3öêÍHâ'•zÏO?¥¸œ ×Ý{ñ›6mÚ}“;èñ«*Ä3ñ£¡æšt\O±UëWÌ×ÛNip?î•©–H0fõÒãW¡jUœzÿ}'ãñ:thªˆ_åÊ•Mî Ç¯Xß¾ø{ÿ~EDØ~ðÁxìÚ¹]»÷°9ù“è±úø£”íàÄ%Ê˘ÜQ¶iSlg½œ‹1¹ãaÄï¾É$~)âgLî 9l$çØ ^»~Óæì÷ç»gÉì«›1«—“Pé`’øYOî ¾Ž9òÄÞ֤ē; ‰‰²¹`&~Lkì»ï"&.«Vþ`ÞÚ|ƒ 
€‹¶ßº9²àܹs÷&wˆ®&¶{g³[?zHÓñãΗ"àW­‘ä†^Ó¢4ÈÆš~4Þ>¾øräÃ=~Ü7Yâ7eŠê=.¿ 8ç‚cÙ«×–4ÎŒ¬RÙ)ÛÍ›7«Þ{Ú$~5„ìØ´)ÙY½\Y>5Ĩ±Ž=~ê= ç â·télÚ¼?ý´ú…Ø«÷YôWïB¼?ÅÏ?ÿ|ø­_¯†z÷ȹ”ˆ_ùòåSEü’óøU-V „t?^ˆrëß;Ôþ›œ`eKp;³sç´nÕBu^·òøY¿¤¿Ô¿~ÒÀ%%~žG*â·•?!jç’/³f¥öžx†{õ¾(È(²®TÑÍ›7S‹Y[Ïê-׬6 ™KŽøq±ç”ˆß‰'î÷øÝº¿† ñ—B.çb¤Câ÷<öê}`òzšöê%a3ˆ_±ÔPoJÄ^ª”ˆIwrëøâw·pa\ìÓG?^ËEι›Æ×^Wí½­«‹x¸»¨YèÖįº´³ɹ¤Ë¹Ló4ˆŸ9¬óù‚ÆÔÙÍ v ª"“ô±òKŒ›r/ËÔâ>.Ë¿EX*¨ÝrÊôClj¯^[<¬öêµÈÌüÉ Yxœ>ïQ3¹øÝtJ{Œ¤l™?“BÉSôøa0Éøþƒw²“c}ŽûsÏhÖ…dõ$ ìôqt  µLxÈ?÷3ŽÔ éuêoÞo|7ƒ„;gØ¢üyð½9âbòþ™åb–“újþ4Zùß«´M÷Ò1öä¶µƒzϽj‰Dr•ïüû±eF:Vé1mÎ(VûT›ÏÙÈo‹œåÓÅS“}2x!ˆ_ _ˆY\L<ÿP-\Ý¥‘r¿/ßìÍW?ÖÂâwõ7=Y¬éê·,Áx‘ä*„-ÖÓøÕ§éõ]}RVVïI Ø#1Ò$᎒Œ£xN&0ŸU Ï/]:oÕ󳵃[a¹»»Y:6Æ!BV“äDçƒe`‹”ÀáHz[“–e¯Ò07ÀsjV=¹Ç–ãKD 2QêkâßèEMÍöR†7Ê(;õÿ¨çÝ;Ï™ÆÄIóg ß[„rOFVò",rJrþAõÀºAµÜg¾—ÿ©ðù'”¸¹»+O¼m®Ò¶Þ›ZšHƆ¼ÌçŒóI½¬I‘œìÕæOëójEîSD'lá Þ›tßk9°–AÙߟ/ÆPïÕ(T¬ÕÑáá*Ø”F• IOCÏŸ›‡÷C'wpqCîË¡Fö"WãévîDþO>ÁN¡ÉoY2gVÖ§OŸ‡§—çCÓ´Ý:9ÚáÏ?ÿ4yWyR!¤s¦¯_ýMPQ¹!·êáb“Üç89ÌŸ?ãÇDzeËLËñ ñ:z/‰]«V)òç,e|TÎuíÚ-[¶e—rz®ZøìAÒ{øð!äÍ›Ÿ|ò©&Q³ÏçÎ…·ÈæÐ_¨IHÖ`4rq§è3‡|“c9|ÅÉ lÐ Dûú¢z±bØgžÜa™Õ[¦ ,\¢b^aI[G¶nÙŒŽÛ©0cVo´ÈÅ¿zuœàVJ"g#Æõ„ÃìÔsêpJàVmþþþj;&c8cçâbËîÝj¨—uªQ£†xëíQèÔ©“é°5¤TªP^ôµ¥¾5vîp¹v ~Í›ããÇMî`džCc”-;™Öày†8lݺUýmù]:6ÒžTlÐ[·mS“Ö8Áòä,Õ 6>«—ºÇ=xCEF±^^(6t(BÄÎ\~õUØ›õ—ö‡C½½zõRCòœ”f ¶¹\à™¡C´oÖm*íòqó¹æä¸qê“i3žžé0kÎ\„&±wi”'‡o½½ÜÕ𸠑s 3©Y¸0þ`H‚U8‰±O2c·¿üòKóÙÇËAü.G J½–ˆŽŒ@¤>*·—·ZãŒ;w0ÒÙÕ“Gv~ ñ[²d † †uëÖ)%4 †ýº*•+cÓ¾}J‘=ÝÜðÕW_aêÔ©¦‹l$\úƒd;N&ŸíÛQPˆÛ?kÖ˜v+0« 8(ã‘Âåp’'*piÆ­Ñ3/ŠLYµR%lûýwÄI¯šF— ÿ×_aÿþý‰z=¶V;VäöíÛ+¹ò 1«Ð¬®¼ò ®Êá„PN45jÔÀÞ½{ O$|ŒÝ0`@¢¸NX¨Ô¸1NŒ[BjìéåzÀ=R¹”ƒ-ƒ3_™+’FãK2R¿>NHã"DÚˆñc|*;—Ô[Ê-%tîÜY•1 µQìyž<‰Œñ“zFbï"uŽ„ÓÖ÷JV“~úI°“0¨ÉBö*ÁøãØ1Óh$kÛ„¼qÙ.Æ[ƒ¶‰ ãâÅ‹UÇÖâu•ºc/ßËKz{¥ˆIŸÞDþ¤¬I@ø|[õÞh3IÒJ ¼Ü±#nQ¦0Àò!áf;Û³gOóYØidG†õ#Gމ<ƒ\Ý!ÛŠÈ(¦ýóæÁéÎÕÎs2w`±eXH·ÈŸËًݯ\»ö}vú»wï®ìÔ“Úë‚øq?ÿÚMEùB£*!+¤AØÃ‹·sÀŒwz=øq³y6ŠTC]Ö¯¦Ö0“ßÎ÷ë‡@éErŠ:ÉÙ–ÁÊjÈŠ^ŽR"Ÿ¨¬YqbäH5=® 8ÓµUP6Ê»!²W^Ê¢J­ZØ.¤›ÄÃz)‚ñ4”mÛ¶UK€pù‰ä0f̵<;BÖõ€Ö% Â??\’r4–i`}cãg‹œ |ht Ï;*œ\PQHò¡I“&D‚ah“¸î^Ÿ>}”WõA½nfz>¸d”1SX‘©;ezôÀ??þhꊰ°>Ùªü ’ÊÉð©5ÏœAi‘Õ_bS¸æ¡’ïE‹)¯  5XŽœ´Ç ~ìY†ä¥ìH>üZ´Ày!Þ×…Ð8X ¨“ôLW±Cˆ8—±38ê?mE½zõÔ¨½³Ö =ãH;Ql¬G ݯ(òßHBcžüAûÏ:EÛf«`Œ±Ñ94röe˰såJE pt†ü†3{IŸ/ñ;r9~ÕÄLjòÄ)¢¤10bð<¼ÒcÚ˜$~T4ÓãGvœÈÕ,ÆÕçŸPR¾?PäÃzqb[åãzåŠ{ÿÂB^IDATª˜›9Ì+2·–+<‰eKbÍ2Iôòq²‡Â8»Nyýèé^{ÙnÝTÚŠäXõm¬€ôö•|ýuEBs‘ñ奶 ½L$\z'î½LÁý|YŒz@okÎ… ‘å—_°S Krëj˜).ýTNŒëŽÕ«UÝ`] M¢,9û–õ nݺæ;îÇúõëÑ@ȉ‰—£¼NB6*´l‰½óæ!2{v]R€j—/GV‘ÿn!y\øÝ;)ìüÐóÄõ“¢ºt0¹8?Ù6Öe$bEÿs‹Ü3ÿövñ¦1y+f› âÊéwîD©ð;=­AA±-&A©_¿¾Z…€„-)ÚHŸ2ÿè£,D‚i3”¥š¿¿ c¹#'+b¨an„hWb}ItûŠè¸1êÃö–®áÊeŠ8¢ó$x!º˜T(OOa¡&â@Ä$Ÿqq7lý¤§òý÷ß«O[óAƒ{]ʸ…ÒÝ»+ï‡!­¯±Õ#N:6v$}'Þz ±btEà‰®¡ÒQ®íÚµK‘ôôÎ6kÖL]Ëòà½vÒë¾]¡‚¸n¢¿XOOijgi•¾-1ÒIÉ.\&iŽ­b‘ä‡û®#ù^)½ÀæÍ›§HúÖþ¾K†Hyàpå iI¹TF¼ ÷²êæ ‘'"Å‹Rç)KzZi¯*V¬¨Î¥h–¯§Wƒv,AêÓ‹Ì– >[·ªÐ ž×G’Ct=VdMÈÙ5!ÏÒ£·üFÛÃ8L.×Âuü’‡ 9ÔKù³Þ÷rí’Ø/O¹7æMˆ•6ÂøM¢Ÿ";vrІCŸ|ŽXýN›BûS²dÉdIÁø?vŠ’Ê^m)Çi!ã%D´Ø<–³ñ»>„çˆ>úHGÑíâEœŽbnÚ²–)?J}È’%Ë“>â… ~1BêB#ä%¥RCìYs(–†–J™p†kÜPPIA÷ò®… á³{7ʈ‚2΃ÛdY/ób+àûò½IÂ8ÄX]ˆÙõºuq±sgµÆ’5(Nè`< {t믾ªbü8Ìb _q—‚ÒÓãg9éÍPîl`mVörp[*Îxξt)Špññ ”§#é/Á2àºdÿüónHPèí 7Ä é4² ËŸ_õ¶ Šañö¶J}˜Ž8‘3‡W^š>G¥L NÐ6½ÿþû( ½N×[dìÒ¬Y³T§‰åÀ’`þ\ïÞÈÿå—&ºÔõ›>,‡Ù=ÄÖpRصF”gÈø„bíÚµ*~/¥ÉM5kÖTz[ÙXÒyÓë}tÜ8”éÛWÙ=z¸,¿ÛðA=dHNñ7ÞPaQÒy§|ŒßiÇiƒ¾ùæôýM F‡ˆ±Þ$ŠÖÏ`hÉ%i_èt)6|8¢'®õ_JÅî—·oÆ Ëˆñ;CA8Äû´bŸûPoû¶¯à\°ª5zwƒo‹5yšâã„›½|ÎNÎHï› Ÿ¾þ ®"³°Þ”@²A…5jZ´h‘ÈÕOг+„Ò¿U+xœ=‹ý"Ì 1ÊS"ä4 ëw”Ï\‹áå pAÈÄñ#àÌá?Ê ìap.³5kÖ<4.F—±ôLq[(5Ã# f¹=à³g}ð®˜òă°ùËÁY‹ÅFŽT²Ø±d‰Š%SD#0hš³g¹}ûözü0Àš÷°X&H0¶©šüC}„@iX9Œ–Æ¥ž*P6 l÷oÚT úøcE)’7Î~. WÙO)¾ÒŒwe,àBél2îLÅß­NH9XÇ´²³ÃN ;óŒÙN)¾þQð܉_ëV-±âÇ_‘«PqÄFGIå6ÅѰ‡ÁœëûÀÞíB`àUdÉ’Õ|wò˜;w®ê•p%~µ¤H"C/{5YÖ­CI2qKz*‘Y³¦í€_¨šU ÏS§àuì"²gǾ©S& Sr€CæœjÎ`ÒÃÜg4…™¤IÁ¡^ÆáüñǪ×ÍázBÉ^¤ï¦M(ýÚkªÂß)[áBù]R]—&!ïÆJÎoê$î“B&Šà`¿$ ÉãT8NšaÃ÷0°ZÓÛÄ"É:‰£èNH/¤¤¬òÃb ¯´li*{ú´,ÿ” :I4áG·”ŶU« §>ÒƒÊeVÓ÷±AÎÀS6é! 
ã w’qÉ– c^I(«±92v,®´n­â-Ó´î§ô€s¢”4p Gø{íZS,¶Y.ŒÛ¦Ç•„šKé<¨óCrÎ8?’.=’(ÞLÊŽ£=ù¦MC>©#ç¤#zfÀƒiº@ž—ŠòýOÃxGyçt{÷¢ôàÁ*rÛêÕj"“uì) 7ÛcîBö5élÞ¤ ѦGö5±ïÔ’kPÿ‰* (›³OÊm€Åþ¤qÙSŸ©ƒÆ÷|3f ¯È•„ï¼ÈÖÅj&/í m7wJá„¥7ß|ÓüË“AžKI??pû£ï—-ãÊž@ÊÎlº¸º¥j3yö¬©¤ôŽp}&‘%%T.nIFà-=Ÿ]»àÊíQŒÊŸVÁâ–w Ë›WÅ{E s¢[™=‰*„¸×m²Ž{H2ëÇ™w$+ô8“ ”ì%mÊžË[dرnW®˜~Kã g™1}!B oŠŽÒÈ:Ò+T?Í ¼ £Ë0nÙó0oŸz»i¤ÙSç SÊ߈¡‘Ö[× Bt‚ŨBÃN€My¾ yO6øôt”Bw³R%앆ˆeBYлÍÎg2r¦âÒ¥K:Ìk Ò¥K«:Äõ-ŸF[FòMùûuíŠK­ZáØ¨Q*O$¶ÒïH"À¡.m꣓´-ÒY¡h—ØøQæ$Ï´AÔe®5ú òM]çr]}ûöÅ?ü ¼ß‰ÖH”{c½½á~ñ"JHcê%vènþü-\X…a¤åI7Ôw†”Ðæún߮΂|ù•WLž&ÃN h{ÏÇxJÊyz ¶µŒGã²R\Ú…ëåZfÌ›AÏW.ȵd ŠH½ nT­ªÚ£´î|aç†!¬ÿu )R{§NUí¡õÚ­ÔoÆôQŽä4\Bêi­Bò܉߿VzîÈÝgΜ© ®…x$•PõBDÉÓœ0’{lÔÔ)È„¤¤ƒñ2l´8£÷Q\ÌT+ºù9ÔEÃAÀ^·áù3 d/‡xlAö„’½È («Æ/Pþ$-[¶T{:Òs÷(½4Àìôp÷á¤1fÜ¬Ú UžËÆ–3¬_ž4 ¹¥|cä·[Bn8ÔFãkôJÓøV¬ï|w÷óçá³oŸêˆìýâ ÜBÁpÖlÚ Æµ²ØaãÇÆ0µ ÑÞ!zi9üÈ‘.[²aãËxÚòBNè?ß©¥ -T(ÅŽ@š‚èë;~9V­Bæq±m[1Âäé“ß ÒljJìÄЖsˆ7¹î¤`g“¡ܷ𳬙NÒ²»Fl¦?ÿ„û… ¦†Ò¨îêÍD¿8ÂEO[p©Rj¸5)ᢧ¶›CŒìÀsä';g¢§BÿIôè¡bœ1c2i·’†]±üU¬·ÈŸq÷餌þÂz™–AÛB½cGƒáŒuWΫ¶˜2¦ÌèpaÇ…«80¼äQlσ&‰AƒÍ¡I6šôztØâÊøÃÐr­>öò¬Ë™\YÅH<¨‡X4lôø}•w6¦4 iTíž (g4‘¨ ƒ¥§âSçéù&áàò.sæÌQAñ gG¢Ç€úŒ›7Ãûøq5ìköJà6‡ ‘»!d7\dÌÞ6‡ÛéåcÏš1Jœ¨Ä8æÏŸ¼yó>4¾59°·¾zõjµð*'|p© Öžg#-kiøò~ó 2 ¹´5x]mÐg{ô0ÅÛ±û@Y³³I/+eöî»ïªKÖÔ‚Ï!C†¨Ž+‡)Ù°^Ýþ#úNò/,M“>kÿ¤ÞMÚëÇU.]º¤&•q6ï£è?;8ŒµŸ>}º©à2;”}rÕ“´mÁî(ˆ~Û‹”ó%‰.²“Iï,Áƒbƒ¸<mvjGÛRƒ4KüVÎFmݺ58€O>ùD­­e1ºš|XÀÞŽ›‹Î2>Œ=ë>ø@Åk.([¦òäIå=aYÓ³Ëô9Ìb’£,l”;½Ÿœ<Ãx`6x áDz˜Š)¢dó¤u€ Ѹpá‚cü÷”åðeíÚµUú†îÛZ=໳.c™h+ËÇŲÙùä¤2êë〲¤îs¸’[q @:8„ÏNAþx­é¿aWXÔOê?I2;*l/{ôè¡¶ç|’Neω–œ˜À!{N¶áÄ?–©–½©ˆ9¬]·NyGÙQ)žº#o®ìÈäzwj‘æ‰A…âŒ:öäIh8tÀž‡nHlØà1vq#6 ¥hô¶q8—2aüåÃ=.¹'+%ɇ»xíÓåÊ–Ïãp#=Zll3EÃÂ2àp²±ðgZåÊN åÁr ! 9Î;·š ×¥K%6vO« z—¨ûôÂr­EîJc¬/^\5Jô ¶ái÷´Yæîb 7ÃsoÎ å‰÷£z›’½«|gYSö;wîTu‹^->ƒ6KVÙá{S¿8"F™Ðñ;'pGN$£ÒQò4:=ôð±Œ9lÉa_’p‚Ÿø ʺÿ¼eo²µ\ÓÓønj»ˆ¤àÊ#<Ã_¹(uj@™Òæß[|=(HÅ`ÓÞ°CB½Ï‘#;âÜ•`ìÞg}TØñ3@£Ë€_ö@¨ät©Òxùòe›tùÓ­LÂEÏã/ø„ïiTöäÀø@nùCcÃ2 äw’Q[‰-‰FÕªUU„€eð¤^¾u½z~öô){žãaK 'ƒ>†2ÐûLo7ëEj&< (oÊž$ðرcزe‹"ä¬ ¶æù`ƒjtÀIÂ*T¨ Ê„€eñ4Á掲f;À ;ô‚³óeYoÔ†@½§Í¡ âP,×_¥ýa‡óI¼|ɲ¥­¡í§ÞÓ£ËeyHù÷“ÊÞDÙäu¡‘qpq´—Ã1ñŠÈ¹;›ìpDtœºÈÍÉ´Ôa/÷„DÅÃU®ŠM€·«\kdB~ãu)’A;{xúfÁ˹²¡@¹Ê¨R¢ Eö´?‘1q¸‹<韼ӓ6Eü¬AÃËž6£Á3½-€ïÊŠÍŠo4~O£Wý(Ðe`Ú©Æ??Ÿu$$ä(<.ò‡‡3bbã)¤ÄAWB"‘ÝËU]%¿1–ìñBèîDÆ"ƒ»¢yj†®àN„0ùêêtoòŸNSt72á1±’¾#Ü’LúxTжKÒ1¯ò=L¾'Ø%ÀS§uºä5`)Ë!3ϦӉð¸e”ñ¢oÑò¬GIJ=WtJzœ`LíÓÈÇc@¾‡àqÅ“ eʺ`©Ë¡¢Ë/ÒNFfÝOIï¤ÞEIžÍñÞ©‚ñÎI“Lé|jÀü%µ)±bÙ>·Ü]¥<ðä «}.½{ÞNÿ㦑 $Ä9¢½ÿÛøZÕå/±¨íttÏý’œÈŸ()u>ÚLF“ê?uN]ï#ôpHnòí\1¤ÚHäq²"!æó#ê~ˆÎHNTM@«JoáÕܹL6EH_ÎÜÍñvñrò÷3"|‡ ¯;E\HDÌç±/™²`L©J©Ë«zß8t¨< 弤JIî$}Î…0©Å$ÕN|Ój*¾ò!òàåûoAé“èPD°ù»ù\lê‹­¯Ÿ%½zÏB]‰–þï¢JºTêàs†&~ © öéš#að×µ¢3º,ê‚uqåQ7³«¹×i¥<†"±1—¯·Â®"„JŠ¿ûý€Ù«šaþÁP¦üTÜèøºœ¿qネ@ nw¿EÇEÝÐaÙhÌx Bx-ÊvGmoQ0KJ¾ÛÙá¶ÅàX¹Ÿ½ÿx©ÀÖ¿3;L›ùáy’¢èÛ(^â#Üzu„`aÈ»Þ÷ïˆÉÅÊÊ=fÒ¢;" ÊõF9O?¦g–K¤ùýÔõ’–[ ¼^,;º/xÝV B§å#q^ºžˆ‘w§Ý6ôÂú}Ô;Ê?†Îùäu,Gê—q­¡ƒ,Cõ\9øi”·!3¾¯rœç) 9B½0î²ß8cJŸwiP¢/êeLgzË#Ñ}>ƒº@ÙJ›Ü²\wTõ&ñ“óÙÊ;ò:¶ƒ–r—ûù.ÌÓs‚&~ )ÀE[àÚÑ)¸*Jâæ‹½G¦`C`0Ô›ŠJž¢lø#Ä P)cBQ¹ô[¨â“€h)èpQž6 ç ¢s^X€¦%›`t)?xdmˆ9MJÌÊš±=¦ä¿„¼K§žY‡ë(1±3Þl7OJ-w岋v¹0±ÕL,m÷|Õ0œy‡òcNëYXÖ~<2Sii,„–,öšÜ3Óª½xÃ{e«Â=S|ðì$a¥œrÐØ±b‡JŸ¿Qòjqb Í•€Ïµ·‡gü¯\C4öûæÖêÏt…1·õyÞ(æ$ïÅ4CùîŸaAë)˜Öô#L«ÝÎù0¬lCtªü–·ÿn @áþXÑa.:äÊa’% ŽcLm9SÞécäN`e A½2o Aîâ˜Ôl:·¯X©lžÅñ¾_]¸¥¯ŒyMß„‹+¿9ãÒXT*Ý[„Ýê KV‘¯>¸ÀðòÍаìëø¶^ipÎ#oÁžX&²šU»‡Ü¡Êï®cN¼ßb–´_P.ñòövˆCÖ¼Êû 1Œ“çE:`pí¡xÙIän—IåoiûO𲔟…d±¼sÔD͸ X~ûšœûì3âsuí§(d'¿»À{•ÃÍ«4æ7ǸÛð%mgáóª­LSÞÙÕ» ¾j;_·ø_6›ˆžù Êy£|Ì#~WeÞhœì|ðiÓiò¬ (î(’´ü‹÷EË|%1¾ñTyÇw‘ƒùupÇÖcß PÁÞÈè,ºõ<­–ÆSF"D'bh;i¼¤Á~9_;t¹Þn4ƒ äFÑB=°Ltnfƒ°ûƆ޾¦·›…y-'á˦0¬xdÉÙýŠøatã9˜Ó ŸØœ`ô«ó9–·ú9Åî)ݺL9[JÝšƒ©õ»ŠMN¯;úûBÕü°¤Ý\L¨ÒTÎ!gÁŽè˜5#*—#ýj›lÁú늷ý› ï¢î(U}<|âY"‘7OKô-T¯7˜‘ÅŠK:!èQó±Csñf±’ª¾ß‰ENÉÃwíçbb–Š`6"Aô}DÝÍᎩ.ÛçÇõ{w¯Â+[#•ïYb#BoZîaÇ;*ä4VŸ¿…­¼VˆQ¬Ô±03é+œ¿“’ß×M†Ã%’QGôª< 7{6¯æÈxT›÷5&Vj`"¹”•©Aõ¦HÞ§â•,r ¨‘a gaQÛÙS²ü=™’†EÄJùødo„׋–E¯ZŸcY›É¨™^lm‚t&áQß‹mû®õDävA¤Èä®™|f’wSº¢éYÈ€Ñ 
‡Â›Ä%ú62doªä8¡V{\¼{WÚ5Ñ›˜`Øg¨©äòM‹7;HtH¸ 88Ë3bѳÊh”ËY?´Ÿ…|Ž·0¼Ñ×X,ökpÁ¦çF£Dá>XÒfæ4ÿSE¿JyÄ¡“ÿhÔÍ_KlÖW˜?·´ ÅðƒÈlnM±…álçä¹a7УÖdÉç,tË-í‡0Ï̵0¢d9t¬>AÊa†ØiOÑÃ(ôh0NekV—ðOOÝ7å;JÚж9™ªcTI?´¯Æûf¢If/³ ƒá’©¶’ÅŒúÝq!4QJ’/?%‹EmF·/ÂI®[Ñ|ˆèèù]7Lé4ížùÓÄïQáè?¶Â¹—F`oÅh™3³ª˜4‚éü1¢x%)è lw?ù—‚#¥¢÷Gtðmtôˆ™<ðýÚ×q,ú&üÐ?Y‡1»6âì¹Ð{í4)VŠT,PwÉó¸W *~Þ:ˆSÈŒú.¸-=Û™5{aèÒöè¾ý:nŒüQ EÚVŒF>¢|N¥pkÐŒþ¾)Ú.ÿÖJq=[îŲ|×Ñn~{Œ={ù"NbØŽµ8{ñGô\; vò|ÆR‰ÂW*&½×æŸ`fã1»ÙgøÈ¯¡ä힉’kj–ˆéR‰–¶Ÿ éÄ%8çÆGÓPøÆ"ôZ· ”D¯EÍÐnñ,lï·RduúBÁ‹sÐe^W„ø¶n¬A´}n|Öx"ÏMÂ+?#¯c˜ýV´új8&t^‰r®RCœ áÒÀï0ù§6h»t޼¾FòyÕŠöƺºñÆòv{Η{~.inÃð¿WáÜÕ_ÑýçϤB:™ÞKõ”=1¦Ju,>ø#>ݵýüêÈ9)Cçø´Ñ$ø‡­C×Uï¡U£MØTÚ m¿k‡á‡÷"ŸGf!ºQè[º5Ƭꄑ'íq¾ÇD‘ëu4(ý†4NÞ8î‰éuz˹۰ËÑ)‰‘Þ8:ð{|ûK'‘Ã\l¼™EŽ40b øÇͰcâôŽXŽÅ¿tV2Û4ø7dˆ:„77/Åùë›ðꪱH—¹ œ¿AûYøÕ¾9VÕ¨+¤ÕÁ=ßÅàeÐcÃOèZ¨,~:¿GôI Y2ˆ§1vȉVaÕ†îh·p ÖõÿBQª@G¬hØ£~¯ºS}fÃ^zÞÁ!¥Aô†¯‡)ïiñ¸ wŒ­û)æ¶™¹oÀ-öòä À×m¿À ½0õ’ *Ù@ÛÙxÿœ޶}Mnˈs¯¯Ä쟻¢û“Q¿ps¬?±s7ÅŒÖミšcÚÍBHxÿŽl}¯l9Œc}¦ )¸ŠEG`O-?´Õ“.çÃÙ®ÃÅ6ÄaDa’-í綇]þQX\­..ž‡YçN`í¶ñáÎRGÌ;*Лä]­³Åaö5ØtÓ­_Ê+¤! 9³ÕÁÌ63±æ¯~øpÏ>¬|ãÊÜœ…vßvÅú¨ pEœ˜r/´Ïê‰Î_µAXÎ7±¢ZM±'vQ}(2Ç^C¶\0$ŸØðˆÔ­:±à”o0Î494Ęãéq£Ïû&û¯`WL\Ý Žå>Cu_{„ÅÅKUòæVu<.¢íœôÝŒÀž‰ÃðÚƒ18kÚ~ݽ:nŹº~h5³" ¼‹‰¥ KÚQø¶ïxífwA÷Ö¡aºX¼Öæ/T¿5çvÀïqYÙΨ´)öx³Ú r¸…t«aR«X¿ã ´ýiþ°Y¢.Ã-{W$ úÝ–µFçU£`–Š&›@ÄG!sÖÚS¬œÈƒ„-ީОҾ¸äŒ›Ýûˆ›bø iÃrz"”^ËlÖ¡§’K÷w?tª‰¸ªôÄÖ¹ûa I¦ßÿ™FE Wµ^XY®(ZÕ>/ ÂÏuD‡Ù­Q²ÆJtÉ”€ŒEßÃê¹Ðþ+±·ÇÃ00G$ö_ ½ñi!_‘GK4l¾!­›¡õŒF8˜iT*+Äê&&÷9¢?F»éQ¿ñFtË&TÇ«,>l¾ûö¼…¶Ë>š>»!øú—Q¸%î º`Û)+¶fÄIÞÓ•Ãøæßáо·Ñvñûø¹÷N¼}Hß‘Rþç7Aÿ=g1*·3îÒc¾!ú¾#eÝ;Ž„w–#æø<œô}+ª”@ËÖ«Qòò"œŒb[k~Ð3†&~ {G9î Ò‡™PæÛaÈS´/bß½ˆª™=ðå¶/P+¯ÌõñöuÔ+Òð©üAk°3$ÊÔñaƒéä'IÇËEzBè<Ä9:¸ñÞ˜¡t¼ÎZ)T½¶WƯîrÏ»~ ½¶ì?þ)6Ú•CÓt¸#õ4$:eJô„}x F5_ˆ) ÚÁέrØÆ»e2¢ÍÏ‹¤dGÐÕ8‡tN®pRÏw¿÷|G7lßÿ9º.€~+¡Ï÷}1Bˆ”ê­™á"×üyp,ëvKaÛÝXØqaÉ;ãó3'%o ˜¹ëLm¿Óšõ‚“g.vrA ôzâí…ˆÉ3½œ#pý®Üãàˆ»×7â‡KW…àžÅÙèóøà¤_§ Øx;ªepD‘‚á{ƒ}…)Ýáá^åÜ<+†uú?3$ÏpüÊ_pr} ö.vpwt1¿—•\Ũ!G-Ô‹9†U7ïàïƒ3P¹ø¸ºJ…GßÙ‹÷Nì•´Š`œAôX=W„!7âL¸ôܰ`÷,)Šô8sm ⤡q‰E$‡$ì]pt׸d¬ '§»èR¾7¾ùóä(Ð9ìƒÑ£ÞL|Ù´?2ºçCe.·aö°ÄKþÃÍ“;Šæk‡œö!èZoº\;=^BeïLpva§À5n^üGÝð]¯Ð3—/ f£! FS'¹’Lj˜Ûˆ‹KÙ°ÄHG%ž6È'ùìPk ¦6™¥|ªÈ³bããðÃyÇO\»¹]LcNdqùH/7A4Бe§‰_‚=<ޱÞB¯ïû£×ÚIˆ"è(ºs`÷$\Š”ò¾»ßgÅâÞ«0¾DQøf*%J†@©ŽÔ9±ÒãEx´‹è‡=vIKˆ÷ÁɳÛqíÖFlº-õ.bn8–G'ô.ßAјúêRŒ)ZysV•Šƒ°(|ºO:©ž®Xuz?ÊfË):ì7±.NbŸ ÒG+(ϯT¦7‚}%-tÞÛ¿3jvV¤‚øs'ãdH4\3Iãﳃ·–F9öŸþ‘bbâñÙÞò,Oüxz7ÊdÍ#éÄ"\λ9DbÈ/s0È¿‰è½ F-‰N-Çð*]q=Â_vY†K‡¯äÛ‰A3ì$¯žöWÑpéL¬ï:a·¥3-ö-ü ¦]²Ç’^?aZÅ pñùÅÇâ¶p£÷v,åŠMgObºtFááŠ-—£VÎ"ò¾~è˜Í¹Š¼‰]f#¯ØµæyJaÕ¾%¨Sc*†U®‰¿wÌÃuÚ ä$Ûæ&äîéoq>Xþˆ=Š-wQÈ;ý+÷¼µ¯! Ù¥ìîâ÷ á¶͌8éà+ï!vê®\c‡UÚbþêw…Èæ7`ê…;°·G¯Š=‚)"—éý`—±:ÜIøæ-|'†,íƒÎËúâãÃÒ!utR>„M^™»÷~ŽòfàënßJê9‹áNÄ É‡ä[ÚOÜ Qí›ÿûëK‘Süvò8æX*íYl½° uòñ‹/ŽA¹<ášofv_ŒüŽqh–×Ñôž^Y€#7å½Î`½p4¿Lbƒ=à ÿ©ö8™Е ½º‡nȃíÎâW¹¯dút¬Òk7¼‰8‘ÅÍ.\íþÑ²Ò \ º„É—aJÕ’Ïj*í·gAÙF°"Ç_¨¹n½´ÿή©&~W:§—Þf<¾XÙõ¶ĜꀣßáJæèé?sV×Çj÷úWñ,Ø¿X‘‚Ú`ØÛÙK;j"ªAEßqê7¤ÏVZj`¤ªxj¢BÆ’ÒC …‚³$Fãfj€½Áù¬èÆC쥲>¿\*[ Yóìþç‹ËqÒh‹‚ËÓä ã0=?žÏ`ZFƒN_é7± íLÌj= sÛÎÆ'U„ÈZyüg|Wo9Xi˜#;D‰Á°³—ôbóàΨ?0o}_ \0‡£¢—à…Üî‚h‰OÛLÄŸ¿6øã§Ä¶KÎä½¹õƒœ$0n›#†” PL<·ÑqÂÅK«åú™ÞiDì…›½¢Ù3•÷°“{I¤øöbpÉU5wѱÌ@œ ½„‰-æâ ÿê8dŸý3g”wãŠí1"[ 1AÒQ¯dºÏ ;‘^´â’ª#ùQ½yãgÉ+âöãã‹®x·tŒÈo‡ ÇŽ"Ï«ë1dI_ ^ýºÊ÷wÄz˜W_§ü=¤3{îNá„«×7`°ùZ¹vµ´” h£Cáçÿ ~+—´Å€M?#š½}Ïœbì£1¼Æx|Qáeäú¢ nÙ¹Êû ¡Lœl*#Â^ ýµ ?Ô³ɳ\äY?ݧ<+Šò¤·ï$ï¨æ ‰±¦îÄRÿ­zÅi¢ƒÎžÒI]vaФ—‘ HˆB–—†áú+­ÑaÁ+è¾ök„sgyç,pJкü(LªÓu§T©gUI“*ÂŽH㣨ë¢ÛôÒÑN9K¥^¹íc Z>Ý–4ƒÝx!XbS6Á:%7;Ë=¦íµ¤ÖI^âeÔ5Ök» Y¼ 9À´Žß¢wÆ„eo,Ž¡ˆ‘û#©ÃªŽ1 s—Ù¸_ÒT»>±7.i«g‘˜ao/v-p!öyt@£’ÝP>æG¹åOg¬ùç3 ^>XHrØ©˜$k&º:¥Cбñ˜z7 +–“θ–ŒÐk(Ú× }V~‰0ó.¬FîÒQc\Ħ„2òÝ‘6ñ·ò=ç0hqOôÿéMûÀýÅùcà9®þtª‰„÷O#³ÝMyMËËY`'÷‡Sþæºì$Ï‹[È}jÌv΀=ÿ6Û± $´ü;6’ÓîÝg‚I­€ùÝ´w †|?ýWvWáž¾ê7vI¬ØVˆ‘̘ž¬VˆÄ¸gÐ(ìô˜ß_ž<¥ž›%}\ŠN‡q-§ Wº=ðýŽÜoZ´XÉLà*ù QáF&™E1Ï’?{ÜÄ E=Ðï§á¨ø‰/ZmÙ_!×qŒ×£-5Ë‚[¶1/¢#ä˜Æû[ƒ2T±|J‡TÛ› ú#:¨df†! 
i{wž…×– ”vª¿´½9ê)ºí–.¢ ȘÏÔŽ&ó¬gMüBˆJ½ÜŸWª'=Ø;bÙŠ`fÍr˜³û)ØP̻⃹UsbòÑÓøb÷~¼S§-íÞ"×9{>»(E#ëè.Ê&ßE©Ão¢@zéÙÒEIÄÍåèuÔÞ"oÉÉ\89ô[¼¿°‹\ç¨ nlÀ'òÛud,þJÜøë„L¸I’ôÄíÞ;ÅË|ŒšÒóa«ÝÖ XÛ“øß®‹XÞ¢»ôÀ‚#grvÇåðxÉ[z-bà¨b¸·˜‚.+^GßßD¯Cðöö5¢Õ’7³®;9¸Á]’{°2á"DǤX b€Ã,ÝO×ò½QÖ%›T®[øëÒidsõQU¿~Ù®b¨y¥ܤ2›ˆ–4Bô8˜D¹yI~KeÊZh$šçÏ$å‡úK~„¨É}nÊÀËR¡Ý\¥òÙãFÔMdB$BR¿© e´JÑvÉp CúúÏo£Ó/ßãæҋMˆÃî.y´œ®`ÌÖÃøºé *3—EwoØ‹l-ï,†ÆËl€\„¸¹Ò¸:{cò_cTó©¸yü[\ˆtÆ‘ßÀ%×`t*,y‘|øõCzG1V¬÷’ßS·"ƒûËð’žþác³¬ýÑöå¬b£Q£LÑ•\ »Š ®Ò;OQ¤,ˆ:•¯×é…Ìl,®oÇñhWx áŽwÊåÁ¡èXímÔ÷Mg"gt”ÈÆN•xœ<õ ‚}º¢Gñ<"¢pÔ.×[Þ‰„ÔEÊO®gºr¸IãG÷lR–gp5$˜­Þ[# @ê™»è½S’2ut}¢7OÕÔ8DÄÝsG—ú¯‹5e ;Š]¡qÈá,6ÁÁ+wPÃ{rŸ‡tä{!{žÒà*HúžB.½œ"1aórŒm1n®bs<ó¡})±§¢“b;œÌõÞI¥#$Á>W¤ƒWÔW½P´Ob+½²µBÓtÑzÑp ”º<ô‡¡XpÝ“+7C”4Ì^ôÊó#ïlÀê%1«*‡Cà÷r]¸ Ypsfãoz–é]M„ÄÃÉN¬Ëqw0lçF¬í0c~™I#„I›`xóðv“|»å@ç²MMyR°f–£‹†-ïƒÊ¹Š‹Í—ü Iˆù‰=hP82²òË{xÈ#Ù±%h·]ÍvŒïî%m"6cþ•—±¨QCUw ëƒÂ h_ý”Èš»w~…SN¾ð÷dœ Áw4$]ŽªX¼¤,gÀ[Ž)›gâÕFSáëÀX<4Ê[ W¤cîí(ÏûwøÜ>d(TGÊøü+¾&ùuü%`Ú–EèÔx<œ¢á˜#ýsy ¹fn™ŽW›ÍC&‘‹c:t«(ºÀ¸iõX¹ÀÐ’7ɇ³Éà ‰ŠF'g¸ǧ•ÊËß±¸|áwD¹å»j/ŸJhS¨ˆ¤«ÞïÆDèx0l?ËíâwcÂ9¬oÙZ‘ÊÜ…û |Ñ`!dN†—mŒ<Ÿeew!pDOy¾…š!‘âï”—’­\Ã÷âh)æÞ/ÕIß ÷°œøœ­eÜKC¥ŠŒËy¾3=²”¥õ³¨Æy*{¯òUhƒJñ>8y›:÷Å’ýC°ö¦äÏÙ“÷¹£F¦ÂØw`Ê•¯‡£{6 gñŠ8²çO”ôk„s6£@ÉJ–ç–çä)ág¯¡Ëªõ¨ûiaü'¡GäY@Œ±sGPP2fÌ(Åhކµs‡“ .E=ÆÎ6UCYR' }%xŽ6AyÂDq©¿ªîˆž óÊ5×ÛÖƒÛ§¢ëY° çv\ÝÕ oíÛ'×HZ¼OÕ7IŸzܹ´m‹Ð°m,O†:Ð3Æg¨ïIžÏ{¹ð8ë˜áÁJÎþ(ûKý”g&yðiGXÿ8jÁ{¬Ÿ¥BjäzÖ_ëó´á\"†¶—¼–öÂÈ·Ùk¥TŒµ’¥ØÚ°ƒÔõæwà(åA™ç™g¹Î°Íê=Ìe¢l­üΡ|^kØJæ—äšï¤ÊMžÏá[ÚjCo,éÉeJ.†í“{ùÞÖ^?ÊŠïä(ä…—åË›ée ƈ®‡Pþü@´þs#³ú}ïñE êï3ç…YnF*™1}±ý”óÉgº¡d‘äùFÞ6Ö(Kê›t ”'PÉÝú>^“’,$-µl€iR8–Ì|ºèä%ך.I"ŸÞΚø¥ˆ’–-Ò½|¯cÀæD1EÁ^DYp©däFo"½£½ô2Æ;Û~Ü}%¿¦Kl4Rb_à⊠·"q+³2\HæÓ]>ÃäÓ#ñ§P×;5k#áäa,ú‡^Gù¶#e~1°¾Bofñ€ïµ°ÔfvשëÍ—]ykõ„ÐÄ/Ux"â÷¸ˆº…t9_ÁÔŠu¥}ÇÊmaÕåb‡…ˆhh< HŽÂƒÐ±öT4ðuA|üMô]þ¢Ýs?SóóBA?¿ JNg»Ä`…=z¦^d°·ªÖ‡$ŠŸÐP ·À˜Yò(ŸìÝÓƒBo{ªŒ×áù¤Hmz)}>Kñ+”»ŽõþY¿À ~ç"Qðƒ—…øe2ÿò/ƒ^cD‚žlkÏ“†ÆAlòÒÒæÛ³öºÚ""C1¢áX|Xã5MüBBB!CŒøkÜ]E/t£ ¡‘V‹ÜÞÙЫTkMü?gG'„ÛEãÿgˤ††FšgØ7x©*ªç*gÛÄ“;._¾Œ\¹r™Ïhhh¤U*c§‰ßý`°wdd¤ýÐÐÐH» ï¹qã2g~üpŽÿ4ñ#.^¼ˆ]»v©YmºAÐÐH{ ¡óòòBÅŠ•ÇOã~PF‡ƱcÇÔwm 54Ò ª–)S&T®\®®®êïÇÁžøÑÈqI—ÿøkhhh<öööpvvVŸɃ¡/ìkhh¤]8::*[ø$øÏ? ÔAwŸ54444444lšøihhhhhhhØ4ñÓÐÐÐÐÐÐаhâ§¡¡¡¡¡¡¡a#ÐÄOCCCCCCCÃF ‰Ÿ††††††††@? &~6Mü44444444lšøihhhhhhhØ4ñÓÐÐÐÐÐÐаhâ§¡¡¡¡¡¡¡a#ÐÄOCCCCCCCÃF ‰Ÿ††††††††@? &~6Mü44444444lšøihhhhhhhØ4ñÓÐÐÐÐÐÐаhâ§¡¡¡¡¡¡¡a#ÐÄOCCCCCCCÃF ‰Ÿ††††††††@? &~6Mü44444444lšøihhhhhhhØ4ñÓÐÐÐÐÐÐаhâ§¡¡¡¡¡¡¡a#ÐÄOCCCCCCCÃF ‰Ÿ††††††††@? &~6Mü44444444lšøihhhhhhhØ4ñÓÐÐÐÐÐÐаhâ§¡¡¡¡¡¡¡a#ÐÄOCCCCCCCÃF ‰Ÿ††††††††@? &~6Mü44444444lšøihhhhhhhØ4ñÓÐÐÐÐÐÐаhâ§¡¡¡¡¡¡¡a#ÐÄOCCCCCCCÃF ‰Ÿ††††††††@? &~6Mü44444444lÀÿèVÎÅn8׌IEND®B`‚swift-2.17.0/doc/source/overview_ring.rst0000666000175100017510000005655513236061617020474 0ustar zuulzuul00000000000000========= The Rings ========= The rings determine where data should reside in the cluster. There is a separate ring for account databases, container databases, and individual object storage policies but each ring works in the same way. These rings are externally managed. The server processes themselves do not modify the rings; they are instead given new rings modified by other tools. The ring uses a configurable number of bits from the MD5 hash of an item's path as a partition index that designates the device(s) on which that item should be stored. The number of bits kept from the hash is known as the partition power, and 2 to the partition power indicates the partition count. Partitioning the full MD5 hash ring allows the cluster components to process resources in batches. This ends up either more efficient or at least less complex than working with each item separately or the entire cluster all at once. 
Another configurable value is the replica count, which indicates how many
devices to assign for each partition in the ring. By having multiple devices
responsible for each partition, the cluster can recover from drive or network
failures.

Devices are added to the ring to describe the capacity available for
partition replica assignments. Devices are placed into failure domains
consisting of region, zone, and server. Regions can be used to describe
geographical systems characterized by lower bandwidth or higher latency
between machines in different regions. Many rings will consist of only a
single region. Zones can be used to group devices based on physical
locations, power separations, network separations, or any other attribute
that would lessen multiple replicas being unavailable at the same time.

Devices are given a weight which describes the relative storage capacity
contributed by the device in comparison to other devices.

When building a ring, replicas for each partition will be assigned to
devices according to the devices' weights. Additionally, each replica of a
partition will preferentially be assigned to a device whose failure domain
does not already have a replica for that partition. Only a single replica of
a partition may be assigned to each device - you must have at least as many
devices as replicas.

.. _ring_builder:

------------
Ring Builder
------------

The rings are built and managed manually by a utility called the
ring-builder. The ring-builder assigns partitions to devices and writes an
optimized structure to a gzipped, serialized file on disk for shipping out
to the servers. The server processes check the modification time of the file
occasionally and reload their in-memory copies of the ring structure as
needed. Because of how the ring-builder manages changes to the ring, using a
slightly older ring usually just means that for a subset of the partitions
the device for one of the replicas will be incorrect, which can be easily
worked around.

The ring-builder also keeps a separate builder file which includes the ring
information as well as additional data required to build future rings. It is
very important to keep multiple backup copies of these builder files. One
option is to copy the builder files out to every server while copying the
ring files themselves. Another is to upload the builder files into the
cluster itself. Complete loss of a builder file will mean creating a new
ring from scratch, nearly all partitions will end up assigned to different
devices, and therefore nearly all data stored will have to be replicated to
new locations. So, recovery from a builder file loss is possible, but data
will definitely be unreachable for an extended time.

-------------------
Ring Data Structure
-------------------

The ring data structure consists of three top level fields: a list of
devices in the cluster, a list of lists of device ids indicating partition
to device assignments, and an integer indicating the number of bits to shift
an MD5 hash to calculate the partition for the hash.
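As a rough sketch (with invented values), a tiny two-replica, four-partition
ring might hold something like the following; the device entries are
abbreviated versions of the dictionaries described in the table below::

    from array import array

    ring = {
        # list of device dicts (abbreviated; see the table below)
        'devs': [
            {'id': 0, 'region': 1, 'zone': 1, 'weight': 100.0,
             'ip': '10.0.0.1', 'port': 6200, 'device': 'sdb1', 'meta': ''},
            {'id': 1, 'region': 1, 'zone': 2, 'weight': 100.0,
             'ip': '10.0.0.2', 'port': 6200, 'device': 'sdb1', 'meta': ''},
        ],
        # one array('H') per replica, each of length 2 ** part_power
        '_replica2part2dev_id': [array('H', [0, 1, 0, 1]),
                                 array('H', [1, 0, 1, 0])],
        # 32 - part_power; part_power is 2 here (4 partitions)
        '_part_shift': 30,
    }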
***************
List of Devices
***************

The list of devices is known internally to the Ring class as ``devs``. Each
item in the list of devices is a dictionary with the following keys:

====== ======= ==============================================================
id     integer The index into the list of devices.
zone   integer The zone in which the device resides.
region integer The region in which the zone resides.
weight float   The relative weight of the device in comparison to other
               devices. This usually corresponds directly to the amount of
               disk space the device has compared to other devices. For
               instance a device with 1 terabyte of space might have a
               weight of 100.0 and another device with 2 terabytes of space
               might have a weight of 200.0. This weight can also be used
               to bring back into balance a device that has ended up with
               more or less data than desired over time. A good average
               weight of 100.0 allows flexibility in lowering the weight
               later if necessary.
ip     string  The IP address or hostname of the server containing the
               device.
port   int     The TCP port on which the server process listens to serve
               requests for the device.
device string  The on-disk name of the device on the server. For example:
               ``sdb1``
meta   string  A general-use field for storing additional information for
               the device. This information isn't used directly by the
               server processes, but can be useful in debugging. For
               example, the date and time of installation and hardware
               manufacturer could be stored here.
====== ======= ==============================================================

.. note::
    The list of devices may contain holes, or indexes set to ``None``, for
    devices that have been removed from the cluster. However, device ids are
    reused. Device ids are reused to avoid potentially running out of device
    id slots when there are available slots (from prior removal of devices).
    A consequence of this device id reuse is that the device id (integer
    value) does not necessarily correspond with the chronology of when the
    device was added to the ring. Also, some devices may be temporarily
    disabled by setting their weight to ``0.0``. To obtain a list of active
    devices (for uptime polling, for example) the Python code would look
    like::

        devices = list(self._iter_devs())

*************************
Partition Assignment List
*************************

The partition assignment list is known internally to the Ring class as
``_replica2part2dev_id``. This is a list of ``array('H')``\s, one for each
replica. Each ``array('H')`` has a length equal to the partition count for
the ring. Each integer in the ``array('H')`` is an index into the above list
of devices. So, to create a list of device dictionaries assigned to a
partition, the Python code would look like::

    devices = [self.devs[part2dev_id[partition]]
               for part2dev_id in self._replica2part2dev_id]

``array('H')`` is used for memory conservation as there may be millions of
partitions.

*********************
Partition Shift Value
*********************

The partition shift value is known internally to the Ring class as
``_part_shift``. This value is used to shift an MD5 hash of an item's path
to calculate the partition on which the data for that item should reside.
Only the top four bytes of the hash are used in this process. For example,
to compute the partition for the path ``/account/container/object``, the
Python code might look like::

    objhash = md5('/account/container/object').digest()
    partition = struct.unpack_from('>I', objhash)[0] >> self._part_shift

For a ring generated with partition power ``P``, the partition shift value
is ``32 - P``.
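Putting the three structures together, a self-contained sketch of a complete
lookup - again using an invented toy ring - might look like::

    import struct
    from array import array
    from hashlib import md5

    part_shift = 30                      # 32 - part_power, so part_power = 2
    devs = [{'id': 0, 'device': 'sdb1'}, {'id': 1, 'device': 'sdb1'}]
    replica2part2dev_id = [array('H', [0, 1, 0, 1]),
                           array('H', [1, 0, 1, 0])]

    objhash = md5(b'/account/container/object').digest()
    partition = struct.unpack_from('>I', objhash)[0] >> part_shift

    # one device per replica for this partition
    nodes = [devs[part2dev_id[partition]]
             for part2dev_id in replica2part2dev_id]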
*******************
Fractional Replicas
*******************

A ring is not restricted to having an integer number of replicas. In order
to support the gradual changing of replica counts, the ring is able to have
a real number of replicas.

When the number of replicas is not an integer, the last element of
``_replica2part2dev_id`` will have a length that is less than the partition
count for the ring. This means that some partitions will have more replicas
than others. For example, if a ring has ``3.25`` replicas, then 25% of its
partitions will have four replicas, while the remaining 75% will have just
three.

.. _ring_dispersion:

**********
Dispersion
**********

With each rebalance, the ring builder calculates a dispersion metric. This
is the percentage of partitions in the ring that have too many replicas
within a particular failure domain.

For example, if you have three servers in a cluster but two replicas for a
partition get placed onto the same server, that partition will count towards
the dispersion metric.

A lower dispersion value is better, and the value can be used to find the
proper value for "overload".

.. _ring_overload:

********
Overload
********

The ring builder tries to keep replicas as far apart as possible while
still respecting device weights. When it can't do both, the overload factor
determines what happens. Each device may take some extra fraction of its
desired partitions to allow for replica dispersion; once that extra
fraction is exhausted, replicas will be placed closer together than is
optimal for durability.

Essentially, the overload factor lets the operator trade off replica
dispersion (durability) against device balance (uniform disk usage).

The default overload factor is ``0``, so device weights will be strictly
followed. With an overload factor of ``0.1``, each device will accept 10%
more partitions than it otherwise would, but only if needed to maintain
dispersion.

Example: Consider a 3-node cluster of machines with equal-size disks; let
node A have 12 disks, node B have 12 disks, and node C have only 11 disks.
Let the ring have an overload factor of ``0.1`` (10%).

Without the overload, some partitions would end up with replicas only on
nodes A and B. However, with the overload, every device is willing to
accept up to 10% more partitions for the sake of dispersion. The missing
disk in C means there is one disk's worth of partitions that would like to
spread across the remaining 11 disks, which gives each disk in C an extra
9.09% load. Since this is less than the 10% overload, there is one replica
of each partition on each node.

However, this does mean that the disks in node C will have more data on
them than the disks in nodes A and B. If 80% full is the warning threshold
for the cluster, node C's disks will reach 80% full while A and B's disks
are only 73.3% full.
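The arithmetic in that example can be checked directly (the 12/12/11 disk
layout and the 80% warning threshold are the ones described above)::

    disks_a, disks_b, disks_c = 12, 12, 11

    # Each node stores one replica of everything, i.e. one third of the
    # cluster's data, so node C spreads 12 disks' worth across 11 disks.
    extra_load_c = 12.0 / disks_c - 1    # 0.0909... -> 9.09% extra

    # 9.09% < 10% overload, so dispersion is preserved.  When C's disks
    # hit the 80% warning threshold, A and B's disks are at:
    fill_ab = 0.80 * disks_c / 12.0      # 0.7333... -> about 73.3%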
-------------------------------
Partition & Replica Terminology
-------------------------------

All descriptions of consistent hashing describe the process of breaking the
keyspace up into multiple ranges (vnodes, buckets, etc.) - many more than
the number of "nodes" to which keys in the keyspace must be assigned. Swift
calls these ranges `partitions` - they are partitions of the total keyspace.

Each partition will have multiple replicas. Every replica of each partition
must be assigned to a device in the ring. When describing a specific replica
of a partition (like when it's assigned a device) it is described as a
`part-replica` in that it is a specific `replica` of the specific
`partition`. A single device will likely be assigned different replicas from
many partitions, but it may not be assigned multiple replicas of a single
partition.

The total number of partitions in a ring is calculated as
``2 ** <part-power>``. The total number of part-replicas in a ring is
calculated as ``<replica-count> * 2 ** <part-power>``.

When considering a device's `weight` it is useful to describe the number of
part-replicas it would like to be assigned. A single device, regardless of
weight, will never hold more than ``2 ** <part-power>`` part-replicas
because it can not have more than one replica of any partition assigned.
The number of part-replicas a device can take by weights is calculated as
its `parts-wanted`. The true number of part-replicas assigned to a device
can be compared to its parts-wanted similarly to a calculation of
percentage error - this deviation in the observed result from the idealized
target is called a device's `balance`.

When considering a device's `failure domain` it is useful to describe the
number of part-replicas it would like to be assigned. The number of
part-replicas wanted in a failure domain of a tier is the sum of the
part-replicas wanted in the failure domains of its sub-tier.

However, collectively when the total number of part-replicas in a failure
domain exceeds or is equal to ``2 ** <part-power>`` it is most obvious that
it's no longer sufficient to consider only the number of total
part-replicas, but rather the fraction of each replica's partitions.
Consider for example a ring with 3 replicas and 3 servers: while dispersion
requires that each server hold only ⅓ of the total part-replicas, placement
is additionally constrained to require ``1.0`` replica of *each* partition
per server. It would not be sufficient to satisfy dispersion if two devices
on one of the servers each held a replica of a single partition, while
another server held none. By considering a decimal fraction of one replica's
worth of partitions in a failure domain we can derive the total
part-replicas wanted in a failure domain (``1.0 * 2 ** <part-power>``).
Additionally we infer more about `which` part-replicas must go in the
failure domain.

Consider a ring with three replicas and two zones, each with two servers
(four servers total). The three replicas worth of partitions will be
assigned into two failure domains at the zone tier. Each zone must hold more
than one replica of some partitions. We represent this improper fraction of
a replica's worth of partitions in decimal form as ``1.5`` (``3.0 / 2``).
This tells us not only the *number* of total partitions
(``1.5 * 2 ** <part-power>``) but also that *each* partition must have
`at least` one replica in this failure domain (in fact ``0.5`` of the
partitions will have 2 replicas).

Within each zone the two servers will hold ``0.75`` of a replica's worth of
partitions - this is equal both to "the fraction of a replica's worth of
partitions assigned to each zone (``1.5``) divided evenly among the number
of failure domains in its sub-tier (2 servers in each zone, i.e.
``1.5 / 2``)" but *also* "the total number of replicas (``3.0``) divided
evenly among the total number of failure domains in the server tier
(2 servers × 2 zones = 4, i.e. ``3.0 / 4``)". It is useful to consider that
each server in this ring will hold only ``0.75`` of a replica's worth of
partitions, which tells us that any server should have `at most` one replica
of a given partition assigned.

In the interests of brevity, some variable names will often refer to the
concept representing the fraction of a replica's worth of partitions in
decimal form as *replicanths* - this is meant to invoke connotations similar
to ordinal numbers as applied to fractions, but generalized to a replica
instead of a four\*th* or a fif\*th*. The "n" was probably thrown in because
of Blade Runner.
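The replicanth bookkeeping for the two-zone example above reduces to simple
division, as in this small sketch::

    replicas = 3.0
    zones = 2
    servers_per_zone = 2

    zone_replicanths = replicas / zones                       # 1.5 per zone
    server_replicanths = zone_replicanths / servers_per_zone  # 0.75

    # equivalently: replicas spread over all failure domains in the tier
    assert server_replicanths == replicas / (zones * servers_per_zone)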
-----------------
Building the Ring
-----------------

First the ring builder calculates the replicanths wanted at each tier in the
ring's topology based on weight. Then the ring builder calculates the
replicanths wanted at each tier in the ring's topology based on dispersion.
Then the ring builder calculates the maximum deviation on a single device
between its weighted replicanths and wanted replicanths.

Next we interpolate between the two replicanth values (weighted & wanted) at
each tier using the specified overload (up to the maximum required
overload). It's a linear interpolation, similar to solving for a point on a
line between two points - we calculate the slope across the max required
overload and then calculate the intersection of the line with the desired
overload. This becomes the target.

From the target we calculate the minimum and maximum number of replicas any
partition may have in a tier. This becomes the `replica-plan`.

Finally, we calculate the number of partitions that should ideally be
assigned to each device based on the replica-plan.

On initial balance (i.e., the first time partitions are placed to generate
a ring) we must assign each replica of each partition to the device that
desires the most partitions, excluding any devices that already have their
maximum number of replicas of that partition assigned to some parent tier
of that device's failure domain.

When building a new ring based on an old ring, the desired number of
partitions each device wants is recalculated from the current replica-plan.
Next the partitions to be reassigned are gathered up. Any removed devices
have all their assigned partitions unassigned and added to the gathered
list. Any partition replicas that (due to the addition of new devices) can
be spread out for better durability are unassigned and added to the
gathered list. Any devices that have more partitions than they now desire
have random partitions unassigned from them and added to the gathered list.
Lastly, the gathered partitions are then reassigned to devices using a
similar method as in the initial assignment described above.

Whenever a partition has a replica reassigned, the time of the reassignment
is recorded. This is taken into account when gathering partitions to
reassign so that no partition is moved twice in a configurable amount of
time. This configurable amount of time is known internally to the
RingBuilder class as ``min_part_hours``. This restriction is ignored for
replicas of partitions on devices that have been removed, as device removal
should only happen on device failure and there's no choice but to make a
reassignment.

The above processes don't always perfectly rebalance a ring due to the
random nature of gathering partitions for reassignment. To help reach a more
balanced ring, the rebalance process is repeated a fixed number of times
until the replica-plan is fulfilled or unable to be fulfilled (indicating we
probably can't get perfect balance due to too many partitions recently
moved).
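These steps are normally driven through the ``swift-ring-builder`` utility;
the same operations are also reachable from Python, as in this minimal
sketch (the builder file name, addresses, and weights are example values
only)::

    from swift.common.ring import RingBuilder

    # part_power, replicas, min_part_hours
    builder = RingBuilder(16, 3, 1)
    for i, zone in enumerate((1, 2, 3)):
        builder.add_dev({'id': i, 'region': 1, 'zone': zone,
                         'ip': '10.0.0.%d' % (i + 1), 'port': 6200,
                         'device': 'sdb1', 'weight': 100.0})
    builder.rebalance()
    builder.save('object.builder')          # keep backups of this file!
    # builder.get_ring() yields the servable ring structure itself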
.. _composite_rings:

---------------
Composite Rings
---------------

.. automodule:: swift.common.ring.composite_builder

---------------------
Ring Builder Analyzer
---------------------

.. automodule:: swift.cli.ring_builder_analyzer

-------
History
-------

The ring code went through many iterations before arriving at what it is
now, and while it has largely been stable, the algorithm has seen a few
tweaks and may yet change fundamentally as new ideas emerge. This section
describes the previous ideas that were attempted and explains why they were
discarded.

A "live ring" option was considered where each server could maintain its own
copy of the ring and the servers would use a gossip protocol to communicate
the changes they made. This was discarded as too complex and error prone to
code correctly in the project timespan available. One bug could easily
gossip bad data out to the entire cluster and be difficult to recover from.
Having an externally managed ring simplifies the process, allows full
validation of data before it's shipped out to the servers, and guarantees
each server is using a ring from the same timeline. It also means that the
servers themselves aren't spending a lot of resources maintaining rings.

A couple of "ring server" options were considered. One was where all ring
lookups would be done by calling a service on a separate server or set of
servers, but this was discarded due to the latency involved. Another was
much like the current process but where servers could submit change requests
to the ring server to have a new ring built and shipped back out to the
servers. This was discarded due to project time constraints and because ring
changes are currently infrequent enough that manual control was sufficient.
However, lack of quick automatic ring changes did mean that other components
of the system had to be coded to handle devices being unavailable for a
period of hours until someone could manually update the ring.

The current ring process has each replica of a partition independently
assigned to a device. A version of the ring that used a third of the memory
was tried, where the first replica of a partition was directly assigned and
the other two were determined by "walking" the ring until finding additional
devices in other zones. This was discarded due to the loss of control over
how many replicas for a given partition moved at once. Keeping each replica
independent allows for moving only one partition replica within a given time
window (except due to device failures). Using the additional memory was
deemed a good trade-off for moving data around the cluster much less often.

Another ring design was tried where the partition to device assignments
weren't stored in a big list in memory but instead each device was assigned
a set of hashes, or anchors. The partition would be determined from the data
item's hash and the nearest device anchors would determine where the
replicas should be stored. However, to get reasonable distribution of data
each device had to have a lot of anchors and walking through those anchors
to find replicas started to add up. In the end, the memory savings weren't
that great and more processing power was used, so the idea was discarded.

A completely non-partitioned ring was also tried but discarded as the
partitioning helps many other components of the system, especially
replication. Replication can be attempted and retried in a partition batch
with the other replicas rather than each data item independently attempted
and retried. Hashes of directory structures can be calculated and compared
with other replicas to reduce directory walking and network traffic.

Partitioning and independently assigning partition replicas also allowed for
the best-balanced cluster. The best of the other strategies tended to give
±10% variance on device balance with devices of equal weight and ±15% with
devices of varying weights. The current strategy allows us to get ±3% and
±8% respectively.
Various hashing algorithms were tried. SHA offers better security, but the
ring doesn't need to be cryptographically secure and SHA is slower. Murmur
was much faster, but MD5 was built-in and hash computation is a small
percentage of the overall request handling time. In all, once it was decided
the servers wouldn't be maintaining the rings themselves anyway and only
doing hash lookups, MD5 was chosen for its general availability, good
distribution, and adequate speed.

The placement algorithm has seen a number of behavioral changes for
unbalanceable rings. The ring builder wants to keep replicas as far apart as
possible while still respecting device weights. In most cases, the ring
builder can achieve both, but sometimes they conflict. At first, the
behavior was to keep the replicas far apart and ignore device weight, but
that made it impossible to gradually go from one region to two, or from two
to three. Then it was changed to favor device weight over dispersion, but
that wasn't so good for rings that were close to balanceable, like 3
machines with 60TB, 60TB, and 57TB of disk space; operators were expecting
one replica per machine, but didn't always get it. After that, overload was
added to the ring builder so that operators could choose a balance between
dispersion and device weights. In time the overload concept was improved and
made more accurate.

For more background on consistent hashing rings, please see
:doc:`ring_background`.
swift-2.17.0/doc/source/development_ondisk_backends.rst0000666000175100017510000000267213236061617023315 0ustar zuulzuul00000000000000===============================
Pluggable On-Disk Back-end APIs
===============================

The internal REST API used between the proxy server and the account,
container and object server is almost identical to the public Swift REST
API, but with a few internal extensions (for example, update an account with
a new container).

The pluggable back-end APIs for the three REST API servers (account,
container, object) abstract the needs for servicing the various REST APIs
from the details of how data is laid out and stored on-disk.

The APIs are documented in the reference implementations for all three
servers. For historical reasons, the object server backend reference
implementation module is named `diskfile`, while the account and container
server backend reference implementation modules are named appropriately.

This API is still under development and not yet finalized.

-----------------------------------------
Back-end API for Account Server REST APIs
-----------------------------------------

.. automodule:: swift.account.backend
    :noindex:
    :members:

-------------------------------------------
Back-end API for Container Server REST APIs
-------------------------------------------

.. automodule:: swift.container.backend
    :noindex:
    :members:

----------------------------------------
Back-end API for Object Server REST APIs
----------------------------------------

.. automodule:: swift.obj.diskfile
    :noindex:
    :members:
swift-2.17.0/doc/source/overview_replication.rst0000666000175100017510000002027513236061617022024 0ustar zuulzuul00000000000000===========
Replication
===========

Because each replica in Swift functions independently, and clients generally
require only a simple majority of nodes responding to consider an operation
successful, transient failures like network partitions can quickly cause
replicas to diverge. These differences are eventually reconciled by
asynchronous, peer-to-peer replicator processes.
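As a sketch of what "a simple majority" works out to, consider the helper
below (the helper itself is hypothetical, and real quorum handling also
depends on the storage policy in use)::

    def majority(replica_count):
        # hypothetical helper: smallest number of successful responses
        # that still constitutes a simple majority
        return replica_count // 2 + 1

    majority(3)   # 2 -> a write can succeed with one replica unreachable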
The replicator processes traverse their local filesystems, concurrently performing operations in a manner that balances load across physical disks.

Replication uses a push model, with records and files generally only being copied from local to remote replicas. This is important because data on the node may not belong there (as in the case of handoffs and ring changes), and a replicator can't know what data exists elsewhere in the cluster that it should pull in. It's the duty of any node that contains data to ensure that data gets to where it belongs. Replica placement is handled by the ring.

Every deleted record or file in the system is marked by a tombstone, so that deletions can be replicated alongside creations. The replication process cleans up tombstones after a time period known as the consistency window. The consistency window encompasses replication duration and how long a transient failure can remove a node from the cluster. Tombstone cleanup must be tied to replication to reach replica convergence.

If a replicator detects that a remote drive has failed, the replicator uses the get_more_nodes interface for the ring to choose an alternate node with which to synchronize. The replicator can maintain desired levels of replication in the face of disk failures, though some replicas may not be in an immediately usable location. Note that the replicator doesn't maintain desired levels of replication when other failures, such as entire node failures, occur because most failures are transient.

Replication is an area of active development, and likely rife with potential improvements to speed and correctness.

There are two major classes of replicator - the db replicator, which replicates accounts and containers, and the object replicator, which replicates object data.

--------------
DB Replication
--------------

The first step performed by db replication is a low-cost hash comparison to determine whether two replicas already match. Under normal operation, this check is able to verify very quickly that most databases in the system are already synchronized. If the hashes differ, the replicator brings the databases in sync by sharing records added since the last sync point.

This sync point is a high water mark noting the last record at which two databases were known to be in sync, and is stored in each database as a tuple of the remote database id and record id. Database ids are unique amongst all replicas of the database, and record ids are monotonically increasing integers. After all new records have been pushed to the remote database, the entire sync table of the local database is pushed, so the remote database can guarantee that it is in sync with everything with which the local database has previously synchronized.

If a replica is found to be missing entirely, the whole local database file is transmitted to the peer using rsync(1) and vested with a new unique id.

In practice, DB replication can process hundreds of databases per concurrency setting per second (up to the number of available CPUs or disks) and is bound by the number of DB transactions that must be performed.

------------------
Object Replication
------------------

The initial implementation of object replication simply performed an rsync to push data from a local partition to all remote servers it was expected to exist on. While this performed adequately at small scale, replication times skyrocketed once directory structures could no longer be held in RAM.
We now use a modification of this scheme in which a hash of the contents for each suffix directory is saved to a per-partition hashes file. The hash for a suffix directory is invalidated when the contents of that suffix directory are modified.

The object replication process reads in these hash files, calculating any invalidated hashes. It then transmits the hashes to each remote server that should hold the partition, and only suffix directories with differing hashes on the remote server are rsynced. After pushing files to the remote server, the replication process notifies it to recalculate hashes for the rsynced suffix directories.

Performance of object replication is generally bound by the number of uncached directories it has to traverse, usually as a result of invalidated suffix directory hashes. Using write volume and partition counts from our running systems, it was designed so that around 2% of the hash space on a normal node will be invalidated per day, which has experimentally given us acceptable replication speeds.

.. _ssync:

Work continues with a new ssync method where rsync is not used at all and instead all-Swift code is used to transfer the objects. At first, this ssync will just strive to emulate the rsync behavior. Once deemed stable it will open the way for future improvements in replication since we'll be able to easily add code in the replication path instead of trying to alter the rsync code base and distribute such modifications.

One of the first improvements planned is an "index.db" that will replace the hashes.pkl. This will allow quicker updates to that data as well as more streamlined queries. Quite likely we'll implement a better scheme than the current one hashes.pkl uses (hash-trees, that sort of thing). Another improvement planned all along the way is separating the local disk structure from the protocol path structure. This separation will allow ring resizing at some point, or at least ring-doubling.

Note that for objects being stored with an Erasure Code policy, the replicator daemon is not involved. Instead, the reconstructor is used by Erasure Code policies and is analogous to the replicator for Replication type policies. See :doc:`overview_erasure_code` for complete information on both Erasure Code support as well as the reconstructor.

----------
Hashes.pkl
----------

The hashes.pkl file is a key element for both replication and reconstruction (for Erasure Coding). Both daemons use this file to determine if any kind of action is required between nodes that are participating in the durability scheme.

The file itself is a pickled dictionary with slightly different formats depending on whether the policy is Replication or Erasure Code. In either case, however, the same basic information is provided between the nodes. The dictionary is keyed on suffix directory name, with the MD5 hash of the directory listing for that suffix as the value. In this manner, the daemon can quickly identify differences between local and remote suffix directories on a per-partition basis, as the scope of any one hashes.pkl file is a partition directory.

For Erasure Code policies, there is a little more information required. An object's hash directory may contain multiple fragments of a single object in the event that the node is acting as a handoff or perhaps if a rebalance is underway.
Each fragment of an object is stored with a fragment index, so the hashes.pkl for an Erasure Code partition will still be a dictionary keyed on the suffix directory name; however, the value is another dictionary keyed on the fragment index with subsequent MD5 hashes for each one as values. Some files within an object hash directory don't require a fragment index so None is used to represent those. Below are examples of what these dictionaries might look like.

Replication hashes.pkl::

    {'a43': '72018c5fbfae934e1f56069ad4425627',
     'b23': '12348c5fbfae934e1f56069ad4421234'}

Erasure Code hashes.pkl::

    {'a43': {None: '72018c5fbfae934e1f56069ad4425627',
             2: 'b6dd6db937cb8748f50a5b6e4bc3b808'},
     'b23': {None: '12348c5fbfae934e1f56069ad4421234',
             1: '45676db937cb8748f50a5b6e4bc34567'}}

-----------------------------
Dedicated replication network
-----------------------------

Swift has support for using a dedicated network for replication traffic. For more information see :ref:`Overview of dedicated replication network `.

swift-2.17.0/doc/source/apache_deployment_guide.rst0000666000175100017510000001501013236061617022422 0ustar zuulzuul00000000000000=======================
Apache Deployment Guide
=======================

----------------------------
Web Front End Considerations
----------------------------

Swift can be configured to work both using an integral web front-end and using a full-fledged Web Server such as the Apache2 (HTTPD) web server. The integral web front-end is a wsgi mini "Web Server" which opens up its own socket and serves http requests directly. The incoming requests accepted by the integral web front-end are then forwarded to a wsgi application (the core swift) for further handling, possibly via wsgi middleware sub-components.

client<---->'integral web front-end'<---->middleware<---->'core swift'

To gain full advantage of Apache2, Swift can alternatively be configured to work as a request processor of the Apache2 server. This alternative deployment scenario uses mod_wsgi of Apache2 to forward requests to the swift wsgi application and middleware.

client<---->'Apache2 with mod_wsgi'<----->middleware<---->'core swift'

The integral web front-end offers simplicity and requires minimal configuration. It is also the web front-end most commonly used with Swift. Additionally, the integral web front-end includes support for receiving chunked transfer encoding from a client, presently not supported by Apache2 in the operation mode described here.

The use of Apache2 offers new ways to extend Swift and integrate it with existing authentication, administration and control systems. A single Apache2 server can serve as the web front end of any number of swift servers residing on a swift node. For example, when a storage node offers account, container and object services, a single Apache2 server can serve as the web front end of all three services.

The apache variant described here was tested as part of an IBM research work. It was found that, following tuning, Apache2 offers generally equivalent performance to that offered by the integral web front-end. As an alternative to Apache2, other web servers may be used, but they were never tested.

-------------
Apache2 Setup
-------------

Both Apache2 and mod-wsgi need to be installed on the system. Ubuntu comes with Apache2 installed. Install mod-wsgi using::

    sudo apt-get install libapache2-mod-wsgi

First, change the User and Group IDs of Apache2 to be those used by Swift.
For example, in /etc/apache2/envvars use::

    export APACHE_RUN_USER=swift
    export APACHE_RUN_GROUP=swift

Create a directory for the Apache2 wsgi files::

    sudo mkdir /var/www/swift

Create a file for each service under /var/www/swift.

For a proxy service create /var/www/swift/proxy-server.wsgi::

    from swift.common.wsgi import init_request_processor
    application, conf, logger, log_name = \
        init_request_processor('/etc/swift/proxy-server.conf','proxy-server')

For an account service create /var/www/swift/account-server.wsgi::

    from swift.common.wsgi import init_request_processor
    application, conf, logger, log_name = \
        init_request_processor('/etc/swift/account-server.conf', 'account-server')

For a container service create /var/www/swift/container-server.wsgi::

    from swift.common.wsgi import init_request_processor
    application, conf, logger, log_name = \
        init_request_processor('/etc/swift/container-server.conf', 'container-server')

For an object service create /var/www/swift/object-server.wsgi::

    from swift.common.wsgi import init_request_processor
    application, conf, logger, log_name = \
        init_request_processor('/etc/swift/object-server.conf', 'object-server')

Create a /etc/apache2/conf.d/swift_wsgi.conf configuration file that will define a port and Virtual Host for each local service. For example, an Apache2 server acting as the web front end of a proxy service::

    #Proxy
    NameVirtualHost *:8080
    Listen 8080
    <VirtualHost *:8080>
        ServerName proxy-server
        LimitRequestBody 5368709122
        WSGIDaemonProcess proxy-server processes=5 threads=1
        WSGIProcessGroup proxy-server
        WSGIScriptAlias / /var/www/swift/proxy-server.wsgi
        LimitRequestFields 200
        ErrorLog /var/log/apache2/proxy-server
        LogLevel debug
        CustomLog /var/log/apache2/proxy.log combined
    </VirtualHost>

Notice that when using Apache the limit on the maximal object size should be imposed by Apache using the LimitRequestBody rather than by the swift proxy. Note also that the LimitRequestBody should indicate the same value as indicated by max_file_size located in both /etc/swift/swift.conf and in /etc/swift/test.conf. The Swift default value for max_file_size (when not present) is 5368709122.

For example, an Apache2 server acting as the web front end of a storage node::

    #Object Service
    NameVirtualHost *:6200
    Listen 6200
    <VirtualHost *:6200>
        ServerName object-server
        WSGIDaemonProcess object-server processes=5 threads=1
        WSGIProcessGroup object-server
        WSGIScriptAlias / /var/www/swift/object-server.wsgi
        LimitRequestFields 200
        ErrorLog /var/log/apache2/object-server
        LogLevel debug
        CustomLog /var/log/apache2/access.log combined
    </VirtualHost>

    #Container Service
    NameVirtualHost *:6201
    Listen 6201
    <VirtualHost *:6201>
        ServerName container-server
        WSGIDaemonProcess container-server processes=5 threads=1
        WSGIProcessGroup container-server
        WSGIScriptAlias / /var/www/swift/container-server.wsgi
        LimitRequestFields 200
        ErrorLog /var/log/apache2/container-server
        LogLevel debug
        CustomLog /var/log/apache2/access.log combined
    </VirtualHost>

    #Account Service
    NameVirtualHost *:6202
    Listen 6202
    <VirtualHost *:6202>
        ServerName account-server
        WSGIDaemonProcess account-server processes=5 threads=1
        WSGIProcessGroup account-server
        WSGIScriptAlias / /var/www/swift/account-server.wsgi
        LimitRequestFields 200
        ErrorLog /var/log/apache2/account-server
        LogLevel debug
        CustomLog /var/log/apache2/access.log combined
    </VirtualHost>

Next, stop Apache2 and start it again (apache2ctl restart is not enough)::

    apache2ctl stop
    apache2ctl start

Edit the tests config file and add::

    web_front_end = apache2
    normalized_urls = True

Also check to see that the file includes max_file_size of the same value as used for the LimitRequestBody in the apache config file above.
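Before running the functional tests, it can be useful to confirm that the Apache2 front end is answering on the proxy port. A minimal sketch (this snippet is illustrative and assumes the ``healthcheck`` middleware is present in your proxy pipeline)::

    # Python 3 example; adjust host/port to match your VirtualHost above
    import urllib.request

    resp = urllib.request.urlopen('http://127.0.0.1:8080/healthcheck')
    print(resp.getcode(), resp.read())  # expect: 200 b'OK'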
We are done. You may run the functional tests to verify the setup - e.g.::

    cd ~swift/swift
    ./.functests

swift-2.17.0/doc/source/overview_reaper.rst0000666000175100017510000001100113236061617020764 0ustar zuulzuul00000000000000==================
The Account Reaper
==================

The Account Reaper removes data from deleted accounts in the background.

An account is marked for deletion by a reseller issuing a DELETE request on the account's storage URL. This simply puts the value DELETED into the status column of the account_stat table in the account database (and replicas), indicating the data for the account should be deleted later. There is normally no set retention time and no undelete; it is assumed the reseller will implement such features and only call DELETE on the account once it is truly desired the account's data be removed. However, in order to protect the Swift cluster accounts from an improper or mistaken delete request, you can set a delay_reaping value in the [account-reaper] section of the account-server.conf to delay the actual deletion of data. At this time, there is no utility to undelete an account; one would have to update the account database replicas directly, setting the status column to an empty string and updating the put_timestamp to be greater than the delete_timestamp. (On the TODO list is writing a utility to perform this task, preferably through a REST call.)

The account reaper runs on each account server and scans the server occasionally for account databases marked for deletion. It will only trigger on accounts that server is the primary node for, so that multiple account servers aren't all trying to do the same work at the same time. Using multiple servers to delete one account might improve deletion speed, but requires coordination so they aren't duplicating effort. Speed really isn't as much of a concern with data deletion, and large accounts aren't deleted that often.

The deletion process for an account itself is pretty straightforward. For each container in the account, each object is deleted and then the container is deleted. Any deletion requests that fail won't stop the overall process, but will cause the overall process to fail eventually (for example, if an object delete times out, the container won't be able to be deleted later and therefore the account won't be deleted either). The overall process continues even on a failure so that it doesn't get hung up reclaiming cluster space because of one troublesome spot. The account reaper will keep trying to delete an account until it eventually becomes empty, at which point the database reclaim process within the db_replicator will eventually remove the database files.

Sometimes a persistent error state can prevent some object or container from being deleted. If this happens, you will see a message such as "Account <name> has not been reaped since <date>" in the log. You can control when this is logged with the reap_warn_after value in the [account-reaper] section of the account-server.conf file. By default this is 30 days.

-------
History
-------

At first, a simple approach of deleting an account through completely external calls was considered as it required no changes to the system. All data would simply be deleted in the same way the actual user would, through the public REST API. However, the downside was that it would use proxy resources and log everything when it didn't really need to. Also, it would likely need a dedicated server or two, just for issuing the delete requests.
A completely bottom-up approach was also considered, where the object and container servers would occasionally scan the data they held and check if the account was deleted, removing the data if so. The upside was the speed of reclamation with no impact on the proxies or logging, but the downside was that nearly 100% of the scanning would result in no action, creating a lot of I/O load for no reason.

A more container server centric approach was also considered, where the account server would mark all the containers for deletion and the container servers would delete the objects in each container and then themselves. This has the benefit of still speedy reclamation for accounts with a lot of containers, but has the downside of a pretty big load spike. The process could be slowed down to alleviate the load spike possibility, but then the benefit of speedy reclamation is lost and what's left is just a more complex process. Also, scanning all the containers for those marked for deletion when the majority wouldn't be seemed wasteful. The db_replicator could do this work while performing its replication scan, but it would have to spawn and track deletion processes which seemed needlessly complex.

In the end, an account server centric approach seemed best, as described above.

swift-2.17.0/doc/source/crossdomain.rst0000666000175100017510000000371513236061617020116 0ustar zuulzuul00000000000000========================
Cross-domain Policy File
========================

A cross-domain policy file allows web pages hosted elsewhere to use client-side technologies such as Flash, Java and Silverlight to interact with the Swift API.

See http://www.adobe.com/devnet/articles/crossdomain_policy_file_spec.html for a description of the purpose and structure of the cross-domain policy file. The cross-domain policy file is installed in the root of a web server (i.e., the path is /crossdomain.xml).

The crossdomain middleware responds to a path of /crossdomain.xml with an XML document such as::

    <?xml version="1.0"?>
    <!DOCTYPE cross-domain-policy SYSTEM "http://www.adobe.com/xml/dtds/cross-domain-policy.dtd" >
    <cross-domain-policy>
        <allow-access-from domain="*" secure="false" />
    </cross-domain-policy>

You should use a policy appropriate to your site. The examples and the default policy are provided to indicate how to syntactically construct a cross domain policy file -- they are not recommendations.

-------------
Configuration
-------------

To enable this middleware, add it to the pipeline in your proxy-server.conf file. It should be added before any authentication (e.g., tempauth or keystone) middleware. In this example, ellipses (...) indicate other middleware you may have chosen to use::

    [pipeline:main]
    pipeline = ... crossdomain ... authtoken ... proxy-server

And add a filter section, such as::

    [filter:crossdomain]
    use = egg:swift#crossdomain
    cross_domain_policy = <allow-access-from domain="*.example.com" />
        <allow-access-from domain="www.example.com" secure="false" />

For continuation lines, put some whitespace before the continuation text. Ensure you put a completely blank line to terminate the cross_domain_policy value.

The cross_domain_policy name/value is optional. If omitted, the policy defaults as if you had specified::

    cross_domain_policy = <allow-access-from domain="*" secure="false" />

swift-2.17.0/doc/source/associated_projects.rst0000666000175100017510000001463613236061617021625 0ustar zuulzuul00000000000000.. _associated_projects:

Associated Projects
===================

.. _application-bindings:

Application Bindings
--------------------

* OpenStack supported binding:

  * `Python-SwiftClient `_

* Unofficial libraries and bindings:

  * `PHP-opencloud `_ - Official Rackspace PHP bindings that should work for other Swift deployments too.
  * `PyRAX `_ - Official Rackspace Python bindings for CloudFiles that should work for other Swift deployments too.
  * `openstack.net `_ - Official Rackspace .NET bindings that should work for other Swift deployments too.
  * `RSwift `_ - R API bindings.
  * `Go language bindings `_
  * `supload `_ - Bash script to upload file to cloud storage based on OpenStack Swift API.
  * `libcloud `_ - Apache Libcloud - a unified interface in Python for different clouds with OpenStack Swift support.
  * `SwiftBox `_ - C# library using RestSharp
  * `jclouds `_ - Java library offering bindings for all OpenStack projects
  * `java-openstack-swift `_ - Java bindings for OpenStack Swift
  * `swift_client `_ - Small but powerful Ruby client to interact with OpenStack Swift
  * `nightcrawler_swift `_ - This Ruby gem teleports your assets to an OpenStack Swift bucket/container
  * `swift storage `_ - Simple OpenStack Swift storage client.
  * `javaswift `_ - Collection of Java tools for Swift

Authentication
--------------

* `Keystone `_ - Official Identity Service for OpenStack.
* `Swauth `_ - An alternative Swift authentication service that only requires Swift itself.
* `Basicauth `_ - HTTP Basic authentication support (keystone backed).

Command Line Access
-------------------

* `Swiftly `_ - Alternate command line access to Swift with direct (no proxy) access capabilities as well.

Log Processing
--------------

* `Slogging `_ - Basic stats and logging tools.

Monitoring & Statistics
-----------------------

* `Swift Informant `_ - Swift Proxy Middleware to send events to a statsd instance.
* `Swift Inspector `_ - Swift middleware to relay information about a request back to the client.

Content Distribution Network Integration
----------------------------------------

* `SOS `_ - Swift Origin Server.

Alternative API
---------------

* `Swift3 `_ - Amazon S3 API emulation.
* `CDMI `_ - CDMI support
* `SwiftHLM `_ - a middleware for using OpenStack Swift with tape and other high latency media storage backends

Benchmarking/Load Generators
----------------------------

* `getput `_ - getput tool suite
* `COSbench `_ - COSbench tool suite
* `ssbench `_ - ssbench tool suite

.. _custom-logger-hooks-label:

Custom Logger Hooks
-------------------

* `swift-sentry `_ - Sentry exception reporting for Swift

Storage Backends (DiskFile API implementations)
-----------------------------------------------

* `Swift-on-File `_ - Enables objects created using Swift API to be accessed as files on a POSIX filesystem and vice versa.
* `swift-ceph-backend `_ - Ceph RADOS object server implementation for Swift.
* `kinetic-swift `_ - Seagate Kinetic Drive as backend for Swift
* `swift-scality-backend `_ - Scality sproxyd object server implementation for Swift.

Developer Tools
---------------

* `SAIO bash scripts `_ - Well commented simple bash scripts for Swift all in one setup.
* `vagrant-swift-all-in-one `_ - Quickly set up a standard development environment using Vagrant and Chef cookbooks in an Ubuntu virtual machine.
* `SAIO Ansible playbook `_ - Quickly set up a standard development environment using Vagrant and Ansible in a Fedora virtual machine (with built-in `Swift-on-File `_ support).

Other
-----

* `Glance `_ - Provides services for discovering, registering, and retrieving virtual machine images (for OpenStack Compute [Nova], for example).
* `Better Staticweb `_ - Makes swift containers accessible by default.
* `Django Swiftbrowser `_ - Simple Django web app to access OpenStack Swift.
* `Swift-account-stats `_ - Swift-account-stats is a tool to report statistics on Swift usage at tenant and global levels.
* `PyECLib `_ - High Level Erasure Code library used by Swift * `liberasurecode `_ - Low Level Erasure Code library used by PyECLib * `Swift Browser `_ - JavaScript interface for Swift * `swift-ui `_ - OpenStack Swift web browser * `Swift Durability Calculator `_ - Data Durability Calculation Tool for Swift * `swiftbackmeup `_ - Utility that allows one to create backups and upload them to OpenStack Swift * `Multi Swift `_ - Bash scripts to spin up multiple Swift clusters sharing the same hardware swift-2.17.0/doc/source/conf.py0000666000175100017510000001775313236061617016351 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # Copyright (c) 2010-2012 OpenStack Foundation. # # Swift documentation build configuration file, created by # sphinx-quickstart on Tue May 18 13:50:15 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import datetime import logging import os from swift import __version__ import sys # NOTE(amotoki): Our current doc build job uses an older version of # liberasurecode which comes from Ubuntu 16.04. # pyeclib emits a warning message if liberasurecode <1.3.1 is used [1] and # this causes the doc build failure if warning-is-error is enabled in Sphinx. # As a workaround we suppress the warning message from pyeclib until we use # a newer version of liberasurecode in our doc build job. # [1] https://github.com/openstack/pyeclib/commit/d163972b logging.getLogger('pyeclib').setLevel(logging.ERROR) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.extend([os.path.abspath('../swift'), os.path.abspath('..'), os.path.abspath('../bin')]) # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'openstackdocstheme'] todo_include_todos = True # Add any paths that contain templates here, relative to this directory. # templates_path = [] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Swift' copyright = u'%d, OpenStack Foundation' % datetime.datetime.now().year # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. 
version = __version__.rsplit('.', 1)[0] # The full version, including alpha/beta/rc tags. release = __version__ # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. # unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = [] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. show_authors = True # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['swift.'] # -- Options for HTML output ----------------------------------------------- # The theme to use for HTML and HTML Help pages. Major themes that come with # Sphinx are currently 'default' and 'sphinxdoc'. # html_theme = 'default' # html_theme_path = ["."] html_theme = 'openstackdocs' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # " v documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # html_static_path = ['_static'] # Add any paths that contain "extra" files, such as .htaccess or # robots.txt. html_extra_path = ['_extra'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' html_last_updated_fmt = '%Y-%m-%d %H:%M' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_use_modindex = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. 
# html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, an OpenSearch description file will be output, and all pages will # contain a tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = '' # Output file base name for HTML help builder. htmlhelp_basename = 'swiftdoc' # -- Options for LaTeX output ------------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ ('index', 'Swift.tex', u'Swift Documentation', u'Swift Team', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_use_modindex = True # -- Options for openstackdocstheme ------------------------------------------- repository_name = 'openstack/swift' bug_project = 'swift' bug_tag = '' swift-2.17.0/doc/source/middleware.rst0000666000175100017510000001204313236061617017704 0ustar zuulzuul00000000000000.. _common_middleware: ********** Middleware ********** Account Quotas ============== .. automodule:: swift.common.middleware.account_quotas :members: :show-inheritance: .. _bulk: Bulk Operations (Delete and Archive Auto Extraction) ==================================================== .. automodule:: swift.common.middleware.bulk :members: :show-inheritance: .. _catch_errors: CatchErrors ============= .. automodule:: swift.common.middleware.catch_errors :members: :show-inheritance: CNAME Lookup ============ .. automodule:: swift.common.middleware.cname_lookup :members: :show-inheritance: .. _container-quotas: Container Quotas ================ .. automodule:: swift.common.middleware.container_quotas :members: :show-inheritance: .. _container-sync: Container Sync Middleware ========================= .. automodule:: swift.common.middleware.container_sync :members: :show-inheritance: Cross Domain Policies ===================== .. automodule:: swift.common.middleware.crossdomain :members: :show-inheritance: .. _discoverability: Discoverability =============== Swift will by default provide clients with an interface providing details about the installation. Unless disabled (i.e ``expose_info=false`` in :ref:`proxy-server-config`), a GET request to ``/info`` will return configuration data in JSON format. An example response:: {"swift": {"version": "1.11.0"}, "staticweb": {}, "tempurl": {}} This would signify to the client that swift version 1.11.0 is running and that staticweb and tempurl are available in this installation. There may be administrator-only information available via ``/info``. To retrieve it, one must use an HMAC-signed request, similar to TempURL. The signature may be produced like so:: swift tempurl GET 3600 /info secret 2>/dev/null | sed s/temp_url/swiftinfo/g Domain Remap ============ .. 
automodule:: swift.common.middleware.domain_remap :members: :show-inheritance: Dynamic Large Objects ===================== DLO support centers around a user specified filter that matches segments and concatenates them together in object listing order. Please see the DLO docs for :ref:`dlo-doc` further details. .. _encryption: Encryption ========== Encryption middleware should be deployed in conjunction with the :ref:`keymaster` middleware. .. automodule:: swift.common.middleware.crypto :members: :show-inheritance: .. automodule:: swift.common.middleware.crypto.encrypter :members: :show-inheritance: .. automodule:: swift.common.middleware.crypto.decrypter :members: :show-inheritance: .. _formpost: FormPost ======== .. automodule:: swift.common.middleware.formpost :members: :show-inheritance: .. _gatekeeper: GateKeeper ========== .. automodule:: swift.common.middleware.gatekeeper :members: :show-inheritance: .. _healthcheck: Healthcheck =========== .. automodule:: swift.common.middleware.healthcheck :members: :show-inheritance: .. _keymaster: Keymaster ========= Keymaster middleware should be deployed in conjunction with the :ref:`encryption` middleware. .. automodule:: swift.common.middleware.crypto.keymaster :members: :show-inheritance: .. _keystoneauth: KeystoneAuth ============ .. automodule:: swift.common.middleware.keystoneauth :members: :show-inheritance: .. _list_endpoints: List Endpoints ============== .. automodule:: swift.common.middleware.list_endpoints :members: :show-inheritance: Memcache ======== .. automodule:: swift.common.middleware.memcache :members: :show-inheritance: Name Check (Forbidden Character Filter) ======================================= .. automodule:: swift.common.middleware.name_check :members: :show-inheritance: .. _versioned_writes: Object Versioning ================= .. automodule:: swift.common.middleware.versioned_writes :members: :show-inheritance: Proxy Logging ============= .. automodule:: swift.common.middleware.proxy_logging :members: :show-inheritance: Ratelimit ========= .. automodule:: swift.common.middleware.ratelimit :members: :show-inheritance: .. _recon: Recon =========== .. automodule:: swift.common.middleware.recon :members: :show-inheritance: .. _copy: Server Side Copy ================ .. automodule:: swift.common.middleware.copy :members: :show-inheritance: Static Large Objects ==================== Please see the SLO docs for :ref:`slo-doc` further details. .. _staticweb: StaticWeb ========= .. automodule:: swift.common.middleware.staticweb :members: :show-inheritance: .. _symlink: Symlink ======= .. automodule:: swift.common.middleware.symlink :members: :show-inheritance: .. _common_tempauth: TempAuth ======== .. automodule:: swift.common.middleware.tempauth :members: :show-inheritance: .. _tempurl: TempURL ======= .. automodule:: swift.common.middleware.tempurl :members: :show-inheritance: XProfile ============== .. automodule:: swift.common.middleware.xprofile :members: :show-inheritance: swift-2.17.0/doc/source/db.rst0000666000175100017510000000056313236061617016160 0ustar zuulzuul00000000000000.. _account_and_container_db: *************************** Account DB and Container DB *************************** .. _db: DB == .. automodule:: swift.common.db :members: :undoc-members: :show-inheritance: .. _db-replicator: DB replicator ============= .. 
automodule:: swift.common.db_replicator :members: :undoc-members: :show-inheritance: swift-2.17.0/doc/source/proxy.rst0000666000175100017510000000131113236061617016744 0ustar zuulzuul00000000000000.. _proxy: ***** Proxy ***** .. _proxy-controllers: Proxy Controllers ================= Base ~~~~ .. automodule:: swift.proxy.controllers.base :members: :undoc-members: :show-inheritance: Account ~~~~~~~ .. automodule:: swift.proxy.controllers.account :members: :undoc-members: :show-inheritance: Container ~~~~~~~~~ .. automodule:: swift.proxy.controllers.container :members: :undoc-members: :show-inheritance: Object ~~~~~~ .. automodule:: swift.proxy.controllers.obj :members: :undoc-members: :show-inheritance: .. _proxy-server: Proxy Server ============ .. automodule:: swift.proxy.server :members: :undoc-members: :show-inheritance: swift-2.17.0/doc/source/ring_partpower.rst0000666000175100017510000001647513236061617020646 0ustar zuulzuul00000000000000============================== Modifying Ring Partition Power ============================== The ring partition power determines the on-disk location of data files and is selected when creating a new ring. In normal operation, it is a fixed value. This is because a different partition power results in a different on-disk location for all data files. However, increasing the partition power by 1 can be done by choosing locations that are on the same disk. As a result, we can create hard-links for both the new and old locations, avoiding data movement without impacting availability. To enable a partition power change without interrupting user access, object servers need to be aware of it in advance. Therefore a partition power change needs to be done in multiple steps. .. note:: Do not increase the partition power on account and container rings. Increasing the partition power is *only* supported for object rings. Trying to increase the part_power for account and container rings *will* result in unavailability, maybe even data loss. ------- Caveats ------- Before increasing the partition power, consider the possible drawbacks. There are a few caveats when increasing the partition power: * All hashes.pkl files will become invalid once hard links are created, and the replicators will need significantly more time on the first run after finishing the partition power increase. * Object replicators will skip partitions during the partition power increase. Replicators are not aware of hard-links, and would simply copy the content; this would result in heavy data movement and the worst case would be that all data is stored twice. * Due to the fact that each object will now be hard linked from two locations, many more inodes will be used - expect around twice the amount. You need to check the free inode count *before* increasing the partition power. * Also, object auditors might read each object twice before cleanup removes the second hard link. * Due to the new inodes more memory is needed to cache them, and your object servers should have plenty of available memory to avoid running out of inode cache. Setting ``vfs_cache_pressure`` to 1 might help with that. * All nodes in the cluster *must* run at least Swift version 2.13.0 or later. Due to these caveats you should only increase the partition power if really needed, i.e. if the number of partitions per disk is extremely low and the data is distributed unevenly across disks. ----------------------------------- 1. 
Prepare partition power increase ----------------------------------- The swift-ring-builder is used to prepare the ring for an upcoming partition power increase. It will store a new variable ``next_part_power`` with the current partition power + 1. Object servers recognize this, and hard links to the new location will be created (or deleted) on every PUT or DELETE. This will make it possible to access newly written objects using the future partition power:: swift-ring-builder prepare_increase_partition_power swift-ring-builder write_ring Now you need to copy the updated .ring.gz to all nodes. Already existing data needs to be relinked too; therefore an operator has to run a relinker command on all object servers in this phase:: swift-object-relinker relink .. note:: Start relinking after *all* the servers re-read the modified ring files, which normally happens within 15 seconds after writing a modified ring. Also, make sure the modified rings are pushed to all nodes running object services (replicators, reconstructors and reconcilers)- they have to skip partitions during relinking. Relinking might take some time; while there is no data copied or actually moved, the tool still needs to walk the whole file system and create new hard links as required. --------------------------- 2. Increase partition power --------------------------- Now that all existing data can be found using the new location, it's time to actually increase the partition power itself:: swift-ring-builder increase_partition_power swift-ring-builder write_ring Now you need to copy the updated .ring.gz again to all nodes. Object servers are now using the new, increased partition power and no longer create additional hard links. .. note:: The object servers will create additional hard links for each modified or new object, and this requires more inodes. .. note:: If you decide you don't want to increase the partition power, you should instead cancel the increase. It is not possible to revert this operation once started. To abort the partition power increase, execute the following commands, copy the updated .ring.gz files to all nodes and continue with `3. Cleanup`_ afterwards:: swift-ring-builder cancel_increase_partition_power swift-ring-builder write_ring ---------- 3. Cleanup ---------- Existing hard links in the old locations need to be removed, and a cleanup tool is provided to do this. Run the following command on each storage node:: swift-object-relinker cleanup .. note:: The cleanup must be finished within your object servers reclaim_age period (which is by default 1 week). Otherwise objects that have been overwritten between step #1 and step #2 and deleted afterwards can't be cleaned up anymore. Afterwards it is required to update the rings one last time to inform servers that all steps to increase the partition power are done, and replicators should resume their job:: swift-ring-builder finish_increase_partition_power swift-ring-builder write_ring Now you need to copy the updated .ring.gz again to all nodes. ---------- Background ---------- An existing object that is currently located on partition X will be placed either on partition 2*X or 2*X+1 after the partition power is increased. The reason for this is the Ring.get_part() method, that does a bitwise shift to the right. To avoid actual data movement to different disks or even nodes, the allocation of partitions to nodes needs to be changed. The allocation is pairwise due to the above mentioned new partition scheme. 
Therefore devices are allocated like this, with the partition being the index and the value being the device id::

    old        new
    part  dev  part  dev
    ----  ---  ----  ---
     0     0    0     0
                1     0
     1     3    2     3
                3     3
     2     7    4     7
                5     7
     3     5    6     5
                7     5
     4     2    8     2
                9     2
     5     1   10     1
               11     1

There is a helper method to compute the new path, and the following example shows the mapping between old and new location::

    >>> from swift.common.utils import replace_partition_in_path
    >>> old='objects/16003/a38/fa0fcec07328d068e24ccbf2a62f2a38/1467658208.57179.data'
    >>> replace_partition_in_path(old, 14)
    'objects/16003/a38/fa0fcec07328d068e24ccbf2a62f2a38/1467658208.57179.data'
    >>> replace_partition_in_path(old, 15)
    'objects/32007/a38/fa0fcec07328d068e24ccbf2a62f2a38/1467658208.57179.data'

Using the original partition power (14) it returned the same path; however after an increase to 15 it returns the new path, and the new partition is 2*X+1 in this case.

swift-2.17.0/doc/source/overview_expiring_objects.rst0000666000175100017510000000663713236061617023057 0ustar zuulzuul00000000000000=======================
Expiring Object Support
=======================

The ``swift-object-expirer`` offers scheduled deletion of objects. The Swift client would use the ``X-Delete-At`` or ``X-Delete-After`` headers during an object ``PUT`` or ``POST`` and the cluster would automatically quit serving that object at the specified time and would shortly thereafter remove the object from the system.

The ``X-Delete-At`` header takes a Unix Epoch timestamp, in integer form; for example: ``1317070737`` represents ``Mon Sep 26 20:58:57 2011 UTC``.

The ``X-Delete-After`` header takes a positive integer number of seconds. The proxy server that receives the request will convert this header into an ``X-Delete-At`` header using the request timestamp plus the value given. If both the ``X-Delete-At`` and ``X-Delete-After`` headers are sent with a request then the ``X-Delete-After`` header will take precedence.

As expiring objects are added to the system, the object servers will record the expirations in a hidden ``.expiring_objects`` account for the ``swift-object-expirer`` to handle later.

Usually, just one instance of the ``swift-object-expirer`` daemon needs to run for a cluster. This isn't exactly automatic failover high availability, but if this daemon doesn't run for a few hours it should not be any real issue. The expired-but-not-yet-deleted objects will still ``404 Not Found`` if someone tries to ``GET`` or ``HEAD`` them and they'll just be deleted a bit later when the daemon is restarted.

By default, the ``swift-object-expirer`` daemon will run with a concurrency of 1. Increase this value to get more concurrency. A concurrency of 1 may not be enough to delete expiring objects in a timely fashion for a particular Swift cluster.

It is possible to run multiple daemons to do different parts of the work if a single process with a concurrency of more than 1 is not enough (see the sample config file for details).

To run the ``swift-object-expirer`` as multiple processes, set ``processes`` to the number of processes (either in the config file or on the command line). Then run one process for each part. Use ``process`` to specify the part of the work to be done by a process using the command line or the config. So, for example, if you'd like to run three processes, set ``processes`` to 3 and run three processes with ``process`` set to 0, 1, and 2 for the three processes.
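For example, the three-process arrangement just described might be started as three long-running commands, one per part (a sketch; it assumes the standard daemon invocation with the config file as the first argument)::

    swift-object-expirer /etc/swift/object-expirer.conf --processes 3 --process 0
    swift-object-expirer /etc/swift/object-expirer.conf --processes 3 --process 1
    swift-object-expirer /etc/swift/object-expirer.conf --processes 3 --process 2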
If multiple processes are used, it's necessary to run one for each part of the work or that part of the work will not be done.

The daemon uses the ``/etc/swift/object-expirer.conf`` by default, and here is a quick sample conf file::

    [DEFAULT]
    # swift_dir = /etc/swift
    # user = swift
    # You can specify default log routing here if you want:
    # log_name = swift
    # log_facility = LOG_LOCAL0
    # log_level = INFO

    [object-expirer]
    interval = 300

    [pipeline:main]
    pipeline = catch_errors cache proxy-server

    [app:proxy-server]
    use = egg:swift#proxy
    # See proxy-server.conf-sample for options

    [filter:cache]
    use = egg:swift#memcache
    # See proxy-server.conf-sample for options

    [filter:catch_errors]
    use = egg:swift#catch_errors
    # See proxy-server.conf-sample for options

The daemon needs to run on a machine with access to all the backend servers in the cluster, but does not need proxy server or public access. The daemon will use its own internal proxy code instance to access the backend servers.

swift-2.17.0/doc/source/overview_erasure_code.rst0000666000175100017510000012642613236061617022164 0ustar zuulzuul00000000000000====================
Erasure Code Support
====================

*******************************
History and Theory of Operation
*******************************

There's a lot of good material out there on Erasure Code (EC) theory; this short introduction is just meant to provide some basic context to help the reader better understand the implementation in Swift.

Erasure Coding for storage applications grew out of Coding Theory as far back as the 1960s with the Reed-Solomon codes. These codes have been used for years in applications ranging from CDs to DVDs to general communications and, yes, even in the space program starting with Voyager! The basic idea is that some amount of data is broken up into smaller pieces called fragments and coded in such a way that it can be transmitted with the ability to tolerate the loss of some number of the coded fragments. That's where the word "erasure" comes in: if you transmit 14 fragments and only 13 are received, then one of them is said to be "erased". The word "erasure" provides an important distinction with EC; it isn't about detecting errors, it's about dealing with failures. Another important element of EC is that the number of erasures that can be tolerated can be adjusted to meet the needs of the application.

At a high level EC works by using a specific scheme to break up a single data buffer into several smaller data buffers then, depending on the scheme, performing some encoding operation on that data in order to generate additional information. So you end up with more data than you started with and that extra data is often called "parity". Note that there are many, many different encoding techniques that vary both in how they organize and manipulate the data as well by what means they use to calculate parity. For example, one scheme might rely on `Galois Field Arithmetic `_ while others may work with only XOR. The number of variations and details about their differences are well beyond the scope of this introduction, but we will talk more about a few of them when we get into the implementation of EC in Swift.

Overview of EC Support in Swift
================================

First and foremost, from an application perspective EC support is totally transparent. There is no EC-related external API; a container is simply created using a Storage Policy defined to use EC and then interaction with the cluster is the same as any other durability policy.
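For example, the only EC-specific step a client ever takes is naming the policy when the container is created. A minimal sketch using python-swiftclient, assuming an EC policy named ``ec104`` has been defined in `swift.conf` as shown later in this document (the endpoint and credentials are illustrative)::

    from swiftclient import client as swiftclient

    conn = swiftclient.Connection(
        authurl='http://127.0.0.1:8080/auth/v1.0',  # illustrative endpoint
        user='test:tester', key='testing')

    # Select the EC policy once, at container creation time ...
    conn.put_container('cold-storage',
                       headers={'X-Storage-Policy': 'ec104'})

    # ... then read and write exactly as with any other policy.
    conn.put_object('cold-storage', 'archive.tar', contents=b'backup data')
    data = conn.get_object('cold-storage', 'archive.tar')[1]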
EC is implemented in Swift as a Storage Policy; see :doc:`overview_policies` for complete details on Storage Policies.

Because support is implemented as a Storage Policy, all of the storage devices associated with your cluster's EC capability can be isolated. It is entirely possible to share devices between storage policies, but for EC it may make more sense to not only use separate devices but possibly even entire nodes dedicated for EC.

Which direction one chooses depends on why the EC policy is being deployed. If, for example, there is a production replication policy in place already and the goal is to add a cold storage tier such that the existing nodes performing replication are impacted as little as possible, adding a new set of nodes dedicated to EC might make the most sense but also incurs the most cost. On the other hand, if EC is being added as a capability to provide additional durability for a specific set of applications and the existing infrastructure is well suited for EC (sufficient number of nodes, zones for the EC scheme that is chosen) then leveraging the existing infrastructure such that the EC ring shares nodes with the replication ring makes the most sense.

These are some of the main considerations:

* Layout of existing infrastructure.
* Cost of adding dedicated EC nodes (or just dedicated EC devices).
* Intended usage model(s).

The Swift code base does not include any of the algorithms necessary to perform the actual encoding and decoding of data; that is left to external libraries. The Storage Policies architecture is leveraged to enable EC on a per-container basis -- the object rings are still used to determine the placement of EC data fragments. Although there are several code paths that are unique to an operation associated with an EC policy, an external dependency on an Erasure Code library is what Swift counts on to perform the low-level EC functions. The use of an external library allows for maximum flexibility as there are a significant number of options out there, each with its own pros and cons that can vary greatly from one use case to another.

PyECLib: External Erasure Code Library
======================================

PyECLib is a Python Erasure Coding Library originally designed and written as part of the effort to add EC support to the Swift project; however, it is an independent project. The library provides a well-defined and simple Python interface and internally implements a plug-in architecture allowing it to take advantage of many well-known C libraries such as:

* Jerasure and GFComplete at http://jerasure.org.
* Intel(R) ISA-L at http://01.org/intel%C2%AE-storage-acceleration-library-open-source-version.
* Or write your own!

PyECLib uses a C-based library called liberasurecode to implement the plug-in infrastructure; liberasurecode is available at:

* liberasurecode: https://github.com/openstack/liberasurecode

PyECLib itself therefore allows for not only choice but further extensibility as well. PyECLib also comes with a handy utility to help determine the best algorithm to use based on the equipment that will be used (processors and server configurations may vary in performance per algorithm). More on this will be covered in the configuration section. PyECLib is included as a Swift requirement.

For complete details see `PyECLib `_

Storing and Retrieving Objects
==============================

We will discuss the details of how PUT and GET work in the "Under the Hood" section later on.
The key point here is that all of the erasure code work goes on behind the scenes; this summary is only a high-level overview.

The PUT flow looks like this:

#. The proxy server streams in an object and buffers up "a segment" of data (size is configurable).
#. The proxy server calls on PyECLib to encode the data into smaller fragments.
#. The proxy streams the encoded fragments out to the storage nodes based on ring locations.
#. Repeat until the client is done sending data.
#. The client is notified of completion when a quorum is met.

The GET flow looks like this:

#. The proxy server makes simultaneous requests to participating nodes.
#. As soon as the proxy has the fragments it needs, it calls on PyECLib to decode the data.
#. The proxy streams the decoded data it has back to the client.
#. Repeat until the proxy is done sending data back to the client.

It may sound like, from this high-level overview, that using EC is going to cause an explosion in the number of actual files stored in each node's local file system. Although it is true that more files will be stored (because an object is broken into pieces), the implementation works to minimize this where possible; more details are available in the Under the Hood section.

Handoff Nodes
=============

In EC policies, similarly to replication, handoff nodes are a set of storage nodes used to augment the list of primary nodes responsible for storing an erasure coded object. These handoff nodes are used in the event that one or more of the primaries are unavailable. Handoff nodes are still selected with an attempt to achieve maximum separation of the data being placed.

Reconstruction
==============

For an EC policy, reconstruction is analogous to the process of replication for a replication type policy -- essentially "the reconstructor" replaces "the replicator" for EC policy types. The basic framework of reconstruction is very similar to that of replication with a few notable exceptions:

* Because EC does not actually replicate partitions, it needs to operate at a finer granularity than what is provided with rsync; therefore EC leverages much of ssync behind the scenes (you do not need to manually configure ssync).
* Once a pair of nodes has determined the need to replace a missing object fragment, instead of pushing over a copy like replication would do, the reconstructor has to read in enough surviving fragments from other nodes and perform a local reconstruction before it has the correct data to push to the other node.
* A reconstructor does not talk to all other reconstructors in the set of nodes responsible for an EC partition; this would be far too chatty. Instead, each reconstructor is responsible for sync'ing with the partition's closest two neighbors (closest meaning left and right on the ring).

.. note::

    EC work (encode and decode) takes place both on the proxy nodes, for PUT/GET operations, as well as on the storage nodes for reconstruction. As with replication, reconstruction can be the result of rebalancing, bit-rot, drive failure or reverting data from a hand-off node back to its primary.

**************************
Performance Considerations
**************************

In general, EC has different performance characteristics than replicated data. EC requires substantially more CPU to read and write data, and is more suited for larger objects that are not frequently accessed (e.g. backups).
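The encode and decode steps from the PUT and GET flows above can be exercised directly against PyECLib when characterizing CPU cost. A minimal sketch using PyECLib's ``ECDriver`` with the same 10+4 Reed-Solomon scheme used as an example in the next section::

    from pyeclib.ec_iface import ECDriver

    # 10 data fragments plus 4 parity fragments
    driver = ECDriver(k=10, m=4, ec_type='liberasurecode_rs_vand')

    data = b'x' * 1048576            # one segment of object data
    fragments = driver.encode(data)  # 14 encoded fragments
    assert len(fragments) == 14

    # Any 10 of the 14 fragments are sufficient to recover the segment,
    # i.e. up to 4 "erasures" can be tolerated.
    assert driver.decode(fragments[4:]) == data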
Operators are encouraged to characterize the performance of various EC schemes
and share their observations with the developer community.

.. _using_ec_policy:

****************************
Using an Erasure Code Policy
****************************

To use an EC policy, the administrator simply needs to define an EC policy in
`swift.conf` and create/configure the associated object ring. An example of
how an EC policy can be set up is shown below::

    [storage-policy:2]
    name = ec104
    policy_type = erasure_coding
    ec_type = liberasurecode_rs_vand
    ec_num_data_fragments = 10
    ec_num_parity_fragments = 4
    ec_object_segment_size = 1048576

Let's take a closer look at each configuration parameter:

* ``name``: This is a standard storage policy parameter. See
  :doc:`overview_policies` for details.
* ``policy_type``: Set this to ``erasure_coding`` to indicate that this is an
  EC policy.
* ``ec_type``: Set this value according to the available options in the
  selected PyECLib back-end. This specifies the EC scheme that is to be used.
  For example the option shown here selects Vandermonde Reed-Solomon encoding
  while an option of ``flat_xor_hd_3`` would select Flat-XOR based HD
  combination codes. See the
  `PyECLib <https://github.com/openstack/pyeclib>`_ page for full details.
* ``ec_num_data_fragments``: The number of fragments that will be made up of
  data.
* ``ec_num_parity_fragments``: The number of fragments that will be made up of
  parity.
* ``ec_object_segment_size``: The amount of data that will be buffered up
  before feeding a segment into the encoder/decoder. The default value is
  1048576.

When PyECLib encodes an object, it will break it into N fragments. However,
what is important during configuration is how many of those are data and how
many are parity. So in the example above, PyECLib will actually break an
object into 14 different fragments, 10 of them made up of actual object data
and 4 of them made up of parity data (calculated according to the
``ec_type``).

When deciding which devices to use in the EC policy's object ring, be sure to
carefully consider the performance impacts. Running some performance
benchmarking in a test environment for your configuration is highly
recommended before deployment.

To create the EC policy's object ring, the only difference in the usage of the
``swift-ring-builder create`` command is the ``replicas`` parameter. The
``replicas`` value is the number of fragments spread across the object servers
associated with the ring; ``replicas`` must be equal to the sum of
``ec_num_data_fragments`` and ``ec_num_parity_fragments``. For example::

    swift-ring-builder object-1.builder create 10 14 1

Note that in this example the ``replicas`` value of ``14`` is based on the sum
of ``10`` EC data fragments and ``4`` EC parity fragments.

Once you have configured your EC policy in `swift.conf` and created your
object ring, your application is ready to start using EC simply by creating a
container with the specified policy name and interacting as usual.

.. note::

    It's important to note that once you have deployed a policy and have
    created objects with that policy, these configuration options cannot be
    changed. If a change in the configuration is desired, you must create a
    new policy and migrate the data to a new container.

.. warning::

    Using ``isa_l_rs_vand`` with more than 4 parity fragments creates
    fragments which may in some circumstances fail to reconstruct properly or
    (with liberasurecode < 1.3.1) reconstruct corrupted data.
    New policies that need large numbers of parity fragments should consider
    using ``isa_l_rs_cauchy``. Any existing affected policies must be marked
    deprecated, and data in containers with that policy should be migrated to
    a new policy.

Migrating Between Policies
==========================

A common usage of EC is to migrate less commonly accessed data from a more
expensive but lower latency policy such as replication. When an application
determines that it wants to move data from a replication policy to an EC
policy, it simply needs to move the data from the replicated container to an
EC container that was created with the target durability policy.

*********
Global EC
*********

The following recommendations are made when deploying an EC policy that spans
multiple regions in a :doc:`Global Cluster `:

* The global EC policy should use :ref:`ec_duplication` in conjunction with a
  :ref:`Composite Ring `, as described below.
* Proxy servers should be :ref:`configured to use read affinity ` to prefer
  reading from their local region for the global EC policy.
  :ref:`proxy_server_per_policy_config` allows this to be configured for
  individual policies.

.. note::

    Before deploying a Global EC policy, consideration should be given to the
    :ref:`global_ec_known_issues`, in particular the relatively poor
    performance anticipated from the object-reconstructor.

.. _ec_duplication:

EC Duplication
==============

EC Duplication enables Swift to make duplicated copies of fragments of erasure
coded objects. If an EC storage policy is configured with a non-default
``ec_duplication_factor`` of ``N > 1``, then the policy will create ``N``
duplicates of each unique fragment that is returned from the configured EC
engine.

Duplication of EC fragments is optimal for Global EC storage policies, which
require dispersion of fragment data across failure domains. Without fragment
duplication, common EC parameters will not distribute enough unique fragments
between large failure domains to allow for a rebuild using fragments from any
one domain. For example, a uniformly distributed ``10+4`` EC policy schema
would place 7 fragments in each of two failure domains, which is fewer in each
failure domain than the 10 fragments needed to rebuild a missing fragment.

Without fragment duplication, an EC policy schema must be adjusted to include
additional parity fragments in order to guarantee that the number of fragments
in each failure domain is greater than the number required to rebuild. For
example, a uniformly distributed ``10+18`` EC policy schema would place 14
fragments in each of two failure domains, which is more than sufficient in
each failure domain to rebuild a missing fragment. However, empirical testing
has shown that encoding a schema with ``num_parity > num_data`` (such as
``10+18``) is less efficient than using duplication of fragments. EC fragment
duplication enables Swift's Global EC to maintain more independence between
failure domains without sacrificing efficiency on read/write or rebuild!

The ``ec_duplication_factor`` option may be configured in `swift.conf` in each
``storage-policy`` section. The option may be omitted - the default value is
``1`` (i.e. no duplication)::

    [storage-policy:2]
    name = ec104
    policy_type = erasure_coding
    ec_type = liberasurecode_rs_vand
    ec_num_data_fragments = 10
    ec_num_parity_fragments = 4
    ec_object_segment_size = 1048576
    ec_duplication_factor = 2

.. warning::

    EC duplication is intended for use with Global EC policies.
    To ensure independent availability of data in all regions, the
    ``ec_duplication_factor`` option should only be used in conjunction with
    :ref:`composite_rings`, as described in this document.

In this example, a ``10+4`` schema and a duplication factor of ``2`` will
result in ``(10+4)x2 = 28`` fragments being stored (we will use the shorthand
``10+4x2`` to denote that policy configuration). The ring for this policy
should be configured with 28 replicas (i.e.
``(ec_num_data_fragments + ec_num_parity_fragments) * ec_duplication_factor``).

A ``10+4x2`` schema **can** allow a multi-region deployment to rebuild an
object to full durability even when *more* than 14 fragments are unavailable.
This is advantageous compared with a ``10+18`` configuration not only because
reads from data fragments will be more common and more efficient, but also
because a ``10+4x2`` can grow into a ``10+4x3`` to expand into another region.

EC duplication with composite rings
-----------------------------------

It is recommended that EC Duplication be used with :ref:`composite_rings` in
order to disperse duplicate fragments across regions.

When EC duplication is used, it is highly desirable to have one duplicate of
each fragment placed in each region. This ensures that a set of
``ec_num_data_fragments`` unique fragments (the minimum needed to reconstruct
an object) can always be assembled from a single region. This in turn means
that objects are robust in the event of an entire region becoming unavailable.

This can be achieved by using a :ref:`composite ring ` with the following
properties:

* The number of component rings in the composite ring is equal to the
  ``ec_duplication_factor`` for the policy.
* Each *component* ring has a number of ``replicas`` that is equal to the sum
  of ``ec_num_data_fragments`` and ``ec_num_parity_fragments``.
* Each component ring is populated with devices in a unique region.

This arrangement results in each component ring in the composite ring, and
therefore each region, having one copy of each fragment.

For example, consider a Swift cluster with two regions, ``region1`` and
``region2`` and a ``4+2x2`` EC policy schema. This policy should use a
composite ring with two component rings, ``ring1`` and ``ring2``, having
devices exclusively in regions ``region1`` and ``region2`` respectively. Each
component ring should have ``replicas = 6``. As a result, the first 6
fragments for an object will always be placed in ``ring1`` (i.e. in
``region1``) and the second 6 duplicate fragments will always be placed in
``ring2`` (i.e. in ``region2``).

Conversely, a conventional ring spanning the two regions may give a suboptimal
distribution of duplicates across the regions; it is possible for duplicates
of the same fragment to be placed in the same region, and consequently for
another region to have no copies of that fragment. This may make it impossible
to assemble a set of ``ec_num_data_fragments`` unique fragments from a single
region. For example, the conventional ring could have a pathologically
sub-optimal placement such as::

    r1
     #0#d.data
     #0#d.data
     #2#d.data
     #2#d.data
     #4#d.data
     #4#d.data
    r2
     #1#d.data
     #1#d.data
     #3#d.data
     #3#d.data
     #5#d.data
     #5#d.data

In this case, the object cannot be reconstructed from a single region;
``region1`` has only the fragments with index ``0, 2, 4`` and ``region2`` has
the other 3 indexes, but we need 4 unique indexes to be able to rebuild an
object.
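As an illustration of the recommended arrangement, the two component builders
for the ``4+2x2`` example above could be created and composed along the
following lines. This is only a sketch: the device addresses are hypothetical,
and the ``RingBuilder`` and ``compose_rings`` calls reflect our reading of
``swift.common.ring``, so verify the API against your Swift version:

.. code-block:: python

    # Sketch: build one 6-replica component builder per region, then
    # compose them into a single object ring for the 4+2x2 policy.
    from swift.common.ring import RingBuilder
    from swift.common.ring.composite_builder import compose_rings

    builders = []
    for region, builder_file in ((1, 'ring1.builder'), (2, 'ring2.builder')):
        # part_power=10, replicas=6 (= 4 data + 2 parity), min_part_hours=1
        rb = RingBuilder(10, 6, 1)
        for i in range(6):
            # Hypothetical device addresses; all devices in one region.
            rb.add_dev({'id': i, 'region': region, 'zone': i,
                        'ip': '10.0.%d.%d' % (region, i), 'port': 6200,
                        'device': 'sda', 'weight': 100})
        rb.rebalance()
        rb.save(builder_file)  # composition requires saved builders
        builders.append(rb)

    # The composite ring places one copy of every fragment in each region.
    ring_data = compose_rings(builders)
    ring_data.save('/etc/swift/object-2.ring.gz')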
Node Selection Strategy for Reads
---------------------------------

Proxy servers require a set of *unique* fragment indexes to decode the
original object when handling a GET request to an EC policy. With a
conventional EC policy, this is very likely to be the outcome of reading
fragments from a random selection of backend nodes. With an EC Duplication
policy it is significantly more likely that responses from a *random*
selection of backend nodes might include some duplicated fragments.

For this reason it is strongly recommended that EC Duplication always be
deployed in combination with :ref:`composite_rings` and :ref:`proxy server
read affinity `.

Under normal conditions with the recommended deployment, read affinity will
cause a proxy server to first attempt to read fragments from nodes in its
local region. These fragments are guaranteed to be unique with respect to each
other. Even if there are a small number of local failures, unique local parity
fragments will make up the difference. However, should enough local primary
storage nodes fail, such that sufficient unique fragments are not available in
the local region, a global EC cluster will proceed to read fragments from the
other region(s). Random reads from the remote region are not guaranteed to
return unique fragments; with EC Duplication there is a significant
probability that the proxy server will encounter a fragment that is a
duplicate of one it has already found in the local region. The proxy server
will ignore these and make additional requests until it accumulates the
required set of unique fragments, potentially searching all the primary and
handoff locations in the local and remote regions before ultimately failing
the read.

A global EC deployment configured as recommended is therefore extremely
resilient. However, under extreme failure conditions read handling can be
inefficient because nodes in other regions are guaranteed to have some
fragments which are duplicates of those the proxy server has already received.
Work is in progress to improve the proxy server node selection strategy such
that when it is necessary to read from other regions, nodes that are likely to
have useful fragments are preferred over those that are likely to return a
duplicate.

.. _global_ec_known_issues:

Known Issues
============

Efficient Cross Region Rebuild
------------------------------

Work is also in progress to improve the object-reconstructor efficiency for
Global EC policies. Unlike the proxy server, the reconstructor does not apply
any read affinity settings when gathering fragments. It is therefore likely to
receive duplicated fragments (i.e. make wasted backend GET requests) while
performing *every* fragment reconstruction.

Additionally, other reconstructor optimisations for Global EC are under
investigation:

* Since fragments are duplicated between regions it may in some cases be more
  attractive to restore failed fragments from their duplicates in another
  region instead of rebuilding them from other fragments in the local region.
* Conversely, to avoid WAN transfer it may be more attractive to rebuild
  fragments from local parity.
* During rebalance it will always be more attractive to revert a fragment from
  its old primary to its new primary rather than rebuilding or transferring a
  duplicate from the remote region.

**************
Under the Hood
**************

Now that we've explained a little about EC support in Swift and how to
configure and use it, let's explore how EC fits in at the nuts-n-bolts level.
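To make the core transformation concrete before we define terms, here is a
minimal sketch using PyECLib directly. This is an illustration only: the 4+2
parameters are hypothetical, and this is not the code path Swift itself uses
internally:

.. code-block:: python

    # A "segment" of buffered data is encoded into k + m "fragments";
    # any k distinct fragments are sufficient to recover the segment.
    import os

    from pyeclib.ec_iface import ECDriver

    driver = ECDriver(k=4, m=2, ec_type='liberasurecode_rs_vand')

    segment = os.urandom(1048576)       # one buffered segment
    fragments = driver.encode(segment)  # 4 data + 2 parity fragments

    # Drop two fragments and the segment still decodes successfully.
    recovered = driver.decode(fragments[:4])
    assert recovered == segment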
Terminology
===========

The term 'fragment' has been used already to describe the output of the EC
process (a series of fragments); however, we need to define some other key
terms here before going any deeper. Without paying special attention to using
the correct terms consistently, it is very easy to get confused in a hurry!

* **chunk**: HTTP chunks received over wire (term not used to describe any EC
  specific operation).
* **segment**: Not to be confused with SLO/DLO use of the word, in EC we call
  a segment a series of consecutive HTTP chunks buffered up before performing
  an EC operation.
* **fragment**: Data and parity 'fragments' are generated when erasure coding
  transformation is applied to a segment.
* **EC archive**: A concatenation of EC fragments; to a storage node this
  looks like an object.
* **ec_ndata**: Number of EC data fragments.
* **ec_nparity**: Number of EC parity fragments.

Middleware
==========

Middleware remains unchanged. For most middleware (e.g., SLO/DLO) the fact
that the proxy is fragmenting incoming objects is transparent. For list
endpoints, however, it is a bit different. A caller of list endpoints will get
back the locations of all of the fragments. The caller will be unable to
re-assemble the original object with this information, however the node
locations may still prove to be useful information for some applications.

On Disk Storage
===============

EC archives are stored on disk in their respective objects-N directory based
on their policy index. See :doc:`overview_policies` for details on per policy
directory information.

In addition to the object timestamp, the filenames of EC archives encode other
information related to the archive:

* The fragment archive index. This is required for a few reasons. For one, it
  allows us to store fragment archives of different indexes on the same
  storage node, which is not typical but possible in many circumstances.
  Without unique filenames for the different EC archive files in a set, we
  would be at risk of overwriting one archive of index `n` with another of
  index `m` in some scenarios. The index is appended to the filename just
  before the ``.data`` extension. For example, the filename for a fragment
  archive storing the 5th fragment would be::

      1418673556.92690#5.data

* The durable state of the archive. The meaning of this will be described in
  more detail later, but a fragment archive that is considered durable has an
  additional ``#d`` string included in its filename immediately before the
  ``.data`` extension. For example::

      1418673556.92690#5#d.data

A policy-specific transformation function is therefore used to build the
archive filename. These functions are implemented in the diskfile module as
methods of policy specific subclasses of ``BaseDiskFileManager``. The
transformation function for the replication policy is simply a NOP.

.. note::

    In older versions the durable state of an archive was represented by an
    additional file called the ``.durable`` file instead of the ``#d``
    substring in the ``.data`` filename. The ``.durable`` for the example
    above would be::

        1418673556.92690.durable

Proxy Server
============

High Level
----------

The Proxy Server handles Erasure Coding in a different manner than
replication, therefore there are several code paths unique to EC policies,
either through subclassing or simple conditionals. Taking a closer look at the
PUT and the GET paths will help make this clearer. But first, a high level
overview of how an object flows through the system:

.. image:: images/ec_overview.png
Note how:

* Incoming objects are buffered into segments at the proxy.
* Segments are erasure coded into fragments at the proxy.
* The proxy stripes fragments across participating nodes such that each
  on-disk file, which we call a fragment archive, is appended with each new
  fragment.

This scheme makes it possible to minimize the number of on-disk files given
our segmenting and fragmenting.

Multi-Phase Conversation
------------------------

Multi-part MIME document support is used to allow the proxy to engage in a
handshake conversation with the storage node for processing PUT requests. This
is required for a few different reasons.

#. From the perspective of the storage node, a fragment archive is really just
   another object; we need a mechanism to send down the original object etag
   after all fragment archives have landed.
#. Without introducing strong consistency semantics, the proxy needs a
   mechanism to know when a quorum of fragment archives have actually made it
   to disk before it can inform the client of a successful PUT.

MIME supports a conversation between the proxy and the storage nodes for every
PUT. This provides the ability to handle a PUT in one connection while
preserving the essence of a two-phase commit: the proxy communicates back to
the storage nodes once it has confirmation that a quorum of fragment archives
in the set have been written.

For the first phase of the conversation the proxy requires a quorum of
`ec_ndata + 1` fragment archives to be successfully put to storage nodes. This
ensures that the object could still be reconstructed even if one of the
fragment archives becomes unavailable. As described above, each fragment
archive file is named::

    <ts>#<frag_index>.data

where ``ts`` is the timestamp and ``frag_index`` is the fragment archive
index.

During the second phase of the conversation the proxy communicates a
confirmation to storage nodes that the fragment archive quorum has been
achieved. This causes each storage node to rename the fragment archive written
in the first phase of the conversation to include the substring ``#d`` in its
name::

    <ts>#<frag_index>#d.data

This indicates to the object server that this fragment archive is `durable`
and that there is a set of data files that are durable at timestamp ``ts``.

For the second phase of the conversation the proxy requires a quorum of
`ec_ndata + 1` successful commits on storage nodes. This ensures that there
are sufficient committed fragment archives for the object to be reconstructed
even if one becomes unavailable. The reconstructor ensures that the durable
state is replicated on storage nodes where it may be missing.

Note that the completion of the commit phase of the conversation is also a
signal for the object server to go ahead and immediately delete older
timestamp files for this object. This is critical as we do not want to delete
the older object until the storage node has confirmation from the proxy, via
the multi-phase conversation, that the other nodes have landed enough for a
quorum.

The basic flow looks like this:

* The Proxy Server erasure codes and streams the object fragments
  (ec_ndata + ec_nparity) to the storage nodes.
* The storage nodes store objects as EC archives and upon finishing object
  data/metadata write, send a 1st-phase response to proxy.
* Upon quorum of storage nodes responses, the proxy initiates 2nd-phase by
  sending commit confirmations to object servers.
* Upon receipt of commit message, object servers rename ``.data`` files to
  include the ``#d`` substring, indicating successful PUT, and send a final
  response to the proxy server.
* The proxy waits for `ec_ndata + 1` object servers to respond with a success
  (2xx) status before responding to the client with a successful status.

Here is a high level example of what the conversation looks like::

    proxy: PUT /p/a/c/o
           Transfer-Encoding: chunked
           Expect: 100-continue
           X-Backend-Obj-Multiphase-Commit: yes
    obj:   100 Continue
           X-Obj-Multiphase-Commit: yes
    proxy: --MIMEboundary
           X-Document: object body
           <obj_data>
           --MIMEboundary
           X-Document: object metadata
           Content-MD5: <footer_meta_cksum>
           <footer_meta>
           --MIMEboundary
           <object server writes the <ts>#<frag_index>.data file>
    obj:   100 Continue
           <quorum>
    proxy: X-Document: put commit
           commit_confirmation
           --MIMEboundary--
           <object server renames <ts>#<frag_index>.data to
            <ts>#<frag_index>#d.data>
    obj:   20x
           <proxy waits to receive >=2 2xx responses>
    proxy: 2xx -> client

A few key points on the durable state of a fragment archive:

* A durable fragment archive means that there exist sufficient other fragment
  archives elsewhere in the cluster (durable and/or non-durable) to
  reconstruct the object.
* When a proxy does a GET, it will require at least one object server to
  respond with a durable fragment archive before reconstructing and returning
  the object to the client.

Partial PUT Failures
--------------------

A partial PUT failure has a few different modes. In one scenario the Proxy
Server is alive through the entire PUT conversation. This is a very
straightforward case. The client will receive a good response if and only if a
quorum of fragment archives were successfully landed on their storage nodes.
In this case the Reconstructor will discover the missing fragment archives,
perform a reconstruction and deliver those fragment archives to their nodes.

The more interesting case is what happens if the proxy dies in the middle of a
conversation. If it turns out that a quorum had been met and the commit phase
of the conversation finished, it's as simple as the previous case in that the
reconstructor will repair things. However, if the commit didn't get a chance
to happen then some number of the storage nodes have ``.data`` files on them
(fragment archives) but none of them knows whether there are enough elsewhere
for the entire object to be reconstructed. In this case the client will not
have received a 2xx response so there is no issue there, however, it is left
to the storage nodes to clean up the stale fragment archives. Work is ongoing
in this area to enable the proxy to play a role in reviving these fragment
archives, however, for the current release, a proxy failure after the start of
a conversation but before the commit message will simply result in a PUT
failure.

GET
---

The GET for EC is different enough from replication that subclassing the
`BaseObjectController` as the `ECObjectController` enables an efficient way to
implement the high level steps described earlier:

#. The proxy server makes simultaneous requests to `ec_ndata` primary object
   server nodes with the goal of finding a set of `ec_ndata` distinct EC
   archives at the same timestamp, and an indication from at least one object
   server that a durable fragment archive exists for that timestamp. If this
   goal is not achieved with the first `ec_ndata` requests then the proxy
   server continues to issue requests to the remaining primary nodes and then
   handoff nodes.
#. As soon as the proxy server has found a usable set of `ec_ndata` EC
   archives, it starts to call PyECLib to decode fragments as they are
   returned by the object server nodes.
#. The proxy server creates Etag and content length headers for the client
   response since each EC archive's metadata is valid only for that archive.
#. The proxy streams the decoded data it has back to the client.

Note that the proxy does not require all object servers to have a durable
fragment archive to return in response to a GET. The proxy will be satisfied
if just one object server has a durable fragment archive at the same timestamp
as EC archives returned from other object servers. This means that the proxy
can successfully GET an object that had missing durable state on some nodes
when it was PUT (i.e. a partial PUT failure occurred).

Note also that an object server may inform the proxy server that it has more
than one EC archive for different timestamps and/or fragment indexes, which
may cause the proxy server to issue multiple requests for distinct EC archives
to that object server. (This situation can temporarily occur after a ring
rebalance when a handoff node storing an archive has become a primary node and
received its primary archive but not yet moved the handoff archive to its
primary node.)

The proxy may receive EC archives having different timestamps, and may receive
several EC archives having the same index. The proxy therefore ensures that it
has sufficient EC archives with the same timestamp and distinct fragment
indexes before considering a GET to be successful.

Object Server
=============

The Object Server, like the Proxy Server, supports MIME conversations as
described in the proxy section earlier. This includes processing of the commit
message and decoding various sections of the MIME document to extract the
footer which includes things like the entire object etag.

DiskFile
--------

Erasure code policies use the subclassed ``ECDiskFile``, ``ECDiskFileWriter``,
``ECDiskFileReader`` and ``ECDiskFileManager`` to implement EC specific
handling of on disk files. This includes things like file name manipulation to
include the fragment index and durable state in the filename, construction of
the EC specific ``hashes.pkl`` file to include fragment index information,
etc.

Metadata
^^^^^^^^

There are a few different categories of metadata that are associated with EC:

System Metadata: EC has a set of object level system metadata that it attaches
to each of the EC archives. The metadata is for internal use only:

* ``X-Object-Sysmeta-EC-Etag``: The Etag of the original object.
* ``X-Object-Sysmeta-EC-Content-Length``: The content length of the original
  object.
* ``X-Object-Sysmeta-EC-Frag-Index``: The fragment index for the object.
* ``X-Object-Sysmeta-EC-Scheme``: Description of the EC policy used to encode
  the object.
* ``X-Object-Sysmeta-EC-Segment-Size``: The segment size used for the object.

User Metadata: User metadata is unaffected by EC, however, a full copy of the
user metadata is stored with every EC archive. This is required as the
reconstructor needs this information and each reconstructor only communicates
with its closest neighbors on the ring.

PyECLib Metadata: PyECLib stores a small amount of metadata on a per fragment
basis. This metadata is not documented here as it is opaque to Swift.

Database Updates
================

As account and container rings are not associated with a Storage Policy, there
is no change to how these database updates occur when using an EC policy.

The Reconstructor
=================

The Reconstructor performs analogous functions to the replicator:

#. Recovering from disk drive failure.
#. Moving data around because of a rebalance.
#. Reverting data back to a primary from a handoff.
#. Recovering fragment archives from bit rot discovered by the auditor.

However, under the hood it operates quite differently. The following are some
of the key elements in understanding how the reconstructor operates.

Unlike the replicator, the work that the reconstructor does is not always as
easy to break down into the 2 basic tasks of synchronize or revert (move data
from handoff back to primary) because of the fact that one storage node can
house fragment archives of various indexes and each index really "belongs" to
a different node. So, whereas when the replicator is reverting data from a
handoff it has just one node to send its data to, the reconstructor can have
several. Additionally, it is not always the case that the processing of a
particular suffix directory means one or the other job type for the entire
directory (as it does for replication). The scenarios that create these mixed
situations can be pretty complex so we will just focus on what the
reconstructor does here and not a detailed explanation of why.

Job Construction and Processing
-------------------------------

Because of the nature of the work it has to do as described above, the
reconstructor builds jobs for a single job processor. The job itself contains
all of the information needed for the processor to execute the job, which may
be a synchronization or a data reversion. There may be a mix of jobs that
perform both of these operations on the same suffix directory.

Jobs are constructed on a per-partition basis and then on a per-fragment-index
basis. That is, there will be one job for every fragment index in a partition.
Performing this construction "up front" like this helps minimize the
interaction between nodes collecting hashes.pkl information.

Once a set of jobs for a partition has been constructed, those jobs are sent
off to threads for execution. The single job processor then performs the
necessary actions, working closely with ssync to carry out its instructions.
For data reversion, the actual objects themselves are cleaned up via the ssync
module and once that partition's set of jobs is complete, the reconstructor
will attempt to remove the relevant directory structures.

Job construction must account for a variety of scenarios, including:

#. A partition directory with all fragment indexes matching the local node
   index. This is the case where everything is where it belongs and we just
   need to compare hashes and sync if needed. Here we simply sync with our
   partners.
#. A partition directory with at least one local fragment index and a mix of
   others. Here we need to sync with our partners where the fragment index
   matches the local node index; all others are synced with their home nodes
   and then deleted.
#. A partition directory with no local fragment index and one or more other
   fragment indexes. Here we sync with just the home nodes for the fragment
   indexes that we have and then all the local archives are deleted. This is
   the basic handoff reversion case.

.. note::

    A "home node" is the node where the fragment index encoded in the
    fragment archive's filename matches the node index of a node in the
    primary partition list.

Node Communication
------------------

The replicators talk to all nodes that have a copy of their object, typically
just 2 other nodes. For EC, having each reconstructor node talk to all nodes
would incur a large amount of overhead as there will typically be a much
larger number of nodes participating in the EC scheme.
Therefore, the reconstructor is built to talk to its adjacent nodes on the ring only. These nodes are typically referred to as partners. Reconstruction -------------- Reconstruction can be thought of sort of like replication but with an extra step in the middle. The reconstructor is hard-wired to use ssync to determine what is missing and desired by the other side. However, before an object is sent over the wire it needs to be reconstructed from the remaining fragments as the local fragment is just that - a different fragment index than what the other end is asking for. Thus, there are hooks in ssync for EC based policies. One case would be for basic reconstruction which, at a high level, looks like this: * Determine which nodes need to be contacted to collect other EC archives needed to perform reconstruction. * Update the etag and fragment index metadata elements of the newly constructed fragment archive. * Establish a connection to the target nodes and give ssync a DiskFileLike class from which it can stream data. The reader in this class gathers fragments from the nodes and uses PyECLib to reconstruct each segment before yielding data back to ssync. Essentially what this means is that data is buffered, in memory, on a per segment basis at the node performing reconstruction and each segment is dynamically reconstructed and delivered to ``ssync_sender`` where the ``send_put()`` method will ship them on over. The sender is then responsible for deleting the objects as they are sent in the case of data reversion. The Auditor =========== Because the auditor already operates on a per storage policy basis, there are no specific auditor changes associated with EC. Each EC archive looks like, and is treated like, a regular object from the perspective of the auditor. Therefore, if the auditor finds bit-rot in an EC archive, it simply quarantines it and the reconstructor will take care of the rest just as the replicator does for replication policies. swift-2.17.0/doc/source/misc.rst0000666000175100017510000000336613236061617016532 0ustar zuulzuul00000000000000.. _misc: **** Misc **** .. _acls: ACLs ==== .. automodule:: swift.common.middleware.acl :members: :show-inheritance: .. _buffered_http: Buffered HTTP ============= .. automodule:: swift.common.bufferedhttp :members: :show-inheritance: .. _constraints: Constraints =========== .. automodule:: swift.common.constraints :members: :undoc-members: :show-inheritance: Container Sync Realms ===================== .. automodule:: swift.common.container_sync_realms :members: :show-inheritance: .. _direct_client: Direct Client ============= .. automodule:: swift.common.direct_client :members: :undoc-members: :show-inheritance: .. _exceptions: Exceptions ========== .. automodule:: swift.common.exceptions :members: :undoc-members: :show-inheritance: .. _internal_client: Internal Client =============== .. automodule:: swift.common.internal_client :members: :undoc-members: :show-inheritance: Manager ========= .. automodule:: swift.common.manager :members: :show-inheritance: MemCacheD ========= .. automodule:: swift.common.memcached :members: :show-inheritance: .. _request_helpers: Request Helpers =============== .. automodule:: swift.common.request_helpers :members: :undoc-members: :show-inheritance: .. _swob: Swob ==== .. automodule:: swift.common.swob :members: :show-inheritance: :special-members: __call__ .. _utils: Utils ===== .. automodule:: swift.common.utils :members: :show-inheritance: .. _wsgi: WSGI ==== .. 
automodule:: swift.common.wsgi
   :members:
   :show-inheritance:

.. _storage_policy:

Storage Policy
==============

.. automodule:: swift.common.storage_policy
    :members:
    :show-inheritance:
swift-2.17.0/doc/source/replication_network.rst0000666000175100017510000003361513236061617021651 0ustar zuulzuul00000000000000.. _Dedicated-replication-network:

=============================
Dedicated replication network
=============================

-------
Summary
-------

Swift's replication process is essential for consistency and availability of
data. By default, replication activity will use the same network interface as
other cluster operations. However, if a replication interface is set in the
ring for a node, that node will send replication traffic on its designated
separate replication network interface. Replication traffic includes REPLICATE
requests and rsync traffic.

To separate the cluster-internal replication traffic from client traffic,
separate replication servers can be used. These replication servers are based
on the standard storage servers, but they listen on the replication IP and
only respond to REPLICATE requests. Storage servers can serve REPLICATE
requests, so an operator can transition to using a separate replication
network with no cluster downtime.

Replication IP and port information is stored in the ring on a per-node basis.
These parameters will be used if they are present, but they are not required.
If this information does not exist or is empty for a particular node, the
node's standard IP and port will be used for replication.

--------------------
For SAIO replication
--------------------

#. Create new script in ~/bin/ (for example: remakerings_new)::

    #!/bin/bash
    cd /etc/swift
    rm -f *.builder *.ring.gz backups/*.builder backups/*.ring.gz
    swift-ring-builder object.builder create 18 3 1
    swift-ring-builder object.builder add z1-127.0.0.1:6010R127.0.0.1:6050/sdb1 1
    swift-ring-builder object.builder add z2-127.0.0.1:6020R127.0.0.1:6060/sdb2 1
    swift-ring-builder object.builder add z3-127.0.0.1:6030R127.0.0.1:6070/sdb3 1
    swift-ring-builder object.builder add z4-127.0.0.1:6040R127.0.0.1:6080/sdb4 1
    swift-ring-builder object.builder rebalance
    swift-ring-builder container.builder create 18 3 1
    swift-ring-builder container.builder add z1-127.0.0.1:6011R127.0.0.1:6051/sdb1 1
    swift-ring-builder container.builder add z2-127.0.0.1:6021R127.0.0.1:6061/sdb2 1
    swift-ring-builder container.builder add z3-127.0.0.1:6031R127.0.0.1:6071/sdb3 1
    swift-ring-builder container.builder add z4-127.0.0.1:6041R127.0.0.1:6081/sdb4 1
    swift-ring-builder container.builder rebalance
    swift-ring-builder account.builder create 18 3 1
    swift-ring-builder account.builder add z1-127.0.0.1:6012R127.0.0.1:6052/sdb1 1
    swift-ring-builder account.builder add z2-127.0.0.1:6022R127.0.0.1:6062/sdb2 1
    swift-ring-builder account.builder add z3-127.0.0.1:6032R127.0.0.1:6072/sdb3 1
    swift-ring-builder account.builder add z4-127.0.0.1:6042R127.0.0.1:6082/sdb4 1
    swift-ring-builder account.builder rebalance

   .. note::
       Syntax of adding device has been changed:
       R<replication_ip>:<replication_port> was added between
       z<zone>-<ip>:<port> and /<device_name>_<meta>. Added devices will use
       <replication_ip> and <replication_port> for replication activities.

#.
Add next rows in /etc/rsyncd.conf:: [account6052] max connections = 25 path = /srv/1/node/ read only = false lock file = /var/lock/account6052.lock [account6062] max connections = 25 path = /srv/2/node/ read only = false lock file = /var/lock/account6062.lock [account6072] max connections = 25 path = /srv/3/node/ read only = false lock file = /var/lock/account6072.lock [account6082] max connections = 25 path = /srv/4/node/ read only = false lock file = /var/lock/account6082.lock [container6051] max connections = 25 path = /srv/1/node/ read only = false lock file = /var/lock/container6051.lock [container6061] max connections = 25 path = /srv/2/node/ read only = false lock file = /var/lock/container6061.lock [container6071] max connections = 25 path = /srv/3/node/ read only = false lock file = /var/lock/container6071.lock [container6081] max connections = 25 path = /srv/4/node/ read only = false lock file = /var/lock/container6081.lock [object6050] max connections = 25 path = /srv/1/node/ read only = false lock file = /var/lock/object6050.lock [object6060] max connections = 25 path = /srv/2/node/ read only = false lock file = /var/lock/object6060.lock [object6070] max connections = 25 path = /srv/3/node/ read only = false lock file = /var/lock/object6070.lock [object6080] max connections = 25 path = /srv/4/node/ read only = false lock file = /var/lock/object6080.lock #. Restart rsync daemon:: service rsync restart #. Add changes in configuration files in directories: * /etc/swift/object-server(files: 1.conf, 2.conf, 3.conf, 4.conf) * /etc/swift/container-server(files: 1.conf, 2.conf, 3.conf, 4.conf) * /etc/swift/account-server(files: 1.conf, 2.conf, 3.conf, 4.conf) delete all configuration options in section [<*>-replicator] #. Add configuration files for object-server, in /etc/swift/object-server/ * 5.conf:: [DEFAULT] devices = /srv/1/node mount_check = false disable_fallocate = true bind_port = 6050 user = swift log_facility = LOG_LOCAL2 recon_cache_path = /var/cache/swift [pipeline:main] pipeline = recon object-server [app:object-server] use = egg:swift#object replication_server = True [filter:recon] use = egg:swift#recon [object-replicator] rsync_module = {replication_ip}::object{replication_port} * 6.conf:: [DEFAULT] devices = /srv/2/node mount_check = false disable_fallocate = true bind_port = 6060 user = swift log_facility = LOG_LOCAL3 recon_cache_path = /var/cache/swift2 [pipeline:main] pipeline = recon object-server [app:object-server] use = egg:swift#object replication_server = True [filter:recon] use = egg:swift#recon [object-replicator] rsync_module = {replication_ip}::object{replication_port} * 7.conf:: [DEFAULT] devices = /srv/3/node mount_check = false disable_fallocate = true bind_port = 6070 user = swift log_facility = LOG_LOCAL4 recon_cache_path = /var/cache/swift3 [pipeline:main] pipeline = recon object-server [app:object-server] use = egg:swift#object replication_server = True [filter:recon] use = egg:swift#recon [object-replicator] rsync_module = {replication_ip}::object{replication_port} * 8.conf:: [DEFAULT] devices = /srv/4/node mount_check = false disable_fallocate = true bind_port = 6080 user = swift log_facility = LOG_LOCAL5 recon_cache_path = /var/cache/swift4 [pipeline:main] pipeline = recon object-server [app:object-server] use = egg:swift#object replication_server = True [filter:recon] use = egg:swift#recon [object-replicator] rsync_module = {replication_ip}::object{replication_port} #. 
Add configuration files for container-server, in /etc/swift/container-server/ * 5.conf:: [DEFAULT] devices = /srv/1/node mount_check = false disable_fallocate = true bind_port = 6051 user = swift log_facility = LOG_LOCAL2 recon_cache_path = /var/cache/swift [pipeline:main] pipeline = recon container-server [app:container-server] use = egg:swift#container replication_server = True [filter:recon] use = egg:swift#recon [container-replicator] rsync_module = {replication_ip}::container{replication_port} * 6.conf:: [DEFAULT] devices = /srv/2/node mount_check = false disable_fallocate = true bind_port = 6061 user = swift log_facility = LOG_LOCAL3 recon_cache_path = /var/cache/swift2 [pipeline:main] pipeline = recon container-server [app:container-server] use = egg:swift#container replication_server = True [filter:recon] use = egg:swift#recon [container-replicator] rsync_module = {replication_ip}::container{replication_port} * 7.conf:: [DEFAULT] devices = /srv/3/node mount_check = false disable_fallocate = true bind_port = 6071 user = swift log_facility = LOG_LOCAL4 recon_cache_path = /var/cache/swift3 [pipeline:main] pipeline = recon container-server [app:container-server] use = egg:swift#container replication_server = True [filter:recon] use = egg:swift#recon [container-replicator] rsync_module = {replication_ip}::container{replication_port} * 8.conf:: [DEFAULT] devices = /srv/4/node mount_check = false disable_fallocate = true bind_port = 6081 user = swift log_facility = LOG_LOCAL5 recon_cache_path = /var/cache/swift4 [pipeline:main] pipeline = recon container-server [app:container-server] use = egg:swift#container replication_server = True [filter:recon] use = egg:swift#recon [container-replicator] rsync_module = {replication_ip}::container{replication_port} #. 
Add configuration files for account-server, in /etc/swift/account-server/ * 5.conf:: [DEFAULT] devices = /srv/1/node mount_check = false disable_fallocate = true bind_port = 6052 user = swift log_facility = LOG_LOCAL2 recon_cache_path = /var/cache/swift [pipeline:main] pipeline = recon account-server [app:account-server] use = egg:swift#account replication_server = True [filter:recon] use = egg:swift#recon [account-replicator] rsync_module = {replication_ip}::account{replication_port} * 6.conf:: [DEFAULT] devices = /srv/2/node mount_check = false disable_fallocate = true bind_port = 6062 user = swift log_facility = LOG_LOCAL3 recon_cache_path = /var/cache/swift2 [pipeline:main] pipeline = recon account-server [app:account-server] use = egg:swift#account replication_server = True [filter:recon] use = egg:swift#recon [account-replicator] rsync_module = {replication_ip}::account{replication_port} * 7.conf:: [DEFAULT] devices = /srv/3/node mount_check = false disable_fallocate = true bind_port = 6072 user = swift log_facility = LOG_LOCAL4 recon_cache_path = /var/cache/swift3 [pipeline:main] pipeline = recon account-server [app:account-server] use = egg:swift#account replication_server = True [filter:recon] use = egg:swift#recon [account-replicator] rsync_module = {replication_ip}::account{replication_port} * 8.conf:: [DEFAULT] devices = /srv/4/node mount_check = false disable_fallocate = true bind_port = 6082 user = swift log_facility = LOG_LOCAL5 recon_cache_path = /var/cache/swift4 [pipeline:main] pipeline = recon account-server [app:account-server] use = egg:swift#account replication_server = True [filter:recon] use = egg:swift#recon [account-replicator] rsync_module = {replication_ip}::account{replication_port} --------------------------------- For a Multiple Server replication --------------------------------- #. Move configuration file. * Configuration file for object-server from /etc/swift/object-server.conf to /etc/swift/object-server/1.conf * Configuration file for container-server from /etc/swift/container-server.conf to /etc/swift/container-server/1.conf * Configuration file for account-server from /etc/swift/account-server.conf to /etc/swift/account-server/1.conf #. Add changes in configuration files in directories: * /etc/swift/object-server(files: 1.conf) * /etc/swift/container-server(files: 1.conf) * /etc/swift/account-server(files: 1.conf) delete all configuration options in section [<*>-replicator] #. Add configuration files for object-server, in /etc/swift/object-server/2.conf:: [DEFAULT] bind_ip = $STORAGE_LOCAL_NET_IP workers = 2 [pipeline:main] pipeline = object-server [app:object-server] use = egg:swift#object replication_server = True [object-replicator] #. Add configuration files for container-server, in /etc/swift/container-server/2.conf:: [DEFAULT] bind_ip = $STORAGE_LOCAL_NET_IP workers = 2 [pipeline:main] pipeline = container-server [app:container-server] use = egg:swift#container replication_server = True [container-replicator] #. 
Add configuration files for account-server, in /etc/swift/account-server/2.conf::

    [DEFAULT]
    bind_ip = $STORAGE_LOCAL_NET_IP
    workers = 2

    [pipeline:main]
    pipeline = account-server

    [app:account-server]
    use = egg:swift#account
    replication_server = True

    [account-replicator]
swift-2.17.0/doc/source/admin_guide.rst0000666000175100017510000023461413236061617020046 0ustar zuulzuul00000000000000=====================
Administrator's Guide
=====================

-------------------------
Defining Storage Policies
-------------------------

Defining your Storage Policies is very easy to do with Swift. It is important
that the administrator understand the concepts behind Storage Policies before
actually creating and using them in order to get the most benefit out of the
feature and, more importantly, to avoid having to make unnecessary changes
once a set of policies have been deployed to a cluster.

It is highly recommended that the reader fully read and comprehend
:doc:`overview_policies` before proceeding with administration of policies.
Plan carefully, and it is suggested that experimentation be done first on a
non-production cluster to be certain that the desired configuration meets the
needs of the users. See :ref:`upgrade-policy` before planning the upgrade of
your existing deployment.

Following is a high level view of the very few steps it takes to configure
policies once you have decided what you want to do:

#. Define your policies in ``/etc/swift/swift.conf``
#. Create the corresponding object rings
#. Communicate the names of the Storage Policies to cluster users

For a specific example that takes you through these steps, please see
:doc:`policies_saio`

------------------
Managing the Rings
------------------

You may build the storage rings on any server with the appropriate version of
Swift installed. Once built or changed (rebalanced), you must distribute the
rings to all the servers in the cluster. Storage rings contain information
about all the Swift storage partitions and how they are distributed between
the different nodes and disks.

Swift 1.6.0 is the last version to use a Python pickle format. Subsequent
versions use a different serialization format. **Rings generated by Swift
versions 1.6.0 and earlier may be read by any version, but rings generated
after 1.6.0 may only be read by Swift versions greater than 1.6.0.** So when
upgrading from version 1.6.0 or earlier to a version greater than 1.6.0,
either upgrade Swift on your ring building server **last** after all Swift
nodes have been successfully upgraded, or refrain from generating rings until
all Swift nodes have been successfully upgraded.

If you need to downgrade from a version of Swift greater than 1.6.0 to a
version less than or equal to 1.6.0, first downgrade your ring-building
server, generate new rings, push them out, then continue with the rest of the
downgrade.

For more information see :doc:`overview_ring`.

.. highlight:: none

Removing a device from the ring::

    swift-ring-builder <builder-file> remove <ip_address>/<device_name>

Removing a server from the ring::

    swift-ring-builder <builder-file> remove <ip_address>

Adding devices to the ring: See :ref:`ring-preparing`

See what devices for a server are in the ring::

    swift-ring-builder <builder-file> search <ip_address>

Once you are done with all changes to the ring, the changes need to be
"committed"::

    swift-ring-builder <builder-file> rebalance

Once the new rings are built, they should be pushed out to all the servers in
the cluster.
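One simple way to distribute the rings, assuming SSH access from the ring
building server (the hostnames below are purely illustrative), is::

    for node in storage1 storage2 storage3; do
        scp /etc/swift/*.ring.gz ${node}:/etc/swift/
    done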
Optionally, if invoked as 'swift-ring-builder-safe' the directory containing
the specified builder file will be locked (via a .lock file in the parent
directory). This provides a basic safeguard against multiple instances of the
swift-ring-builder (or other utilities that observe this lock) attempting to
write to or read the builder/ring files while operations are in progress. This
can be useful in environments where ring management has been automated but the
operator still needs to interact with the rings manually.

If the ring builder is not producing the balances that you are expecting, you
can gain visibility into what it's doing with the ``--debug`` flag::

    swift-ring-builder <builder-file> rebalance --debug

This produces a great deal of output that is mostly useful if you are either
(a) attempting to fix the ring builder, or (b) filing a bug against the ring
builder.

You may notice in the rebalance output a 'dispersion' number. What this number
means is explained in :ref:`ring_dispersion` but in essence it is the
percentage of partitions in the ring that have too many replicas within a
particular failure domain. You can ask 'swift-ring-builder' what the
dispersion is with::

    swift-ring-builder <builder-file> dispersion

This will give you the percentage again; if you want a detailed view of the
dispersion simply add a ``--verbose``::

    swift-ring-builder <builder-file> dispersion --verbose

This will not only display the percentage but will also display a dispersion
table that lists partition dispersion by tier. You can use this table to
figure out where you need to add capacity or to help tune an
:ref:`ring_overload` value.

Now let's take an example with 1 region, 3 zones and 4 devices. Each device
has the same weight, and the ``dispersion --verbose`` might show the
following::

    Dispersion is 16.666667, Balance is 0.000000, Overload is 0.00%
    Required overload is 33.333333%
    Worst tier is 33.333333 (r1z3)
    --------------------------------------------------------------------------
    Tier                   Parts      %    Max   0    1    2    3
    --------------------------------------------------------------------------
    r1                       768   0.00      3   0    0    0    256
    r1z1                     192   0.00      1   64   192  0    0
    r1z1-127.0.0.1           192   0.00      1   64   192  0    0
    r1z1-127.0.0.1/sda       192   0.00      1   64   192  0    0
    r1z2                     192   0.00      1   64   192  0    0
    r1z2-127.0.0.2           192   0.00      1   64   192  0    0
    r1z2-127.0.0.2/sda       192   0.00      1   64   192  0    0
    r1z3                     384  33.33      1   0    128  128  0
    r1z3-127.0.0.3           384  33.33      1   0    128  128  0
    r1z3-127.0.0.3/sda       192   0.00      1   64   192  0    0
    r1z3-127.0.0.3/sdb       192   0.00      1   64   192  0    0

The first line reports that there are 256 partitions with 3 copies in region
1; and this is an expected output in this case (single region with 3 replicas)
as reported by the "Max" value.

However, there is some imbalance in the cluster, more precisely in zone 3. The
"Max" reports a maximum of 1 copy in this zone; however 50.00% of the
partitions are storing 2 replicas in this zone (which is somewhat expected,
because there are more disks in this zone).

You can now either add more capacity to the other zones, decrease the total
weight in zone 3 or set the overload to a value `greater than` 33.333333% -
only as much overload as needed will be used.

-----------------------
Scripting Ring Creation
-----------------------

You can create scripts to create the account and container rings and
rebalance. Here's an example script for the Account ring. Use similar commands
to create a make-container-ring.sh script on the proxy server node.
1. Create a script file called make-account-ring.sh on the proxy server node
   with the following content::

       #!/bin/bash
       cd /etc/swift
       rm -f account.builder account.ring.gz backups/account.builder backups/account.ring.gz
       swift-ring-builder account.builder create 18 3 1
       swift-ring-builder account.builder add r1z1-<account-server-1>:6202/sdb1 1
       swift-ring-builder account.builder add r1z2-<account-server-2>:6202/sdb1 1
       swift-ring-builder account.builder rebalance

   You need to replace the values of <account-server-1>, <account-server-2>,
   etc. with the IP addresses of the account servers used in your setup. You
   can have as many account servers as you need. All account servers are
   assumed to be listening on port 6202, and have a storage device called
   "sdb1" (this is a directory name created under /drives when we setup the
   account server). The "z1", "z2", etc. designate zones, and you can choose
   whether you put devices in the same or different zones. The "r1"
   designates the region, with different regions specified as "r1", "r2",
   etc.

2. Make the script file executable and run it to create the account ring
   file::

       chmod +x make-account-ring.sh
       sudo ./make-account-ring.sh

3. Copy the resulting ring file /etc/swift/account.ring.gz to all the account
   server nodes in your Swift environment, and put them in the /etc/swift
   directory on these nodes. Make sure that every time you change the account
   ring configuration, you copy the resulting ring file to all the account
   nodes.

-----------------------
Handling System Updates
-----------------------

It is recommended that system updates and reboots are done a zone at a time.
This allows the update to happen, and for the Swift cluster to stay available
and responsive to requests. It is also advisable, when updating a zone, to let
it run for a while before updating the other zones to make sure the update
doesn't have any adverse effects.

----------------------
Handling Drive Failure
----------------------

In the event that a drive has failed, the first step is to make sure the drive
is unmounted. This will make it easier for Swift to work around the failure
until it has been resolved. If the drive is going to be replaced immediately,
then it is just best to replace the drive, format it, remount it, and let
replication fill it up.

After the drive is unmounted, make sure the mount point is owned by root
(root:root 755). This ensures that rsync will not try to replicate into the
root drive once the failed drive is unmounted.

If the drive can't be replaced immediately, then it is best to leave it
unmounted, and set the device weight to 0. This will allow all the replicas
that were on that drive to be replicated elsewhere until the drive is
replaced. Once the drive is replaced, the device weight can be increased
again. Setting the device weight to 0 instead of removing the drive from the
ring gives Swift the chance to replicate data from the failing disk too (in
case it is still possible to read some of the data).

Setting the device weight to 0 (or removing a failed drive from the ring) has
another benefit: all partitions that were stored on the failed drive are
distributed over the remaining disks in the cluster, and each disk only needs
to store a few new partitions. This is much faster compared to replicating all
partitions to a single, new disk. It decreases the time to recover from a
degraded number of replicas significantly, and becomes more and more important
with bigger disks.
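For example, draining a failed drive and restoring it after replacement might
look like this (the device id ``d35`` and weight ``100`` are illustrative;
remember to push the rebalanced ring out to all nodes afterwards)::

    swift-ring-builder object.builder set_weight d35 0
    swift-ring-builder object.builder rebalance
    # ... replace and reformat the drive, then restore its weight:
    swift-ring-builder object.builder set_weight d35 100
    swift-ring-builder object.builder rebalance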
-----------------------
Handling Server Failure
-----------------------

If a server is having hardware issues, it is a good idea to make sure the
Swift services are not running. This will allow Swift to work around the
failure while you troubleshoot.

If the server just needs a reboot, or a small amount of work that should only
last a couple of hours, then it is probably best to let Swift work around the
failure and get the machine fixed and back online. When the machine comes back
online, replication will make sure that anything that is missing during the
downtime will get updated.

If the server has more serious issues, then it is probably best to remove all
of the server's devices from the ring. Once the server has been repaired and
is back online, the server's devices can be added back into the ring. It is
important that the devices are reformatted before putting them back into the
ring as they are likely to be responsible for a different set of partitions
than before.

-----------------------
Detecting Failed Drives
-----------------------

It has been our experience that when a drive is about to fail, error messages
will spew into `/var/log/kern.log`. There is a script called
`swift-drive-audit` that can be run via cron to watch for bad drives. If
errors are detected, it will unmount the bad drive, so that Swift can work
around it. The script takes a configuration file with the following settings:

``[drive-audit]``

================== ============== ===========================================
Option             Default        Description
------------------ -------------- -------------------------------------------
user               swift          Drop privileges to this user for non-root
                                  tasks
log_facility       LOG_LOCAL0     Syslog log facility
log_level          INFO           Log level
device_dir         /srv/node      Directory devices are mounted under
minutes            60             Number of minutes to look back in
                                  `/var/log/kern.log`
error_limit        1              Number of errors to find before a device
                                  is unmounted
log_file_pattern   /var/log/kern* Location of the log file with globbing
                                  pattern to check against device errors
regex_pattern_X    (see below)    Regular expression patterns to be used to
                                  locate device blocks with errors in the
                                  log file
================== ============== ===========================================

The default regex patterns used to locate device blocks with errors are
`\berror\b.*\b(sd[a-z]{1,2}\d?)\b` and `\b(sd[a-z]{1,2}\d?)\b.*\berror\b`. One
is able to overwrite the defaults above by providing new expressions using the
format `regex_pattern_X = regex_expression`, where `X` is a number.

This script has been tested on Ubuntu 10.04 and Ubuntu 12.04, so if you are
using a different distro or OS, some care should be taken before using in
production.

------------------------------
Preventing Disk Full Scenarios
------------------------------

.. highlight:: cfg

Prevent disk full scenarios by ensuring that the ``proxy-server`` blocks PUT
requests and rsync prevents replication to the specific drives.

You can prevent `proxy-server` PUT requests to low space disks by ensuring
``fallocate_reserve`` is set in the ``object-server.conf``. By default,
``fallocate_reserve`` is set to 1%. This blocks PUT requests that leave the
free disk space below 1% of the disk.

In order to prevent rsync replication to specific drives, firstly setup
``rsync_module`` per disk in your ``object-replicator``. Set this in
``object-server.conf``:

.. code::

    [object-replicator]
    rsync_module = {replication_ip}::object_{device}

Set the individual drives in ``rsync.conf``. For example:

.. code::
.. code::

    [object_sda]
    max connections = 4
    lock file = /var/lock/object_sda.lock

    [object_sdb]
    max connections = 4
    lock file = /var/lock/object_sdb.lock

Finally, monitor the disk space of each disk and adjust the rsync ``max
connections`` per drive to ``-1``. We recommend utilising your existing
monitoring solution to achieve this. The following is an example script:

.. code-block:: python

    #!/usr/bin/env python
    import os
    import errno

    RESERVE = 500 * 2 ** 20  # 500 MiB

    DEVICES = '/srv/node1'

    path_template = '/etc/rsync.d/disable_%s.conf'
    config_template = '''
    [object_%s]
    max connections = -1
    '''


    def disable_rsync(device):
        with open(path_template % device, 'w') as f:
            f.write(config_template.lstrip() % device)


    def enable_rsync(device):
        try:
            os.unlink(path_template % device)
        except OSError as e:
            # ignore file does not exist
            if e.errno != errno.ENOENT:
                raise


    for device in os.listdir(DEVICES):
        path = os.path.join(DEVICES, device)
        st = os.statvfs(path)
        free = st.f_bavail * st.f_frsize
        if free < RESERVE:
            disable_rsync(device)
        else:
            enable_rsync(device)

For the above script to work, ensure ``/etc/rsync.d/`` conf files are
included, by specifying ``&include`` in your ``rsync.conf`` file:

.. code::

    &include /etc/rsync.d

Use this in conjunction with a cron job to periodically run the script, for
example:

.. highlight:: none

.. code::

    # /etc/cron.d/devicecheck
    * * * * * root /some/path/to/disable_rsync.py

.. _dispersion_report:

-----------------
Dispersion Report
-----------------

There is a swift-dispersion-report tool for measuring overall cluster
health. This is accomplished by checking if a set of deliberately
distributed containers and objects are currently in their proper places
within the cluster.

For instance, a common deployment has three replicas of each object. The
health of that object can be measured by checking if each replica is in its
proper place. If only 2 of the 3 replicas are in place, the object's health
can be said to be at 66.66%, where 100% would be perfect.

A single object's health, especially an older object, usually reflects the
health of the entire partition the object is in. If we make enough objects
on a distinct percentage of the partitions in the cluster, we can get a
pretty valid estimate of the overall cluster health. In practice, about 1%
partition coverage seems to balance well between accuracy and the amount of
time it takes to gather results.

The first thing that needs to be done to provide this health value is to
create a new account solely for this usage. Next, we need to place the
containers and objects throughout the system so that they are on distinct
partitions. The swift-dispersion-populate tool does this by making up random
container and object names until they fall on distinct partitions. Last, and
repeatedly for the life of the cluster, we need to run the
swift-dispersion-report tool to check the health of each of these containers
and objects.

.. highlight:: cfg

These tools need direct access to the entire cluster and to the ring files
(installing them on a proxy server will probably do). Both
swift-dispersion-populate and swift-dispersion-report use the same
configuration file, /etc/swift/dispersion.conf. Example conf file::

    [dispersion]
    auth_url = http://localhost:8080/auth/v1.0
    auth_user = test:tester
    auth_key = testing
    endpoint_type = internalURL

.. highlight:: none

There are also options for the conf file for specifying the dispersion
coverage (defaults to 1%), retries, concurrency, etc. though usually the
defaults are fine.
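For example, a conf file that increases the coverage and concurrency might
look like the following sketch (the values are illustrative; the option
names follow the sample dispersion.conf shipped with Swift)::

    [dispersion]
    auth_url = http://localhost:8080/auth/v1.0
    auth_user = test:tester
    auth_key = testing
    dispersion_coverage = 5.0
    retries = 5
    concurrency = 25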
If you want to use Keystone v3 for authentication, there are options like
auth_version, user_domain_name, project_domain_name and project_name.

Once the configuration is in place, run `swift-dispersion-populate` to
populate the containers and objects throughout the cluster. Now that those
containers and objects are in place, you can run `swift-dispersion-report`
to get a dispersion report, or the overall health of the cluster. Here is an
example of a cluster in perfect health::

    $ swift-dispersion-report
    Queried 2621 containers for dispersion reporting, 19s, 0 retries
    100.00% of container copies found (7863 of 7863)
    Sample represents 1.00% of the container partition space

    Queried 2619 objects for dispersion reporting, 7s, 0 retries
    100.00% of object copies found (7857 of 7857)
    Sample represents 1.00% of the object partition space

Now I'll deliberately double the weight of a device in the object ring (with
replication turned off) and rerun the dispersion report to show what impact
that has::

    $ swift-ring-builder object.builder set_weight d0 200
    $ swift-ring-builder object.builder rebalance
    ...
    $ swift-dispersion-report
    Queried 2621 containers for dispersion reporting, 8s, 0 retries
    100.00% of container copies found (7863 of 7863)
    Sample represents 1.00% of the container partition space

    Queried 2619 objects for dispersion reporting, 7s, 0 retries
    There were 1763 partitions missing one copy.
    77.56% of object copies found (6094 of 7857)
    Sample represents 1.00% of the object partition space

You can see the health of the objects in the cluster has gone down
significantly. Of course, I only have four devices in this test environment;
in a production environment with many, many devices the impact of one device
change would be much less. Next, I'll run the replicators to get everything
put back into place and then rerun the dispersion report::

    ... start object replicators and monitor logs until they're caught up ...

    $ swift-dispersion-report
    Queried 2621 containers for dispersion reporting, 17s, 0 retries
    100.00% of container copies found (7863 of 7863)
    Sample represents 1.00% of the container partition space

    Queried 2619 objects for dispersion reporting, 7s, 0 retries
    100.00% of object copies found (7857 of 7857)
    Sample represents 1.00% of the object partition space

You can also run the report for only containers or objects::

    $ swift-dispersion-report --container-only
    Queried 2621 containers for dispersion reporting, 17s, 0 retries
    100.00% of container copies found (7863 of 7863)
    Sample represents 1.00% of the container partition space

    $ swift-dispersion-report --object-only
    Queried 2619 objects for dispersion reporting, 7s, 0 retries
    100.00% of object copies found (7857 of 7857)
    Sample represents 1.00% of the object partition space

Alternatively, the dispersion report can also be output in JSON format. This
allows it to be more easily consumed by third party utilities::

    $ swift-dispersion-report -j
    {"object": {"retries:": 0, "missing_two": 0, "copies_found": 7863,
    "missing_one": 0, "copies_expected": 7863, "pct_found": 100.0,
    "overlapping": 0, "missing_all": 0}, "container": {"retries:": 0,
    "missing_two": 0, "copies_found": 12534, "missing_one": 0,
    "copies_expected": 12534, "pct_found": 100.0, "overlapping": 15,
    "missing_all": 0}}

Note that you may select which storage policy to use by setting the option
'--policy-name silver' or '-P silver' (silver is the example policy name
here). If no policy is specified, the default will be used per the
swift.conf file.
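For example, to populate and report against a non-default policy (the policy
name ``silver`` is illustrative and must exist in your swift.conf)::

    $ swift-dispersion-populate --policy-name silver
    $ swift-dispersion-report -P silver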
When you specify a policy, the containers created also include the policy
index, so even when running a container-only report you will need to specify
the policy if it is not the default.

-----------------------------------------------
Geographically Distributed Swift Considerations
-----------------------------------------------

Swift provides two features that may be used to distribute replicas of
objects across multiple geographically distributed data-centers: with
:doc:`overview_global_cluster` object replicas may be dispersed across
devices from different data-centers by using `regions` in ring device
descriptors; with :doc:`overview_container_sync` objects may be copied
between independent Swift clusters in each data-center. The operation and
configuration of each are described in their respective documentation. The
following points should be considered when selecting the feature that is
most appropriate for a particular use case:

#. Global Clusters allows the distribution of object replicas across
   data-centers to be controlled by the cluster operator on a per-policy
   basis, since the distribution is determined by the assignment of devices
   from each data-center in each policy's ring file (see the example after
   this list). With Container Sync the end user controls the distribution of
   objects across clusters on a per-container basis.

#. Global Clusters requires an operator to coordinate ring deployments
   across multiple data-centers. Container Sync allows for independent
   management of separate Swift clusters in each data-center, and for
   existing Swift clusters to be used as peers in Container Sync
   relationships without deploying new policies/rings.

#. Global Clusters seamlessly supports features that may rely on
   cross-container operations such as large objects and versioned writes.
   Container Sync requires the end user to ensure that all required
   containers are sync'd for these features to work in all data-centers.

#. Global Clusters makes objects available for GET or HEAD requests in both
   data-centers even if a replica of the object has not yet been
   asynchronously migrated between data-centers, by forwarding requests
   between data-centers. Container Sync is unable to serve requests for an
   object in a particular data-center until the asynchronous sync process
   has copied the object to that data-center.

#. Global Clusters may require less storage capacity than Container Sync to
   achieve equivalent durability of objects in each data-center. Global
   Clusters can restore replicas that are lost or corrupted in one
   data-center using replicas from other data-centers. Container Sync
   requires each data-center to independently manage the durability of
   objects, which may result in each data-center storing more replicas than
   with Global Clusters.

#. Global Clusters execute all account/container metadata updates
   synchronously to account/container replicas in all data-centers, which
   may incur delays when making updates across WANs. Container Sync only
   copies objects between data-centers and all Swift internal traffic is
   confined to each data-center.

#. Global Clusters does not yet guarantee the availability of objects stored
   in Erasure Coded policies when one data-center is offline. With Container
   Sync the availability of objects in each data-center is independent of
   the state of other data-centers once objects have been synced. Container
   Sync also allows objects to be stored using different policy types in
   different data-centers.
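As a sketch of the Global Clusters approach, devices from a second
data-center are simply added to a policy's ring under a different region
(the addresses and weights below are illustrative)::

    swift-ring-builder object.builder add r1z1-172.16.10.1:6200/sda1 100
    swift-ring-builder object.builder add r2z1-172.16.20.1:6200/sda1 100
    swift-ring-builder object.builder rebalance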
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Checking handoff partition distribution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

You can check if handoff partitions are piling up on a server by comparing
the expected number of partitions with the actual number on your disks.
First get the number of partitions that are currently assigned to a server
using the ``dispersion`` command from ``swift-ring-builder``::

    swift-ring-builder sample.builder dispersion --verbose
    Dispersion is 0.000000, Balance is 0.000000, Overload is 0.00%
    Required overload is 0.000000%
    --------------------------------------------------------------------------
    Tier                      Parts      %   Max    0     1     2     3
    --------------------------------------------------------------------------
    r1                         8192   0.00     2     0     0  8192     0
    r1z1                       4096   0.00     1  4096  4096     0     0
    r1z1-172.16.10.1           4096   0.00     1  4096  4096     0     0
    r1z1-172.16.10.1/sda1      4096   0.00     1  4096  4096     0     0
    r1z2                       4096   0.00     1  4096  4096     0     0
    r1z2-172.16.10.2           4096   0.00     1  4096  4096     0     0
    r1z2-172.16.10.2/sda1      4096   0.00     1  4096  4096     0     0
    r1z3                       4096   0.00     1  4096  4096     0     0
    r1z3-172.16.10.3           4096   0.00     1  4096  4096     0     0
    r1z3-172.16.10.3/sda1      4096   0.00     1  4096  4096     0     0
    r1z4                       4096   0.00     1  4096  4096     0     0
    r1z4-172.16.20.4           4096   0.00     1  4096  4096     0     0
    r1z4-172.16.20.4/sda1      4096   0.00     1  4096  4096     0     0
    r2                         8192   0.00     2     0  8192     0     0
    r2z1                       4096   0.00     1  4096  4096     0     0
    r2z1-172.16.20.1           4096   0.00     1  4096  4096     0     0
    r2z1-172.16.20.1/sda1      4096   0.00     1  4096  4096     0     0
    r2z2                       4096   0.00     1  4096  4096     0     0
    r2z2-172.16.20.2           4096   0.00     1  4096  4096     0     0
    r2z2-172.16.20.2/sda1      4096   0.00     1  4096  4096     0     0

As you can see from the output, each server should store 4096 partitions,
and each region should store 8192 partitions. This example used a partition
power of 13 and 3 replicas.

With write_affinity enabled it is expected to have a higher number of
partitions on disk compared to the value reported by the swift-ring-builder
dispersion command. The number of additional (handoff) partitions in region
r1 depends on your cluster size, the amount of incoming data as well as the
replication speed.

Let's use the example from above with 6 nodes in 2 regions, and
write_affinity configured to write to region r1 first. `swift-ring-builder`
reported that each node should store 4096 partitions::

    Expected partitions for region r2: 8192
    Handoffs stored across 4 nodes in region r1: 8192 / 4 = 2048
    Maximum number of partitions on each server in region r1: 2048 + 4096 = 6144

Worst case is that handoff partitions in region 1 are populated with new
object replicas faster than replication is able to move them to region 2. In
that case you will see ~ 6144 partitions per server in region r1. Your
actual number should be lower and between 4096 and 6144 partitions
(preferably on the lower side).

Now count the number of object partitions on a given server in region 1, for
example on 172.16.10.1. Note that the pathnames might be different;
`/srv/node/` is the default mount location, and `objects` applies only to
storage policy 0 (storage policy 1 would use `objects-1` and so on)::

    find -L /srv/node/ -maxdepth 3 -type d -wholename "*objects/*" | wc -l

If this number is always on the upper end of the expected partition number
range (4096 to 6144) or increasing, you should check your replication speed
and maybe even disable write_affinity. Please refer to the next section for
how to collect metrics from Swift, and especially to
:ref:`swift-recon -r <recon-replication>` for how to check replication
stats.
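For reference, write affinity is configured in the proxy server; a minimal
sketch might look like this (the region name and node count are
illustrative)::

    [app:proxy-server]
    sorting_method = affinity
    read_affinity = r1=100
    write_affinity = r1
    write_affinity_node_count = 2 * replicas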
.. _cluster_telemetry_and_monitoring:

--------------------------------
Cluster Telemetry and Monitoring
--------------------------------

Various metrics and telemetry can be obtained from the account, container,
and object servers using the recon server middleware and the swift-recon
cli. To do so, update your account, container, or object servers' pipelines
to include recon and add the associated filter config.

.. highlight:: cfg

object-server.conf sample::

    [pipeline:main]
    pipeline = recon object-server

    [filter:recon]
    use = egg:swift#recon
    recon_cache_path = /var/cache/swift

container-server.conf sample::

    [pipeline:main]
    pipeline = recon container-server

    [filter:recon]
    use = egg:swift#recon
    recon_cache_path = /var/cache/swift

account-server.conf sample::

    [pipeline:main]
    pipeline = recon account-server

    [filter:recon]
    use = egg:swift#recon
    recon_cache_path = /var/cache/swift

.. highlight:: none

The recon_cache_path simply sets the directory where stats for a few items
will be stored. Depending on the method of deployment you may need to create
this directory manually and ensure that Swift has read/write access.

Finally, if you also wish to track asynchronous pendings on your object
servers, you will need to set up a cron job to run the swift-recon-cron
script periodically on your object servers::

    */5 * * * * swift /usr/bin/swift-recon-cron /etc/swift/object-server.conf

Once the recon middleware is enabled, a GET request for "/recon/<metric>" to
the backend object server will return a JSON-formatted response::

    fhines@ubuntu:~$ curl -i http://localhost:6030/recon/async
    HTTP/1.1 200 OK
    Content-Type: application/json
    Content-Length: 20
    Date: Tue, 18 Oct 2011 21:03:01 GMT

    {"async_pending": 0}

Note that the default port for the object server is 6200, except on a Swift
All-In-One installation, which uses 6010, 6020, 6030, and 6040.

The following metrics and telemetry are currently exposed:

========================= ========================================================================================
Request URI               Description
------------------------- ----------------------------------------------------------------------------------------
/recon/load               returns 1,5, and 15 minute load average
/recon/mem                returns /proc/meminfo
/recon/mounted            returns *ALL* currently mounted filesystems
/recon/unmounted          returns all unmounted drives if mount_check = True
/recon/diskusage          returns disk utilization for storage devices
/recon/driveaudit         returns # of drive audit errors
/recon/ringmd5            returns object/container/account ring md5sums
/recon/swiftconfmd5       returns swift.conf md5sum
/recon/quarantined        returns # of quarantined objects/accounts/containers
/recon/sockstat           returns consumable info from /proc/net/sockstat|6
/recon/devices            returns list of devices and devices dir i.e. /srv/node
/recon/async              returns count of async pending
/recon/replication        returns object replication info (for backward compatibility)
/recon/replication/<type> returns replication info for given type (account, container, object)
/recon/auditor/<type>     returns auditor stats on last reported scan for given type (account, container, object)
/recon/updater/<type>     returns last updater sweep times for given type (container, object)
/recon/expirer/object     returns time elapsed and number of objects deleted during last object expirer sweep
/recon/version            returns Swift version
/recon/time               returns node time
========================= ========================================================================================

Note that 'object_replication_last' and 'object_replication_time' in object
replication info are considered to be transitional and will be removed in
subsequent releases. Use 'replication_last' and 'replication_time' instead.

This information can also be queried via the swift-recon command line
utility::

    fhines@ubuntu:~$ swift-recon -h
    Usage:
        usage: swift-recon <server_type> [-v] [--suppress] [-a] [-r] [-u]
        [-d] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer]
        [--sockstat]

        <server_type>  account|container|object
        Defaults to object server.

        ex: swift-recon container -l --auditor

    Options:
      -h, --help            show this help message and exit
      -v, --verbose         Print verbose info
      --suppress            Suppress most connection related errors
      -a, --async           Get async stats
      -r, --replication     Get replication stats
      --auditor             Get auditor stats
      --updater             Get updater stats
      --expirer             Get expirer stats
      -u, --unmounted       Check cluster for unmounted devices
      -d, --diskusage       Get disk usage stats
      -l, --loadstats       Get cluster load average stats
      -q, --quarantined     Get cluster quarantine stats
      --md5                 Get md5sum of servers ring and compare to local copy
      --sockstat            Get cluster socket usage stats
      -T, --time            Check time synchronization
      --all                 Perform all checks. Equal to -arudlqT --md5
                            --sockstat --auditor --updater --expirer
                            --driveaudit --validate-servers
      -z ZONE, --zone=ZONE  Only query servers in specified zone
      -t SECONDS, --timeout=SECONDS
                            Time to wait for a response from a server
      --swiftdir=SWIFTDIR   Default = /etc/swift

.. _recon-replication:

For example, to obtain container replication info from all hosts in zone
"3"::

    fhines@ubuntu:~$ swift-recon container -r --zone 3
    ===============================================================================
    --> Starting reconnaissance on 1 hosts
    ===============================================================================
    [2012-04-02 02:45:48] Checking on replication
    [failure] low: 0.000, high: 0.000, avg: 0.000, reported: 1
    [success] low: 486.000, high: 486.000, avg: 486.000, reported: 1
    [replication_time] low: 20.853, high: 20.853, avg: 20.853, reported: 1
    [attempted] low: 243.000, high: 243.000, avg: 243.000, reported: 1

---------------------------
Reporting Metrics to StatsD
---------------------------

.. highlight:: cfg

If you have a StatsD_ server running, Swift may be configured to send it
real-time operational metrics. To enable this, set the following
configuration entries (see the sample configuration files)::

    log_statsd_host = localhost
    log_statsd_port = 8125
    log_statsd_default_sample_rate = 1.0
    log_statsd_sample_rate_factor = 1.0
    log_statsd_metric_prefix = [empty-string]

If `log_statsd_host` is not set, this feature is disabled. The default
values for the other settings are given above.
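Before pointing Swift at a shared StatsD server, you may want to confirm
that metrics are actually being emitted. The following is a minimal sketch
of a listener that prints raw StatsD samples; the address assumes the
``log_statsd_host``/``log_statsd_port`` values shown above:

.. code-block:: python

    import socket

    # Listen on the address configured via log_statsd_host / log_statsd_port
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('127.0.0.1', 8125))

    while True:
        data, addr = sock.recvfrom(4096)
        # Samples arrive as plain text, e.g. "object-server.GET.timing:12.5|ms"
        print(data.decode('utf-8'))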
The `log_statsd_host` can be a hostname, an IPv4 address, or an IPv6 address
(not surrounded with brackets, as this is unnecessary since the port is
specified separately). If a hostname resolves to an IPv4 address, an IPv4
socket will be used to send StatsD UDP packets, even if the hostname would
also resolve to an IPv6 address.

.. _StatsD: http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/
.. _Graphite: http://graphiteapp.org/
.. _Ganglia: http://ganglia.sourceforge.net/

The sample rate is a real number between 0 and 1 which defines the
probability of sending a sample for any given event or timing measurement.
This sample rate is sent with each sample to StatsD and is used to scale the
value back up when flushing. For example, with a sample rate of 0.5, StatsD
will multiply that counter's value by 2 when flushing the metric to an
upstream monitoring system (Graphite_, Ganglia_, etc.). Some relatively
high-frequency metrics have a default sample rate less than one.

If you want to override the default sample rate for all metrics whose
default sample rate is not specified in the Swift source, you may set
`log_statsd_default_sample_rate` to a value less than one. This is NOT
recommended (see next paragraph). A better way to reduce StatsD load is to
adjust `log_statsd_sample_rate_factor` to a value less than one. The
`log_statsd_sample_rate_factor` is multiplied to any sample rate (either the
global default or one specified by the actual metric logging call in the
Swift source) prior to handling. In other words, this one tunable can lower
the frequency of all StatsD logging by a proportional amount.

To get the best data, start with the default
`log_statsd_default_sample_rate` and `log_statsd_sample_rate_factor` values
of 1 and only lower `log_statsd_sample_rate_factor` if needed. The
`log_statsd_default_sample_rate` should not be used and remains for backward
compatibility only.

The metric prefix will be prepended to every metric sent to the StatsD
server. For example, with::

    log_statsd_metric_prefix = proxy01

the metric `proxy-server.errors` would be sent to StatsD as
`proxy01.proxy-server.errors`. This is useful for differentiating different
servers when sending statistics to a central StatsD server. If you run a
local StatsD server per node, you could configure a per-node metrics prefix
there and leave `log_statsd_metric_prefix` blank.

Note that metrics reported to StatsD are counters or timing data (which are
sent in units of milliseconds). StatsD usually expands timing data out to
min, max, avg, count, and 90th percentile per timing metric, but the details
of this behavior will depend on the configuration of your StatsD server.
Some important "gauge" metrics may still need to be collected using another
method. For example, the `object-server.async_pendings` StatsD metric counts
the generation of async_pendings in real-time, but will not tell you the
current number of async_pending container updates on disk at any point in
time.

Note also that the set of metrics collected, their names, and their
semantics are not locked down and will change over time.

Metrics for `account-auditor`:

========================== =========================================================
Metric Name                Description
-------------------------- ---------------------------------------------------------
`account-auditor.errors`   Count of audit runs (across all account databases) which
                           caught an Exception.
`account-auditor.passes`   Count of individual account databases which passed audit.
`account-auditor.failures` Count of individual account databases which failed audit.
`account-auditor.timing`   Timing data for individual account database audits.
========================== =========================================================

Metrics for `account-reaper`:

============================================== ====================================================
Metric Name                                    Description
---------------------------------------------- ----------------------------------------------------
`account-reaper.errors`                        Count of devices failing the mount check.
`account-reaper.timing`                        Timing data for each reap_account() call.
`account-reaper.return_codes.X`                Count of HTTP return codes from various operations
                                               (e.g. object listing, container deletion, etc.). The
                                               value for X is the first digit of the return code
                                               (2 for 201, 4 for 404, etc.).
`account-reaper.containers_failures`           Count of failures to delete a container.
`account-reaper.containers_deleted`            Count of containers successfully deleted.
`account-reaper.containers_remaining`          Count of containers which failed to delete with zero
                                               successes.
`account-reaper.containers_possibly_remaining` Count of containers which failed to delete with at
                                               least one success.
`account-reaper.objects_failures`              Count of failures to delete an object.
`account-reaper.objects_deleted`               Count of objects successfully deleted.
`account-reaper.objects_remaining`             Count of objects which failed to delete with zero
                                               successes.
`account-reaper.objects_possibly_remaining`    Count of objects which failed to delete with at
                                               least one success.
============================================== ====================================================

Metrics for `account-server` ("Not Found" is not considered an error and
requests which increment `errors` are not included in the timing data):

======================================== =======================================================
Metric Name                              Description
---------------------------------------- -------------------------------------------------------
`account-server.DELETE.errors.timing`    Timing data for each DELETE request resulting in an
                                         error: bad request, not mounted, missing timestamp.
`account-server.DELETE.timing`           Timing data for each DELETE request not resulting in
                                         an error.
`account-server.PUT.errors.timing`       Timing data for each PUT request resulting in an
                                         error: bad request, not mounted, conflict,
                                         recently-deleted.
`account-server.PUT.timing`              Timing data for each PUT request not resulting in an
                                         error.
`account-server.HEAD.errors.timing`      Timing data for each HEAD request resulting in an
                                         error: bad request, not mounted.
`account-server.HEAD.timing`             Timing data for each HEAD request not resulting in an
                                         error.
`account-server.GET.errors.timing`       Timing data for each GET request resulting in an
                                         error: bad request, not mounted, bad delimiter,
                                         account listing limit too high, bad accept header.
`account-server.GET.timing`              Timing data for each GET request not resulting in an
                                         error.
`account-server.REPLICATE.errors.timing` Timing data for each REPLICATE request resulting in an
                                         error: bad request, not mounted.
`account-server.REPLICATE.timing`        Timing data for each REPLICATE request not resulting
                                         in an error.
`account-server.POST.errors.timing`      Timing data for each POST request resulting in an
                                         error: bad request, bad or missing timestamp, not
                                         mounted.
`account-server.POST.timing`             Timing data for each POST request not resulting in an
                                         error.
======================================== =======================================================

Metrics for `account-replicator`:

===================================== ====================================================
Metric Name                           Description
------------------------------------- ----------------------------------------------------
`account-replicator.diffs`            Count of syncs handled by sending differing rows.
`account-replicator.diff_caps`        Count of "diffs" operations which failed because
                                      "max_diffs" was hit.
`account-replicator.no_changes`       Count of accounts found to be in sync.
`account-replicator.hashmatches`      Count of accounts found to be in sync via hash
                                      comparison (`broker.merge_syncs` was called).
`account-replicator.rsyncs`           Count of completely missing accounts which were sent
                                      via rsync.
`account-replicator.remote_merges`    Count of syncs handled by sending entire database
                                      via rsync.
`account-replicator.attempts`         Count of database replication attempts.
`account-replicator.failures`         Count of database replication attempts which failed
                                      due to corruption (quarantined) or inability to read
                                      as well as attempts to individual nodes which
                                      failed.
`account-replicator.removes.<device>` Count of databases on <device> deleted because the
                                      delete_timestamp was greater than the put_timestamp
                                      and the database had no rows or because it was
                                      successfully sync'ed to other locations and doesn't
                                      belong here anymore.
`account-replicator.successes`        Count of replication attempts to an individual node
                                      which were successful.
`account-replicator.timing`           Timing data for each database replication attempt
                                      not resulting in a failure.
===================================== ====================================================

Metrics for `container-auditor`:

============================ ====================================================
Metric Name                  Description
---------------------------- ----------------------------------------------------
`container-auditor.errors`   Incremented when an Exception is caught in an audit
                             pass (only once per pass, max).
`container-auditor.passes`   Count of individual containers passing an audit.
`container-auditor.failures` Count of individual containers failing an audit.
`container-auditor.timing`   Timing data for each container audit.
============================ ====================================================

Metrics for `container-replicator`:

======================================= ====================================================
Metric Name                             Description
--------------------------------------- ----------------------------------------------------
`container-replicator.diffs`            Count of syncs handled by sending differing rows.
`container-replicator.diff_caps`        Count of "diffs" operations which failed because
                                        "max_diffs" was hit.
`container-replicator.no_changes`       Count of containers found to be in sync.
`container-replicator.hashmatches`      Count of containers found to be in sync via hash
                                        comparison (`broker.merge_syncs` was called).
`container-replicator.rsyncs`           Count of completely missing containers which were
                                        sent via rsync.
`container-replicator.remote_merges`    Count of syncs handled by sending entire database
                                        via rsync.
`container-replicator.attempts`         Count of database replication attempts.
`container-replicator.failures`         Count of database replication attempts which failed
                                        due to corruption (quarantined) or inability to read
                                        as well as attempts to individual nodes which
                                        failed.
`container-replicator.removes.<device>` Count of databases deleted on <device> because the
                                        delete_timestamp was greater than the put_timestamp
                                        and the database had no rows or because it was
                                        successfully sync'ed to other locations and doesn't
                                        belong here anymore.
`container-replicator.successes`        Count of replication attempts to an individual node
                                        which were successful.
`container-replicator.timing`           Timing data for each database replication attempt
                                        not resulting in a failure.
======================================= ====================================================

Metrics for `container-server` ("Not Found" is not considered an error and
requests which increment `errors` are not included in the timing data):

========================================== ====================================================
Metric Name                                Description
------------------------------------------ ----------------------------------------------------
`container-server.DELETE.errors.timing`    Timing data for DELETE request errors: bad request,
                                           not mounted, missing timestamp, conflict.
`container-server.DELETE.timing`           Timing data for each DELETE request not resulting
                                           in an error.
`container-server.PUT.errors.timing`       Timing data for PUT request errors: bad request,
                                           missing timestamp, not mounted, conflict.
`container-server.PUT.timing`              Timing data for each PUT request not resulting in
                                           an error.
`container-server.HEAD.errors.timing`      Timing data for HEAD request errors: bad request,
                                           not mounted.
`container-server.HEAD.timing`             Timing data for each HEAD request not resulting in
                                           an error.
`container-server.GET.errors.timing`       Timing data for GET request errors: bad request,
                                           not mounted, parameters not utf8, bad accept
                                           header.
`container-server.GET.timing`              Timing data for each GET request not resulting in
                                           an error.
`container-server.REPLICATE.errors.timing` Timing data for REPLICATE request errors: bad
                                           request, not mounted.
`container-server.REPLICATE.timing`        Timing data for each REPLICATE request not
                                           resulting in an error.
`container-server.POST.errors.timing`      Timing data for POST request errors: bad request,
                                           bad x-container-sync-to, not mounted.
`container-server.POST.timing`             Timing data for each POST request not resulting in
                                           an error.
========================================== ====================================================

Metrics for `container-sync`:

=============================== ====================================================
Metric Name                     Description
------------------------------- ----------------------------------------------------
`container-sync.skips`          Count of containers skipped because they don't have
                                sync'ing enabled.
`container-sync.failures`       Count of failures sync'ing individual containers.
`container-sync.syncs`          Count of individual containers sync'ed successfully.
`container-sync.deletes`        Count of container database rows sync'ed by
                                deletion.
`container-sync.deletes.timing` Timing data for each container database row
                                synchronization via deletion.
`container-sync.puts`           Count of container database rows sync'ed by Putting.
`container-sync.puts.timing`    Timing data for each container database row
                                synchronization via Putting.
=============================== ====================================================

Metrics for `container-updater`:

============================== ====================================================
Metric Name                    Description
------------------------------ ----------------------------------------------------
`container-updater.successes`  Count of containers which successfully updated their
                               account.
`container-updater.failures`   Count of containers which failed to update their
                               account.
`container-updater.no_changes` Count of containers which didn't need to update
                               their account.
`container-updater.timing`     Timing data for processing a container; only
                               includes timing for containers which needed to
                               update their accounts (i.e. "successes" and
                               "failures" but not "no_changes").
============================== ====================================================

Metrics for `object-auditor`:

============================ ====================================================
Metric Name                  Description
---------------------------- ----------------------------------------------------
`object-auditor.quarantines` Count of objects failing audit and quarantined.
`object-auditor.errors`      Count of errors encountered while auditing objects.
`object-auditor.timing`      Timing data for each object audit (does not include
                             any rate-limiting sleep time for
                             max_files_per_second, but does include rate-limiting
                             sleep time for max_bytes_per_second).
============================ ====================================================

Metrics for `object-expirer`:

======================== ====================================================
Metric Name              Description
------------------------ ----------------------------------------------------
`object-expirer.objects` Count of objects expired.
`object-expirer.errors`  Count of errors encountered while attempting to
                         expire an object.
`object-expirer.timing`  Timing data for each object expiration attempt,
                         including ones resulting in an error.
======================== ====================================================

Metrics for `object-reconstructor`:

====================================================== ======================================================
Metric Name                                            Description
------------------------------------------------------ ------------------------------------------------------
`object-reconstructor.partition.delete.count.<device>` A count of partitions on <device> which were
                                                       reconstructed and synced to another node because they
                                                       didn't belong on this node. This metric is tracked
                                                       per-device to allow for "quiescence detection" for
                                                       object reconstruction activity on each device.
`object-reconstructor.partition.delete.timing`         Timing data for partitions reconstructed and synced
                                                       to another node because they didn't belong on this
                                                       node. This metric is not tracked per device.
`object-reconstructor.partition.update.count.<device>` A count of partitions on <device> which were
                                                       reconstructed and synced to another node, but also
                                                       belong on this node. As with delete.count, this
                                                       metric is tracked per-device.
`object-reconstructor.partition.update.timing`         Timing data for partitions reconstructed which also
                                                       belong on this node. This metric is not tracked
                                                       per-device.
`object-reconstructor.suffix.hashes`                   Count of suffix directories whose hash (of filenames)
                                                       was recalculated.
`object-reconstructor.suffix.syncs`                    Count of suffix directories reconstructed with ssync.
====================================================== ======================================================

Metrics for `object-replicator`:

=================================================== ====================================================
Metric Name                                         Description
--------------------------------------------------- ----------------------------------------------------
`object-replicator.partition.delete.count.<device>` A count of partitions on <device> which were
                                                    replicated to another node because they didn't
                                                    belong on this node.
                                                    This metric is tracked per-device to allow for
                                                    "quiescence detection" for object replication
                                                    activity on each device.
`object-replicator.partition.delete.timing`         Timing data for partitions replicated to another
                                                    node because they didn't belong on this node. This
                                                    metric is not tracked per device.
`object-replicator.partition.update.count.<device>` A count of partitions on <device> which were
                                                    replicated to another node, but also belong on
                                                    this node. As with delete.count, this metric is
                                                    tracked per-device.
`object-replicator.partition.update.timing`         Timing data for partitions replicated which also
                                                    belong on this node. This metric is not tracked
                                                    per-device.
`object-replicator.suffix.hashes`                   Count of suffix directories whose hash (of
                                                    filenames) was recalculated.
`object-replicator.suffix.syncs`                    Count of suffix directories replicated with rsync.
=================================================== ====================================================

Metrics for `object-server`:

======================================= ====================================================
Metric Name                             Description
--------------------------------------- ----------------------------------------------------
`object-server.quarantines`             Count of objects (files) found bad and moved to
                                        quarantine.
`object-server.async_pendings`          Count of container updates saved as async_pendings
                                        (may result from PUT or DELETE requests).
`object-server.POST.errors.timing`      Timing data for POST request errors: bad request,
                                        missing timestamp, delete-at in past, not mounted.
`object-server.POST.timing`             Timing data for each POST request not resulting in
                                        an error.
`object-server.PUT.errors.timing`       Timing data for PUT request errors: bad request,
                                        not mounted, missing timestamp, object creation
                                        constraint violation, delete-at in past.
`object-server.PUT.timeouts`            Count of object PUTs which exceeded
                                        max_upload_time.
`object-server.PUT.timing`              Timing data for each PUT request not resulting in
                                        an error.
`object-server.PUT.<device>.timing`     Timing data per kB transferred (ms/kB) for each
                                        non-zero-byte PUT request on each device. Useful for
                                        monitoring problematic devices; higher is bad.
`object-server.GET.errors.timing`       Timing data for GET request errors: bad request,
                                        not mounted, header timestamps before the epoch,
                                        precondition failed. File errors resulting in a
                                        quarantine are not counted here.
`object-server.GET.timing`              Timing data for each GET request not resulting in
                                        an error. Includes requests which couldn't find the
                                        object (including disk errors resulting in file
                                        quarantine).
`object-server.HEAD.errors.timing`      Timing data for HEAD request errors: bad request,
                                        not mounted.
`object-server.HEAD.timing`             Timing data for each HEAD request not resulting in
                                        an error. Includes requests which couldn't find the
                                        object (including disk errors resulting in file
                                        quarantine).
`object-server.DELETE.errors.timing`    Timing data for DELETE request errors: bad request,
                                        missing timestamp, not mounted, precondition
                                        failed. Includes requests which couldn't find or
                                        match the object.
`object-server.DELETE.timing`           Timing data for each DELETE request not resulting
                                        in an error.
`object-server.REPLICATE.errors.timing` Timing data for REPLICATE request errors: bad
                                        request, not mounted.
`object-server.REPLICATE.timing`        Timing data for each REPLICATE request not
                                        resulting in an error.
======================================= ====================================================

Metrics for `object-updater`:

============================ ====================================================
Metric Name                  Description
---------------------------- ----------------------------------------------------
`object-updater.errors`      Count of drives not mounted or async_pending files
                             with an unexpected name.
`object-updater.timing`      Timing data for object sweeps to flush async_pending
                             container updates. Does not include object sweeps
                             which did not find an existing async_pending storage
                             directory.
`object-updater.quarantines` Count of async_pending container updates which were
                             corrupted and moved to quarantine.
`object-updater.successes`   Count of successful container updates.
`object-updater.failures`    Count of failed container updates.
`object-updater.unlinks`     Count of async_pending files unlinked. An
                             async_pending file is unlinked either when it is
                             successfully processed or when the replicator sees
                             that there is a newer async_pending file for the
                             same object.
============================ ====================================================

Metrics for `proxy-server` (in the table, `<type>` is the proxy-server
controller responsible for the request and will be one of "account",
"container", or "object"):

======================================== ====================================================
Metric Name                              Description
---------------------------------------- ----------------------------------------------------
`proxy-server.errors`                    Count of errors encountered while serving requests
                                         before the controller type is determined. Includes
                                         invalid Content-Length, errors finding the internal
                                         controller to handle the request, invalid utf8, and
                                         bad URLs.
`proxy-server.<type>.handoff_count`      Count of node hand-offs; only tracked if
                                         log_handoffs is set in the proxy-server config.
`proxy-server.<type>.handoff_all_count`  Count of times *only* hand-off locations were
                                         utilized; only tracked if log_handoffs is set in
                                         the proxy-server config.
`proxy-server.<type>.client_timeouts`    Count of client timeouts (client did not read
                                         within `client_timeout` seconds during a GET or did
                                         not supply data within `client_timeout` seconds
                                         during a PUT).
`proxy-server.<type>.client_disconnects` Count of detected client disconnects during PUT
                                         operations (does NOT include caught Exceptions in
                                         the proxy-server which caused a client disconnect).
======================================== ====================================================

Metrics for `proxy-logging` middleware (in the table, `<type>` is either the
proxy-server controller responsible for the request: "account", "container",
"object", or the string "SOS" if the request came from the `Swift Origin
Server`_ middleware. The `<verb>` portion will be one of "GET", "HEAD",
"POST", "PUT", "DELETE", "COPY", "OPTIONS", or "BAD_METHOD". The list of
valid HTTP methods is configurable via the `log_statsd_valid_http_methods`
config variable and the default setting yields the above behavior):

.. _Swift Origin Server: https://github.com/dpgoetz/sos

==================================================== ============================================
Metric Name                                          Description
---------------------------------------------------- --------------------------------------------
`proxy-server.<type>.<verb>.<status>.timing`         Timing data for requests, start to finish.
                                                     The <status> portion is the numeric HTTP
                                                     status code for the request (e.g. "200" or
                                                     "404").
`proxy-server.<type>.GET.<status>.first-byte.timing` Timing data up to completion of sending the
                                                     response headers (only for GET requests).
                                                     <type> and <status> are as for the main
                                                     timing metric.
`proxy-server.<type>.<verb>.<status>.xfer`           This counter metric is the sum of bytes
                                                     transferred in (from clients) and out (to
                                                     clients) for requests. The <type>, <verb>,
                                                     and <status> portions of the metric are
                                                     just like the main timing metric.
==================================================== ============================================

The `proxy-logging` middleware also groups these metrics by policy (the
`<policy-index>` portion represents a policy index):

========================================================================== =====================================
Metric Name                                                                Description
-------------------------------------------------------------------------- -------------------------------------
`proxy-server.object.policy.<policy-index>.<verb>.<status>.timing`         Timing data for requests, aggregated
                                                                           by policy index.
`proxy-server.object.policy.<policy-index>.GET.<status>.first-byte.timing` Timing data up to completion of
                                                                           sending the response headers,
                                                                           aggregated by policy index.
`proxy-server.object.policy.<policy-index>.<verb>.<status>.xfer`           Sum of bytes transferred in and out,
                                                                           aggregated by policy index.
========================================================================== =====================================

Metrics for `tempauth` middleware (in the table, `<reseller_prefix>`
represents the actual configured reseller_prefix or "`NONE`" if the
reseller_prefix is the empty string):

========================================= ====================================================
Metric Name                               Description
----------------------------------------- ----------------------------------------------------
`tempauth.<reseller_prefix>.unauthorized` Count of regular requests which were denied with
                                          HTTPUnauthorized.
`tempauth.<reseller_prefix>.forbidden`    Count of regular requests which were denied with
                                          HTTPForbidden.
`tempauth.<reseller_prefix>.token_denied` Count of token requests which were denied.
`tempauth.<reseller_prefix>.errors`       Count of errors.
========================================= ====================================================

------------------------
Debugging Tips and Tools
------------------------

When a request is made to Swift, it is given a unique transaction id. This
id should be in every log line that has to do with that request. This can be
useful when looking at all the services that are hit by a single request.

If you need to know where a specific account, container or object is in the
cluster, `swift-get-nodes` will show the location where each replica should
be.

If you are looking at an object on the server and need more info,
`swift-object-info` will display the account, container, replica locations
and metadata of the object.

If you are looking at a container on the server and need more info,
`swift-container-info` will display all the information such as the account,
container, replica locations and metadata of the container.

If you are looking at an account on the server and need more info,
`swift-account-info` will display the account, replica locations and
metadata of the account.

If you want to audit the data for an account, `swift-account-audit` can be
used to crawl the account, checking that all containers and objects can be
found.

-----------------
Managing Services
-----------------

Swift services are generally managed with ``swift-init``.
The general usage is ``swift-init <service> <command>``, where ``<service>``
is the Swift service to manage (for example object, container, account,
proxy) and ``<command>`` is one of:

========== ===============================================
Command    Description
---------- -----------------------------------------------
start      Start the service
stop       Stop the service
restart    Restart the service
shutdown   Attempt to gracefully shutdown the service
reload     Attempt to gracefully restart the service
========== ===============================================

A graceful shutdown or reload will finish any current requests before
completely stopping the old service. There is also a special case of
``swift-init all <command>``, which will run the command for all swift
services.

In cases where there are multiple configs for a service, a specific config
can be managed with ``swift-init <service>.<config> <command>``. For
example, when a separate replication network is used, there might be
``/etc/swift/object-server/public.conf`` for the object server and
``/etc/swift/object-server/replication.conf`` for the replication services.
In this case, the replication services could be restarted with
``swift-init object-server.replication restart``.

--------------
Object Auditor
--------------

On system failures, the XFS file system can sometimes truncate files it's
trying to write and produce zero-byte files. The object-auditor will catch
these problems but in the case of a system crash it would be advisable to
run an extra, less rate limited sweep to check for these specific files. You
can run this command as follows::

    swift-object-auditor /path/to/object-server/config/file.conf once -z 1000

``-z`` means to only check for zero-byte files, at 1000 files per second.

At times it is useful to be able to run the object auditor on a specific
device or set of devices. You can run the object-auditor as follows::

    swift-object-auditor /path/to/object-server/config/file.conf once --devices=sda,sdb

This will run the object auditor on only the sda and sdb devices. This param
accepts a comma-separated list of values.

-----------------
Object Replicator
-----------------

At times it is useful to be able to run the object replicator on a specific
device or partition. You can run the object-replicator as follows::

    swift-object-replicator /path/to/object-server/config/file.conf once --devices=sda,sdb

This will run the object replicator on only the sda and sdb devices. You can
likewise run that command with ``--partitions``. Both params accept a
comma-separated list of values. If both are specified they will be ANDed
together. These can only be run in "once" mode.

-------------
Swift Orphans
-------------

Swift Orphans are processes left over after a reload of a Swift server. For
example, when upgrading a proxy server you would probably finish with a
``swift-init proxy-server reload`` or ``/etc/init.d/swift-proxy reload``.
This kills the parent proxy server process and leaves the child processes
running to finish processing whatever requests they might be handling at the
time. It then starts up a new parent proxy server process and its children
to handle new incoming requests. This allows zero-downtime upgrades with no
impact to existing requests.

The orphaned child processes may take a while to exit, depending on the
length of the requests they were handling. However, sometimes an old process
can be hung up due to some bug or hardware issue. In these cases, these
orphaned processes will hang around forever. ``swift-orphans`` can be used
to find and kill these orphans.
``swift-orphans`` with no arguments will just list the orphans it finds that
were started more than 24 hours ago. You shouldn't really check for orphans
until 24 hours after you perform a reload, as some requests can take a long
time to process. ``swift-orphans -k TERM`` will send the SIGTERM signal to
the orphaned processes, or you can ``kill -TERM`` the pids yourself if you
prefer.

You can run ``swift-orphans --help`` for more options.

------------
Swift Oldies
------------

Swift Oldies are processes that have just been around for a long time.
There's nothing necessarily wrong with this, but it might indicate a hung
process if you regularly upgrade and reload/restart services. You might have
so many servers that you don't notice when a reload/restart fails;
``swift-oldies`` can help with this.

For example, if you upgraded and reloaded/restarted everything 2 days ago,
and you've already cleaned up any orphans with ``swift-orphans``, you can
run ``swift-oldies -a 48`` to find any Swift processes still around that
were started more than 2 days ago and then investigate them accordingly.

-------------------
Custom Log Handlers
-------------------

Swift supports setting up custom log handlers for services by specifying a
comma-separated list of functions to invoke when logging is set up. It does
so via the ``log_custom_handlers`` configuration option. Logger hooks
invoked are passed the same arguments as Swift's get_logger function (as
well as the getLogger and LogAdapter object):

============== ===================================================
Name           Description
-------------- ---------------------------------------------------
conf           Configuration dict to read settings from
name           Name of the logger received
log_to_console (optional) Write log messages to console on stderr
log_route      Route for the logging received
fmt            Override log format received
logger         The logging.getLogger object
adapted_logger The LogAdapter object
============== ===================================================

A basic example that sets up a custom logger might look like the following:

.. code-block:: python

    def my_logger(conf, name, log_to_console, log_route, fmt,
                  logger, adapted_logger):
        my_conf_opt = conf.get('some_custom_setting')
        my_handler = third_party_logstore_handler(my_conf_opt)
        logger.addHandler(my_handler)

See :ref:`custom-logger-hooks-label` for sample use cases.

------------------------
Securing OpenStack Swift
------------------------

Please refer to the security guide at
https://docs.openstack.org/security-guide and in particular the
`Object Storage <https://docs.openstack.org/security-guide/object-storage.html>`__
section.
swift-2.17.0/doc/source/overview_auth.rst0000666000175100017510000003744613236061617020464 0ustar zuulzuul00000000000000===============
The Auth System
===============

--------
Overview
--------

Swift supports a number of auth systems that share the following common
characteristics:

* The authentication/authorization part can be an external system or a
  subsystem run within Swift as WSGI middleware
* The user of Swift passes in an auth token with each request
* Swift validates each token with the external auth system or auth
  subsystem and caches the result
* The token does not change from request to request, but does expire

The token can be passed into Swift using the X-Auth-Token or the
X-Storage-Token header. Both have the same format: just a simple string
representing the token.
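For example, a request carrying a previously issued token might look like
the following (the token value, endpoint and account are illustrative)::

    curl -H "X-Auth-Token: AUTH_tk0123456789abcdef0123456789abcdef" \
        http://swiftproxy:8080/v1/AUTH_test/container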
Some auth systems use UUID tokens, some an MD5 hash of something unique,
some use "something else" but the salient point is that the token is a
string which can be sent as-is back to the auth system for validation.

Swift will make calls to the auth system, giving the auth token to be
validated. For a valid token, the auth system responds with an overall
expiration time in seconds from now. To avoid the overhead in validating the
same token over and over again, Swift will cache the token for a
configurable time, but no longer than the expiration time.

The Swift project includes two auth systems:

- :ref:`temp_auth`
- :ref:`keystone_auth`

It is also possible to write your own auth system as described in
:ref:`extending_auth`.

.. _temp_auth:

--------
TempAuth
--------

TempAuth is used primarily in Swift's functional test environment and can be
used in other test environments (such as :doc:`development_saio`). It is not
recommended to use TempAuth in a production system. However, TempAuth is
fully functional and can be used as a model to develop your own auth system.

TempAuth has the concept of admin and non-admin users within an account.
Admin users can do anything within the account. Non-admin users can only
perform read operations. However, some privileged metadata such as
X-Container-Sync-Key is not accessible to non-admin users.

Users with the special group ``.reseller_admin`` can operate on any account.
For an example usage please see :mod:`swift.common.middleware.tempauth`. If
a request is coming from a reseller the auth system sets the request environ
reseller_request to True. This can be used by other middlewares.

Other users may be granted the ability to perform operations on an account
or container via ACLs. TempAuth supports two types of ACL:

- Per container ACLs based on the container's ``X-Container-Read`` and
  ``X-Container-Write`` metadata. See :ref:`container_acls` for more
  information.

- Per account ACLs based on the account's ``X-Account-Access-Control``
  metadata. For more information see :ref:`account_acls`.

TempAuth will now allow OPTIONS requests to go through without a token.

The TempAuth middleware is responsible for creating its own tokens. A user
makes a request containing their username and password and TempAuth responds
with a token. This token is then used to perform subsequent requests on the
user's account, containers and objects.

.. _keystone_auth:

-------------
Keystone Auth
-------------

Swift is able to authenticate against OpenStack Keystone_. In this
environment, Keystone is responsible for creating and validating tokens. The
:ref:`keystoneauth` middleware is responsible for implementing the auth
system within Swift as described here.

The :ref:`keystoneauth` middleware supports per container based ACLs on the
container's ``X-Container-Read`` and ``X-Container-Write`` metadata. For
more information see :ref:`container_acls`.

The account-level ACL is not supported by Keystone auth.

In order to use the ``keystoneauth`` middleware the ``auth_token``
middleware from KeystoneMiddleware_ will need to be configured.

The ``authtoken`` middleware performs the authentication token validation
and retrieves actual user authentication information. It can be found in the
KeystoneMiddleware_ distribution.

The :ref:`keystoneauth` middleware performs authorization and mapping the
Keystone roles to Swift's ACLs.

.. _KeystoneMiddleware: https://docs.openstack.org/keystonemiddleware/latest/
.. _Keystone: https://docs.openstack.org/keystone/latest/

.. _configuring_keystone_auth:
.. _keystone_auth:

-------------
Keystone Auth
-------------

Swift is able to authenticate against OpenStack Keystone_. In this
environment, Keystone is responsible for creating and validating tokens. The
:ref:`keystoneauth` middleware is responsible for implementing the auth
system within Swift as described here.

The :ref:`keystoneauth` middleware supports per container based ACLs on the
container's ``X-Container-Read`` and ``X-Container-Write`` metadata. For
more information see :ref:`container_acls`.

The account-level ACL is not supported by Keystone auth.

In order to use the ``keystoneauth`` middleware the ``auth_token``
middleware from KeystoneMiddleware_ will need to be configured. The
``authtoken`` middleware performs the authentication token validation and
retrieves actual user authentication information. It can be found in the
KeystoneMiddleware_ distribution. The :ref:`keystoneauth` middleware
performs authorization and maps Keystone roles to Swift's ACLs.

.. _KeystoneMiddleware: https://docs.openstack.org/keystonemiddleware/latest/
.. _Keystone: https://docs.openstack.org/keystone/latest/

.. _configuring_keystone_auth:

Configuring Swift to use Keystone
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Configuring Swift to use Keystone_ is relatively straightforward. The first
step is to ensure that you have the ``auth_token`` middleware installed. It
can either be dropped in your python path or installed via the
KeystoneMiddleware_ package.

You first need to make sure you have a service endpoint of type
``object-store`` in Keystone pointing to your Swift proxy. For example,
having this in your ``/etc/keystone/default_catalog.templates``::

  catalog.RegionOne.object_store.name = Swift Service
  catalog.RegionOne.object_store.publicURL = http://swiftproxy:8080/v1/AUTH_$(tenant_id)s
  catalog.RegionOne.object_store.adminURL = http://swiftproxy:8080/
  catalog.RegionOne.object_store.internalURL = http://swiftproxy:8080/v1/AUTH_$(tenant_id)s

On your Swift proxy server you will want to adjust your main pipeline and
add auth_token and keystoneauth in your ``/etc/swift/proxy-server.conf``
like this::

  [pipeline:main]
  pipeline = [....] authtoken keystoneauth proxy-logging proxy-server

add the configuration for the authtoken middleware::

  [filter:authtoken]
  paste.filter_factory = keystonemiddleware.auth_token:filter_factory
  auth_uri = http://keystonehost:5000/
  auth_url = http://keystonehost:35357/
  auth_plugin = password
  project_domain_id = default
  user_domain_id = default
  project_name = service
  username = swift
  password = password
  cache = swift.cache
  include_service_catalog = False
  delay_auth_decision = True

The actual values for these variables will need to be set depending on your
situation, but in short:

* ``auth_uri`` should point to a Keystone service from which users may
  retrieve tokens. This value is used in the `WWW-Authenticate` header that
  auth_token sends with any denial response.

* ``auth_url`` points to the Keystone Admin service. This information is
  used by the middleware to actually query Keystone about the validity of
  the authentication tokens. It is not necessary to append any Keystone API
  version number to this URI.

* The auth credentials (``project_domain_id``, ``user_domain_id``,
  ``username``, ``project_name``, ``password``) will be used to retrieve an
  admin token. That token will be used to authorize user tokens behind the
  scenes. These credentials must match the Keystone credentials for the
  Swift service. The example values shown here assume a user named 'swift'
  with admin role on a project named 'service', both being in the Keystone
  domain with id 'default'. Refer to the KeystoneMiddleware_ documentation
  for other examples.

* ``cache`` is set to ``swift.cache``. This means that the middleware will
  get the Swift memcache from the request environment.

* ``include_service_catalog`` defaults to ``True`` if not set. This means
  that when validating a token, the service catalog is retrieved and stored
  in the ``X-Service-Catalog`` header. Since Swift does not use the
  ``X-Service-Catalog`` header, there is no point in getting the service
  catalog. We recommend you set ``include_service_catalog`` to ``False``.

.. note::

    The authtoken config variable ``delay_auth_decision`` must be set to
    ``True``. The default is ``False``, but that breaks public access,
    :ref:`staticweb`, :ref:`formpost`, :ref:`tempurl`, and authenticated
    capabilities requests (using :ref:`discoverability`).

and you can finally add the keystoneauth configuration.
Here is a simple configuration::

  [filter:keystoneauth]
  use = egg:swift#keystoneauth
  operator_roles = admin, swiftoperator

Use an appropriate list of roles in operator_roles. For example, in some
systems, the role ``_member_`` or ``Member`` is used to indicate that the
user is allowed to operate on project resources.

OpenStack Service Using Composite Tokens
----------------------------------------

Some OpenStack services such as Cinder and Glance may use a "service
account". In this mode, you configure a separate account where the service
stores project data that it manages. This account is not used directly by
the end-user. Instead, all access is done through the service.

To access the "service" account, the service must present two tokens: one
from the end-user and another from its own service user. Only when both
tokens are present can the account be accessed. This section describes how
to set the configuration options to correctly control access to both the
"normal" and "service" accounts.

In this example, end users use the ``AUTH_`` prefix in account names,
whereas services use the ``SERVICE_`` prefix::

  [filter:keystoneauth]
  use = egg:swift#keystoneauth
  reseller_prefix = AUTH, SERVICE
  operator_roles = admin, swiftoperator
  SERVICE_service_roles = service

The actual values for these variables will need to be set depending on your
situation as follows:

* The first item in the reseller_prefix list must match Keystone's endpoint
  (see ``/etc/keystone/default_catalog.templates`` above). Normally this is
  ``AUTH``.

* The second item in the reseller_prefix list is the prefix used by the
  OpenStack service(s). You must configure this value (``SERVICE`` in the
  example) with whatever the other OpenStack service(s) use.

* Set the operator_roles option to contain a role or roles that end-users
  have on the projects they use.

* Set the SERVICE_service_roles value to a role or roles that only the
  OpenStack service user has. Do not use a role that is assigned to
  "normal" end users. In this example, the role ``service`` is used. The
  service user is granted this role to a *single* project only. You do not
  need to make the service user a member of every project.

This configuration works as follows:

* The end-user presents a user token to an OpenStack service. The service
  then makes a Swift request to the account with the ``SERVICE`` prefix.

* The service forwards the original user token with the request. It also
  adds its own service token.

* Swift validates both tokens. When validated, the user token gives the
  ``admin`` or ``swiftoperator`` role(s). When validated, the service token
  gives the ``service`` role.

* Swift interprets the above configuration as follows:

  * Did the user token provide one of the roles listed in operator_roles?

  * Did the service token have the ``service`` role as described by the
    ``SERVICE_service_roles`` option?

* If both conditions are met, the request is granted. Otherwise, Swift
  rejects the request.

In the above example, all services share the same account. You can separate
each service into its own account. For example, the following provides a
dedicated account for each of the Glance and Cinder services.
In addition, you must assign the ``glance_service`` and ``cinder_service``
roles to the appropriate service users::

  [filter:keystoneauth]
  use = egg:swift#keystoneauth
  reseller_prefix = AUTH, IMAGE, VOLUME
  operator_roles = admin, swiftoperator
  IMAGE_service_roles = glance_service
  VOLUME_service_roles = cinder_service

Access control using keystoneauth
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

By default the only users able to perform operations (e.g. create a
container) on an account are those having a Keystone role for the
corresponding Keystone project that matches one of the roles specified in
the ``operator_roles`` option.

Users who have one of the ``operator_roles`` will be able to set container
ACLs to grant other users permission to read and/or write objects in
specific containers, using ``X-Container-Read`` and ``X-Container-Write``
headers respectively. In addition to the ACL formats described in
:mod:`swift.common.middleware.acl`, keystoneauth supports ACLs using the
format::

    <other_project_id>:<other_user_id>

where ``other_project_id`` is the UUID of a Keystone project and
``other_user_id`` is the UUID of a Keystone user. This will allow the other
user to access a container provided their token is scoped to the other
project. Both ``other_project_id`` and ``other_user_id`` may be replaced
with the wildcard character ``*`` which will match any project or user
respectively.

Be sure to use Keystone UUIDs rather than names in container ACLs.

.. note::

    For backwards compatibility, keystoneauth will by default grant
    container ACLs expressed as ``other_project_name:other_user_name``
    (i.e. using Keystone names rather than UUIDs) in the special case when
    both the other project and the other user are in Keystone's default
    domain and the project being accessed is also in the default domain.

    For further information see :ref:`keystoneauth`

Users with the Keystone role defined in ``reseller_admin_role``
(``ResellerAdmin`` by default) can operate on any account. The auth system
sets the request environ ``reseller_request`` to True if a request is
coming from a user with this role. This can be used by other middlewares.
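As an illustration of the container ACL format described above, the
following sketch grants another Keystone user read access to a container;
the endpoint, token and UUIDs are placeholders, not fixed values:

.. code-block:: python

    import urllib.request

    # POST to the container to set a cross-project read ACL; replace the
    # URL, token and UUIDs with real values for your deployment.
    req = urllib.request.Request(
        'http://127.0.0.1:8080/v1/AUTH_<project_id>/mycontainer',
        method='POST')
    req.add_header('X-Auth-Token', '<token>')
    req.add_header('X-Container-Read',
                   '<other_project_id>:<other_user_id>')
    urllib.request.urlopen(req)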
Troubleshooting tips for keystoneauth deployment
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some common mistakes can result in API requests failing when first
deploying keystone with Swift:

* Incorrect configuration of the Swift endpoint in the Keystone service.

  By default, keystoneauth expects the account part of a URL to have the
  form ``AUTH_<uuid>``. Sometimes the ``AUTH_`` prefix is missed when
  configuring Swift endpoints in Keystone, as described in the OpenStack
  Install Guide. This is easily diagnosed by inspecting the proxy-server
  log file for a failed request URL and checking that the URL includes the
  ``AUTH_`` prefix (or whatever reseller prefix may have been configured
  for keystoneauth)::

    GOOD:  proxy-server: 127.0.0.1 127.0.0.1 07/Sep/2016/16/06/58 HEAD /v1/AUTH_cfb8d9d45212408b90bc0776117aec9e HTTP/1.0 204 ...

    BAD:   proxy-server: 127.0.0.1 127.0.0.1 07/Sep/2016/16/07/35 HEAD /v1/cfb8d9d45212408b90bc0776117aec9e HTTP/1.0 403 ...

* Incorrect configuration of the ``authtoken`` middleware options in the
  Swift proxy server.

  The ``authtoken`` middleware communicates with the Keystone service to
  validate tokens that are presented with client requests. To do this
  ``authtoken`` must authenticate itself with Keystone using the
  credentials configured in the ``[filter:authtoken]`` section of
  ``/etc/swift/proxy-server.conf``. Errors in these credentials can result
  in ``authtoken`` failing to validate tokens and may be revealed in the
  proxy server logs by a message such as::

    proxy-server: Identity server rejected authorization

  .. note::

      More detailed log messaging may be seen by setting the ``authtoken``
      option ``log_level = debug``.

  The ``authtoken`` configuration options may be checked by attempting to
  use them to communicate directly with Keystone using an ``openstack``
  command line. For example, given the ``authtoken`` configuration sample
  shown in :ref:`configuring_keystone_auth`, the following command should
  return a service catalog::

    openstack --os-identity-api-version=3 --os-auth-url=http://keystonehost:5000/ \
        --os-username=swift --os-user-domain-id=default \
        --os-project-name=service --os-project-domain-id=default \
        --os-password=password catalog show object-store

  If this ``openstack`` command fails then it is likely that there is a
  problem with the ``authtoken`` configuration.

.. _extending_auth:

--------------
Extending Auth
--------------

TempAuth is written as WSGI middleware, so implementing your own auth is as
easy as writing new WSGI middleware and plugging it into the proxy server.
The Swauth project is an example of an additional auth service.

See :doc:`development_auth` for detailed information on extending the auth
system.
swift-2.17.0/doc/source/overview_policies.rst0000666000175100017510000010241313236061617021325 0ustar zuulzuul00000000000000================
Storage Policies
================

Storage Policies allow for some level of segmenting the cluster for various
purposes through the creation of multiple object rings. The Storage Policies
feature is implemented throughout the entire code base, so it is an
important concept in understanding Swift architecture.

As described in :doc:`overview_ring`, Swift uses modified hashing rings to
determine where data should reside in the cluster. There is a separate ring
for account databases and for container databases, and there is one object
ring per storage policy. Each object ring behaves exactly the same way and
is maintained in the same manner, but with policies, different devices can
belong to different rings. By supporting multiple object rings, Swift
allows the application and/or deployer to essentially segregate the object
storage within a single cluster.

There are many reasons why this might be desirable:

* Different levels of durability: If a provider wants to offer, for
  example, 2x replication and 3x replication but doesn't want to maintain 2
  separate clusters, they would set up a 2x and a 3x replication policy and
  assign the nodes to their respective rings. Furthermore, if a provider
  wanted to offer a cold storage tier, they could create an erasure coded
  policy.

* Performance: Just as SSDs can be used as the exclusive members of an
  account or database ring, an SSD-only object ring can be created as well
  and used to implement a low-latency/high performance policy.

* Collecting nodes into groups: Different object rings may have different
  physical servers so that objects in specific storage policies are always
  placed in a particular data center or geography.

* Different Storage implementations: Another example would be to collect
  together a set of nodes that use a different Diskfile (e.g., Kinetic,
  GlusterFS) and use a policy to direct traffic just to those nodes.

* Different read and write affinity settings: proxy-servers can be
  configured to use different read and write affinity options for each
  policy.
  See :ref:`proxy_server_per_policy_config` for more details.

.. note::

    Today, Swift supports two different policy types: Replication and
    Erasure Code. See :doc:`overview_erasure_code` for details.

    Also note that Diskfile refers to backend object storage plug-in
    architecture. See :doc:`development_ondisk_backends` for details.

-----------------------
Containers and Policies
-----------------------

Policies are implemented at the container level. There are many advantages
to this approach, not the least of which is how easy it makes life on
applications that want to take advantage of them. It also ensures that
Storage Policies remain a core feature of Swift independent of the auth
implementation. Policies were not implemented at the account/auth layer
because it would require changes to all auth systems in use by Swift
deployers.

Each container has a special immutable metadata element called the storage
policy index. Note that internally, Swift relies on policy indexes and not
policy names. Policy names exist for human readability and translation is
managed in the proxy. When a container is created, one optional header is
supported to specify the policy name. If no name is specified, the default
policy is used (and if no other policies are defined, Policy-0 is
considered the default). We will be covering the difference between default
and Policy-0 in the next section.

Policies are assigned when a container is created. Once a container has
been assigned a policy, it cannot be changed (unless it is
deleted/recreated). The implications on data placement/movement for large
datasets would make this a task best left for applications to perform.
Therefore, if a container has an existing policy of, for example, 3x
replication, and one wanted to migrate that data to an Erasure Code policy,
the application would create another container specifying the other policy
parameters and then simply move the data from one container to the other.

Policies apply on a per container basis, allowing for minimal application
awareness; once a container has been created with a specific policy, all
objects stored in it will be done so in accordance with that policy. If a
container with a specific name is deleted (which requires that the
container be empty), a new container may be created with the same name
without any restriction on storage policy inherited from the deleted
container that previously shared the same name.

Containers have a many-to-one relationship with policies, meaning that any
number of containers can share one policy. There is no limit to how many
containers can use a specific policy.

The notion of associating a ring with a container introduces an interesting
scenario: What would happen if 2 containers of the same name were created
with different Storage Policies on either side of a network outage at the
same time? Furthermore, what would happen if objects were placed in those
containers, a whole bunch of them, and then later the network outage was
restored? Without special care this would be a big problem, as an
application could end up using the wrong ring to try and find an object.
Luckily there is a solution for this problem: a daemon known as the
Container Reconciler works tirelessly to identify and rectify this
potential scenario.
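Incidentally, a client can always discover which policy a container was
assigned by reading the ``X-Storage-Policy`` header returned on a container
HEAD request. A minimal sketch, where the endpoint, container name and
token are placeholders for illustration:

.. code-block:: python

    import urllib.request

    # HEAD the container and read back the policy it was created with.
    req = urllib.request.Request(
        'http://127.0.0.1:8080/v1/AUTH_test/myCont0', method='HEAD')
    req.add_header('X-Auth-Token', '<token>')
    with urllib.request.urlopen(req) as resp:
        print(resp.headers['X-Storage-Policy'])   # e.g. gold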
--------------------
Container Reconciler
--------------------

Because atomicity of container creation cannot be enforced in a
distributed, eventually consistent system, object writes into the wrong
storage policy must eventually be merged into the correct storage policy by
an asynchronous daemon. Recovery from object writes during a network
partition, which can result in split-brain containers created with
different storage policies, is handled by the `swift-container-reconciler`
daemon.

The container reconciler works off a queue similar to the object-expirer.
The queue is populated during container-replication. It is never considered
incorrect to enqueue an object to be evaluated by the container-reconciler,
because if there is nothing wrong with the location of the object the
reconciler will simply dequeue it. The container-reconciler queue is an
indexed log of the real location of an object for which a discrepancy in
the storage policy of the container was discovered.

To determine the correct storage policy of a container, it is necessary to
update the status_changed_at field in the container_stat table when a
container changes status from deleted to re-created. This transaction log
allows the container-replicator to update the correct storage policy both
when replicating a container and handling REPLICATE requests.

Because each object write is a separate distributed transaction, it is not
possible to determine the correctness of the storage policy for each object
write with respect to the entire transaction log at a given container
database. As such, container databases will always record the object write
regardless of the storage policy on a per object row basis. Object byte and
count stats are tracked per storage policy in each container and reconciled
using normal object row merge semantics.

The object rows are ensured to be fully durable during replication using
the normal container replication. After the container replicator pushes its
object rows to available primary nodes, any misplaced object rows are bulk
loaded into containers based off the object timestamp under the
``.misplaced_objects`` system account. The rows are initially written to a
handoff container on the local node, and at the end of the replication pass
the ``.misplaced_objects`` containers are replicated to the correct primary
nodes.

The container-reconciler processes the ``.misplaced_objects`` containers in
descending order and reaps its containers as the objects represented by the
rows are successfully reconciled. The container-reconciler will always
validate the correct storage policy for enqueued objects using direct
container HEAD requests which are accelerated via caching.

Because failure of individual storage nodes in aggregate is assumed to be
common at scale, the container-reconciler will make forward progress with a
simple quorum majority. During a combination of failures and rebalances it
is possible that a quorum could provide an incomplete record of the correct
storage policy - so an object write may have to be applied more than once.
Because storage nodes and container databases will not process writes with
an ``X-Timestamp`` less than or equal to their existing record, when object
writes are re-applied their timestamp is slightly incremented. In order for
this increment to be applied transparently to the client, a second vector
of time has been added to Swift for internal use. See
:class:`~swift.common.utils.Timestamp`.
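For a quick feel of this two-vector timestamp, here is a sketch assuming a
Swift checkout on the Python path; the numeric values are illustrative
only:

.. code-block:: python

    from swift.common.utils import Timestamp

    # The second vector is the "offset"; it sorts after the same
    # wall-clock time without moving the client-visible timestamp forward.
    t = Timestamp(1234567890.12345, offset=1)
    print(t.normal)    # 1234567890.12345
    print(t.internal)  # 1234567890.12345_0000000000000001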
As the reconciler applies object writes to the correct storage policy, it
cleans up writes which no longer apply to the incorrect storage policy and
removes the rows from the ``.misplaced_objects`` containers. After all rows
have been successfully processed it sleeps and will periodically check for
newly enqueued rows to be discovered during container replication.

.. _default-policy:

-------------------------
Default versus 'Policy-0'
-------------------------

Storage Policies is a versatile feature intended to support both new and
pre-existing clusters with the same level of flexibility. For that reason,
we introduce the ``Policy-0`` concept, which is not the same as the
"default" policy.

As you will see when we begin to configure policies, each policy has a
single name, an arbitrary number of aliases (human friendly, configurable),
and an index (or simply policy number). Swift reserves index 0 to map to
the object ring that's present in all installations (e.g.,
``/etc/swift/object.ring.gz``). You can name this policy anything you like,
and if no policies are defined it will report itself as ``Policy-0``;
however, you cannot change the index, as there must always be a policy with
index 0.

Another important concept is the default policy, which can be any policy in
the cluster. The default policy is the policy that is automatically chosen
when a container creation request is sent without a storage policy being
specified. :ref:`configure-policy` describes how to set the default policy.
The difference from ``Policy-0`` is subtle but extremely important.
``Policy-0`` is what is used by Swift when accessing pre-storage-policy
containers which won't have a policy - in this case we would not use the
default, as it might not have the same policy as legacy containers. When no
other policies are defined, Swift will always choose ``Policy-0`` as the
default.

In other words, default means "create using this policy if nothing else is
specified" and ``Policy-0`` means "use the legacy policy if a container
doesn't have one", which really means use ``object.ring.gz`` for lookups.

.. note::

    With the Storage Policy based code, it's not possible to create a
    container that doesn't have a policy. If nothing is provided, Swift
    will still select the default and assign it to the container. For
    containers created before Storage Policies were introduced, the legacy
    Policy-0 will be used.

.. _deprecate-policy:

--------------------
Deprecating Policies
--------------------

There will be times when a policy is no longer desired; however, simply
deleting the policy and associated rings would be problematic for existing
data. In order to ensure that resources are not orphaned in the cluster
(left on disk but no longer accessible) and to provide proper messaging to
applications when a policy needs to be retired, the notion of deprecation
is used. :ref:`configure-policy` describes how to deprecate a policy.

Swift's behavior with deprecated policies is as follows:

* The deprecated policy will not appear in /info
* PUT/GET/DELETE/POST/HEAD are still allowed on the pre-existing containers
  created with a deprecated policy
* Clients will get a ``400 Bad Request`` error when trying to create a new
  container using the deprecated policy
* Clients still have access to policy statistics via HEAD on pre-existing
  containers

.. note::

    A policy cannot be both the default and deprecated. If you deprecate
    the default policy, you must specify a new default.

You can also use the deprecated feature to roll out new policies.
If you want to test a new storage policy before making it generally
available, you could deprecate the policy when you initially roll the new
configuration and rings out to all nodes. Being deprecated will prevent the
policy from being used to create new containers. To test it you will need
to create a container with that storage policy, which will require a single
proxy instance (or a set of proxy-servers which are only internally
accessible) that has been one-off configured with the new policy NOT marked
deprecated. Once the container has been created with the new storage
policy, any client authorized to use that container will be able to add and
access data stored in that container in the new storage policy. When
satisfied, you can roll out a new ``swift.conf`` which does not mark the
policy as deprecated to all nodes.

.. _configure-policy:

--------------------
Configuring Policies
--------------------

.. note::

    See :doc:`policies_saio` for a step by step guide on adding a policy to
    the SAIO setup.

It is important that the deployer have a solid understanding of the
semantics for configuring policies. Configuring a policy is a three-step
process:

#. Edit your ``/etc/swift/swift.conf`` file to define your new policy.
#. Create the corresponding policy object ring file.
#. (Optional) Create policy-specific proxy-server configuration settings.

Defining a policy
-----------------

Each policy is defined by a section in the ``/etc/swift/swift.conf`` file.
The section name must be of the form ``[storage-policy:<N>]`` where ``<N>``
is the policy index. There's no reason other than readability that policy
indexes be sequential, but the following rules are enforced:

* If a policy with index ``0`` is not declared and no other policies are
  defined, Swift will create a default policy with index ``0``.
* The policy index must be a non-negative integer.
* Policy indexes must be unique.

.. warning::

    The index of a policy should never be changed once a policy has been
    created and used. Changing a policy index may cause loss of access to
    data.

Each policy section contains the following options:

* ``name = <policy_name>`` (required)

  - The primary name of the policy.
  - Policy names are case insensitive.
  - Policy names must contain only letters, digits or a dash.
  - Policy names must be unique.
  - Policy names can be changed.
  - The name ``Policy-0`` can only be used for the policy with index ``0``.

* ``aliases = <policy_name>[, <policy_name>, ...]`` (optional)

  - A comma-separated list of alternative names for the policy.
  - The default value is an empty list (i.e. no aliases).
  - All alias names must follow the rules for the ``name`` option.
  - Aliases can be added to and removed from the list.
  - Aliases can be useful to retain support for old primary names if the
    primary name is changed.

* ``default = [true|false]`` (optional)

  - If ``true`` then this policy will be used when the client does not
    specify a policy.
  - The default value is ``false``.
  - The default policy can be changed at any time, by setting
    ``default = true`` in the desired policy section.
  - If no policy is declared as the default and no other policies are
    defined, the policy with index ``0`` is set as the default.
  - Otherwise, exactly one policy must be declared default.
  - Deprecated policies cannot be declared the default.
  - See :ref:`default-policy` for more information.

* ``deprecated = [true|false]`` (optional)

  - If ``true`` then new containers cannot be created using this policy.
  - The default value is ``false``.
  - Any policy may be deprecated by adding the ``deprecated`` option to the
    desired policy section.
    However, a deprecated policy may not also be declared the default.
    Therefore, since there must always be a default policy, there must also
    always be at least one policy which is not deprecated.

  - See :ref:`deprecate-policy` for more information.

* ``policy_type = [replication|erasure_coding]`` (optional)

  - The option ``policy_type`` is used to distinguish between different
    policy types.
  - The default value is ``replication``.
  - When defining an EC policy use the value ``erasure_coding``.

  The EC policy type has additional required options. See
  :ref:`using_ec_policy` for details.

The following is an example of a properly configured ``swift.conf`` file.
See :doc:`policies_saio` for full instructions on setting up an all-in-one
with this example configuration::

  [swift-hash]
  # random unique strings that can never change (DO NOT LOSE)
  # Use only printable chars (python -c "import string; print(string.printable)")
  swift_hash_path_prefix = changeme
  swift_hash_path_suffix = changeme

  [storage-policy:0]
  name = gold
  aliases = yellow, orange
  policy_type = replication
  default = yes

  [storage-policy:1]
  name = silver
  policy_type = replication
  deprecated = yes

Creating a ring
---------------

Once ``swift.conf`` is configured for a new policy, a new ring must be
created. The ring tools are not policy name aware, so it's critical that
the correct policy index be used when creating the new policy's ring file.
Additional object rings are created using ``swift-ring-builder`` in the
same manner as the legacy ring, except that ``-N`` is appended after the
word ``object`` in the builder file name, where ``N`` matches the policy
index used in ``swift.conf``. So, to create the ring for policy index
``1``::

  swift-ring-builder object-1.builder create 10 3 1

Continue to use the same naming convention when using ``swift-ring-builder``
to add devices, rebalance etc. This naming convention is also used in the
pattern for per-policy storage node data directories.

.. note::

    The same drives can indeed be used for multiple policies, and the
    details of how that's managed on disk will be covered in a later
    section; it's important to understand the implications of such a
    configuration before setting one up. Make sure it's really what you
    want to do - in many cases it will be, but in others maybe not.

Proxy server configuration (optional)
-------------------------------------

The :ref:`proxy-server` configuration options related to read and write
affinity may optionally be overridden for individual storage policies. See
:ref:`proxy_server_per_policy_config` for more details.

--------------
Using Policies
--------------

Using policies is very simple - a policy is only specified when a container
is initially created. There are no other API changes. Creating a container
can be done without any special policy information::

  curl -v -X PUT -H 'X-Auth-Token: <token>' \
      http://127.0.0.1:8080/v1/AUTH_test/myCont0

This will result in a container associated with the policy name 'gold',
assuming we're using the swift.conf example from above. It would use 'gold'
because it was specified as the default. Now, when we put an object into
this container, it will get placed on nodes that are part of the ring we
created for policy 'gold'.

If we wanted to explicitly state that we wanted policy 'gold', the command
would simply need to include a new header as shown below::

  curl -v -X PUT -H 'X-Auth-Token: <token>' \
      -H 'X-Storage-Policy: gold' http://127.0.0.1:8080/v1/AUTH_test/myCont0

And that's it!
The application does not need to specify the policy name ever again. There
are some illegal operations, however:

* If an invalid (typo, non-existent) policy is specified: 400 Bad Request
* If you try to change the policy either via PUT or POST: 409 Conflict

If you'd like to see how the storage in the cluster is being used, simply
HEAD the account and you'll see not only the cumulative numbers, as before,
but per policy statistics as well. In the example below there are 3 objects
in total, with two of them in policy 'gold' and one in policy 'silver'::

  curl -i -X HEAD -H 'X-Auth-Token: <token>' \
      http://127.0.0.1:8080/v1/AUTH_test

and your results will include (some output removed for readability)::

  X-Account-Container-Count: 3
  X-Account-Object-Count: 3
  X-Account-Bytes-Used: 21
  X-Storage-Policy-Gold-Object-Count: 2
  X-Storage-Policy-Gold-Bytes-Used: 14
  X-Storage-Policy-Silver-Object-Count: 1
  X-Storage-Policy-Silver-Bytes-Used: 7

--------------
Under the Hood
--------------

Now that we've explained a little about what Policies are and how to
configure/use them, let's explore how Storage Policies fit in at the
nuts-n-bolts level.

Parsing and Configuring
-----------------------

The module :ref:`storage_policy` is responsible for parsing the
``swift.conf`` file, validating the input, and creating a global collection
of configured policies via the class :class:`.StoragePolicyCollection`.
This collection is made up of policies of class :class:`.StoragePolicy`.
The collection class includes handy functions for getting to a policy
either by name or by index, getting info about the policies, etc. There's
also one very important function,
:meth:`~.StoragePolicyCollection.get_object_ring`.

Object rings are members of the :class:`.StoragePolicy` class and are
actually not instantiated until the :meth:`~.StoragePolicy.load_ring`
method is called. Any caller anywhere in the code base that needs to access
an object ring must use the :data:`.POLICIES` global singleton to access
the :meth:`~.StoragePolicyCollection.get_object_ring` function and provide
the policy index, which will call :meth:`~.StoragePolicy.load_ring` if
needed; however, when starting request handling services such as the
:ref:`proxy-server`, rings are proactively loaded to provide moderate
protection against a mis-configuration resulting in a run time error. The
global is instantiated when Swift starts and provides a mechanism to patch
policies for the test code.
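For instance, a rough sketch of how code can look up a policy and its ring
through the global collection - assuming a configured cluster with a 'gold'
policy and rings under ``/etc/swift``; this is an illustration, not code
taken from Swift itself:

.. code-block:: python

    from swift.common.storage_policy import POLICIES

    # Primary names and aliases both resolve to the same policy object.
    policy = POLICIES.get_by_name('gold')

    # Loads (and caches) the object ring for that policy's index.
    ring = POLICIES.get_object_ring(policy.idx, '/etc/swift')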
Middleware
----------

Middleware can take advantage of policies through the :data:`.POLICIES`
global and by importing :func:`.get_container_info` to gain access to the
policy index associated with the container in question. From the index it
can then use the :data:`.POLICIES` singleton to grab the right ring. For
example, :ref:`list_endpoints` is policy aware using the means just
described. Another example is :ref:`recon`, which will report the md5 sums
for all of the rings.

Proxy Server
------------

The :ref:`proxy-server` module's role in Storage Policies is essentially to
make sure the correct ring is used as its member element. Before policies,
the one object ring would be instantiated when the :class:`.Application`
class was instantiated and could be overridden by test code via init
parameter. With policies, however, there is no init parameter and the
:class:`.Application` class instead depends on the :data:`.POLICIES` global
singleton to retrieve the ring, which is instantiated the first time it's
needed. So, instead of an object ring member of the :class:`.Application`
class, there is an accessor function,
:meth:`~.Application.get_object_ring`, that gets the ring from
:data:`.POLICIES`.

In general, when any module running on the proxy requires an object ring,
it does so via first getting the policy index from the cached container
info. The exception is during container creation, where it uses the policy
name from the request header to look up the policy index from the
:data:`.POLICIES` global. Once the proxy has determined the policy index,
it can use the :meth:`~.Application.get_object_ring` method described
earlier to gain access to the correct ring. It then has the responsibility
of passing the index information, not the policy name, on to the back-end
servers via the header ``X-Backend-Storage-Policy-Index``. Going the other
way, the proxy also strips the index out of headers that go back to
clients, and makes sure they only see the friendly policy names.

On Disk Storage
---------------

Policies each have their own directories on the back-end servers and are
identified by their storage policy indexes. Organizing the back-end
directory structures by policy index helps keep track of things and also
allows for sharing of disks between policies, which may or may not make
sense depending on the needs of the provider. More on this later, but for
now be aware of the following directory naming convention:

* ``/objects`` maps to objects associated with Policy-0
* ``/objects-N`` maps to storage policy index #N
* ``/async_pending`` maps to async pending update for Policy-0
* ``/async_pending-N`` maps to async pending update for storage policy
  index #N
* ``/tmp`` maps to the DiskFile temporary directory for Policy-0
* ``/tmp-N`` maps to the DiskFile temporary directory for policy index #N
* ``/quarantined/objects`` maps to the quarantine directory for Policy-0
* ``/quarantined/objects-N`` maps to the quarantine directory for policy
  index #N

Note that these directory names are actually owned by the specific Diskfile
implementation; the names shown above are used by the default Diskfile.

Object Server
-------------

The :ref:`object-server` is not involved with selecting the storage policy
placement directly. However, because of how back-end directory structures
are set up for policies, as described earlier, the object server modules do
play a role. When the object server gets a :class:`.Diskfile`, it passes in
the policy index and leaves the actual directory naming/structure
mechanisms to :class:`.Diskfile`. By passing in the index, the instance of
:class:`.Diskfile` being used will assure that data is properly located in
the tree based on its policy.

For the same reason, the :ref:`object-updater` is also policy aware. As
previously described, different policies use different async pending
directories, so the updater needs to know how to scan them appropriately.

The :ref:`object-replicator` is policy aware in that, depending on the
policy, it may have to do drastically different things, or maybe not. For
example, the difference in handling a replication job for 2x versus 3x is
trivial; however, the difference in handling replication between 3x and
erasure code is most definitely not. In fact, the term 'replication' really
isn't appropriate for some policies like erasure code; however, the
majority of the framework for collecting and processing jobs is common.
Thus, those functions in the replicator are leveraged for all policies, and
then there is policy specific code required for each policy, added when the
policy is defined if needed.

The ssync functionality is policy aware for the same reason. Some of the
other modules may not obviously be affected, but the back-end directory
structure owned by :class:`.Diskfile` requires the policy index parameter.
Therefore, ssync being policy aware really means passing the policy index
along. See :class:`~swift.obj.ssync_sender` and
:class:`~swift.obj.ssync_receiver` for more information on ssync.

For :class:`.Diskfile` itself, being policy aware is all about managing the
back-end structure using the provided policy index. In other words, callers
who get a :class:`.Diskfile` instance provide a policy index and
:class:`.Diskfile`'s job is to keep data separated via this index (however
it chooses) such that policies can share the same media/nodes if desired.
The included implementation of :class:`.Diskfile` lays out the directory
structure described earlier, but that's owned within :class:`.Diskfile`;
external modules have no visibility into that detail. A common function is
provided to map various directory names and/or strings based on their
policy index. For example, :class:`.Diskfile` defines
:func:`.get_data_dir`, which builds off of a generic
:func:`.get_policy_string` to consistently build policy aware strings for
various usage.
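A rough illustration of those helpers, assuming a Swift checkout on the
Python path and at least two configured policies; the printed values follow
the directory naming convention shown above:

.. code-block:: python

    from swift.common.storage_policy import POLICIES, get_policy_string
    from swift.obj.diskfile import get_data_dir

    print(get_data_dir(POLICIES[0]))   # objects
    print(get_data_dir(POLICIES[1]))   # objects-1

    # The generic helper can build any policy aware string the same way.
    print(get_policy_string('async_pending', POLICIES[1]))  # async_pending-1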
Container Server
----------------

The :ref:`container-server` plays a very important role in Storage
Policies: it is responsible for handling the assignment of a policy to a
container and the prevention of bad things like changing policies or
picking the wrong policy to use when nothing is specified (recall the
earlier discussion on Policy-0 versus default).

The :ref:`container-updater` is policy aware; however, its job is very
simple: to pass the policy index along to the :ref:`account-server` via a
request header.

The :ref:`container-backend` is responsible both for altering the existing
DB schema and for assuring new DBs are created with a schema that supports
storage policies. The "on-demand" migration of container schemas allows
Swift to upgrade without downtime (sqlite's alter statements are fast
regardless of row count). To support rolling upgrades (and downgrades) the
incompatible schema changes to the ``container_stat`` table are made to a
``container_info`` table, and the ``container_stat`` table is replaced with
a view that includes an ``INSTEAD OF UPDATE`` trigger which makes it behave
like the old table.

The policy index is stored here for use in reporting information about the
container as well as managing split-brain scenario induced discrepancies
between containers and their storage policies. Furthermore, during
split-brain, containers must be prepared to track object updates from
multiple policies, so the object table also includes a
``storage_policy_index`` column. Per-policy object counts and bytes are
updated in the ``policy_stat`` table using ``INSERT`` and ``DELETE``
triggers similar to the pre-policy triggers that updated ``container_stat``
directly.

The :ref:`container-replicator` daemon will pro-actively migrate legacy
schemas as part of its normal consistency checking process when it updates
the ``reconciler_sync_point`` entry in the ``container_info`` table. This
ensures that read heavy containers which do not encounter any writes will
still get migrated to be fully compatible with the post-storage-policy
queries, without having to fall back and retry queries with the legacy
schema to service container read requests.

The :ref:`container-sync-daemon` functionality only needs to be policy
aware in that it accesses the object rings. Therefore, it needs to pull the
policy index out of the container information and use it to select the
appropriate object ring from the :data:`.POLICIES` global.

Account Server
--------------

The :ref:`account-server`'s role in Storage Policies is really limited to
reporting. When a HEAD request is made on an account (see the example
provided earlier), the account server is provided with the storage policy
index and builds the ``object_count`` and ``byte_count`` information for
the client on a per policy basis.

The account servers are able to report per-storage-policy object and byte
counts because of some policy specific DB schema changes. A policy specific
table, ``policy_stat``, maintains information on a per policy basis (one
row per policy) in the same manner in which the ``account_stat`` table
does. The ``account_stat`` table still serves the same purpose and is not
replaced by ``policy_stat``; it holds the total account stats, whereas
``policy_stat`` just has the breakdowns. The backend is also responsible
for migrating pre-storage-policy accounts by altering the DB schema and
populating the ``policy_stat`` table for Policy-0 with current
``account_stat`` data at that point in time.

The per-storage-policy object and byte counts are not updated with each
object PUT and DELETE request; instead, container updates to the account
server are performed asynchronously by the ``swift-container-updater``.

.. _upgrade-policy:

--------------------------------------
Upgrading and Confirming Functionality
--------------------------------------

Upgrading to a version of Swift that has Storage Policy support is not
difficult; in fact, the cluster administrator isn't required to make any
special configuration changes to get going. Swift will automatically begin
using the existing object ring as both the default ring and the Policy-0
ring. Adding the declaration of policy 0 is totally optional, and in its
absence the name given to the implicit policy 0 will be 'Policy-0'.

Let's say for testing purposes that you wanted to take an existing cluster
that already has lots of data on it and upgrade to Swift with Storage
Policies. From there you want to go ahead and create a policy and test a
few things out. All you need to do is:

#. Upgrade all of your Swift nodes to a policy-aware version of Swift
#. Define your policies in ``/etc/swift/swift.conf``
#. Create the corresponding object rings
#. Create containers and objects and confirm their placement is as expected

For a specific example that takes you through these steps, please see
:doc:`policies_saio`

.. note::

    If you downgrade from a Storage Policy enabled version of Swift to an
    older version that doesn't support policies, you will not be able to
    access any data stored in policies other than the policy with index 0,
    but those objects WILL appear in container listings (possibly as
    duplicates if there was a network partition and un-reconciled objects).
    It is EXTREMELY important that you perform any necessary integration
    testing on the upgraded deployment before enabling an additional
    storage policy to ensure a consistent API experience for your clients.
    DO NOT downgrade to a version of Swift that does not support storage
    policies once you expose multiple storage policies.
swift-2.17.0/doc/source/overview_encryption.rst0000666000175100017510000006403013236061617021712 0ustar zuulzuul00000000000000=================
Object Encryption
=================

Swift supports the optional encryption of object data at rest on storage
nodes. The encryption of object data is intended to mitigate the risk of
users' data being read if an unauthorised party were to gain physical
access to a disk.

.. note::

    Swift's data-at-rest encryption accepts plaintext object data from the
    client, encrypts it in the cluster, and stores the encrypted data. This
    protects object data from inadvertently being exposed if a data drive
    leaves the Swift cluster. If a user wishes to ensure that the plaintext
    data is always encrypted while in transit and in storage, it is
    strongly recommended that the data be encrypted before sending it to
    the Swift cluster. Encrypting on the client side is the only way to
    ensure that the data is fully encrypted for its entire lifecycle.

Encryption of data at rest is implemented by middleware that may be
included in the proxy server WSGI pipeline. The feature is internal to a
Swift cluster and not exposed through the API. Clients are unaware that
data is encrypted by this feature internally to the Swift service;
internally encrypted data should never be returned to clients via the Swift
API.

The following data are encrypted while at rest in Swift:

* Object content, i.e. the content of an object PUT request's body
* The entity tag (ETag) of objects that have non-zero content
* All custom user object metadata values, i.e. metadata sent using
  X-Object-Meta- prefixed headers with PUT or POST requests

Any data or metadata not included in the list above are not encrypted,
including:

* Account, container and object names
* Account and container custom user metadata values
* All custom user metadata names
* Object Content-Type values
* Object size
* System metadata

.. note::

    This feature is intended to provide `confidentiality` of data that is
    at rest, i.e. to protect user data from being read by an attacker that
    gains access to disks on which object data is stored. This feature is
    not intended to prevent undetectable `modification` of user data at
    rest. This feature is not intended to protect against an attacker that
    gains access to Swift's internal network connections, or gains access
    to key material, or is able to modify the Swift code running on Swift
    nodes.

.. _encryption_deployment:

------------------------
Deployment and operation
------------------------

Encryption is deployed by adding two middleware filters to the proxy server
WSGI pipeline and including their respective filter configuration sections
in the `proxy-server.conf` file. :ref:`Additional steps
<container_sync_client_config>` are required if the container sync feature
is being used.

The `keymaster` and `encryption` middleware filters must be to the right of
all other middleware in the pipeline apart from the final proxy-logging
middleware, and in the order shown in this example::

  <other middleware> keymaster encryption proxy-logging proxy-server

  [filter:keymaster]
  use = egg:swift#keymaster
  encryption_root_secret = your_secret

  [filter:encryption]
  use = egg:swift#encryption
  # disable_encryption = False

See the `proxy-server.conf-sample` file for further details on the
middleware configuration options.
The keymaster config option ``encryption_root_secret`` MUST be set to a
value of at least 44 valid base-64 characters before the middleware is used
and should be consistent across all proxy servers. The minimum length of 44
has been chosen because it is the length of a base-64 encoded 32 byte
value.

Alternatives to specifying the encryption root secret directly in the
`proxy-server.conf` file are storing it in a separate file, or storing it
in an :ref:`external key management system
<encryption_root_secret_in_external_kms>` such as Barbican.

.. note::

    The ``encryption_root_secret`` option holds the master secret key used
    for encryption. The security of all encrypted data critically depends
    on this key and it should therefore be set to a high-entropy value. For
    example, a suitable ``encryption_root_secret`` may be obtained by
    base-64 encoding a 32 byte (or longer) value generated by a
    cryptographically secure random number generator.

    The ``encryption_root_secret`` value is necessary to recover any
    encrypted data from the storage system, and therefore, it must be
    guarded against accidental loss. Its value (and consequently, the
    proxy-server.conf file) should not be stored on any disk that is in any
    account, container or object ring.

    The ``encryption_root_secret`` value should not be changed once
    deployed. Doing so would prevent Swift from properly decrypting data
    that was encrypted using the former value, and would therefore result
    in the loss of that data.

One method for generating a suitable value for ``encryption_root_secret``
is to use the ``openssl`` command line tool::

  openssl rand -base64 32
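Equivalently, the same kind of value can be produced with a couple of lines
of Python; this is just a sketch of an alternative to the openssl command,
not part of Swift itself:

.. code-block:: python

    import base64
    import os

    # 32 random bytes, base-64 encoded: a 44 character root secret.
    print(base64.b64encode(os.urandom(32)).decode('ascii'))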
Once deployed, the encryption filter will by default encrypt object data
and metadata when handling PUT and POST requests and decrypt object data
and metadata when handling GET and HEAD requests. COPY requests are
transformed into GET and PUT requests by the :ref:`copy` middleware before
reaching the encryption middleware, and as a result object data and
metadata is decrypted and re-encrypted when copied.

.. _encryption_root_secret_in_external_kms:

Encryption Root Secret in External Key Management System
---------------------------------------------------------

The benefits of using a dedicated system for storing the encryption root
secret include the auditing and access control infrastructure that are
already in place in such a system, and the fact that an encryption root
secret stored in a key management system (KMS) may be backed by a hardware
security module (HSM) for additional security. Another significant benefit
of storing the root encryption secret in an external KMS is that it is in
this case never stored on a disk in the Swift cluster.

Make sure the required dependencies are installed for retrieving an
encryption root secret from an external KMS. This can be done when
installing Swift (add the ``-e`` flag to install as a development version)
by changing to the Swift directory and running the following command to
install Swift together with the ``kms_keymaster`` extra dependencies::

  sudo pip install .[kms_keymaster]

Another way to install the dependencies is by making sure the following
lines exist in the requirements.txt file, and installing them using
``pip install -r requirements.txt``::

  cryptography>=1.6  # BSD/Apache-2.0
  castellan>=0.6.0

.. note::

    If any of the required packages is already installed, the ``--upgrade``
    flag may be required for the ``pip`` commands in order for the required
    minimum version to be installed.

To make use of an encryption root secret stored in an external KMS, replace
the keymaster middleware with the kms_keymaster middleware in the proxy
server WSGI pipeline in `proxy-server.conf`, in the order shown in this
example::

  <other middleware> kms_keymaster encryption proxy-logging proxy-server

and add a section to the same file::

  [filter:kms_keymaster]
  use = egg:swift#kms_keymaster
  keymaster_config_path = file_with_kms_keymaster_config

Create or edit the file `file_with_kms_keymaster_config` referenced above.
For further details on the middleware configuration options, see the
`keymaster.conf-sample` file. An example of the content of this file, with
optional parameters omitted, is below::

  [kms_keymaster]
  key_id = changeme
  username = swift
  password = password
  project_name = swift
  auth_endpoint = http://keystonehost:5000/v3

The encryption root secret shall be created and stored in the external key
management system before it can be used by the keymaster. It shall be
stored as a symmetric key, with content type ``application/octet-stream``,
``base64`` content encoding, ``AES`` algorithm, bit length ``256``, and
secret type ``symmetric``. The mode ``ctr`` may also be stored for
informational purposes - it is not currently checked by the keymaster.

The following command can be used to store the currently configured
``encryption_root_secret`` value from the `proxy-server.conf` file in
Barbican::

  openstack secret store --name swift_root_secret \
      --payload-content-type="application/octet-stream" \
      --payload-content-encoding="base64" --algorithm aes --bit-length 256 \
      --mode ctr --secret-type symmetric --payload <encryption_root_secret>

Alternatively, the existing root secret can also be stored in Barbican
using curl.

.. note::

    The credentials used to store the secret in Barbican shall be the same
    ones that the proxy server uses to retrieve the secret, i.e., the ones
    configured in the `keymaster.conf` file. For clarity, the commands
    shown here omit the credentials - they may be specified explicitly, or
    in environment variables.

Instead of using an existing root secret, Barbican can also be asked to
generate a new 256-bit root secret, with content type
``application/octet-stream`` and algorithm ``AES`` (the ``mode`` parameter
is currently optional)::

  openstack secret order create --name swift_root_secret \
      --payload-content-type="application/octet-stream" --algorithm aes \
      --bit-length 256 --mode ctr key

The ``order create`` creates an asynchronous request to create the actual
secret. The order can be retrieved using ``openstack secret order get``,
and once the order completes successfully, the output will show the key id
of the generated root secret. Keys currently stored in Barbican can be
listed using the ``openstack secret list`` command.

.. note::

    Both the order (the asynchronous request for creating or storing a
    secret) and the actual secret itself have similar unique identifiers.
    Once the order has been completed, the key id is shown in the output of
    the ``order get`` command.

The keymaster uses the explicitly configured username and password (and
project name etc.) from the `keymaster.conf` file for retrieving the
encryption root secret from an external key management system. The
Castellan library is used to communicate with Barbican.

For the proxy server, reading the encryption root secret directly from the
`proxy-server.conf` file, from the `keymaster.conf` file pointed to from
the `proxy-server.conf` file, or from an external key management system
such as Barbican, are all functionally equivalent.
If reading the encryption root secret from the external key management
system fails, the proxy server will not start up. If the encryption root
secret is retrieved successfully, it is cached in memory in the proxy
server.

For further details on the configuration options, see the
`[filter:kms_keymaster]` section in the `proxy-server.conf-sample` file,
and the `keymaster.conf-sample` file.

Upgrade Considerations
----------------------

When upgrading an existing cluster to deploy encryption, the following
sequence of steps is recommended:

#. Upgrade all object servers
#. Upgrade all proxy servers
#. Add keymaster and encryption middlewares to every proxy server's
   middleware pipeline with the encryption ``disable_encryption`` option
   set to ``True`` and the keymaster ``encryption_root_secret`` value set
   as described above.
#. If required, follow the steps for :ref:`container_sync_client_config`.
#. Finally, change the encryption ``disable_encryption`` option to
   ``False``

Objects that existed in the cluster prior to the keymaster and encryption
middlewares being deployed are still readable with GET and HEAD requests.
The content of those objects will not be encrypted unless they are written
again by a PUT or COPY request. Any user metadata of those objects will not
be encrypted unless it is written again by a PUT, POST or COPY request.

Disabling Encryption
--------------------

Once deployed, the keymaster and encryption middlewares should not be
removed from the pipeline. To do so will cause encrypted object data and/or
metadata to be returned in response to GET or HEAD requests for objects
that were previously encrypted.

Encryption of inbound object data may be disabled by setting the encryption
``disable_encryption`` option to ``True``, in which case existing encrypted
objects will remain encrypted but new data written with PUT, POST or COPY
requests will not be encrypted. The keymaster and encryption middlewares
should remain in the pipeline even when encryption of new objects is not
required. The encryption middleware is needed to handle GET requests for
objects that may have been previously encrypted. The keymaster is needed to
provide keys for those requests.

.. _container_sync_client_config:

Container sync configuration
----------------------------

If container sync is being used then the keymaster and encryption
middlewares must be added to the container sync internal client pipeline.
The following configuration steps are required:

#. Create a custom internal client configuration file for container sync
   (if one is not already in use) based on the sample file
   `internal-client.conf-sample`. For example, copy
   `internal-client.conf-sample` to
   `/etc/swift/container-sync-client.conf`.
#. Modify this file to include the middlewares in the pipeline in the same
   way as described above for the proxy server.
#. Modify the container-sync section of all container server config files
   to point to this internal client config file using the
   ``internal_client_conf_path`` option. For example::

     internal_client_conf_path = /etc/swift/container-sync-client.conf

.. note::

    The ``encryption_root_secret`` value is necessary to recover any
    encrypted data from the storage system, and therefore, it must be
    guarded against accidental loss. Its value (and consequently, the
    custom internal client configuration file) should not be stored on any
    disk that is in any account, container or object ring.

.. note::

    These container sync configuration steps will be necessary for
    container sync probe tests to pass if the encryption middlewares are
    included in the proxy pipeline of a test cluster.
note:: These container sync configuration steps will be necessary for container sync probe tests to pass if the encryption middlewares are included in the proxy pipeline of a test cluster. -------------- Implementation -------------- Encryption scheme ----------------- Plaintext data is encrypted to ciphertext using the AES cipher with 256-bit keys implemented by the python `cryptography package `_. The cipher is used in counter (CTR) mode so that any byte or range of bytes in the ciphertext may be decrypted independently of any other bytes in the ciphertext. This enables very simple handling of ranged GETs. In general an item of unencrypted data, ``plaintext``, is transformed to an item of encrypted data, ``ciphertext``:: ciphertext = E(plaintext, k, iv) where ``E`` is the encryption function, ``k`` is an encryption key and ``iv`` is a unique initialization vector (IV) chosen for each encryption context. For example, the object body is one encryption context with a randomly chosen IV. The IV is stored as metadata of the encrypted item so that it is available for decryption:: plaintext = D(ciphertext, k, iv) where ``D`` is the decryption function. The implementation of CTR mode follows `NIST SP800-38A `_, and the full IV passed to the encryption or decryption function serves as the initial counter block. In general any encrypted item has accompanying crypto-metadata that describes the IV and the cipher algorithm used for the encryption:: crypto_metadata = {"iv": <16 byte value>, "cipher": "AES_CTR_256"} This crypto-metadata is stored either with the ciphertext (for user metadata and etags) or as a separate header (for object bodies). Key management -------------- A keymaster middleware is responsible for providing the keys required for each encryption and decryption operation. Two keys are required when handling object requests: a `container key` that is uniquely associated with the container path and an `object key` that is uniquely associated with the object path. These keys are made available to the encryption middleware via a callback function that the keymaster installs in the WSGI request environ. The current keymaster implementation derives container and object keys from the ``encryption_root_secret`` in a deterministic way by constructing a SHA256 HMAC using the ``encryption_root_secret`` as a key and the container or object path as a message, for example:: object_key = HMAC(encryption_root_secret, "/a/c/o") Other strategies for providing object and container keys may be employed by future implementations of alternative keymaster middleware. During each object PUT, a random key is generated to encrypt the object body. This random key is then encrypted using the object key provided by the keymaster. This makes it safe to store the encrypted random key alongside the encrypted object data and metadata. This process of `key wrapping` enables more efficient re-keying events when the object key may need to be replaced and consequently any data encrypted using that key must be re-encrypted. Key wrapping minimizes the amount of data encrypted using those keys to just other randomly chosen keys which can be re-wrapped efficiently without needing to re-encrypt the larger amounts of data that were encrypted using the random keys. .. note:: Re-keying is not currently implemented. Key wrapping is implemented in anticipation of future re-keying operations. Encryption middleware --------------------- The encryption middleware is composed of an `encrypter` component and a `decrypter` component. 
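Both components rely on the primitives described above. As a concrete illustration, the key derivation, AES-CTR encryption and key wrapping can be sketched end to end. This is a simplified model of the scheme, assuming the python ``cryptography`` package is installed; the helper names (``derive_key``, ``aes_ctr``) are illustrative only and are not the actual middleware code:

.. code-block:: python

    import hmac
    import os
    from hashlib import sha256

    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import (
        Cipher, algorithms, modes)

    def derive_key(root_secret, path):
        # object_key = HMAC(encryption_root_secret, "/a/c/o"), as above
        return hmac.new(root_secret, path, sha256).digest()

    def aes_ctr(key, iv, data):
        # AES-256 in CTR mode; the same operation encrypts and decrypts
        cipher = Cipher(algorithms.AES(key), modes.CTR(iv),
                        backend=default_backend())
        encryptor = cipher.encryptor()
        return encryptor.update(data) + encryptor.finalize()

    root_secret = os.urandom(32)  # in practice, from the keymaster config
    object_key = derive_key(root_secret, b'/a/c/o')

    # a random body key and a random IV encrypt the object body ...
    body_key, body_iv = os.urandom(32), os.urandom(16)
    ciphertext = aes_ctr(body_key, body_iv, b'object body plaintext')

    # ... and the body key is itself wrapped using the derived object key
    body_key_iv = os.urandom(16)
    wrapped_body_key = aes_ctr(object_key, body_key_iv, body_key)

    # decryption unwraps the body key, then decrypts the body
    body_key = aes_ctr(object_key, body_key_iv, wrapped_body_key)
    assert aes_ctr(body_key, body_iv, ciphertext) == b'object body plaintext'

Note how decrypting the body requires first unwrapping the body key, mirroring the decrypter steps described below.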
Encrypter operation ^^^^^^^^^^^^^^^^^^^ Custom user metadata ++++++++++++++++++++ The encrypter encrypts each item of custom user metadata using the object key provided by the keymaster and an IV that is randomly chosen for that metadata item. The encrypted values are stored as :ref:`transient_sysmeta` with associated crypto-metadata appended to the encrypted value. For example:: X-Object-Meta-Private1: value1 X-Object-Meta-Private2: value2 are transformed to:: X-Object-Transient-Sysmeta-Crypto-Meta-Private1: E(value1, object_key, header_iv_1); swift_meta={"iv": header_iv_1, "cipher": "AES_CTR_256"} X-Object-Transient-Sysmeta-Crypto-Meta-Private2: E(value2, object_key, header_iv_2); swift_meta={"iv": header_iv_2, "cipher": "AES_CTR_256"} The unencrypted custom user metadata headers are removed. Object body +++++++++++ Encryption of an object body is performed using a randomly chosen body key and a randomly chosen IV:: body_ciphertext = E(body_plaintext, body_key, body_iv) The body_key is wrapped using the object key provided by the keymaster and a randomly chosen IV:: wrapped_body_key = E(body_key, object_key, body_key_iv) The encrypter stores the associated crypto-metadata in a system metadata header:: X-Object-Sysmeta-Crypto-Body-Meta: {"iv": body_iv, "cipher": "AES_CTR_256", "body_key": {"key": wrapped_body_key, "iv": body_key_iv}} Note that in this case there is an extra item of crypto-metadata which stores the wrapped body key and its IV. Entity tag ++++++++++ While encrypting the object body the encrypter also calculates the ETag (md5 digest) of the plaintext body. This value is encrypted using the object key provided by the keymaster and a randomly chosen IV, and saved as an item of system metadata, with associated crypto-metadata appended to the encrypted value:: X-Object-Sysmeta-Crypto-Etag: E(md5(plaintext), object_key, etag_iv); swift_meta={"iv": etag_iv, "cipher": "AES_CTR_256"} The encrypter also forces an encrypted version of the plaintext ETag to be sent with container updates by adding an update override header to the PUT request. The associated crypto-metadata is appended to the encrypted ETag value of this update override header:: X-Object-Sysmeta-Container-Update-Override-Etag: E(md5(plaintext), container_key, override_etag_iv); meta={"iv": override_etag_iv, "cipher": "AES_CTR_256"} The container key is used for this encryption so that the decrypter is able to decrypt the ETags in container listings when handling a container request, since object keys may not be available in that context. Since the plaintext ETag value is only known once the encrypter has completed processing the entire object body, the ``X-Object-Sysmeta-Crypto-Etag`` and ``X-Object-Sysmeta-Container-Update-Override-Etag`` headers are sent after the encrypted object body using the proxy server's support for request footers. .. _conditional_requests: Conditional Requests ++++++++++++++++++++ In general, an object server evaluates conditional requests with ``If[-None]-Match`` headers by comparing values listed in an ``If[-None]-Match`` header against the ETag that is stored in the object metadata. This is not possible when the ETag stored in object metadata has been encrypted. 
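The difficulty is easy to demonstrate: because a fresh random IV is chosen for each encryption, two encryptions of the same plaintext ETag produce different ciphertexts, so a simple equality comparison no longer works. A toy illustration, again assuming the python ``cryptography`` package (the ``encrypt`` helper is ours, not middleware code):

.. code-block:: python

    import os
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives.ciphers import (
        Cipher, algorithms, modes)

    def encrypt(key, iv, data):
        encryptor = Cipher(algorithms.AES(key), modes.CTR(iv),
                           backend=default_backend()).encryptor()
        return encryptor.update(data) + encryptor.finalize()

    object_key = os.urandom(32)
    etag = b'd41d8cd98f00b204e9800998ecf8427e'

    # the same plaintext ETag encrypts to two different stored values
    assert encrypt(object_key, os.urandom(16), etag) != \
        encrypt(object_key, os.urandom(16), etag)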
The encrypter therefore calculates an HMAC using the object key and the ETag while handling object PUT requests, and stores this under the metadata key ``X-Object-Sysmeta-Crypto-Etag-Mac``::

    X-Object-Sysmeta-Crypto-Etag-Mac: HMAC(object_key, md5(plaintext))

Like other ETag-related metadata, this is sent after the encrypted object body using the proxy server's support for request footers.

The encrypter similarly calculates an HMAC for each ETag value included in ``If[-None]-Match`` headers of conditional GET or HEAD requests, and appends these to the ``If[-None]-Match`` header. The encrypter also sets the ``X-Backend-Etag-Is-At`` header to point to the previously stored ``X-Object-Sysmeta-Crypto-Etag-Mac`` metadata so that the object server evaluates the conditional request by comparing the HMAC values included in the ``If[-None]-Match`` with the value stored under ``X-Object-Sysmeta-Crypto-Etag-Mac``. For example, given a conditional request with header::

    If-Match: match_etag

the encrypter would transform the request headers to include::

    If-Match: match_etag,HMAC(object_key, match_etag)
    X-Backend-Etag-Is-At: X-Object-Sysmeta-Crypto-Etag-Mac

This enables the object server to perform an encrypted comparison to check whether the ETags match, without leaking the ETag itself or leaking information about the object body.

Decrypter operation
^^^^^^^^^^^^^^^^^^^

For each GET or HEAD request to an object, the decrypter inspects the response for encrypted items (revealed by crypto-metadata headers), and if any are discovered then it will:

#. Fetch the object and container keys from the keymaster via its callback
#. Decrypt the ``X-Object-Sysmeta-Crypto-Etag`` value
#. Decrypt the ``X-Object-Sysmeta-Container-Update-Override-Etag`` value
#. Decrypt metadata header values using the object key
#. Decrypt the wrapped body key found in ``X-Object-Sysmeta-Crypto-Body-Meta``
#. Decrypt the body using the body key

For each GET request to a container that would include ETags in its response body, the decrypter will:

#. GET the response body with the container listing
#. Fetch the container key from the keymaster via its callback
#. Decrypt any encrypted ETag entries in the container listing using the container key

Impact on other Swift services and features
-------------------------------------------

Encryption has no impact on :ref:`versioned_writes` other than that any previously unencrypted objects will be encrypted as they are copied to or from the versions container. Keymaster and encryption middlewares should be placed after ``versioned_writes`` in the proxy server pipeline, as described in :ref:`encryption_deployment`.

`Container Sync` uses an internal client to GET objects that are to be sync'd. This internal client must be configured to use the keymaster and encryption middlewares as described :ref:`above <container_sync_client_config>`.

Encryption has no impact on the `object-auditor` service. Since the ETag header saved with the object at rest is the md5 sum of the encrypted object body, the auditor will correctly verify that the encrypted data is valid.

Encryption has no impact on the `object-expirer` service. ``X-Delete-At`` and ``X-Delete-After`` headers are not encrypted.

Encryption has no impact on the `object-replicator` and `object-reconstructor` services. These services are unaware of the object or EC fragment data being encrypted.

Encryption has no impact on the `container-reconciler` service. The `container-reconciler` uses an internal client to move objects between different policy rings.
The destination object has the same URL as the source object and the object is moved without re-encryption.

Considerations for developers
-----------------------------

Developers should be aware that keymaster and encryption middlewares rely on the path of an object remaining unchanged. The included keymaster derives keys for containers and objects based on their paths and the ``encryption_root_secret``. The keymaster does not rely on object metadata to inform its generation of keys for GET and HEAD requests, because when handling :ref:`conditional_requests` it is required to provide the object key before any metadata has been read from the object.

Developers should therefore give careful consideration to any new features that would relocate object data and metadata within a Swift cluster by means that do not cause the object data and metadata to pass through the encryption middlewares in the proxy pipeline and be re-encrypted.

The crypto-metadata associated with each encrypted item does include some `key_id` metadata that is provided by the keymaster and contains the path used to derive keys. This `key_id` metadata is persisted in anticipation of future scenarios when it may be necessary to decrypt an object that has been relocated without re-encrypting, in which case the metadata could be used to derive the keys that were used for encryption. However, this alone is not sufficient to handle conditional requests and to decrypt container listings where objects have been relocated, and further work will be required to solve those issues.

swift-2.17.0/doc/source/ring_background.rst0000666000175100017510000011043113236061617020725 0ustar zuulzuul00000000000000
==================================
Building a Consistent Hashing Ring
==================================

---------------------
Authored by Greg Holt
---------------------

This is a compilation of five posts I made earlier discussing how to build a consistent hashing ring. The posts seemed to be accessed quite frequently, so I've gathered them all here on one page for easier reading.

Part 1
======

"Consistent Hashing" is a term used to describe a process where data is distributed using a hashing algorithm to determine its location. Using only the hash of the id of the data you can determine exactly where that data should be. This mapping of hashes to locations is usually termed a "ring".

Probably the simplest hash is just a modulus of the id. For instance, if all ids are numbers and you have two machines you wish to distribute data to, you could just put all odd numbered ids on one machine and even numbered ids on the other. Assuming you have a balanced number of odd and even numbered ids, and a balanced data size per id, your data would be balanced between the two machines.

Since data ids are often textual names and not numbers, like paths for files or URLs, it makes sense to use a "real" hashing algorithm to convert the names to numbers first. Using MD5 for instance, the hash of the name 'mom.png' is '4559a12e3e8da7c2186250c2f292e3af' and the hash of 'dad.png' is '096edcc4107e9e18d6a03a43b3853bea'. Now, using the modulus, we can place 'mom.png' on the odd machine and 'dad.png' on the even one. Another benefit of using a hashing algorithm like MD5 is that the resulting hashes have a known even distribution, meaning your ids will be evenly distributed without worrying about keeping the id values themselves evenly distributed.

Here is a simple example of this in action:

..
code-block:: python from hashlib import md5 from struct import unpack_from NODE_COUNT = 100 DATA_ID_COUNT = 10000000 node_counts = [0] * NODE_COUNT for data_id in xrange(DATA_ID_COUNT): data_id = str(data_id) # This just pulls part of the hash out as an integer hsh = unpack_from('>I', md5(data_id).digest())[0] node_id = hsh % NODE_COUNT node_counts[node_id] += 1 desired_count = DATA_ID_COUNT / NODE_COUNT print '%d: Desired data ids per node' % desired_count max_count = max(node_counts) over = 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids on one node, %.02f%% over' % \ (max_count, over) min_count = min(node_counts) under = 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids on one node, %.02f%% under' % \ (min_count, under) :: 100000: Desired data ids per node 100695: Most data ids on one node, 0.69% over 99073: Least data ids on one node, 0.93% under So that's not bad at all; less than a percent over/under for distribution per node. In the next part of this series we'll examine where modulus distribution causes problems and how to improve our ring to overcome them. Part 2 ====== In Part 1 of this series, we did a simple test of using the modulus of a hash to locate data. We saw very good distribution, but that's only part of the story. Distributed systems not only need to distribute load, but they often also need to grow as more and more data is placed in it. So let's imagine we have a 100 node system up and running using our previous algorithm, but it's starting to get full so we want to add another node. When we add that 101st node to our algorithm we notice that many ids now map to different nodes than they previously did. We're going to have to shuffle a ton of data around our system to get it all into place again. Let's examine what's happened on a much smaller scale: just 2 nodes again, node 0 gets even ids and node 1 gets odd ids. So data id 100 would map to node 0, data id 101 to node 1, data id 102 to node 0, etc. This is simply node = id % 2. Now we add a third node (node 2) for more space, so we want node = id % 3. So now data id 100 maps to node id 1, data id 101 to node 2, and data id 102 to node 0. So we have to move data for 2 of our 3 ids so they can be found again. Let's examine this at a larger scale: .. code-block:: python from hashlib import md5 from struct import unpack_from NODE_COUNT = 100 NEW_NODE_COUNT = 101 DATA_ID_COUNT = 10000000 moved_ids = 0 for data_id in xrange(DATA_ID_COUNT): data_id = str(data_id) hsh = unpack_from('>I', md5(str(data_id)).digest())[0] node_id = hsh % NODE_COUNT new_node_id = hsh % NEW_NODE_COUNT if node_id != new_node_id: moved_ids += 1 percent_moved = 100.0 * moved_ids / DATA_ID_COUNT print '%d ids moved, %.02f%%' % (moved_ids, percent_moved) :: 9900989 ids moved, 99.01% Wow, that's severe. We'd have to shuffle around 99% of our data just to increase our capacity 1%! We need a new algorithm that combats this behavior. This is where the "ring" really comes in. We can assign ranges of hashes directly to nodes and then use an algorithm that minimizes the changes to those ranges. Back to our small scale, let's say our ids range from 0 to 999. We have two nodes and we'll assign data ids 0–499 to node 0 and 500–999 to node 1. Later, when we add node 2, we can take half the data ids from node 0 and half from node 1, minimizing the amount of data that needs to move. Let's examine this at a larger scale: .. 
code-block:: python from bisect import bisect_left from hashlib import md5 from struct import unpack_from NODE_COUNT = 100 NEW_NODE_COUNT = 101 DATA_ID_COUNT = 10000000 node_range_starts = [] for node_id in xrange(NODE_COUNT): node_range_starts.append(DATA_ID_COUNT / NODE_COUNT * node_id) new_node_range_starts = [] for new_node_id in xrange(NEW_NODE_COUNT): new_node_range_starts.append(DATA_ID_COUNT / NEW_NODE_COUNT * new_node_id) moved_ids = 0 for data_id in xrange(DATA_ID_COUNT): data_id = str(data_id) hsh = unpack_from('>I', md5(str(data_id)).digest())[0] node_id = bisect_left(node_range_starts, hsh % DATA_ID_COUNT) % NODE_COUNT new_node_id = bisect_left(new_node_range_starts, hsh % DATA_ID_COUNT) % NEW_NODE_COUNT if node_id != new_node_id: moved_ids += 1 percent_moved = 100.0 * moved_ids / DATA_ID_COUNT print '%d ids moved, %.02f%%' % (moved_ids, percent_moved) :: 4901707 ids moved, 49.02% Okay, that is better. But still, moving 50% of our data to add 1% capacity is not very good. If we examine what happened more closely we'll see what is an "accordion effect". We shrunk node 0's range a bit to give to the new node, but that shifted all the other node's ranges by the same amount. We can minimize the change to a node's assigned range by assigning several smaller ranges instead of the single broad range we were before. This can be done by creating "virtual nodes" for each node. So 100 nodes might have 1000 virtual nodes. Let's examine how that might work. .. code-block:: python from bisect import bisect_left from hashlib import md5 from struct import unpack_from NODE_COUNT = 100 DATA_ID_COUNT = 10000000 VNODE_COUNT = 1000 vnode_range_starts = [] vnode2node = [] for vnode_id in xrange(VNODE_COUNT): vnode_range_starts.append(DATA_ID_COUNT / VNODE_COUNT * vnode_id) vnode2node.append(vnode_id % NODE_COUNT) new_vnode2node = list(vnode2node) new_node_id = NODE_COUNT NEW_NODE_COUNT = NODE_COUNT + 1 vnodes_to_reassign = VNODE_COUNT / NEW_NODE_COUNT while vnodes_to_reassign > 0: for node_to_take_from in xrange(NODE_COUNT): for vnode_id, node_id in enumerate(new_vnode2node): if node_id == node_to_take_from: new_vnode2node[vnode_id] = new_node_id vnodes_to_reassign -= 1 break if vnodes_to_reassign <= 0: break moved_ids = 0 for data_id in xrange(DATA_ID_COUNT): data_id = str(data_id) hsh = unpack_from('>I', md5(str(data_id)).digest())[0] vnode_id = bisect_left(vnode_range_starts, hsh % DATA_ID_COUNT) % VNODE_COUNT node_id = vnode2node[vnode_id] new_node_id = new_vnode2node[vnode_id] if node_id != new_node_id: moved_ids += 1 percent_moved = 100.0 * moved_ids / DATA_ID_COUNT print '%d ids moved, %.02f%%' % (moved_ids, percent_moved) :: 90423 ids moved, 0.90% There we go, we added 1% capacity and only moved 0.9% of existing data. The vnode_range_starts list seems a bit out of place though. Its values are calculated and never change for the lifetime of the cluster, so let's optimize that out. .. 
code-block:: python

    from bisect import bisect_left
    from hashlib import md5
    from struct import unpack_from

    NODE_COUNT = 100
    DATA_ID_COUNT = 10000000
    VNODE_COUNT = 1000

    vnode2node = []
    for vnode_id in xrange(VNODE_COUNT):
        vnode2node.append(vnode_id % NODE_COUNT)
    new_vnode2node = list(vnode2node)
    new_node_id = NODE_COUNT
    vnodes_to_reassign = VNODE_COUNT / (NODE_COUNT + 1)
    while vnodes_to_reassign > 0:
        for node_to_take_from in xrange(NODE_COUNT):
            for vnode_id, node_id in enumerate(new_vnode2node):
                if node_id == node_to_take_from:
                    new_vnode2node[vnode_id] = new_node_id
                    vnodes_to_reassign -= 1
                    break
            if vnodes_to_reassign <= 0:
                break
    moved_ids = 0
    for data_id in xrange(DATA_ID_COUNT):
        data_id = str(data_id)
        hsh = unpack_from('>I', md5(str(data_id)).digest())[0]
        vnode_id = hsh % VNODE_COUNT
        node_id = vnode2node[vnode_id]
        new_node_id = new_vnode2node[vnode_id]
        if node_id != new_node_id:
            moved_ids += 1
    percent_moved = 100.0 * moved_ids / DATA_ID_COUNT
    print '%d ids moved, %.02f%%' % (moved_ids, percent_moved)

::

    89841 ids moved, 0.90%

There we go. In the next part of this series, we'll further examine the algorithm's limitations and how to improve on it.

Part 3
======

In Part 2 of this series, we reached an algorithm that performed well even when adding new nodes to the cluster. We used 1000 virtual nodes that could be independently assigned to nodes, allowing us to minimize the amount of data moved when a node was added.

The number of virtual nodes puts a cap on how many real nodes you can have. For example, if you have 1000 virtual nodes and you try to add a 1001st real node, you can't assign a virtual node to it without leaving another real node with no assignment, leaving you with just 1000 active real nodes still.

Unfortunately, the number of virtual nodes created at the beginning can never change for the life of the cluster without a lot of careful work. For example, you could double the virtual node count by splitting each existing virtual node in half and assigning both halves to the same real node. However, if the real node uses the virtual node's id to optimally store the data (for example, all data might be stored in /[virtual node id]/[data id]) it would have to move data around locally to reflect the change. And it would have to resolve data using both the new and old locations while the moves were taking place, making atomic operations difficult or impossible.

Let's continue with this assumption that changing the virtual node count is more work than it's worth, but keep in mind that some applications might be fine with this.

The easiest way to deal with this limitation is to make the limit high enough that it won't matter. For instance, if we decide our cluster will never exceed 60,000 real nodes, we can just make 60,000 virtual nodes.

Also, we should include in our calculations the relative size of our nodes. For instance, a year from now we might have real nodes that can handle twice the capacity of our current nodes. So we'd want to assign twice the virtual nodes to those future nodes, so maybe we should raise our virtual node estimate to 120,000.

A good rule to follow might be to calculate 100 virtual nodes to each real node at maximum capacity. This would allow you to alter the load on any given node by 1%, even at max capacity, which is pretty fine tuning. So now we're at 6,000,000 virtual nodes for a max capacity cluster of 60,000 real nodes.

6 million virtual nodes seems like a lot, and it might seem like we'd use up way too much memory.
But the only structure this affects is the virtual node to real node mapping. The base amount of memory required would be 6 million times 2 bytes (to store a real node id from 0 to 65,535). 12 megabytes of memory just isn't that much to use these days. Even with all the overhead of flexible data types, things aren't that bad. I changed the code from the previous part in this series to have 60,000 real and 6,000,000 virtual nodes, changed the list to an array('H'), and python topped out at 27m of resident memory - and that includes two rings.

To change terminology a bit, we're going to start calling these virtual nodes "partitions". This will make it a bit easier to discern between the two types of nodes we've been talking about so far. Also, it makes sense to talk about partitions as they are really just unchanging sections of the hash space.

We're also going to always keep the partition count a power of two. This makes it easy to just use bit manipulation on the hash to determine the partition rather than modulus. It isn't much faster, but it is a little.

So, here's our updated ring code, using 8,388,608 (2 ** 23) partitions and 65,536 nodes. We've upped the sample data id set and checked the distribution to make sure we haven't broken anything.

.. code-block:: python

    from array import array
    from hashlib import md5
    from struct import unpack_from

    PARTITION_POWER = 23
    PARTITION_SHIFT = 32 - PARTITION_POWER
    NODE_COUNT = 65536
    DATA_ID_COUNT = 100000000

    part2node = array('H')
    for part in xrange(2 ** PARTITION_POWER):
        part2node.append(part % NODE_COUNT)
    node_counts = [0] * NODE_COUNT
    for data_id in xrange(DATA_ID_COUNT):
        data_id = str(data_id)
        part = unpack_from('>I',
            md5(str(data_id)).digest())[0] >> PARTITION_SHIFT
        node_id = part2node[part]
        node_counts[node_id] += 1
    desired_count = DATA_ID_COUNT / NODE_COUNT
    print '%d: Desired data ids per node' % desired_count
    max_count = max(node_counts)
    over = 100.0 * (max_count - desired_count) / desired_count
    print '%d: Most data ids on one node, %.02f%% over' % \
        (max_count, over)
    min_count = min(node_counts)
    under = 100.0 * (desired_count - min_count) / desired_count
    print '%d: Least data ids on one node, %.02f%% under' % \
        (min_count, under)

::

    1525: Desired data ids per node
    1683: Most data ids on one node, 10.36% over
    1360: Least data ids on one node, 10.82% under

Hmm. ±10% seems a bit high, but I reran with 65,536 partitions and 256 nodes and got ±0.4% so it's just that our sample size (100m) is too small for our number of partitions (8m). It'll take way too long to run experiments with an even larger sample size, so let's reduce back down to these lesser numbers. (To be certain, I reran at the full version with a 10 billion data id sample set and got ±1%, but it took 6.5 hours to run.)

In the next part of this series, we'll talk about how to increase the durability of our data in the cluster.

Part 4
======

In Part 3 of this series, we just further discussed partitions (virtual nodes) and cleaned up our code a bit based on that. Now, let's talk about how to increase the durability and availability of our data in the cluster.

For many distributed data stores, durability is quite important. Either RAID arrays or individually distinct copies of data are required. While RAID will increase the durability, it does nothing to increase the availability - if the RAID machine crashes, the data may be safe but inaccessible until repairs are done.
If we keep distinct copies of the data on different machines and a machine crashes, the other copies will still be available while we repair the broken machine. An easy way to gain this multiple copy durability/availability is to just use multiple rings and groups of nodes. For instance, to achieve the industry standard of three copies, you'd split the nodes into three groups and each group would have its own ring and each would receive a copy of each data item. This can work well enough, but has the drawback that expanding capacity requires adding three nodes at a time and that losing one node essentially lowers capacity by three times that node's capacity. Instead, let's use a different, but common, approach of meeting our requirements with a single ring. This can be done by walking the ring from the starting point and looking for additional distinct nodes. Here's code that supports a variable number of replicas (set to 3 for testing): .. code-block:: python from array import array from hashlib import md5 from struct import unpack_from REPLICAS = 3 PARTITION_POWER = 16 PARTITION_SHIFT = 32 - PARTITION_POWER PARTITION_MAX = 2 ** PARTITION_POWER - 1 NODE_COUNT = 256 DATA_ID_COUNT = 10000000 part2node = array('H') for part in xrange(2 ** PARTITION_POWER): part2node.append(part % NODE_COUNT) node_counts = [0] * NODE_COUNT for data_id in xrange(DATA_ID_COUNT): data_id = str(data_id) part = unpack_from('>I', md5(str(data_id)).digest())[0] >> PARTITION_SHIFT node_ids = [part2node[part]] node_counts[node_ids[0]] += 1 for replica in xrange(1, REPLICAS): while part2node[part] in node_ids: part += 1 if part > PARTITION_MAX: part = 0 node_ids.append(part2node[part]) node_counts[node_ids[-1]] += 1 desired_count = DATA_ID_COUNT / NODE_COUNT * REPLICAS print '%d: Desired data ids per node' % desired_count max_count = max(node_counts) over = 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids on one node, %.02f%% over' % \ (max_count, over) min_count = min(node_counts) under = 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids on one node, %.02f%% under' % \ (min_count, under) :: 117186: Desired data ids per node 118133: Most data ids on one node, 0.81% over 116093: Least data ids on one node, 0.93% under That's pretty good; less than 1% over/under. While this works well, there are a couple of problems. First, because of how we've initially assigned the partitions to nodes, all the partitions for a given node have their extra copies on the same other two nodes. The problem here is that when a machine fails, the load on these other nodes will jump by that amount. It'd be better if we initially shuffled the partition assignment to distribute the failover load better. The other problem is a bit harder to explain, but deals with physical separation of machines. Imagine you can only put 16 machines in a rack in your datacenter. The 256 nodes we've been using would fill 16 racks. With our current code, if a rack goes out (power problem, network issue, etc.) there is a good chance some data will have all three copies in that rack, becoming inaccessible. We can fix this shortcoming by adding the concept of zones to our nodes, and then ensuring that replicas are stored in distinct zones. .. 
code-block:: python from array import array from hashlib import md5 from random import shuffle from struct import unpack_from REPLICAS = 3 PARTITION_POWER = 16 PARTITION_SHIFT = 32 - PARTITION_POWER PARTITION_MAX = 2 ** PARTITION_POWER - 1 NODE_COUNT = 256 ZONE_COUNT = 16 DATA_ID_COUNT = 10000000 node2zone = [] while len(node2zone) < NODE_COUNT: zone = 0 while zone < ZONE_COUNT and len(node2zone) < NODE_COUNT: node2zone.append(zone) zone += 1 part2node = array('H') for part in xrange(2 ** PARTITION_POWER): part2node.append(part % NODE_COUNT) shuffle(part2node) node_counts = [0] * NODE_COUNT zone_counts = [0] * ZONE_COUNT for data_id in xrange(DATA_ID_COUNT): data_id = str(data_id) part = unpack_from('>I', md5(str(data_id)).digest())[0] >> PARTITION_SHIFT node_ids = [part2node[part]] zones = [node2zone[node_ids[0]]] node_counts[node_ids[0]] += 1 zone_counts[zones[0]] += 1 for replica in xrange(1, REPLICAS): while part2node[part] in node_ids and \ node2zone[part2node[part]] in zones: part += 1 if part > PARTITION_MAX: part = 0 node_ids.append(part2node[part]) zones.append(node2zone[node_ids[-1]]) node_counts[node_ids[-1]] += 1 zone_counts[zones[-1]] += 1 desired_count = DATA_ID_COUNT / NODE_COUNT * REPLICAS print '%d: Desired data ids per node' % desired_count max_count = max(node_counts) over = 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids on one node, %.02f%% over' % \ (max_count, over) min_count = min(node_counts) under = 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids on one node, %.02f%% under' % \ (min_count, under) desired_count = DATA_ID_COUNT / ZONE_COUNT * REPLICAS print '%d: Desired data ids per zone' % desired_count max_count = max(zone_counts) over = 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids in one zone, %.02f%% over' % \ (max_count, over) min_count = min(zone_counts) under = 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids in one zone, %.02f%% under' % \ (min_count, under) :: 117186: Desired data ids per node 118782: Most data ids on one node, 1.36% over 115632: Least data ids on one node, 1.33% under 1875000: Desired data ids per zone 1878533: Most data ids in one zone, 0.19% over 1869070: Least data ids in one zone, 0.32% under So the shuffle and zone distinctions affected our distribution some, but still definitely good enough. This test took about 64 seconds to run on my machine. There's a completely alternate, and quite common, way of accomplishing these same requirements. This alternate method doesn't use partitions at all, but instead just assigns anchors to the nodes within the hash space. Finding the first node for a given hash just involves walking this anchor ring for the next node, and finding additional nodes works similarly as before. To attain the equivalent of our virtual nodes, each real node is assigned multiple anchors. .. 
code-block:: python from bisect import bisect_left from hashlib import md5 from struct import unpack_from REPLICAS = 3 NODE_COUNT = 256 ZONE_COUNT = 16 DATA_ID_COUNT = 10000000 VNODE_COUNT = 100 node2zone = [] while len(node2zone) < NODE_COUNT: zone = 0 while zone < ZONE_COUNT and len(node2zone) < NODE_COUNT: node2zone.append(zone) zone += 1 hash2index = [] index2node = [] for node in xrange(NODE_COUNT): for vnode in xrange(VNODE_COUNT): hsh = unpack_from('>I', md5(str(node)).digest())[0] index = bisect_left(hash2index, hsh) if index > len(hash2index): index = 0 hash2index.insert(index, hsh) index2node.insert(index, node) node_counts = [0] * NODE_COUNT zone_counts = [0] * ZONE_COUNT for data_id in xrange(DATA_ID_COUNT): data_id = str(data_id) hsh = unpack_from('>I', md5(str(data_id)).digest())[0] index = bisect_left(hash2index, hsh) if index >= len(hash2index): index = 0 node_ids = [index2node[index]] zones = [node2zone[node_ids[0]]] node_counts[node_ids[0]] += 1 zone_counts[zones[0]] += 1 for replica in xrange(1, REPLICAS): while index2node[index] in node_ids and \ node2zone[index2node[index]] in zones: index += 1 if index >= len(hash2index): index = 0 node_ids.append(index2node[index]) zones.append(node2zone[node_ids[-1]]) node_counts[node_ids[-1]] += 1 zone_counts[zones[-1]] += 1 desired_count = DATA_ID_COUNT / NODE_COUNT * REPLICAS print '%d: Desired data ids per node' % desired_count max_count = max(node_counts) over = 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids on one node, %.02f%% over' % \ (max_count, over) min_count = min(node_counts) under = 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids on one node, %.02f%% under' % \ (min_count, under) desired_count = DATA_ID_COUNT / ZONE_COUNT * REPLICAS print '%d: Desired data ids per zone' % desired_count max_count = max(zone_counts) over = 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids in one zone, %.02f%% over' % \ (max_count, over) min_count = min(zone_counts) under = 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids in one zone, %.02f%% under' % \ (min_count, under) :: 117186: Desired data ids per node 351282: Most data ids on one node, 199.76% over 15965: Least data ids on one node, 86.38% under 1875000: Desired data ids per zone 2248496: Most data ids in one zone, 19.92% over 1378013: Least data ids in one zone, 26.51% under This test took over 15 minutes to run! Unfortunately, this method also gives much less control over the distribution. To get better distribution, you have to add more virtual nodes, which eats up more memory and takes even more time to build the ring and perform distinct node lookups. The most common operation, data id lookup, can be improved (by predetermining each virtual node's failover nodes, for instance) but it starts off so far behind our first approach that we'll just stick with that. In the next part of this series, we'll start to wrap all this up into a useful Python module. Part 5 ====== In Part 4 of this series, we ended up with a multiple copy, distinctly zoned ring. Or at least the start of it. In this final part we'll package the code up into a useable Python module and then add one last feature. First, let's separate the ring itself from the building of the data for the ring and its testing. .. 
code-block:: python from array import array from hashlib import md5 from random import shuffle from struct import unpack_from from time import time class Ring(object): def __init__(self, nodes, part2node, replicas): self.nodes = nodes self.part2node = part2node self.replicas = replicas partition_power = 1 while 2 ** partition_power < len(part2node): partition_power += 1 if len(part2node) != 2 ** partition_power: raise Exception("part2node's length is not an " "exact power of 2") self.partition_shift = 32 - partition_power def get_nodes(self, data_id): data_id = str(data_id) part = unpack_from('>I', md5(data_id).digest())[0] >> self.partition_shift node_ids = [self.part2node[part]] zones = [self.nodes[node_ids[0]]] for replica in xrange(1, self.replicas): while self.part2node[part] in node_ids and \ self.nodes[self.part2node[part]] in zones: part += 1 if part >= len(self.part2node): part = 0 node_ids.append(self.part2node[part]) zones.append(self.nodes[node_ids[-1]]) return [self.nodes[n] for n in node_ids] def build_ring(nodes, partition_power, replicas): begin = time() part2node = array('H') for part in xrange(2 ** partition_power): part2node.append(part % len(nodes)) shuffle(part2node) ring = Ring(nodes, part2node, replicas) print '%.02fs to build ring' % (time() - begin) return ring def test_ring(ring): begin = time() DATA_ID_COUNT = 10000000 node_counts = {} zone_counts = {} for data_id in xrange(DATA_ID_COUNT): for node in ring.get_nodes(data_id): node_counts[node['id']] = \ node_counts.get(node['id'], 0) + 1 zone_counts[node['zone']] = \ zone_counts.get(node['zone'], 0) + 1 print '%ds to test ring' % (time() - begin) desired_count = \ DATA_ID_COUNT / len(ring.nodes) * REPLICAS print '%d: Desired data ids per node' % desired_count max_count = max(node_counts.itervalues()) over = \ 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids on one node, %.02f%% over' % \ (max_count, over) min_count = min(node_counts.itervalues()) under = \ 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids on one node, %.02f%% under' % \ (min_count, under) zone_count = \ len(set(n['zone'] for n in ring.nodes.itervalues())) desired_count = \ DATA_ID_COUNT / zone_count * ring.replicas print '%d: Desired data ids per zone' % desired_count max_count = max(zone_counts.itervalues()) over = \ 100.0 * (max_count - desired_count) / desired_count print '%d: Most data ids in one zone, %.02f%% over' % \ (max_count, over) min_count = min(zone_counts.itervalues()) under = \ 100.0 * (desired_count - min_count) / desired_count print '%d: Least data ids in one zone, %.02f%% under' % \ (min_count, under) if __name__ == '__main__': PARTITION_POWER = 16 REPLICAS = 3 NODE_COUNT = 256 ZONE_COUNT = 16 nodes = {} while len(nodes) < NODE_COUNT: zone = 0 while zone < ZONE_COUNT and len(nodes) < NODE_COUNT: node_id = len(nodes) nodes[node_id] = {'id': node_id, 'zone': zone} zone += 1 ring = build_ring(nodes, PARTITION_POWER, REPLICAS) test_ring(ring) :: 0.06s to build ring 82s to test ring 117186: Desired data ids per node 118773: Most data ids on one node, 1.35% over 115801: Least data ids on one node, 1.18% under 1875000: Desired data ids per zone 1878339: Most data ids in one zone, 0.18% over 1869914: Least data ids in one zone, 0.27% under It takes a bit longer to test our ring, but that's mostly because of the switch to dictionaries from arrays for various items. 
Having node dictionaries is nice because you can attach any node information you want directly there (ip addresses, tcp ports, drive paths, etc.). But we're still on track for further testing; our distribution is still good. Now, let's add our one last feature to our ring: the concept of weights. Weights are useful because the nodes you add later in a ring's life are likely to have more capacity than those you have at the outset. For this test, we'll make half our nodes have twice the weight. We'll have to change build_ring to give more partitions to the nodes with more weight and we'll change test_ring to take into account these weights. Since we've changed so much I'll just post the entire module again: .. code-block:: python from array import array from hashlib import md5 from random import shuffle from struct import unpack_from from time import time class Ring(object): def __init__(self, nodes, part2node, replicas): self.nodes = nodes self.part2node = part2node self.replicas = replicas partition_power = 1 while 2 ** partition_power < len(part2node): partition_power += 1 if len(part2node) != 2 ** partition_power: raise Exception("part2node's length is not an " "exact power of 2") self.partition_shift = 32 - partition_power def get_nodes(self, data_id): data_id = str(data_id) part = unpack_from('>I', md5(data_id).digest())[0] >> self.partition_shift node_ids = [self.part2node[part]] zones = [self.nodes[node_ids[0]]] for replica in xrange(1, self.replicas): while self.part2node[part] in node_ids and \ self.nodes[self.part2node[part]] in zones: part += 1 if part >= len(self.part2node): part = 0 node_ids.append(self.part2node[part]) zones.append(self.nodes[node_ids[-1]]) return [self.nodes[n] for n in node_ids] def build_ring(nodes, partition_power, replicas): begin = time() parts = 2 ** partition_power total_weight = \ float(sum(n['weight'] for n in nodes.itervalues())) for node in nodes.itervalues(): node['desired_parts'] = \ parts / total_weight * node['weight'] part2node = array('H') for part in xrange(2 ** partition_power): for node in nodes.itervalues(): if node['desired_parts'] >= 1: node['desired_parts'] -= 1 part2node.append(node['id']) break else: for node in nodes.itervalues(): if node['desired_parts'] >= 0: node['desired_parts'] -= 1 part2node.append(node['id']) break shuffle(part2node) ring = Ring(nodes, part2node, replicas) print '%.02fs to build ring' % (time() - begin) return ring def test_ring(ring): begin = time() DATA_ID_COUNT = 10000000 node_counts = {} zone_counts = {} for data_id in xrange(DATA_ID_COUNT): for node in ring.get_nodes(data_id): node_counts[node['id']] = \ node_counts.get(node['id'], 0) + 1 zone_counts[node['zone']] = \ zone_counts.get(node['zone'], 0) + 1 print '%ds to test ring' % (time() - begin) total_weight = float(sum(n['weight'] for n in ring.nodes.itervalues())) max_over = 0 max_under = 0 for node in ring.nodes.itervalues(): desired = DATA_ID_COUNT * REPLICAS * \ node['weight'] / total_weight diff = node_counts[node['id']] - desired if diff > 0: over = 100.0 * diff / desired if over > max_over: max_over = over else: under = 100.0 * (-diff) / desired if under > max_under: max_under = under print '%.02f%% max node over' % max_over print '%.02f%% max node under' % max_under max_over = 0 max_under = 0 for zone in set(n['zone'] for n in ring.nodes.itervalues()): zone_weight = sum(n['weight'] for n in ring.nodes.itervalues() if n['zone'] == zone) desired = DATA_ID_COUNT * REPLICAS * \ zone_weight / total_weight diff = zone_counts[zone] - desired if diff 
> 0: over = 100.0 * diff / desired if over > max_over: max_over = over else: under = 100.0 * (-diff) / desired if under > max_under: max_under = under print '%.02f%% max zone over' % max_over print '%.02f%% max zone under' % max_under if __name__ == '__main__': PARTITION_POWER = 16 REPLICAS = 3 NODE_COUNT = 256 ZONE_COUNT = 16 nodes = {} while len(nodes) < NODE_COUNT: zone = 0 while zone < ZONE_COUNT and len(nodes) < NODE_COUNT: node_id = len(nodes) nodes[node_id] = {'id': node_id, 'zone': zone, 'weight': 1.0 + (node_id % 2)} zone += 1 ring = build_ring(nodes, PARTITION_POWER, REPLICAS) test_ring(ring) :: 0.88s to build ring 86s to test ring 1.66% max over 1.46% max under 0.28% max zone over 0.23% max zone under So things are still good, even though we have differently weighted nodes. I ran another test with this code using random weights from 1 to 100 and got over/under values for nodes of 7.35%/18.12% and zones of 0.24%/0.22%, still pretty good considering the crazy weight ranges. Summary ======= Hopefully this series has been a good introduction to building a ring. This code is essentially how the OpenStack Swift ring works, except that Swift's ring has lots of additional optimizations, such as storing each replica assignment separately, and lots of extra features for building, validating, and otherwise working with rings. swift-2.17.0/doc/source/policies_saio.rst0000666000175100017510000001475113236061617020421 0ustar zuulzuul00000000000000=========================================== Adding Storage Policies to an Existing SAIO =========================================== Depending on when you downloaded your SAIO environment, it may already be prepared with two storage policies that enable some basic functional tests. In the event that you are adding a storage policy to an existing installation, however, the following section will walk you through the steps for setting up Storage Policies. Note that configuring more than one storage policy on your development environment is recommended but optional. Enabling multiple Storage Policies is very easy regardless of whether you are working with an existing installation or starting a brand new one. Now we will create two policies - the first one will be a standard triple replication policy that we will also explicitly set as the default and the second will be setup for reduced replication using a factor of 2x. We will call the first one 'gold' and the second one 'silver'. In this example both policies map to the same devices because it's also important for this sample implementation to be simple and easy to understand and adding a bunch of new devices isn't really required to implement a usable set of policies. 1. To define your policies, add the following to your ``/etc/swift/swift.conf`` file:: [storage-policy:0] name = gold aliases = yellow, orange default = yes [storage-policy:1] name = silver See :doc:`overview_policies` for detailed information on ``swift.conf`` policy options. 2. 
To create the object ring for the silver policy (index 1), add the following to your ``bin/remakerings`` script and re-run it (your script may already have these changes)::

    swift-ring-builder object-1.builder create 10 2 1
    swift-ring-builder object-1.builder add r1z1-127.0.0.1:6010/sdb1 1
    swift-ring-builder object-1.builder add r1z2-127.0.0.1:6020/sdb2 1
    swift-ring-builder object-1.builder add r1z3-127.0.0.1:6030/sdb3 1
    swift-ring-builder object-1.builder add r1z4-127.0.0.1:6040/sdb4 1
    swift-ring-builder object-1.builder rebalance

   Note that the reduced replication of the silver policy is only a function of the replication parameter in the ``swift-ring-builder create`` command and is not specified in ``/etc/swift/swift.conf``.

3. Copy ``etc/container-reconciler.conf-sample`` to ``/etc/swift/container-reconciler.conf`` and fix the user option::

    cp etc/container-reconciler.conf-sample /etc/swift/container-reconciler.conf
    sed -i "s/# user.*/user = $USER/g" /etc/swift/container-reconciler.conf

------------------
Using Policies
------------------

Setting up Storage Policies was very simple, and using them is even simpler. In this section, we will run some commands to create a few containers with different policies, store objects in them, and see how Storage Policies affect the placement of data in Swift.

1. We will be using the list_endpoints middleware to confirm object locations, so enable that now in your ``proxy-server.conf`` file by adding it to the pipeline and including the filter section as shown below (be sure to restart your proxy after making these changes)::

    pipeline = catch_errors gatekeeper healthcheck proxy-logging cache bulk \
    slo dlo ratelimit crossdomain list-endpoints tempurl tempauth staticweb \
    container-quotas account-quotas proxy-logging proxy-server

    [filter:list-endpoints]
    use = egg:swift#list_endpoints

2. Check to see that your policies are reported via /info::

    swift -A http://127.0.0.1:8080/auth/v1.0 -U test:tester -K testing info

   You should see this: (only showing the policy output here)::

    policies: [{'aliases': 'gold, yellow, orange', 'default': True,
    'name': 'gold'}, {'aliases': 'silver', 'name': 'silver'}]

3. Now create a container without specifying a policy; it will use the default, 'gold'. Then put a test object in it (create the file ``file0.txt`` with your favorite editor with some content). In the commands below, ``<token>`` is a placeholder for your auth token::

    curl -v -X PUT -H 'X-Auth-Token: <token>' \
    http://127.0.0.1:8080/v1/AUTH_test/myCont0
    curl -X PUT -v -T file0.txt -H 'X-Auth-Token: <token>' \
    http://127.0.0.1:8080/v1/AUTH_test/myCont0/file0.txt

4. Now confirm placement of the object with the :ref:`list_endpoints` middleware::

    curl -X GET -v http://127.0.0.1:8080/endpoints/AUTH_test/myCont0/file0.txt

   You should see this: (note placement on expected devices)::

    ["http://127.0.0.1:6030/sdb3/761/AUTH_test/myCont0/file0.txt",
     "http://127.0.0.1:6010/sdb1/761/AUTH_test/myCont0/file0.txt",
     "http://127.0.0.1:6020/sdb2/761/AUTH_test/myCont0/file0.txt"]

5. Create a container using policy 'silver' and put a different file in it::

    curl -v -X PUT -H 'X-Auth-Token: <token>' -H \
    "X-Storage-Policy: silver" \
    http://127.0.0.1:8080/v1/AUTH_test/myCont1
    curl -X PUT -v -T file1.txt -H 'X-Auth-Token: <token>' \
    http://127.0.0.1:8080/v1/AUTH_test/myCont1/

6. Confirm placement of the object for policy 'silver'::

    curl -X GET -v http://127.0.0.1:8080/endpoints/AUTH_test/myCont1/file1.txt

   You should see this: (note placement on expected devices)::

    ["http://127.0.0.1:6010/sdb1/32/AUTH_test/myCont1/file1.txt",
     "http://127.0.0.1:6040/sdb4/32/AUTH_test/myCont1/file1.txt"]

7.
Confirm account information with HEAD, make sure that your container-updater service is running and has executed once since you performed the PUTs or the account database won't be updated yet:: curl -i -X HEAD -H 'X-Auth-Token: ' \ http://127.0.0.1:8080/v1/AUTH_test You should see something like this (note that total and per policy stats object sizes will vary):: HTTP/1.1 204 No Content Content-Length: 0 X-Account-Object-Count: 2 X-Account-Bytes-Used: 174 X-Account-Container-Count: 2 X-Account-Storage-Policy-Gold-Object-Count: 1 X-Account-Storage-Policy-Gold-Bytes-Used: 84 X-Account-Storage-Policy-Silver-Object-Count: 1 X-Account-Storage-Policy-Silver-Bytes-Used: 90 X-Timestamp: 1397230339.71525 Content-Type: text/plain; charset=utf-8 Accept-Ranges: bytes X-Trans-Id: tx96e7496b19bb44abb55a3-0053482c75 X-Openstack-Request-Id: tx96e7496b19bb44abb55a3-0053482c75 Date: Fri, 11 Apr 2014 17:55:01 GMT swift-2.17.0/doc/source/overview_object_versioning.rst0000666000175100017510000000020313236061617023221 0ustar zuulzuul00000000000000Object Versioning ================= .. automodule:: swift.common.middleware.versioned_writes :members: :show-inheritance: swift-2.17.0/doc/source/getting_started.rst0000666000175100017510000000315313236061617020760 0ustar zuulzuul00000000000000=============== Getting Started =============== ------------------- System Requirements ------------------- Swift development currently targets Ubuntu Server 16.04, but should work on most Linux platforms. Swift is written in Python and has these dependencies: * Python 2.7 * rsync 3.0 * The Python packages listed in `the requirements file `_ * Testing additionally requires `the test dependencies `_ * Testing requires `these distribution packages `_ There is no current support for Python 3. ----------- Development ----------- To get started with development with Swift, or to just play around, the following docs will be useful: * :doc:`Swift All in One ` - Set up a VM with Swift installed * :doc:`Development Guidelines ` * :doc:`First Contribution to Swift ` * :doc:`Associated Projects ` -------------------------- CLI client and SDK library -------------------------- There are many clients in the :ref:`ecosystem `. The official CLI and SDK is python-swiftclient. * `Source code `_ * `Python Package Index `_ ---------- Production ---------- If you want to set up and configure Swift for a production cluster, the following doc should be useful: * :doc:`Multiple Server Swift Installation ` swift-2.17.0/doc/source/_extra/0000775000175100017510000000000013236061751016314 5ustar zuulzuul00000000000000swift-2.17.0/doc/source/_extra/.htaccess0000666000175100017510000000020613236061617020113 0ustar zuulzuul00000000000000# docs redirects are defined here redirectmatch 301 ^/swift/([^/]+)/team.html$ https://github.com/openstack/swift/blob/master/AUTHORS swift-2.17.0/doc/source/howto_installmultinode.rst0000666000175100017510000000117313236061617022400 0ustar zuulzuul00000000000000===================================================== Instructions for a Multiple Server Swift Installation ===================================================== Please refer to the latest official `OpenStack Installation Guides `_ for the most up-to-date documentation. Current Install Guides ---------------------- * `Object Storage installation guide for OpenStack Ocata `__ * `Object Storage installation guide for OpenStack Newton `__ swift-2.17.0/doc/source/index.rst0000666000175100017510000000715013236061617016701 0ustar zuulzuul00000000000000.. 
Copyright 2010-2012 OpenStack Foundation
      All Rights Reserved.

      Licensed under the Apache License, Version 2.0 (the "License"); you may
      not use this file except in compliance with the License. You may obtain
      a copy of the License at

          http://www.apache.org/licenses/LICENSE-2.0

      Unless required by applicable law or agreed to in writing, software
      distributed under the License is distributed on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied. See the License for the specific language governing
      permissions and limitations under the License.

=================================
Welcome to Swift's documentation!
=================================

Swift is a highly available, distributed, eventually consistent object/blob store. Organizations can use Swift to store lots of data efficiently, safely, and cheaply.

This documentation is generated by the Sphinx toolkit and lives in the source tree. Additional documentation on Swift and other components of OpenStack can be found on the `OpenStack wiki`_ and at http://docs.openstack.org.

.. _`OpenStack wiki`: http://wiki.openstack.org

.. note::

    If you're looking for associated projects that enhance or use Swift, please see the :ref:`associated_projects` page.

.. toctree::
    :maxdepth: 1

    getting_started

Overview and Concepts
=====================

.. toctree::
    :maxdepth: 1

    api/object_api_v1_overview
    overview_architecture
    overview_ring
    overview_policies
    overview_reaper
    overview_auth
    overview_acl
    overview_replication
    ratelimit
    overview_large_objects
    overview_object_versioning
    overview_global_cluster
    overview_container_sync
    overview_expiring_objects
    cors
    crossdomain
    overview_erasure_code
    overview_encryption
    overview_backing_store
    ring_background
    ring_partpower
    associated_projects

Developer Documentation
=======================

.. toctree::
    :maxdepth: 1

    development_guidelines
    development_saio
    first_contribution_swift
    policies_saio
    development_auth
    development_middleware
    development_ondisk_backends

Administrator Documentation
===========================

.. toctree::
    :maxdepth: 1

    howto_installmultinode
    deployment_guide
    apache_deployment_guide
    admin_guide
    replication_network
    logs
    ops_runbook/index
    admin/index
    install/index

Object Storage v1 REST API Documentation
========================================

See `Complete Reference for the Object Storage REST API `_

The following provides supporting information for the REST API:

.. toctree::
    :maxdepth: 1

    api/object_api_v1_overview.rst
    api/discoverability.rst
    api/authentication.rst
    api/container_quotas.rst
    api/object_versioning.rst
    api/large_objects.rst
    api/temporary_url_middleware.rst
    api/form_post_middleware.rst
    api/use_content-encoding_metadata.rst
    api/use_the_content-disposition_metadata.rst

OpenStack End User Guide
========================

The `OpenStack End User Guide `_ has additional information on using Swift. See the `Manage objects and containers `_ section.

Source Documentation
====================

.. toctree::
    :maxdepth: 2

    ring
    proxy
    account
    container
    db
    object
    misc
    middleware

Indices and tables
==================

* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`

swift-2.17.0/doc/source/cors.rst0000666000175100017510000001037213236061617016540 0ustar zuulzuul00000000000000
====
CORS
====

CORS_ is a mechanism to allow code running in a browser (Javascript, for example) to make requests to a domain other than the one from which it originated. Swift supports CORS requests to containers and objects. CORS metadata is held on the container only.

The values given apply to the container itself and all objects within it.

The supported headers are:

+------------------------------------------------+------------------------------+
| Metadata                                       | Use                          |
+================================================+==============================+
| X-Container-Meta-Access-Control-Allow-Origin   | Origins to be allowed to     |
|                                                | make Cross Origin Requests,  |
|                                                | space separated.             |
+------------------------------------------------+------------------------------+
| X-Container-Meta-Access-Control-Max-Age        | Max age for the Origin to    |
|                                                | hold the preflight results.  |
+------------------------------------------------+------------------------------+
| X-Container-Meta-Access-Control-Expose-Headers | Headers exposed to the user  |
|                                                | agent (e.g. browser) in the  |
|                                                | actual request response.     |
|                                                | Space separated.             |
+------------------------------------------------+------------------------------+

Before a browser issues an actual request it may issue a `preflight request`_.
The preflight request is an OPTIONS call to verify the Origin is allowed to
make the request. The sequence of events is:

* Browser makes OPTIONS request to Swift
* Swift returns 200/401 to browser based on allowed origins
* If 200, browser makes the "actual request" to Swift, i.e. PUT, POST,
  DELETE, HEAD, GET

When a browser receives a response to an actual request it only exposes those
headers listed in the ``Access-Control-Expose-Headers`` header. By default
Swift returns the following values for this header:

* "simple response headers" as listed on
  http://www.w3.org/TR/cors/#simple-response-header
* the headers ``etag``, ``x-timestamp``, ``x-trans-id``,
  ``x-openstack-request-id``
* all metadata headers (``X-Container-Meta-*`` for containers and
  ``X-Object-Meta-*`` for objects)
* headers listed in ``X-Container-Meta-Access-Control-Expose-Headers``

.. note::

    An OPTIONS request to a symlink object will respond with the options for
    the symlink only, the request will not be redirected to the target object.
    Therefore, if the symlink's target object is in another container with
    CORS settings, the response will not reflect the settings.

-----------------
Sample Javascript
-----------------

To see some CORS Javascript in action download the `test CORS page`_ (source
below). Host it on a webserver and take note of the protocol and hostname
(origin) you'll be using to request the page, e.g. http://localhost.

Locate a container you'd like to query. Needless to say the Swift cluster
hosting this container should have CORS support. Append the origin of the
test page to the container's
``X-Container-Meta-Access-Control-Allow-Origin`` header::

    curl -X POST -H 'X-Auth-Token: xxx' \
      -H 'X-Container-Meta-Access-Control-Allow-Origin: http://localhost' \
      http://192.168.56.3:8080/v1/AUTH_test/cont1

At this point the container is now accessible to CORS clients hosted on
http://localhost. Open the test CORS page in your browser.

#. Populate the Token field
#. Populate the URL field with the URL of either a container or object
#. Select the request method
#. Hit Submit

Assuming the request succeeds, you should see the response header and body.
If something went wrong, the response status will be 0.

.. _test CORS page:

--------------
Test CORS Page
--------------

A sample cross-site test page is located in the project source tree
``doc/source/test-cors.html``.

.. literalinclude:: test-cors.html

.. _CORS: https://developer.mozilla.org/en-US/docs/HTTP/Access_control_CORS
.. _preflight request: https://developer.mozilla.org/en-US/docs/HTTP/Access_control_CORS#Preflighted_requests
swift-2.17.0/doc/source/test-cors.html0000666000175100017510000000372513236061617017653 0ustar zuulzuul00000000000000[Test CORS page: Token, Method, and URL (Container or Object) form fields with a Submit button]
swift-2.17.0/doc/source/api/0000775000175100017510000000000013236061751015603 5ustar  zuulzuul00000000000000swift-2.17.0/doc/source/api/form_post_middleware.rst0000666000175100017510000001503413236061617022550 0ustar  zuulzuul00000000000000====================
Form POST middleware
====================

To discover whether your Object Storage system supports this feature,
check with your service provider or send a **GET** request using the :file:`/info`
path.

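As an illustration only (not part of the official examples), a minimal
Python 2 sketch along these lines checks the ``/info`` response for the
``formpost`` key; the endpoint shown is a placeholder for your cluster's
URL:

.. code::

    import json
    import urllib2

    # Placeholder endpoint; substitute your own cluster's URL.
    info = json.load(urllib2.urlopen('https://swift-cluster.example.com/info'))
    print('form POST supported' if 'formpost' in info else 'not supported')
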
You can upload objects directly to the Object Storage system from a
browser by using the form **POST** middleware. This middleware uses
account or container secret keys to generate a cryptographic signature for the
request. This means that you do not need to send an authentication token
in the ``X-Auth-Token`` header to perform the request.

The form **POST** middleware uses the same secret keys as the temporary
URL middleware uses. For information about how to set these keys, see
:ref:`secret_keys`.

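For illustration, a minimal Python 2 sketch that sets an account-level key
(the storage URL and token are placeholders; the header is described in
:ref:`secret_keys`):

.. code::

    import urllib2

    # Placeholder values; substitute your storage URL and auth token.
    req = urllib2.Request('https://swift-cluster.example.com/v1/my_account',
                          data='')  # an empty body makes this a POST
    req.add_header('X-Auth-Token', 'AUTH_tk0123456789abcdef')
    req.add_header('X-Account-Meta-Temp-URL-Key', 'MYKEY')
    urllib2.urlopen(req)
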
For information about the form **POST** middleware configuration
options, see :ref:`formpost` in the *Source Documentation*.

Form POST format
~~~~~~~~~~~~~~~~

To upload objects to a cluster, you can use an HTML form **POST**
request.

The format of the form **POST** request is:

**Example 1.14. Form POST format**

.. code::

    <form action="SWIFT_URL"
          method="POST"
          enctype="multipart/form-data">
      <input type="hidden" name="redirect" value="REDIRECT_URL"/>
      <input type="hidden" name="max_file_size" value="BYTES"/>
      <input type="hidden" name="max_file_count" value="COUNT"/>
      <input type="hidden" name="expires" value="UNIX_TIMESTAMP"/>
      <input type="hidden" name="signature" value="HMAC"/>
      <input type="file" name="FILE_NAME"/>
      <br/>
      <input type="submit"/>
    </form>

**action="SWIFT_URL"**

Set to full URL where the objects are to be uploaded. The names of uploaded
files are appended to the specified *SWIFT_URL*. So, you can upload directly
to the root of a container with a URL like:

.. code::

    https://swift-cluster.example.com/v1/my_account/container/

Optionally, you can include an object prefix to separate uploads, such as:

.. code::

    https://swift-cluster.example.com/v1/my_account/container/OBJECT_PREFIX

**method="POST"**

Must be ``POST``.

**enctype="multipart/form-data"**

Must be ``multipart/form-data``.

**name="redirect" value="REDIRECT_URL"**

Redirects the browser to the *REDIRECT_URL* after the upload completes. The
URL has status and message query parameters added to it, which specify the
HTTP status code for the upload and an optional error message. The 2\ *nn*
status code indicates success.

The *REDIRECT_URL* can be an empty string. If so, the ``Location`` response
header is not set.

**name="max\_file\_size" value="BYTES"**

Required. Indicates the size, in bytes, of the maximum single file upload.

**name="max\_file\_count" value="COUNT"**

Required. Indicates the maximum number of files that can be uploaded with the
form.

**name="expires" value="UNIX_TIMESTAMP"**

The UNIX timestamp of the time by which the form must be submitted; after
this time, the form is no longer valid.

**name="signature" value="HMAC"**

The HMAC-SHA1 signature of the form.

**type="file" name="FILE_NAME"**

File name of the file to be uploaded. You can include from one to the
``max_file_count`` value of files.

The file attributes must appear after the other attributes to be processed
correctly. If attributes appear after the file attributes, they are not sent
with the sub-request, because all attributes in the file cannot be parsed on
the server side unless the whole file is read into memory; the server does
not have enough memory to service these requests. Attributes that follow the
file attributes are ignored.

Optionally, if you want the uploaded files to be temporary you can set
x-delete-at or x-delete-after attributes by adding one of these as a form
input:

.. code::

    <input type="hidden" name="x_delete_at" value="<unix-timestamp>" />
    <input type="hidden" name="x_delete_after" value="<seconds>" />

**type="submit"**

Must be ``submit``.

HMAC-SHA1 signature for form POST
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Form **POST** middleware uses an HMAC-SHA1 cryptographic signature. This
signature includes these elements from the form:

- The path. Starting with ``/v1/`` onwards and including a container name
  and, optionally, an object prefix. In `Example 1.15`, "HMAC-SHA1 signature
  for form POST" the path is ``/v1/my_account/container/object_prefix``. Do
  not URL-encode the path at this stage.

- A redirect URL. If there is no redirect URL, use the empty string.

- Maximum file size. In `Example 1.15`, "HMAC-SHA1 signature for form POST"
  the ``max_file_size`` is ``104857600`` bytes.

- The maximum number of objects to upload. In `Example 1.15`, "HMAC-SHA1
  signature for form POST" ``max_file_count`` is ``10``.

- Expiry time. In `Example 1.15`, "HMAC-SHA1 signature for form POST" the
  expiry time is set to ``600`` seconds into the future.

- The secret key. Set as the ``X-Account-Meta-Temp-URL-Key`` header value for
  accounts or ``X-Container-Meta-Temp-URL-Key`` header value for containers.
  See :ref:`secret_keys` for more information.

The following example code generates a signature for use with form **POST**:

**Example 1.15. HMAC-SHA1 signature for form POST**

.. code::

    import hmac
    from hashlib import sha1
    from time import time
    path = '/v1/my_account/container/object_prefix'
    redirect = 'https://myserver.com/some-page'
    max_file_size = 104857600
    max_file_count = 10
    expires = int(time() + 600)
    key = 'MYKEY'
    hmac_body = '%s\n%s\n%s\n%s\n%s' % (path, redirect,
        max_file_size, max_file_count, expires)
    signature = hmac.new(key, hmac_body, sha1).hexdigest()

For more information, see `RFC 2104: HMAC: Keyed-Hashing for Message
Authentication `__.

Form POST example
~~~~~~~~~~~~~~~~~

The following example shows how to submit a form by using a cURL command. In
this example, the object prefix is ``photos/`` and the file being uploaded is
called ``flower.jpg``.

This example uses the **swift-form-signature** script to compute the
``expires`` and ``signature`` values.

.. code::

    $ bin/swift-form-signature /v1/my_account/container/photos/ https://example.com/done.html 5373952000 1 200 MYKEY
    Expires: 1390825338
    Signature: 35129416ebda2f1a21b3c2b8939850dfc63d8f43

.. code::

    $ curl -i https://swift-cluster.example.com/v1/my_account/container/photos/ -X POST \
      -F max_file_size=5373952000 -F max_file_count=1 -F expires=1390825338 \
      -F signature=35129416ebda2f1a21b3c2b8939850dfc63d8f43 \
      -F redirect=https://example.com/done.html \
      -F file=@flower.jpg
swift-2.17.0/doc/source/api/object_versioning.rst0000666000175100017510000003020513236061617022051 0ustar zuulzuul00000000000000=================
Object versioning
=================

You can store multiple versions of your content so that you can recover from
unintended overwrites. Object versioning is an easy way to implement version
control, which you can use with any type of content.

.. note::

    You cannot version a large-object manifest file, but the large-object
    manifest file can point to versioned segments.

.. note::

    It is strongly recommended that you put non-current objects in a
    different container than the container where current object versions
    reside.

To allow object versioning within a cluster, the cloud provider should add
the ``versioned_writes`` filter to the pipeline and set the
``allow_versioned_writes`` option to ``true`` in the
``[filter:versioned_writes]`` section of the proxy-server configuration file.

To enable object versioning for a container, you must specify an "archive
container" that will retain non-current versions via either the
``X-Versions-Location`` or ``X-History-Location`` header. These two headers
enable two distinct modes of operation. Either mode may be used within a
cluster, but only one mode may be active for any given container. You must
UTF-8-encode and then URL-encode the container name before you include it in
the header.

For both modes, **PUT** requests will archive any pre-existing objects before
writing new data, and **GET** requests will serve the current version.
**COPY** requests behave like a **GET** followed by a **PUT**; that is, if
the copy *source* is in a versioned container then the current version will
be copied, and if the copy *destination* is in a versioned container then any
pre-existing object will be archived before writing new data.

If object versioning was enabled using ``X-History-Location``, then object
**DELETE** requests will copy the current version to the archive container
then remove it from the versioned container.

If object versioning was enabled using ``X-Versions-Location``, then object
**DELETE** requests will restore the most-recent version from the archive
container, overwriting the current version.
Example Using ``X-Versions-Location``
-------------------------------------

#. Create the ``current`` container:

   .. code::

       # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: archive"

   .. code::

       HTTP/1.1 201 Created
       Content-Length: 0
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: txb91810fb717347d09eec8-0052e18997
       X-Openstack-Request-Id: txb91810fb717347d09eec8-0052e18997
       Date: Thu, 23 Jan 2014 21:28:55 GMT

#. Create the first version of an object in the ``current`` container:

   .. code::

       # curl -i $publicURL/current/my_object --data-binary 1 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 201 Created
       Last-Modified: Thu, 23 Jan 2014 21:31:22 GMT
       Content-Length: 0
       Etag: d41d8cd98f00b204e9800998ecf8427e
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx5992d536a4bd4fec973aa-0052e18a2a
       X-Openstack-Request-Id: tx5992d536a4bd4fec973aa-0052e18a2a
       Date: Thu, 23 Jan 2014 21:31:22 GMT

   Nothing is written to the non-current version container when you
   initially **PUT** an object in the ``current`` container. However,
   subsequent **PUT** requests that edit an object trigger the creation of a
   version of that object in the ``archive`` container.

   These non-current versions are named as follows:

   .. code::

       <length><object_name>/<timestamp>

   Where ``length`` is the 3-character, zero-padded hexadecimal character
   length of the object, ``<object_name>`` is the object name, and
   ``<timestamp>`` is the time when the object was initially created as a
   current version.

#. Create a second version of the object in the ``current`` container:

   .. code::

       # curl -i $publicURL/current/my_object --data-binary 2 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 201 Created
       Last-Modified: Thu, 23 Jan 2014 21:41:32 GMT
       Content-Length: 0
       Etag: d41d8cd98f00b204e9800998ecf8427e
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx468287ce4fc94eada96ec-0052e18c8c
       X-Openstack-Request-Id: tx468287ce4fc94eada96ec-0052e18c8c
       Date: Thu, 23 Jan 2014 21:41:32 GMT

#. Issue a **GET** request to a versioned object to get the current version
   of the object. You do not have to do any request redirects or metadata
   lookups.

   List older versions of the object in the ``archive`` container:

   .. code::

       # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 200 OK
       Content-Length: 30
       X-Container-Object-Count: 1
       Accept-Ranges: bytes
       X-Timestamp: 1390513280.79684
       X-Container-Bytes-Used: 0
       Content-Type: text/plain; charset=utf-8
       X-Trans-Id: tx9a441884997542d3a5868-0052e18d8e
       X-Openstack-Request-Id: tx9a441884997542d3a5868-0052e18d8e
       Date: Thu, 23 Jan 2014 21:45:50 GMT

       009my_object/1390512682.92052

   .. note::

       A **POST** request to a versioned object updates only the metadata
       for the object and does not create a new version of the object. New
       versions are created only when the content of the object changes.

#. Issue a **DELETE** request to a versioned object to remove the current
   version of the object and replace it with the next-most current version
   in the non-current container.

   .. code::

       # curl -i $publicURL/current/my_object -X DELETE -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 204 No Content
       Content-Length: 0
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx006d944e02494e229b8ee-0052e18edd
       X-Openstack-Request-Id: tx006d944e02494e229b8ee-0052e18edd
       Date: Thu, 23 Jan 2014 21:51:25 GMT

   List objects in the ``archive`` container to show that the archived
   object was moved back to the ``current`` container:

   .. code::

       # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 204 No Content
       Content-Length: 0
       X-Container-Object-Count: 0
       Accept-Ranges: bytes
       X-Timestamp: 1390513280.79684
       X-Container-Bytes-Used: 0
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx044f2a05f56f4997af737-0052e18eed
       X-Openstack-Request-Id: tx044f2a05f56f4997af737-0052e18eed
       Date: Thu, 23 Jan 2014 21:51:41 GMT

   This next-most current version carries with it any metadata last set on
   it. If you want to completely remove an object and you have five versions
   of it, you must **DELETE** it five times.

Example Using ``X-History-Location``
------------------------------------

#. Create the ``current`` container:

   .. code::

       # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-History-Location: archive"

   .. code::

       HTTP/1.1 201 Created
       Content-Length: 0
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: txb91810fb717347d09eec8-0052e18997
       X-Openstack-Request-Id: txb91810fb717347d09eec8-0052e18997
       Date: Thu, 23 Jan 2014 21:28:55 GMT

#. Create the first version of an object in the ``current`` container:

   .. code::

       # curl -i $publicURL/current/my_object --data-binary 1 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 201 Created
       Last-Modified: Thu, 23 Jan 2014 21:31:22 GMT
       Content-Length: 0
       Etag: d41d8cd98f00b204e9800998ecf8427e
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx5992d536a4bd4fec973aa-0052e18a2a
       X-Openstack-Request-Id: tx5992d536a4bd4fec973aa-0052e18a2a
       Date: Thu, 23 Jan 2014 21:31:22 GMT

   Nothing is written to the non-current version container when you
   initially **PUT** an object in the ``current`` container. However,
   subsequent **PUT** requests that edit an object trigger the creation of a
   version of that object in the ``archive`` container.

   These non-current versions are named as follows:

   .. code::

       <length><object_name>/<timestamp>

   Where ``length`` is the 3-character, zero-padded hexadecimal character
   length of the object, ``<object_name>`` is the object name, and
   ``<timestamp>`` is the time when the object was initially created as a
   current version.

#. Create a second version of the object in the ``current`` container:

   .. code::

       # curl -i $publicURL/current/my_object --data-binary 2 -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 201 Created
       Last-Modified: Thu, 23 Jan 2014 21:41:32 GMT
       Content-Length: 0
       Etag: d41d8cd98f00b204e9800998ecf8427e
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx468287ce4fc94eada96ec-0052e18c8c
       X-Openstack-Request-Id: tx468287ce4fc94eada96ec-0052e18c8c
       Date: Thu, 23 Jan 2014 21:41:32 GMT

#. Issue a **GET** request to a versioned object to get the current version
   of the object. You do not have to do any request redirects or metadata
   lookups.

   List older versions of the object in the ``archive`` container:

   .. code::

       # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 200 OK
       Content-Length: 30
       X-Container-Object-Count: 1
       Accept-Ranges: bytes
       X-Timestamp: 1390513280.79684
       X-Container-Bytes-Used: 0
       Content-Type: text/plain; charset=utf-8
       X-Trans-Id: tx9a441884997542d3a5868-0052e18d8e
       X-Openstack-Request-Id: tx9a441884997542d3a5868-0052e18d8e
       Date: Thu, 23 Jan 2014 21:45:50 GMT

       009my_object/1390512682.92052

   .. note::

       A **POST** request to a versioned object updates only the metadata
       for the object and does not create a new version of the object. New
       versions are created only when the content of the object changes.

#. Issue a **DELETE** request to a versioned object to copy the current
   version of the object to the archive container then delete it from the
   current container. Subsequent **GET** requests to the object in the
   current container will return ``404 Not Found``.

   .. code::

       # curl -i $publicURL/current/my_object -X DELETE -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 204 No Content
       Content-Length: 0
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx006d944e02494e229b8ee-0052e18edd
       X-Openstack-Request-Id: tx006d944e02494e229b8ee-0052e18edd
       Date: Thu, 23 Jan 2014 21:51:25 GMT

   List older versions of the object in the ``archive`` container:

   .. code::

       # curl -i $publicURL/archive?prefix=009my_object -X GET -H "X-Auth-Token: $token"

   .. code::

       HTTP/1.1 200 OK
       Content-Length: 90
       X-Container-Object-Count: 3
       Accept-Ranges: bytes
       X-Timestamp: 1390513280.79684
       X-Container-Bytes-Used: 0
       Content-Type: text/html; charset=UTF-8
       X-Trans-Id: tx044f2a05f56f4997af737-0052e18eed
       X-Openstack-Request-Id: tx044f2a05f56f4997af737-0052e18eed
       Date: Thu, 23 Jan 2014 21:51:41 GMT

       009my_object/1390512682.92052
       009my_object/1390512692.23062
       009my_object/1390513885.67732

   In addition to the two previous versions of the object, the archive
   container has a "delete marker" to record when the object was deleted.

   To permanently delete a previous version, issue a **DELETE** to the
   version in the archive container.

Disabling Object Versioning
---------------------------

To disable object versioning for the ``current`` container, remove its
``X-Versions-Location`` metadata header by sending an empty key value.

.. code::

    # curl -i $publicURL/current -X PUT -H "Content-Length: 0" -H "X-Auth-Token: $token" -H "X-Versions-Location: "

.. code::

    HTTP/1.1 202 Accepted
    Content-Length: 76
    Content-Type: text/html; charset=UTF-8
    X-Trans-Id: txe2476de217134549996d0-0052e19038
    X-Openstack-Request-Id: txe2476de217134549996d0-0052e19038
    Date: Thu, 23 Jan 2014 21:57:12 GMT

    <html>
      <h1>Accepted</h1>
      <p>The request is accepted for processing.</p>
    </html>
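
As a closing illustration only (not part of the API reference), the archive
names shown above, such as ``009my_object/1390512682.92052``, can be derived
with a couple of lines of Python; this merely restates the naming scheme
described earlier:

.. code::

    name = 'my_object'
    created = '1390512682.92052'  # creation time of the archived version
    # 3-character, zero-padded hex length, then the name, then the timestamp
    print('%03x%s/%s' % (len(name), name, created))
    # prints: 009my_object/1390512682.92052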
swift-2.17.0/doc/source/api/temporary_url_middleware.rst0000666000175100017510000001701513236061617023445 0ustar zuulzuul00000000000000======================== Temporary URL middleware ======================== To discover whether your Object Storage system supports this feature, check with your service provider or send a **GET** request using the ``/info`` path. A temporary URL gives users temporary access to objects. For example, a website might want to provide a link to download a large object in Object Storage, but the Object Storage account has no public access. The website can generate a URL that provides time-limited **GET** access to the object. When the web browser user clicks on the link, the browser downloads the object directly from Object Storage, eliminating the need for the website to act as a proxy for the request. Furthermore, a temporary URL can be prefix-based. These URLs contain a signature which is valid for all objects which share a common prefix. They are useful for sharing a set of objects. Ask your cloud administrator to enable the temporary URL feature. For information, see :ref:`tempurl` in the *Source Documentation*. Note ~~~~ To use **POST** requests to upload objects to specific Object Storage locations, use :doc:`form_post_middleware` instead of temporary URL middleware. Temporary URL format ~~~~~~~~~~~~~~~~~~~~ A temporary URL is comprised of the URL for an object with added query parameters: **Example Temporary URL format** .. code:: https://swift-cluster.example.com/v1/my_account/container/object ?temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709 &temp_url_expires=1323479485 &filename=My+Test+File.pdf The example shows these elements: **Object URL**: Required. The full path URL to the object. **temp\_url\_sig**: Required. An HMAC-SHA1 cryptographic signature that defines the allowed HTTP method, expiration date, full path to the object, and the secret key for the temporary URL. **temp\_url\_expires**: Required. An expiration date as a UNIX Epoch timestamp or ISO 8601 UTC timestamp. For example, ``1390852007`` or ``2014-01-27T19:46:47Z`` can be used to represent ``Mon, 27 Jan 2014 19:46:47 GMT``. For more information, see `Epoch & Unix Timestamp Conversion Tools `__. **filename**: Optional. Overrides the default file name. Object Storage generates a default file name for **GET** temporary URLs that is based on the object name. Object Storage returns this value in the ``Content-Disposition`` response header. Browsers can interpret this file name value as a file attachment to be saved. A prefix-based temporary URL is similar but requires the parameter ``temp_url_prefix``, which must be equal to the common prefix shared by all object names for which the URL is valid. .. code:: https://swift-cluster.example.com/v1/my_account/container/my_prefix/object ?temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709 &temp_url_expires=2011-12-10T01:11:25Z &temp_url_prefix=my_prefix .. _secret_keys: Secret Keys ~~~~~~~~~~~ The cryptographic signature used in Temporary URLs and also in :doc:`form_post_middleware` uses a secret key. Object Storage allows you to store two secret key values per account, and two per container. When validating a request, Object Storage checks signatures against all keys. Using two keys at each level enables key rotation without invalidating existing temporary URLs. 

To set the keys at the account level, set one or both of the following
request headers to arbitrary values on a **POST** request to the account:

- ``X-Account-Meta-Temp-URL-Key``

- ``X-Account-Meta-Temp-URL-Key-2``

To set the keys at the container level, set one or both of the following
request headers to arbitrary values on a **POST** or **PUT** request to the
container:

- ``X-Container-Meta-Temp-URL-Key``

- ``X-Container-Meta-Temp-URL-Key-2``

The arbitrary values serve as the secret keys.

For example, use the **swift post** command to set the secret key to
*``MYKEY``*:

.. code::

    $ swift post -m "Temp-URL-Key:MYKEY"

Note
~~~~

Changing these headers invalidates any previously generated temporary URLs
within 60 seconds, which is the memcache time for the key.

HMAC-SHA1 signature for temporary URLs
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Temporary URL middleware uses an HMAC-SHA1 cryptographic signature. This
signature includes these elements:

- The allowed method. Typically, **GET** or **PUT**.

- Expiry time. In the example for the HMAC-SHA1 signature for temporary URLs
  below, the expiry time is set to ``86400`` seconds (or 1 day) into the
  future. Please be aware that you have to use a UNIX timestamp for
  generating the signature (in the API request it is also allowed to use an
  ISO 8601 UTC timestamp).

- The path. Starting with ``/v1/`` onwards and including a container name
  and object. The path for prefix-based signatures must start with
  ``prefix:/v1/``. Do not URL-encode the path at this stage.

- The secret key. Use one of the key values as described in
  :ref:`secret_keys`.

These sample Python code snippets show how to compute a signature for use
with temporary URLs:

**Example HMAC-SHA1 signature for object-based temporary URLs**

.. code::

    import hmac
    from hashlib import sha1
    from time import time
    method = 'GET'
    duration_in_seconds = 60*60*24
    expires = int(time() + duration_in_seconds)
    path = '/v1/my_account/container/object'
    key = 'MYKEY'
    hmac_body = '%s\n%s\n%s' % (method, expires, path)
    signature = hmac.new(key, hmac_body, sha1).hexdigest()

**Example HMAC-SHA1 signature for prefix-based temporary URLs**

.. code::

    import hmac
    from hashlib import sha1
    from time import time
    method = 'GET'
    duration_in_seconds = 60*60*24
    expires = int(time() + duration_in_seconds)
    path = 'prefix:/v1/my_account/container/my_prefix'
    key = 'MYKEY'
    hmac_body = '%s\n%s\n%s' % (method, expires, path)
    signature = hmac.new(key, hmac_body, sha1).hexdigest()

Do not URL-encode the path when you generate the HMAC-SHA1 signature.
However, when you make the actual HTTP request, you should properly
URL-encode the URL.

The *``MYKEY``* value is one of the key values as described in
:ref:`secret_keys`.

For more information, see `RFC 2104: HMAC: Keyed-Hashing for Message
Authentication `__.

If you want to transform a UNIX timestamp into an ISO 8601 UTC timestamp,
you can use the following code snippet:

.. code::

    import time
    time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp))

Using the ``swift`` tool to generate a Temporary URL
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

The ``swift`` tool provides the tempurl_ option that auto-generates the
*``temp_url_sig``* and *``temp_url_expires``* query parameters. For example,
you might run this command:

.. code::

    $ swift tempurl GET 3600 /v1/my_account/container/object MYKEY

This command returns the path:

.. code::

    /v1/my_account/container/object
    ?temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91
    &temp_url_expires=1374497657

To create the temporary URL, prefix this path with the Object Storage
storage host name. For example, prefix the path with
``https://swift-cluster.example.com``, as follows:

.. code::

    https://swift-cluster.example.com/v1/my_account/container/object
    ?temp_url_sig=5c4cc8886f36a9d0919d708ade98bf0cc71c9e91
    &temp_url_expires=1374497657

Note that if the above example is copied exactly, and used in a command
shell, then the ampersand is interpreted as an operator and the URL will be
truncated. Enclose the URL in quotation marks to avoid this.

.. _tempurl: https://docs.openstack.org/python-swiftclient/latest/cli/index.html#swift-tempurl
swift-2.17.0/doc/source/api/large_objects.rst0000666000175100017510000003327013236061617021150 0ustar zuulzuul00000000000000=============
Large objects
=============

By default, the content of an object cannot be greater than 5 GB. However,
you can use a number of smaller objects to construct a large object. The
large object is comprised of two types of objects:

- **Segment objects** store the object content. You can divide your content
  into segments, and upload each segment into its own segment object. Segment
  objects do not have any special features. You create, update, download, and
  delete segment objects just as you would normal objects.

- A **manifest object** links the segment objects into one logical large
  object. When you download a manifest object, Object Storage concatenates
  and returns the contents of the segment objects in the response body of
  the request. This behavior extends to the response headers returned by
  **GET** and **HEAD** requests. The ``Content-Length`` response header
  value is the total size of all segment objects. Object Storage calculates
  the ``ETag`` response header value by taking the ``ETag`` value of each
  segment, concatenating them together, and returning the MD5 checksum of
  the result.

The manifest object types are:

**Static large objects**
    The manifest object content is an ordered list of the names of the
    segment objects in JSON format.

**Dynamic large objects**
    The manifest object has a ``X-Object-Manifest`` metadata header. The
    value of this header is ``{container}/{prefix}``, where ``{container}``
    is the name of the container where the segment objects are stored, and
    ``{prefix}`` is a string that all segment objects have in common. The
    manifest object should have no content. However, this is not enforced.

Note
~~~~

If you make a **COPY** request by using a manifest object as the source, the
new object is a normal, and not a segment, object. If the total size of the
source segment objects exceeds 5 GB, the **COPY** request fails. However,
you can make a duplicate of the manifest object and this new object can be
larger than 5 GB.

Static large objects
~~~~~~~~~~~~~~~~~~~~

To create a static large object, divide your content into pieces and create
(upload) a segment object to contain each piece.

Create a manifest object. Include the ``multipart-manifest=put`` query
parameter at the end of the manifest object name to indicate that this is a
manifest object.

The body of the **PUT** request on the manifest object comprises a json
list, where each element is an object representing a segment. These objects
may contain the following attributes:

- ``path`` (required). The container and object name in the format:
  ``{container-name}/{object-name}``

- ``etag`` (optional).
If provided, this value must match the ``ETag`` of the segment object. This was included in the response headers when the segment was created. Generally, this will be the MD5 sum of the segment. - ``size_bytes`` (optional). The size of the segment object. If provided, this value must match the ``Content-Length`` of that object. - ``range`` (optional). The subset of the referenced object that should be used for segment data. This behaves similar to the ``Range`` header. If omitted, the entire object will be used. Providing the optional ``etag`` and ``size_bytes`` attributes for each segment ensures that the upload cannot corrupt your data. **Example Static large object manifest list** This example shows three segment objects. You can use several containers and the object names do not have to conform to a specific pattern, in contrast to dynamic large objects. .. code:: [ { "path": "mycontainer/objseg1", "etag": "0228c7926b8b642dfb29554cd1f00963", "size_bytes": 1468006 }, { "path": "mycontainer/pseudodir/seg-obj2", "etag": "5bfc9ea51a00b790717eeb934fb77b9b", "size_bytes": 1572864 }, { "path": "other-container/seg-final", "etag": "b9c3da507d2557c1ddc51f27c54bae51", "size_bytes": 256 } ] | The ``Content-Length`` request header must contain the length of the json content—not the length of the segment objects. However, after the **PUT** operation completes, the ``Content-Length`` metadata is set to the total length of all the object segments. When using the ``ETag`` request header in a **PUT** operation, it must contain the MD5 checksum of the concatenated ``ETag`` values of the object segments. You can also set the ``Content-Type`` request header and custom object metadata. When the **PUT** operation sees the ``multipart-manifest=put`` query parameter, it reads the request body and verifies that each segment object exists and that the sizes and ETags match. If there is a mismatch, the **PUT** operation fails. This verification process can take a long time to complete, particularly as the number of segments increases. You may include a ``heartbeat=on`` query parameter to have the server: 1. send a ``202 Accepted`` response before it begins validating segments, 2. periodically send whitespace characters to keep the connection alive, and 3. send a final response code in the body. .. note:: The server may still immediately respond with ``400 Bad Request`` if it can determine that the request is invalid before making backend requests. If everything matches, the manifest object is created. The ``X-Static-Large-Object`` metadata is set to ``true`` indicating that this is a static object manifest. Normally when you perform a **GET** operation on the manifest object, the response body contains the concatenated content of the segment objects. To download the manifest list, use the ``multipart-manifest=get`` query parameter. The resulting list is not formatted the same as the manifest you originally used in the **PUT** operation. If you use the **DELETE** operation on a manifest object, the manifest object is deleted. The segment objects are not affected. However, if you add the ``multipart-manifest=delete`` query parameter, the segment objects are deleted and if all are successfully deleted, the manifest object is also deleted. To change the manifest, use a **PUT** operation with the ``multipart-manifest=put`` query parameter. This request creates a manifest object. You can also update the object metadata in the usual way. 
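
For illustration only, a minimal Python 2 sketch that uploads such a
manifest (the endpoint, token, and manifest object name are placeholders,
and the segment objects are assumed to already exist):

.. code::

    import json
    import urllib2

    # Placeholder values; reuses two of the segments listed above.
    manifest = [
        {"path": "mycontainer/objseg1",
         "etag": "0228c7926b8b642dfb29554cd1f00963",
         "size_bytes": 1468006},
        {"path": "other-container/seg-final",
         "etag": "b9c3da507d2557c1ddc51f27c54bae51",
         "size_bytes": 256},
    ]
    url = ('https://swift-cluster.example.com/v1/my_account/container/my_slo'
           '?multipart-manifest=put')
    req = urllib2.Request(url, data=json.dumps(manifest))
    req.add_header('X-Auth-Token', 'AUTH_tk0123456789abcdef')
    req.get_method = lambda: 'PUT'  # force a PUT instead of a POST
    urllib2.urlopen(req)
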
Dynamic large objects ~~~~~~~~~~~~~~~~~~~~~ You must segment objects that are larger than 5 GB before you can upload them. You then upload the segment objects like you would any other object and create a dynamic large manifest object. The manifest object tells Object Storage how to find the segment objects that comprise the large object. The segments remain individually addressable, but retrieving the manifest object streams all the segments concatenated. There is no limit to the number of segments that can be a part of a single large object, but ``Content-Length`` is included in **GET** or **HEAD** response only if the number of segments is smaller than container listing limit. In other words, the number of segments that fit within a single container listing page. To ensure the download works correctly, you must upload all the object segments to the same container and ensure that each object name is prefixed in such a way that it sorts in the order in which it should be concatenated. You also create and upload a manifest file. The manifest file is a zero-byte file with the extra ``X-Object-Manifest`` ``{container}/{prefix}`` header, where ``{container}`` is the container the object segments are in and ``{prefix}`` is the common prefix for all the segments. You must UTF-8-encode and then URL-encode the container and common prefix in the ``X-Object-Manifest`` header. It is best to upload all the segments first and then create or update the manifest. With this method, the full object is not available for downloading until the upload is complete. Also, you can upload a new set of segments to a second location and update the manifest to point to this new location. During the upload of the new segments, the original manifest is still available to download the first set of segments. .. note:: When updating a manifest object using a POST request, a ``X-Object-Manifest`` header must be included for the object to continue to behave as a manifest object. **Example Upload segment of large object request: HTTP** .. code:: PUT /{api_version}/{account}/{container}/{object} HTTP/1.1 Host: storage.clouddrive.com X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb ETag: 8a964ee2a5e88be344f36c22562a6486 Content-Length: 1 X-Object-Meta-PIN: 1234 No response body is returned. A status code of 2\ *``nn``* (between 200 and 299, inclusive) indicates a successful write; status 411 Length Required denotes a missing ``Content-Length`` or ``Content-Type`` header in the request. If the MD5 checksum of the data written to the storage system does NOT match the (optionally) supplied ETag value, a 422 Unprocessable Entity response is returned. You can continue uploading segments like this example shows, prior to uploading the manifest. **Example Upload next segment of large object request: HTTP** .. code:: PUT /{api_version}/{account}/{container}/{object} HTTP/1.1 Host: storage.clouddrive.com X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb ETag: 8a964ee2a5e88be344f36c22562a6486 Content-Length: 1 X-Object-Meta-PIN: 1234 Next, upload the manifest you created that indicates the container the object segments reside within. Note that uploading additional segments after the manifest is created causes the concatenated object to be that much larger but you do not need to recreate the manifest file for subsequent additional segments. **Example Upload manifest request: HTTP** .. 
code:: PUT /{api_version}/{account}/{container}/{object} HTTP/1.1 Host: storage.clouddrive.com X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb Content-Length: 0 X-Object-Meta-PIN: 1234 X-Object-Manifest: {container}/{prefix} **Example Upload manifest response: HTTP** .. code:: [...] The ``Content-Type`` in the response for a **GET** or **HEAD** on the manifest is the same as the ``Content-Type`` set during the **PUT** request that created the manifest. You can easily change the ``Content-Type`` by reissuing the **PUT** request. Comparison of static and dynamic large objects ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ While static and dynamic objects have similar behavior, here are their differences: End-to-end integrity -------------------- With static large objects, integrity can be assured. The list of segments may include the MD5 checksum (``ETag``) of each segment. You cannot upload the manifest object if the ``ETag`` in the list differs from the uploaded segment object. If a segment is somehow lost, an attempt to download the manifest object results in an error. With dynamic large objects, integrity is not guaranteed. The eventual consistency model means that although you have uploaded a segment object, it might not appear in the container listing until later. If you download the manifest before it appears in the container, it does not form part of the content returned in response to a **GET** request. Upload Order ------------ With static large objects, you must upload the segment objects before you upload the manifest object. With dynamic large objects, you can upload manifest and segment objects in any order. In case a premature download of the manifest occurs, we recommend users upload the manifest object after the segments. However, the system does not enforce the order. Removal or addition of segment objects -------------------------------------- With static large objects, you cannot add or remove segment objects from the manifest. However, you can create a completely new manifest object of the same name with a different manifest list. With dynamic large objects, you can upload new segment objects or remove existing segments. The names must simply match the ``{prefix}`` supplied in ``X-Object-Manifest``. Segment object size and number ------------------------------ With static large objects, the segment objects must be at least 1 byte in size. However, if the segment objects are less than 1MB (by default), the SLO download is (by default) rate limited. At most, 1000 segments are supported (by default) and the manifest has a limit (by default) of 2MB in size. With dynamic large objects, segment objects can be any size. Segment object container name ----------------------------- With static large objects, the manifest list includes the container name of each object. Segment objects can be in different containers. With dynamic large objects, all segment objects must be in the same container. Manifest object metadata ------------------------ With static large objects, the manifest object has ``X-Static-Large-Object`` set to ``true``. You do not set this metadata directly. Instead the system sets it when you **PUT** a static manifest object. With dynamic large objects, the ``X-Object-Manifest`` value is the ``{container}/{prefix}``, which indicates where the segment objects are located. You supply this request header in the **PUT** operation. Copying the manifest object --------------------------- The semantics are the same for both static and dynamic large objects. 
When copying large objects, the **COPY** operation does not create a manifest object but a normal object with content same as what you would get on a **GET** request to the original manifest object. To copy the manifest object, you include the ``multipart-manifest=get`` query parameter in the **COPY** request. The new object contains the same manifest as the original. The segment objects are not copied. Instead, both the original and new manifest objects share the same set of segment objects. swift-2.17.0/doc/source/api/authentication.rst0000666000175100017510000000422213236061617021357 0ustar zuulzuul00000000000000============== Authentication ============== The owner of an Object Storage account controls access to that account and its containers and objects. An owner is the user who has the ''admin'' role for that tenant. The tenant is also known as the project or account. As the account owner, you can modify account metadata and create, modify, and delete containers and objects. To identify yourself as the account owner, include an authentication token in the ''X-Auth-Token'' header in the API request. Depending on the token value in the ''X-Auth-Token'' header, one of the following actions occur: - ''X-Auth-Token'' contains the token for the account owner. The request is permitted and has full access to make changes to the account. - The ''X-Auth-Token'' header is omitted or it contains a token for a non-owner or a token that is not valid. The request fails with a 401 Unauthorized or 403 Forbidden response. You have no access to accounts or containers, unless an access control list (ACL) explicitly grants access. The account owner can grant account and container access to users through access control lists (ACLs). In addition, it is possible to provide an additional token in the ''X-Service-Token'' header. More information about how this is used is in :doc:`../overview_backing_store`. The following list describes the authentication services that you can use with Object Storage: - OpenStack Identity (keystone): For Object Storage, account is synonymous with project or tenant ID. - Tempauth middleware: Object Storage includes this middleware. User and account management is performed in Object Storage itself. - Swauth middleware: Stored in github, this custom middleware is modeled on Tempauth. Usage is similar to Tempauth. - Other custom middleware: Write it yourself to fit your environment. Specifically, you use the ''X-Auth-Token'' header to pass an authentication token to an API request. Authentication tokens expire after a time period that the authentication service defines. When a token expires, use of the token causes requests to fail with a 401 Unauthorized response. To continue, you must obtain a new token. swift-2.17.0/doc/source/api/use_the_content-disposition_metadata.rst0000666000175100017510000000225713236061617025736 0ustar zuulzuul00000000000000==================================== Use the Content-Disposition metadata ==================================== To override the default behavior for a browser, use the ``Content-Disposition`` header to specify the override behavior and assign this header to an object. For example, this header might specify that the browser use a download program to save this file rather than show the file, which is the default. **Example Override browser default behavior request: HTTP** This example assigns an attachment type to the ``Content-Disposition`` header. This attachment type indicates that the file is to be downloaded as ``goodbye.txt``: .. 
code:: # curl -i $publicURL/marktwain/goodbye -X POST -H "X-Auth-Token: $token" -H "Content-Length: 14" -H "Content-Type: application/octet-stream" -H "Content-Disposition: attachment; filename=goodbye.txt" .. code:: HTTP/1.1 202 Accepted Content-Length: 76 Content-Type: text/html; charset=UTF-8 X-Trans-Id: txa9b5e57d7f354d7ea9f57-0052e17e13 X-Openstack-Request-Id: txa9b5e57d7f354d7ea9f57-0052e17e13 Date: Thu, 23 Jan 2014 20:39:47 GMT

    <html>
      <h1>Accepted</h1>
      <p>The request is accepted for processing.</p>
    </html>
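
For illustration, the same metadata update expressed as a minimal Python 2
sketch (the storage URL and token are placeholders):

.. code::

    import urllib2

    # Placeholder values; mirrors the curl example above.
    url = 'https://swift-cluster.example.com/v1/my_account/marktwain/goodbye'
    req = urllib2.Request(url, data='')  # an empty body makes this a POST
    req.add_header('X-Auth-Token', 'AUTH_tk0123456789abcdef')
    req.add_header('Content-Disposition', 'attachment; filename=goodbye.txt')
    urllib2.urlopen(req)
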
swift-2.17.0/doc/source/api/object_api_v1_overview.rst0000666000175100017510000001626113236061617023001 0ustar zuulzuul00000000000000Object Storage API overview --------------------------- OpenStack Object Storage is a highly available, distributed, eventually consistent object/blob store. You create, modify, and get objects and metadata by using the Object Storage API, which is implemented as a set of Representational State Transfer (REST) web services. For an introduction to OpenStack Object Storage, see the :doc:`/admin/index`. You use the HTTPS (SSL) protocol to interact with Object Storage, and you use standard HTTP calls to perform API operations. You can also use language-specific APIs, which use the RESTful API, that make it easier for you to integrate into your applications. To assert your right to access and change data in an account, you identify yourself to Object Storage by using an authentication token. To get a token, you present your credentials to an authentication service. The authentication service returns a token and the URL for the account. Depending on which authentication service that you use, the URL for the account appears in: - **OpenStack Identity Service**. The URL is defined in the service catalog. - **Tempauth**. The URL is provided in the ``X-Storage-Url`` response header. In both cases, the URL is the full URL and includes the account resource. The Object Storage API supports the standard, non-serialized response format, which is the default, and both JSON and XML serialized response formats. The Object Storage system organizes data in a hierarchy, as follows: - **Account**. Represents the top-level of the hierarchy. Your service provider creates your account and you own all resources in that account. The account defines a namespace for containers. A container might have the same name in two different accounts. In the OpenStack environment, *account* is synonymous with a project or tenant. - **Container**. Defines a namespace for objects. An object with the same name in two different containers represents two different objects. You can create any number of containers within an account. In addition to containing objects, you can also use the container to control access to objects by using an access control list (ACL). You cannot store an ACL with individual objects. In addition, you configure and control many other features, such as object versioning, at the container level. You can bulk-delete up to 10,000 containers in a single request. You can set a storage policy on a container with predefined names and definitions from your cloud provider. - **Object**. Stores data content, such as documents, images, and so on. You can also store custom metadata with an object. With the Object Storage API, you can: - Store an unlimited number of objects. Each object can be as large as 5 GB, which is the default. You can configure the maximum object size. - Upload and store objects of any size with large object creation. - Use cross-origin resource sharing to manage object security. - Compress files using content-encoding metadata. - Override browser behavior for an object using content-disposition metadata. - Schedule objects for deletion. - Bulk-delete up to 10,000 objects in a single request. - Auto-extract archive files. - Generate a URL that provides time-limited **GET** access to an object. - Upload objects directly to the Object Storage system from a browser by using form **POST** middleware. - Create symbolic links to other objects. 
The account, container, and object hierarchy affects the way you interact with the Object Storage API. Specifically, the resource path reflects this structure and has this format: .. code:: /v1/{account}/{container}/{object} For example, for the ``flowers/rose.jpg`` object in the ``images`` container in the ``12345678912345`` account, the resource path is: .. code:: /v1/12345678912345/images/flowers/rose.jpg Notice that the object name contains the ``/`` character. This slash does not indicate that Object Storage has a sub-hierarchy called ``flowers`` because containers do not store objects in actual sub-folders. However, the inclusion of ``/`` or a similar convention inside object names enables you to create pseudo-hierarchical folders and directories. For example, if the endpoint for Object Storage is ``objects.mycloud.com``, the returned URL is ``https://objects.mycloud.com/v1/12345678912345``. To access a container, append the container name to the resource path. To access an object, append the container and the object name to the path. If you have a large number of containers or objects, you can use query parameters to page through large lists of containers or objects. Use the ``marker``, ``limit``, and ``end_marker`` query parameters to control how many items are returned in a list and where the list starts or ends. If you want to page through in reverse order, you can use the query parameter ``reverse``, noting that your marker and end_markers should be switched when applied to a reverse listing. I.e, for a list of objects ``[a, b, c, d, e]`` the non-reversed could be: .. code:: /v1/{account}/{container}/?marker=a&end_marker=d b c However, when reversed marker and end_marker are applied to a reversed list: .. code:: /v1/{account}/{container}/?marker=d&end_marker=a&reverse=on c b Object Storage HTTP requests have the following default constraints. Your service provider might use different default values. ============================ ============= ===== Item Maximum value Notes ============================ ============= ===== Number of HTTP headers 90 Length of HTTP headers 4096 bytes Length per HTTP request line 8192 bytes Length of HTTP request 5 GB Length of container names 256 bytes Cannot contain the ``/`` character. Length of object names 1024 bytes By default, there are no character restrictions. ============================ ============= ===== You must UTF-8-encode and then URL-encode container and object names before you call the API binding. If you use an API binding that performs the URL-encoding for you, do not URL-encode the names before you call the API binding. Otherwise, you double-encode these names. Check the length restrictions against the URL-encoded string. The API Reference describes the operations that you can perform with the Object Storage API: - `Storage accounts `__: Use to perform account-level tasks. Lists containers for a specified account. Creates, updates, and deletes account metadata. Shows account metadata. - `Storage containers `__: Use to perform container-level tasks. Lists objects in a specified container. Creates, shows details for, and deletes containers. Creates, updates, shows, and deletes container metadata. - `Storage objects `__: Use to perform object-level tasks. Creates, replaces, shows details for, and deletes objects. Copies objects with another object with a new or different name. Updates object metadata. 
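
For illustration, the marker-based paging described above can be driven from
a short Python 2 loop like the following sketch (the account, container,
endpoint, and token are placeholders):

.. code::

    import urllib
    import urllib2

    # Placeholder values; pages through a container 100 names at a time.
    base = 'https://swift-cluster.example.com/v1/my_account/images'
    marker = ''
    while True:
        req = urllib2.Request('%s?limit=100&marker=%s'
                              % (base, urllib.quote(marker)))
        req.add_header('X-Auth-Token', 'AUTH_tk0123456789abcdef')
        names = urllib2.urlopen(req).read().splitlines()
        if not names:
            break  # an empty page means the listing is exhausted
        for name in names:
            print(name)
        marker = names[-1]  # resume after the last name seen
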
swift-2.17.0/doc/source/api/discoverability.rst0000666000175100017510000000165113236061617021537 0ustar zuulzuul00000000000000===============
Discoverability
===============

Your Object Storage system might not enable all features that you read about
because your service provider chooses which features to enable.

To discover which features are enabled in your Object Storage system, use
the ``/info`` request. However, your service provider might have disabled
the ``/info`` request, or you might be using an older version that does not
support the ``/info`` request.

To use the ``/info`` request, send a **GET** request using the ``/info``
path to the Object Store endpoint as shown in this example:

.. code::

    # curl https://storage.clouddrive.com/info

This example shows a truncated response body:

.. code::

    {
       "swift":{
          "version":"1.11.0"
       },
       "staticweb":{
       },
       "tempurl":{
       }
    }

This output shows that the Object Storage system has enabled the static
website and temporary URL features.
swift-2.17.0/doc/source/api/use_content-encoding_metadata.rst0000666000175100017510000000140213236061617024307 0ustar zuulzuul00000000000000=============================
Use Content-Encoding metadata
=============================

When you create an object or update its metadata, you can optionally set the
``Content-Encoding`` metadata. This metadata enables you to indicate that
the object content is compressed without losing the identity of the
underlying media type (``Content-Type``) of the file, such as a video.

**Example Content-Encoding header request: HTTP**

This example assigns an attachment type to the ``Content-Encoding`` header
that indicates how the file is downloaded:

.. code::

    PUT /<api_version>/<account>/<container>/<object> HTTP/1.1
    Host: storage.clouddrive.com
    X-Auth-Token: eaaafd18-0fed-4b3a-81b4-663c99ec1cbb
    Content-Type: video/mp4
    Content-Encoding: gzip
swift-2.17.0/doc/source/api/container_quotas.rst0000666000175100017510000000230313236061617021714 0ustar zuulzuul00000000000000================
Container quotas
================

You can set quotas on the size and number of objects stored in a container
by setting the following metadata:

- ``X-Container-Meta-Quota-Bytes``. The size, in bytes, of objects that can
  be stored in a container.

- ``X-Container-Meta-Quota-Count``. The number of objects that can be stored
  in a container.

When you exceed a container quota, subsequent requests to create objects
fail with a 413 Request Entity Too Large error.

The Object Storage system uses an eventual consistency model. When you
create a new object, the container size and object count might not be
immediately updated. Consequently, you might be allowed to create objects
even though you have actually exceeded the quota. At some later time, the
system updates the container size and object count to the actual values. At
this time, subsequent requests fail.

In addition, if you are currently under the ``X-Container-Meta-Quota-Bytes``
limit and a request uses chunked transfer encoding, the system cannot know
if the request will exceed the quota, so the system allows the request.
However, once the quota is exceeded, any subsequent uploads that use chunked
transfer encoding fail.
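
For illustration, a minimal Python 2 sketch that applies both quotas to a
container (the storage URL and token are placeholders):

.. code::

    import urllib2

    # Placeholder values; caps the container at 100 objects and 10 MB.
    url = 'https://swift-cluster.example.com/v1/my_account/images'
    req = urllib2.Request(url, data='')  # an empty body makes this a POST
    req.add_header('X-Auth-Token', 'AUTH_tk0123456789abcdef')
    req.add_header('X-Container-Meta-Quota-Bytes', '10485760')
    req.add_header('X-Container-Meta-Quota-Count', '100')
    urllib2.urlopen(req)
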
swift-2.17.0/doc/source/overview_container_sync.rst0000666000175100017510000005350113236061617022537 0ustar zuulzuul00000000000000====================================== Container to Container Synchronization ====================================== -------- Overview -------- Swift has a feature where all the contents of a container can be mirrored to another container through background synchronization. Swift cluster operators configure their cluster to allow/accept sync requests to/from other clusters, and the user specifies where to sync their container to along with a secret synchronization key. .. note:: If you are using the :ref:`Large Objects ` feature and syncing to another cluster then you will need to ensure that manifest files and segment files are synced. If segment files are in a different container than their manifest then both the manifest's container and the segments' container must be synced. The target container for synced segment files must always have the same name as their source container in order for them to be resolved by synced manifests. Be aware that manifest files may be synced before segment files even if they are in the same container and were created after the segment files. In the case of :ref:`Static Large Objects `, a GET request for a manifest whose segments have yet to be completely synced will fail with none or only part of the large object content being returned. In the case of :ref:`Dynamic Large Objects `, a GET request for a manifest whose segments have yet to be completely synced will either fail or return unexpected (and most likely incorrect) content. .. note:: If you are using encryption middleware in the cluster from which objects are being synced, then you should follow the instructions for :ref:`container_sync_client_config` to be compatible with encryption. .. note:: If you are using symlink middleware in the cluster from which objects are being synced, then you should follow the instructions for :ref:`symlink_container_sync_client_config` to be compatible with symlinks. -------------------------- Configuring Container Sync -------------------------- Create a ``container-sync-realms.conf`` file specifying the allowable clusters and their information:: [realm1] key = realm1key key2 = realm1key2 cluster_clustername1 = https://host1/v1/ cluster_clustername2 = https://host2/v1/ [realm2] key = realm2key key2 = realm2key2 cluster_clustername3 = https://host3/v1/ cluster_clustername4 = https://host4/v1/ Each section name is the name of a sync realm. A sync realm is a set of clusters that have agreed to allow container syncing with each other. Realm names will be considered case insensitive. The key is the overall cluster-to-cluster key used in combination with the external users' key that they set on their containers' ``X-Container-Sync-Key`` metadata header values. These keys will be used to sign each request the container sync daemon makes and used to validate each incoming container sync request. The key2 is optional and is an additional key incoming requests will be checked against. This is so you can rotate keys if you wish; you move the existing key to key2 and make a new key value. Any values in the realm section whose names begin with ``cluster_`` will indicate the name and endpoint of a cluster and will be used by external users in their containers' ``X-Container-Sync-To`` metadata header values with the format "//realm_name/cluster_name/account_name/container_name". Realm and cluster names are considered case insensitive. 
The endpoint is what the container sync daemon will use when sending out
requests to that cluster. Keep in mind this endpoint must be reachable by all
container servers, since that is where the container sync daemon runs. Note
that the endpoint ends with /v1/ and that the container sync daemon will then
add the account/container/obj name after that.

Distribute this ``container-sync-realms.conf`` file to all your proxy servers
and container servers.

You also need to add the container_sync middleware to your proxy pipeline. It
needs to be after any memcache middleware and before any auth middleware. The
container_sync section only needs the "use" item. For example::

    [pipeline:main]
    pipeline = healthcheck proxy-logging cache container_sync tempauth proxy-logging proxy-server

    [filter:container_sync]
    use = egg:swift#container_sync

The container sync daemon will use an internal client to sync objects. Even
if you don't configure the internal client, the container sync daemon will
work with a default configuration. The default configuration is the same as
``internal-client.conf-sample``. If you want to configure the internal
client, please update ``internal_client_conf_path`` in container-server.conf.
The configuration file at that path will be used for the internal client.

-------------------------------------------------------
Old-Style: Configuring a Cluster's Allowable Sync Hosts
-------------------------------------------------------

This section is for the old style of using container sync. See the previous
section, Configuring Container Sync, for the new style.

With the old style, the Swift cluster operator must allow synchronization
with a set of hosts before the user can enable container synchronization.
First, the backend container server needs to be given this list of hosts in
the ``container-server.conf`` file::

    [DEFAULT]
    # This is a comma separated list of hosts allowed in the
    # X-Container-Sync-To field for containers.
    # allowed_sync_hosts = 127.0.0.1
    allowed_sync_hosts = host1,host2,etc.
    ...

    [container-sync]
    # You can override the default log routing for this app here (don't
    # use set!):
    # log_name = container-sync
    # log_facility = LOG_LOCAL0
    # log_level = INFO
    # Will sync, at most, each container once per interval
    # interval = 300
    # Maximum amount of time to spend syncing each container
    # container_time = 60

----------------------
Logging Container Sync
----------------------

Currently, tracking sync progress, problems, and general activity for
container synchronization can only be achieved with log processing. In that
light, you may wish to set the above `log_` options to direct the
container-sync logs to a different file for easier monitoring. Additionally,
it should be noted there is no way for an end user to detect sync progress
or problems other than HEADing both containers and comparing the overall
information.

-----------------------------
Container Sync Statistics
-----------------------------

Container Sync INFO level logs contain activity metrics and accounting
information for insightful tracking.
Currently two different statistics are collected:

About once an hour or so, accumulated statistics of all operations performed
by Container Sync are reported to the log file with the following format::

    Since (time): (sync) synced [(delete) deletes, (put) puts], (skip) skipped, (fail) failed

- time: last report time
- sync: number of containers with sync turned on that were successfully synced
- delete: number of successful DELETE object requests to the target cluster
- put: number of successful PUT object requests to the target cluster
- skip: number of containers whose sync has been turned off, but are not yet
  cleared from the sync store
- fail: number of containers with failure (due to exception, timeout or other
  reason)

For each container synced, per container statistics are reported with the
following format::

    Container sync report: (container), time window start: (start), time window end: (end), puts: (puts), posts: (posts), deletes: (deletes), bytes: (bytes), sync_point1: (point1), sync_point2: (point2), total_rows: (total)

- container: the account/container the statistics are for
- start: report start time
- end: report end time
- puts: number of successful PUT object requests to the target container
- posts: N/A (0)
- deletes: number of successful DELETE object requests to the target container
- bytes: number of bytes sent over the network to the target container
- point1: progress indication - the container's x_container_sync_point1
- point2: progress indication - the container's x_container_sync_point2
- total: number of objects processed at the container

It is possible that more than one server syncs a container; therefore,
logfiles from all servers need to be evaluated.

----------------------------------------------------------
Using the ``swift`` tool to set up synchronized containers
----------------------------------------------------------

.. note::

    The ``swift`` tool is available from the `python-swiftclient`_ library.

.. note::

    You must be the account admin on the account to set synchronization
    targets and keys.

You simply tell each container where to sync to and give it a secret
synchronization key. First, let's get the account details for our two
cluster accounts::

    $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing stat -v
    StorageURL: http://cluster1/v1/AUTH_208d1854-e475-4500-b315-81de645d060e
    Auth Token: AUTH_tkd5359e46ff9e419fa193dbd367f3cd19
    Account: AUTH_208d1854-e475-4500-b315-81de645d060e
    Containers: 0
    Objects: 0
    Bytes: 0

    $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 stat -v
    StorageURL: http://cluster2/v1/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c
    Auth Token: AUTH_tk816a1aaf403c49adb92ecfca2f88e430
    Account: AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c
    Containers: 0
    Objects: 0
    Bytes: 0

Now, let's make our first container and tell it to synchronize to a second
we'll make next::

    $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing post \
      -t '//realm_name/clustername2/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \
      -k 'secret' container1

The ``-t`` indicates the cluster to sync to, which is the realm name of the
section from container-sync-realms.conf, followed by the cluster name from
that section (without the cluster\_ prefix), followed by the account and
container names we want to sync to. The ``-k`` specifies the secret key the
two containers will share for synchronization; this is the user key -- the
cluster key in container-sync-realms.conf will also be used behind the
scenes.
Now, we'll do something similar for the second cluster's container:: $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 post \ -t '//realm_name/clustername1/AUTH_208d1854-e475-4500-b315-81de645d060e/container1' \ -k 'secret' container2 That's it. Now we can upload a bunch of stuff to the first container and watch as it gets synchronized over to the second:: $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing \ upload container1 . photo002.png photo004.png photo001.png photo003.png $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 \ list container2 [Nothing there yet, so we wait a bit...] .. note:: If you're an operator running SAIO and just testing, each time you configure a container for synchronization and place objects in the source container you will need to ensure that container-sync runs before attempting to retrieve objects from the target container. That is, you need to run:: swift-init container-sync once Now expect to see objects copied from the first container to the second:: $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 \ list container2 photo001.png photo002.png photo003.png photo004.png You can also set up a chain of synced containers if you want more than two. You'd point 1 -> 2, then 2 -> 3, and finally 3 -> 1 for three containers. They'd all need to share the same secret synchronization key. .. _`python-swiftclient`: http://github.com/openstack/python-swiftclient ----------------------------------- Using curl (or other tools) instead ----------------------------------- So what's ``swift`` doing behind the scenes? Nothing overly complicated. It translates the ``-t `` option into an ``X-Container-Sync-To: `` header and the ``-k `` option into an ``X-Container-Sync-Key: `` header. For instance, when we created the first container above and told it to synchronize to the second, we could have used this curl command:: $ curl -i -X POST -H 'X-Auth-Token: AUTH_tkd5359e46ff9e419fa193dbd367f3cd19' \ -H 'X-Container-Sync-To: //realm_name/clustername2/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \ -H 'X-Container-Sync-Key: secret' \ 'http://cluster1/v1/AUTH_208d1854-e475-4500-b315-81de645d060e/container1' HTTP/1.1 204 No Content Content-Length: 0 Content-Type: text/plain; charset=UTF-8 Date: Thu, 24 Feb 2011 22:39:14 GMT --------------------------------------------------------------------- Old-Style: Using the ``swift`` tool to set up synchronized containers --------------------------------------------------------------------- .. note:: The ``swift`` tool is available from the `python-swiftclient`_ library. .. note:: You must be the account admin on the account to set synchronization targets and keys. This is for the old-style of container syncing using allowed_sync_hosts. You simply tell each container where to sync to and give it a secret synchronization key. 
First, let's get the account details for our two cluster accounts:: $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing stat -v StorageURL: http://cluster1/v1/AUTH_208d1854-e475-4500-b315-81de645d060e Auth Token: AUTH_tkd5359e46ff9e419fa193dbd367f3cd19 Account: AUTH_208d1854-e475-4500-b315-81de645d060e Containers: 0 Objects: 0 Bytes: 0 $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 stat -v StorageURL: http://cluster2/v1/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c Auth Token: AUTH_tk816a1aaf403c49adb92ecfca2f88e430 Account: AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c Containers: 0 Objects: 0 Bytes: 0 Now, let's make our first container and tell it to synchronize to a second we'll make next:: $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing post \ -t 'http://cluster2/v1/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \ -k 'secret' container1 The ``-t`` indicates the URL to sync to, which is the ``StorageURL`` from cluster2 we retrieved above plus the container name. The ``-k`` specifies the secret key the two containers will share for synchronization. Now, we'll do something similar for the second cluster's container:: $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 post \ -t 'http://cluster1/v1/AUTH_208d1854-e475-4500-b315-81de645d060e/container1' \ -k 'secret' container2 That's it. Now we can upload a bunch of stuff to the first container and watch as it gets synchronized over to the second:: $ swift -A http://cluster1/auth/v1.0 -U test:tester -K testing \ upload container1 . photo002.png photo004.png photo001.png photo003.png $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 \ list container2 [Nothing there yet, so we wait a bit...] [If you're an operator running SAIO and just testing, you may need to run 'swift-init container-sync once' to perform a sync scan.] $ swift -A http://cluster2/auth/v1.0 -U test2:tester2 -K testing2 \ list container2 photo001.png photo002.png photo003.png photo004.png You can also set up a chain of synced containers if you want more than two. You'd point 1 -> 2, then 2 -> 3, and finally 3 -> 1 for three containers. They'd all need to share the same secret synchronization key. .. _`python-swiftclient`: http://github.com/openstack/python-swiftclient ---------------------------------------------- Old-Style: Using curl (or other tools) instead ---------------------------------------------- This is for the old-style of container syncing using allowed_sync_hosts. So what's ``swift`` doing behind the scenes? Nothing overly complicated. It translates the ``-t `` option into an ``X-Container-Sync-To: `` header and the ``-k `` option into an ``X-Container-Sync-Key: `` header. For instance, when we created the first container above and told it to synchronize to the second, we could have used this curl command:: $ curl -i -X POST -H 'X-Auth-Token: AUTH_tkd5359e46ff9e419fa193dbd367f3cd19' \ -H 'X-Container-Sync-To: http://cluster2/v1/AUTH_33cdcad8-09fb-4940-90da-0f00cbf21c7c/container2' \ -H 'X-Container-Sync-Key: secret' \ 'http://cluster1/v1/AUTH_208d1854-e475-4500-b315-81de645d060e/container1' HTTP/1.1 204 No Content Content-Length: 0 Content-Type: text/plain; charset=UTF-8 Date: Thu, 24 Feb 2011 22:39:14 GMT -------------------------------------------------- What's going on behind the scenes, in the cluster? -------------------------------------------------- Container ring devices have a directory called ``containers``, where container databases reside. 
In addition to ``containers``, each container ring device also has a
directory called ``sync-containers``. ``sync-containers`` holds symlinks to
container databases that were configured for container sync using
``x-container-sync-to`` and ``x-container-sync-key`` metadata keys.

The swift-container-sync process does the job of sending updates to the
remote container. This is done by scanning ``sync-containers`` for container
databases. For each container db found, newer rows since the last sync will
trigger PUTs or DELETEs to the other container.

``sync-containers`` is maintained as follows: Whenever the container-server
processes a PUT or a POST request that carries ``x-container-sync-to`` and
``x-container-sync-key`` metadata keys the server creates a symlink to the
container database in ``sync-containers``. Whenever the container server
deletes a synced container, the appropriate symlink is deleted from
``sync-containers``.

In addition to the container-server, the container-replicator process does
the job of identifying containers that should be synchronized. This is done
by scanning the local devices for container databases and checking for
x-container-sync-to and x-container-sync-key metadata values. If they exist
then a symlink to the container database is created in a sync-containers
sub-directory on the same device.

Similarly, when the container sync metadata keys are deleted, the container
server and container-replicator would take care of deleting the symlinks
from ``sync-containers``.

.. note::

    The swift-container-sync process runs on each container server in the
    cluster and talks to the proxy servers (or load balancers) in the remote
    cluster. Therefore, the container servers must be permitted to initiate
    outbound connections to the remote proxy servers (or load balancers).

The actual syncing is slightly more complicated to make use of the three (or
number-of-replicas) main nodes for a container without each trying to do the
exact same work but also without missing work if one node happens to be
down.

Two sync points are kept in each container database. When syncing a
container, the container-sync process figures out which replica of the
container it has. In a standard 3-replica scenario, the process will have
either replica number 0, 1, or 2. This is used to figure out which rows
belong to this sync process and which ones don't.

An example may help. Assume a replica count of 3 and database row IDs are
1..6. Also, assume that container-sync is running on this container for the
first time, hence SP1 = SP2 = -1. ::

    SP1
    SP2
     |
     v
    -1 0 1 2 3 4 5 6

First, the container-sync process looks for rows with id between SP1 and
SP2. Since this is the first run, SP1 = SP2 = -1, and there aren't any such
rows. ::

    SP1
    SP2
     |
     v
    -1 0 1 2 3 4 5 6

Second, the container-sync process looks for rows with id greater than SP1,
and syncs those rows which it owns. Ownership is based on the hash of the
object name, so it's not always guaranteed to be exactly one out of every
three rows, but it usually gets close. For the sake of example, let's say
that this process ends up owning rows 2 and 5.

Once it's finished trying to sync those rows, it updates SP1 to be the
biggest row-id that it's seen, which is 6 in this example. ::

    SP2           SP1
     |             |
     v             v
    -1 0 1 2 3 4 5 6

While all that was going on, clients uploaded new objects into the
container, creating new rows in the database.
::

    SP2           SP1
     |             |
     v             v
    -1 0 1 2 3 4 5 6 7 8 9 10 11 12

On the next run, the container-sync starts off looking at rows with ids
between SP1 and SP2. This time, there are a bunch of them. The sync process
tries to sync all of them. If it succeeds, it will set SP2 to equal SP1. If
any row fails, it will continue to try all the other rows up to SP1, and
then set SP2 to the first row that failed.

Under normal circumstances, the container-sync processes will have already
taken care of synchronizing all rows, between SP1 and SP2, resulting in a
set of quick checks. However, if one of the sync processes failed for some
reason, then this is a vital fallback to make sure all the objects in the
container get synchronized. Without this seemingly-redundant work, any
container-sync failure results in unsynchronized objects. Note that the
container sync will persistently retry syncing any faulty object until it
succeeds, while logging each failure.

Once it's done with the fallback rows, and assuming no faults occurred, SP2
is advanced to SP1. ::

                  SP2
                  SP1
                   |
                   v
    -1 0 1 2 3 4 5 6 7 8 9 10 11 12

Then, rows with row ID greater than SP1 are synchronized (provided this
container-sync process is responsible for them), and SP1 is moved up to the
greatest row ID seen. ::

                  SP2           SP1
                   |             |
                   v             v
    -1 0 1 2 3 4 5 6 7 8 9 10 11 12
swift-2.17.0/doc/source/account.rst0000666000175100017510000000117213236061617017224 0ustar zuulzuul00000000000000.. _account:

*******
Account
*******

.. _account-auditor:

Account Auditor
===============

.. automodule:: swift.account.auditor
    :members:
    :undoc-members:
    :show-inheritance:

.. _account-backend:

Account Backend
===============

.. automodule:: swift.account.backend
    :members:
    :undoc-members:
    :show-inheritance:

.. _account-reaper:

Account Reaper
==============

.. automodule:: swift.account.reaper
    :members:
    :undoc-members:
    :show-inheritance:

.. _account-server:

Account Server
==============

.. automodule:: swift.account.server
    :members:
    :undoc-members:
    :show-inheritance:
swift-2.17.0/doc/source/logs.rst0000666000175100017510000001621013236061617016533 0ustar zuulzuul00000000000000====
Logs
====

Swift has quite verbose logging, and the generated logs can be used for
cluster monitoring, utilization calculations, audit records, and more.

As an overview, Swift's logs are sent to syslog and organized by log level
and syslog facility. All log lines related to the same request have the same
transaction id. This page documents the log formats used in the system.

.. note::

    By default, Swift will log full log lines. However, with the
    ``log_max_line_length`` setting and depending on your logging server
    software, lines may be truncated or shortened. With
    ``log_max_line_length < 7``, the log line will be truncated. With
    ``log_max_line_length >= 7``, the log line will be "shortened": about
    half the max length followed by " ... " followed by the other half the
    max length. Unless you use exceptionally short values, you are unlikely
    to run across this with the following documented log lines, but you may
    see it with debugging and error log lines.

----------
Proxy Logs
----------

The proxy logs contain the record of all external API requests made to the
proxy server. Swift's proxy servers log requests using a custom format
designed to provide robust information and simple processing.
The log format is:: client_ip remote_addr datetime request_method request_path protocol status_int referer user_agent auth_token bytes_recvd bytes_sent client_etag transaction_id headers request_time source log_info request_start_time request_end_time policy_index =================== ========================================================== **Log Field** **Value** ------------------- ---------------------------------------------------------- client_ip Swift's guess at the end-client IP, taken from various headers in the request. remote_addr The IP address of the other end of the TCP connection. datetime Timestamp of the request, in day/month/year/hour/minute/second format. request_method The HTTP verb in the request. request_path The path portion of the request. protocol The transport protocol used (currently one of http or https). status_int The response code for the request. referer The value of the HTTP Referer header. user_agent The value of the HTTP User-Agent header. auth_token The value of the auth token. This may be truncated or otherwise obscured. bytes_recvd The number of bytes read from the client for this request. bytes_sent The number of bytes sent to the client in the body of the response. This is how many bytes were yielded to the WSGI server. client_etag The etag header value given by the client. transaction_id The transaction id of the request. headers The headers given in the request. request_time The duration of the request. source The "source" of the request. This may be set for requests that are generated in order to fulfill client requests, e.g. bulk uploads. log_info Various info that may be useful for diagnostics, e.g. the value of any x-delete-at header. request_start_time High-resolution timestamp from the start of the request. request_end_time High-resolution timestamp from the end of the request. policy_index The value of the storage policy index. =================== ========================================================== In one log line, all of the above fields are space-separated and url-encoded. If any value is empty, it will be logged as a "-". This allows for simple parsing by splitting each line on whitespace. New values may be placed at the end of the log line from time to time, but the order of the existing values will not change. Swift log processing utilities should look for the first N fields they require (e.g. in Python using something like ``log_line.split()[:14]`` to get up through the transaction id). Swift Source ============ The ``source`` value in the proxy logs is used to identify the originator of a request in the system. For example, if the client initiates a bulk upload, the proxy server may end up doing many requests. The initial bulk upload request will be logged as normal, but all of the internal "child requests" will have a source value indicating they came from the bulk functionality. 
======================= ============================= **Logged Source Value** **Originator of the Request** ----------------------- ----------------------------- FP :ref:`formpost` SLO :ref:`static-large-objects` SW :ref:`staticweb` TU :ref:`tempurl` BD :ref:`bulk` (delete) EA :ref:`bulk` (extract) CQ :ref:`container-quotas` CS :ref:`container-sync` TA :ref:`common_tempauth` DLO :ref:`dynamic-large-objects` LE :ref:`list_endpoints` KS :ref:`keystoneauth` RL :ref:`ratelimit` VW :ref:`versioned_writes` SSC :ref:`copy` SYM :ref:`symlink` ======================= ============================= ----------------- Storage Node Logs ----------------- Swift's account, container, and object server processes each log requests that they receive, if they have been configured to do so with the ``log_requests`` config parameter (which defaults to true). The format for these log lines is:: remote_addr - - [datetime] "request_method request_path" status_int content_length "referer" "transaction_id" "user_agent" request_time additional_info server_pid policy_index =================== ========================================================== **Log Field** **Value** ------------------- ---------------------------------------------------------- remote_addr The IP address of the other end of the TCP connection. datetime Timestamp of the request, in "day/month/year:hour:minute:second +0000" format. request_method The HTTP verb in the request. request_path The path portion of the request. status_int The response code for the request. content_length The value of the Content-Length header in the response. referer The value of the HTTP Referer header. transaction_id The transaction id of the request. user_agent The value of the HTTP User-Agent header. Swift services report a user-agent string of the service name followed by the process ID, such as ``"proxy-server "`` or ``"object-updater "``. request_time The duration of the request. additional_info Additional useful information. server_pid The process id of the server policy_index The value of the storage policy index. =================== ========================================================== swift-2.17.0/doc/source/overview_backing_store.rst0000666000175100017510000002717113236061617022337 0ustar zuulzuul00000000000000 ============================================= Using Swift as Backing Store for Service Data ============================================= ---------- Background ---------- This section provides guidance to OpenStack Service developers for how to store your users' data in Swift. An example of this is that a user requests that Nova save a snapshot of a VM. Nova passes the request to Glance, Glance writes the image to a Swift container as a set of objects. Throughout this section, the following terminology and concepts are used: * User or end-user. This is a person making a request that will result in an OpenStack Service making a request to Swift. * Project (also known as Tenant). This is the unit of resource ownership. While data such as snapshot images or block volume backups may be stored as a result of an end-user's request, the reality is that these are project data. * Service. This is a program or system used by end-users. Specifically, it is any program or system that is capable of receiving end-user's tokens and validating the token with the Keystone Service and has a need to store data in Swift. Glance and Cinder are examples of such Services. * Service User. This is a Keystone user that has been assigned to a Service. 
This allows the Service to generate and use its own tokens so that it can
interact with other Services as itself.

* Service Project. This is a project (tenant) that is associated with a
  Service. There may be a single project shared by many Services or there
  may be a project dedicated to each Service. In this document, the main
  purpose of the Service Project is to allow the system operator to
  configure specific roles for each Service User.

-------------------------------
Alternate Backing Store Schemes
-------------------------------

There are three schemes described here:

* Dedicated Service Account (Single Tenant)

  Your Service has a dedicated Service Project (hence a single dedicated
  Swift account). Data for all users and projects are stored in this
  account. Your Service must have a user assigned to it (the Service User).
  When you have data to store on behalf of one of your users, you use the
  Service User credentials to get a token for the Service Project and
  request Swift to store the data in the Service Project.

  With this scheme, data for all users is stored in a single account. This
  is transparent to your users and since the credentials for the Service
  User are typically not shared with anyone, your users cannot access their
  data by making a request directly to Swift. However, since data belonging
  to all users is stored in one account, it presents a single point of
  vulnerability to accidental deletion or a leak of the service-user
  credentials.

* Multi Project (Multi Tenant)

  Data belonging to a project is stored in the Swift account associated with
  the project. Users make requests to your Service using a token scoped to a
  project in the normal way. You can then use this same token to store the
  user data in the project's Swift account. The effect is that data is
  stored in multiple projects (aka tenants). Hence this scheme has been
  known as the "multi tenant" scheme.

  With this scheme, access is controlled by Keystone. The users must have a
  role that allows them to perform the request to your Service. In addition,
  they must have a role that also allows them to store data in the Swift
  account. By default, the admin or swiftoperator roles are used for this
  purpose (specific systems may use other role names). If the user does not
  have the appropriate roles, when your Service attempts to access Swift,
  the operation will fail.

  Since you are using the user's token to access the data, it follows that
  the user can use the same token to access Swift directly -- bypassing your
  Service. When end-users are browsing containers, they will also see your
  Service's containers and objects -- and may potentially delete the data.
  Conversely, there is no single account holding all data, so a leak of
  credentials will only affect a single project/tenant.

* Service Prefix Account

  Data belonging to a project is stored in a Swift account associated with
  the project. This is similar to the Multi Project scheme described above.
  However, the Swift account is different than the account that users
  access. Specifically, it has a different account prefix. For example, for
  the project 1234, the user account is named AUTH_1234. Your Service uses a
  different account, for example, SERVICE_1234.

  To access the SERVICE_1234 account, you must present two tokens: the
  user's token is put in the X-Auth-Token header. You present your Service's
  token in the X-Service-Token header. Swift is configured such that only
  when both tokens are presented will it allow access.
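  As a concrete illustration -- a sketch only, in which the token values,
  endpoint, container, and object names are all placeholders -- such a
  two-token request might look like::

      $ curl -i -X PUT -H 'X-Auth-Token: <users-token>' \
          -H 'X-Service-Token: <services-token>' \
          --data-binary @localfile \
          'https://<host>/v1/SERVICE_1234/<container>/<object>'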
  Specifically, the user cannot bypass your Service because they only have
  their own token. Conversely, your Service can only access the data while
  it has a copy of the user's token -- the Service's token by itself will
  not grant access.

  The data stored in the Service Prefix Account cannot be seen by end-users.
  So they cannot delete this data -- they can only access the data if they
  make a request through your Service. The data is also more secure. To make
  an unauthorized access, someone would need to compromise both an
  end-user's and your Service User credentials. Even then, this would only
  expose one project -- not other projects.

The Service Prefix Account scheme combines features of the Dedicated Service
Account and Multi Project schemes. It has the private, dedicated,
characteristics of the Dedicated Service Account scheme but does not present
a single point of attack. Using the Service Prefix Account scheme is a
little more involved than the other schemes, so the rest of this document
describes it in more detail.

-------------------------------
Service Prefix Account Overview
-------------------------------

The following diagram shows the flow through the system from the end-user,
to your Service and then onto Swift::

    client
       \
        \    <request>: <path-specific-to-your-service>
         \   x-auth-token: <user-token>
        SERVICE
           \
            \    PUT: /v1/SERVICE_1234/<container>/<object>
             \   x-auth-token: <user-token>
              \  x-service-token: <service-token>
            Swift

The sequence of events and actions is as follows:

* Request arrives at your Service

* The <user-token> is validated by the keystonemiddleware.auth_token
  middleware. The user's role(s) are used to determine if the user can
  perform the request. See :doc:`overview_auth` for technical information on
  the authentication system.

* As part of this request, your Service needs to access Swift (either to
  write or read a container or object). In this example, you want to perform
  a PUT on <container>/<object>.

* In the wsgi environment, the auth_token module will have populated the
  HTTP_X_SERVICE_CATALOG item. This lists the Swift endpoint and account.
  This is something such as https://<host>/v1/AUTH_1234 where ``AUTH_`` is a
  prefix and ``1234`` is the project id.

* The ``AUTH_`` prefix is the default value. However, your system may use a
  different prefix. To determine the actual prefix, search for the first
  underscore ('_') character in the account name. If there is no underscore
  character in the account name, this means there is no prefix.

* Your Service should have a configuration parameter that provides the
  appropriate prefix to use for storing data in Swift. There is more
  discussion of this below, but for now assume the prefix is ``SERVICE_``.

* Replace the prefix (``AUTH_`` in above examples) in the path with
  ``SERVICE_``, so the full URL to access the object becomes
  https://<host>/v1/SERVICE_1234/<container>/<object>.

* Make the request to Swift, using this URL. In the X-Auth-Token header
  place a copy of the <user-token>. In the X-Service-Token header, place
  your Service's token. If you use python-swiftclient you can achieve this
  by:

  * Putting the URL in the ``preauthurl`` parameter
  * Putting the <user-token> in the ``preauthtoken`` parameter
  * Adding the X-Service-Token to the ``headers`` parameter

Using the HTTP_X_SERVICE_CATALOG to get Swift Account Name
----------------------------------------------------------

The auth_token middleware populates the wsgi environment with information
when it validates the user's token. The HTTP_X_SERVICE_CATALOG item is a
JSON string containing details of the OpenStack endpoints. For Swift, this
also contains the project's Swift account name. Here is an example of a
catalog entry for Swift::

    "serviceCatalog": [
        ...
        {
            ....
"type": "object-store", "endpoints": [ ... { ... "publicURL": "https:///v1/AUTH_1234", "region": "" ... } ... ... } } To get the End-user's account: * Look for an entry with ``type`` of ``object-store`` * If there are several regions, there will be several endpoints. Use the appropriate region name and select the ``publicURL`` item. * The Swift account name is the final item in the path ("AUTH_1234" in this example). Getting a Service Token ----------------------- A Service Token is no different than any other token and is requested from Keystone using user credentials and project in the usual way. The core requirement is that your Service User has the appropriate role. In practice: * Your Service must have a user assigned to it (the Service User). * Your Service has a project assigned to it (the Service Project). * The Service User must have a role on the Service Project. This role is distinct from any of the normal end-user roles. * The role used must the role configured in the /etc/swift/proxy-server.conf. This is the ``_service_roles`` option. In this example, the role is the ``service`` role:: [keystoneauth] reseller_prefix = AUTH_, SERVICE_ SERVICE_service_role = service The ``service`` role should only be granted to OpenStack Services. It should not be granted to users. Single or multiple Service Prefixes? ------------------------------------ Most of the examples used in this document used a single prefix. The prefix, ``SERVICE`` was used. By using a single prefix, an operator is allowing all OpenStack Services to share the same account for data associated with a given project. For test systems or deployments well protected on private firewalled networks, this is appropriate. However, if one Service is compromised, that Service can access data created by another Service. To prevent this, multiple Service Prefixes may be used. This also requires that the operator configure multiple service roles. For example, in a system that has Glance and Cinder, the following Swift configuration could be used:: [keystoneauth] reseller_prefix = AUTH_, IMAGE_, BLOCK_ IMAGE_service_roles = image_service BLOCK_service_roles = block_service The Service User for Glance would be granted the ``image_service`` role on its Service Project and the Cinder Service user is granted the ``block_service`` role on its project. In this scheme, if the Cinder Service was compromised, it would not be able to access any Glance data. Container Naming ---------------- Since a single Service Prefix is possible, container names should be prefixed with a unique string to prevent name clashes. We suggest you use the service type field (as used in the service catalog). For example, The Glance Service would use "image" as a prefix. swift-2.17.0/doc/source/development_guidelines.rst0000666000175100017510000002242213236061617022323 0ustar zuulzuul00000000000000====================== Development Guidelines ====================== ----------------- Coding Guidelines ----------------- For the most part we try to follow PEP 8 guidelines which can be viewed here: http://www.python.org/dev/peps/pep-0008/ ------------------ Testing Guidelines ------------------ Swift has a comprehensive suite of tests and pep8 checks that are run on all submitted code, and it is recommended that developers execute the tests themselves to catch regressions early. Developers are also expected to keep the test suite up-to-date with any submitted code changes. 
Swift's tests and pep8 checks can be executed in an isolated environment with ``tox``: http://tox.testrun.org/ To execute the tests: * Ensure ``pip`` and ``virtualenv`` are upgraded to satisfy the version requirements listed in the OpenStack `global requirements`_:: pip install pip -U pip install virtualenv -U .. _`global requirements`: https://github.com/openstack/requirements/blob/master/global-requirements.txt * Install ``tox``:: pip install tox * Generate list of distribution packages to install for testing:: tox -e bindep Now install these packages using your distribution package manager like apt-get, dnf, yum, or zypper. * Run ``tox`` from the root of the swift repo:: tox .. note:: If you installed using ``cd ~/swift; sudo python setup.py develop``, you may need to do ``cd ~/swift; sudo chown -R ${USER}:${USER} swift.egg-info`` prior to running ``tox``. * By default ``tox`` will run all of the unit test and pep8 checks listed in the ``tox.ini`` file ``envlist`` option. A subset of the test environments can be specified on the ``tox`` command line or by setting the ``TOXENV`` environment variable. For example, to run only the pep8 checks and python2.7 unit tests use:: tox -e pep8,py27 or:: TOXENV=py27,pep8 tox .. note:: As of ``tox`` version 2.0.0, most environment variables are not automatically passed to the test environment. Swift's ``tox.ini`` overrides this default behavior so that variable names matching ``SWIFT_*`` and ``*_proxy`` will be passed, but you may need to run ``tox --recreate`` for this to take effect after upgrading from ``tox`` <2.0.0. Conversely, if you do not want those environment variables to be passed to the test environment then you will need to unset them before calling ``tox``. Also, if you ever encounter DistributionNotFound, try to use ``tox --recreate`` or remove the ``.tox`` directory to force ``tox`` to recreate the dependency list. Swift's tests require having an XFS directory available in ``/tmp`` or in the ``TMPDIR`` environment variable. Swift's functional tests may be executed against a :doc:`development_saio` or other running Swift cluster using the command:: tox -e func The endpoint and authorization credentials to be used by functional tests should be configured in the ``test.conf`` file as described in the section :ref:`setup_scripts`. The environment variable ``SWIFT_TEST_POLICY`` may be set to specify a particular storage policy *name* that will be used for testing. When set, tests that would otherwise not specify a policy or choose a random policy from those available will instead use the policy specified. Tests that use more than one policy will include the specified policy in the set of policies used. The specified policy must be available on the cluster under test. For example, this command would run the functional tests using policy 'silver':: SWIFT_TEST_POLICY=silver tox -e func To run a single functional test, use the ``--no-discover`` option together with a path to a specific test method, for example:: tox -e func -- --no-discover test.functional.tests.TestFile.testCopy In-process functional testing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ If the ``test.conf`` file is not found then the functional test framework will instantiate a set of Swift servers in the same process that executes the functional tests. This 'in-process test' mode may also be enabled (or disabled) by setting the environment variable ``SWIFT_TEST_IN_PROCESS`` to a true (or false) value prior to executing ``tox -e func``. 
When using the 'in-process test' mode some server configuration options may be set using environment variables: - the optional in-memory object server may be selected by setting the environment variable ``SWIFT_TEST_IN_MEMORY_OBJ`` to a true value. - encryption may be added to the proxy pipeline by setting the environment variable ``SWIFT_TEST_IN_PROCESS_CONF_LOADER`` to ``encryption``. - a 2+1 EC policy may be installed as the default policy by setting the environment variable ``SWIFT_TEST_IN_PROCESS_CONF_LOADER`` to ``ec``. - logging to stdout may be enabled by setting ``SWIFT_TEST_DEBUG_LOGS``. For example, this command would run the in-process mode functional tests with encryption enabled in the proxy-server:: SWIFT_TEST_IN_PROCESS=1 SWIFT_TEST_IN_PROCESS_CONF_LOADER=encryption \ tox -e func This particular example may also be run using the ``func-encryption`` tox environment:: tox -e func-encryption The ``tox.ini`` file also specifies test environments for running other in-process functional test configurations, e.g.:: tox -e func-ec To debug the functional tests, use the 'in-process test' mode and pass the ``--pdb`` flag to ``tox``:: SWIFT_TEST_IN_PROCESS=1 tox -e func -- --pdb \ test.functional.tests.TestFile.testCopy The 'in-process test' mode searches for ``proxy-server.conf`` and ``swift.conf`` config files from which it copies config options and overrides some options to suit in process testing. The search will first look for config files in a ```` that may optionally be specified using the environment variable:: SWIFT_TEST_IN_PROCESS_CONF_DIR= If ``SWIFT_TEST_IN_PROCESS_CONF_DIR`` is not set, or if a config file is not found in ````, the search will then look in the ``etc/`` directory in the source tree. If the config file is still not found, the corresponding sample config file from ``etc/`` is used (e.g. ``proxy-server.conf-sample`` or ``swift.conf-sample``). When using the 'in-process test' mode ``SWIFT_TEST_POLICY`` may be set to specify a particular storage policy *name* that will be used for testing as described above. When set, this policy must exist in the ``swift.conf`` file and its corresponding ring file must exist in ```` (if specified) or ``etc/``. The test setup will set the specified policy to be the default and use its ring file properties for constructing the test object ring. This allows in-process testing to be run against various policy types and ring files. For example, this command would run the in-process mode functional tests using config files found in ``$HOME/my_tests`` and policy 'silver':: SWIFT_TEST_IN_PROCESS=1 SWIFT_TEST_IN_PROCESS_CONF_DIR=$HOME/my_tests \ SWIFT_TEST_POLICY=silver tox -e func ------------ Coding Style ------------ Swift uses flake8 with the OpenStack `hacking`_ module to enforce coding style. Install flake8 and hacking with pip or by the packages of your Operating System. It is advised to integrate flake8+hacking with your editor to get it automated and not get `caught` by Jenkins. For example for Vim the `syntastic`_ plugin can do this for you. .. _`hacking`: https://pypi.python.org/pypi/hacking .. _`syntastic`: https://github.com/scrooloose/syntastic ------------------------ Documentation Guidelines ------------------------ The documentation in docstrings should follow the PEP 257 conventions (as mentioned in the PEP 8 guidelines). More specifically: #. Triple quotes should be used for all docstrings. #. If the docstring is simple and fits on one line, then just use one line. #. 
For docstrings that take multiple lines, there should be a newline after the
opening quotes, and before the closing quotes.

#. Sphinx is used to build documentation, so use the restructured text
   markup to designate parameters, return values, etc. Documentation on the
   sphinx specific markup can be found here:
   http://sphinx.pocoo.org/markup/index.html

Installing Sphinx:

#. Install sphinx (On Ubuntu: ``sudo apt-get install python-sphinx``)
#. ``python setup.py build_sphinx``

--------
Manpages
--------

For a sanity check of your manpage changes, use this command in the root of
your Swift repo::

    ./.manpages

---------------------
License and Copyright
---------------------

You can have the following copyright and license statement at the top of
each source file. Copyright assignment is optional. New files should contain
the current year. Substantial updates can have another year added, and date
ranges are not needed. ::

    # Copyright (c) 2013 OpenStack Foundation.
    #
    # Licensed under the Apache License, Version 2.0 (the "License");
    # you may not use this file except in compliance with the License.
    # You may obtain a copy of the License at
    #
    #    http://www.apache.org/licenses/LICENSE-2.0
    #
    # Unless required by applicable law or agreed to in writing, software
    # distributed under the License is distributed on an "AS IS" BASIS,
    # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
    # implied.
    # See the License for the specific language governing permissions and
    # limitations under the License.
swift-2.17.0/doc/source/overview_large_objects.rst0000666000175100017510000001524313236061617022325 0ustar zuulzuul00000000000000.. _large-objects:

====================
Large Object Support
====================

--------
Overview
--------

Swift has a limit on the size of a single uploaded object; by default this
is 5GB. However, the download size of a single object is virtually unlimited
with the concept of segmentation. Segments of the larger object are uploaded
and a special manifest file is created that, when downloaded, sends all the
segments concatenated as a single object. This also offers much greater
upload speed with the possibility of parallel uploads of the segments.

.. _dynamic-large-objects:
.. _dlo-doc:

---------------------
Dynamic Large Objects
---------------------

.. automodule:: swift.common.middleware.dlo
    :members:
    :show-inheritance:

.. _static-large-objects:
.. _slo-doc:

--------------------
Static Large Objects
--------------------

.. automodule:: swift.common.middleware.slo
    :members:
    :show-inheritance:

----------
Direct API
----------

SLO support centers around the user-generated manifest file. After the user
has uploaded the segments into their account a manifest file needs to be
built and uploaded. All object segments must be at least 1 byte in size.
Please see the :ref:`slo-doc` documentation for further details.

----------------
Additional Notes
----------------

* With a ``GET`` or ``HEAD`` of a manifest file, the
  ``X-Object-Manifest: <container>/<prefix>`` header will be returned with
  the concatenated object so you can tell where it's getting its segments
  from.

* When updating a manifest object using a POST request, a
  ``X-Object-Manifest`` header must be included for the object to continue
  to behave as a manifest object.

* The response's ``Content-Length`` for a ``GET`` or ``HEAD`` on the
  manifest file will be the sum of all the segments in the
  ``<container>/<prefix>`` listing, dynamically.
  So, uploading additional segments after the manifest is created will cause
  the concatenated object to be that much larger; there's no need to
  recreate the manifest file.

* The response's ``Content-Type`` for a ``GET`` or ``HEAD`` on the manifest
  will be the same as the ``Content-Type`` set during the ``PUT`` request
  that created the manifest. You can easily change the ``Content-Type`` by
  reissuing the ``PUT``.

* The response's ``ETag`` for a ``GET`` or ``HEAD`` on the manifest file
  will be the MD5 sum of the concatenated string of ETags for each of the
  segments in the manifest (for DLO, from the listing
  ``<container>/<prefix>``). Usually in Swift the ETag is the MD5 sum of the
  contents of the object, and that holds true for each segment
  independently. But it's not meaningful to generate such an ETag for the
  manifest itself so this method was chosen to at least offer change
  detection.

.. note::

    If you are using the container sync feature you will need to ensure both
    your manifest file and your segment files are synced if they happen to
    be in different containers.

-------
History
-------

Dynamic large object support has gone through various iterations before
settling on this implementation.

The primary factor driving the limitation of object size in Swift is
maintaining balance among the partitions of the ring. To maintain an even
dispersion of disk usage throughout the cluster the obvious storage pattern
was to simply split larger objects into smaller segments, which could then
be glued together during a read.

Before the introduction of large object support some applications were
already splitting their uploads into segments and re-assembling them on the
client side after retrieving the individual pieces. This design allowed the
client to support backup and archiving of large data sets, but was also
frequently employed to improve performance or reduce errors due to network
interruption. The major disadvantage of this method is that knowledge of the
original partitioning scheme is required to properly reassemble the object,
which is not practical for some use cases, such as CDN origination.

In order to eliminate any barrier to entry for clients wanting to store
objects larger than 5GB, initially we also prototyped fully transparent
support for large object uploads. A fully transparent implementation would
support a larger max size by automatically splitting objects into segments
during upload within the proxy without any changes to the client API. All
segments were completely hidden from the client API.

This solution introduced a number of challenging failure conditions into the
cluster, wouldn't provide the client with any option to do parallel uploads,
and had no basis for a resume feature. The transparent implementation was
deemed just too complex for the benefit.

The current "user manifest" design was chosen in order to provide a
transparent download of large objects to the client and still provide the
uploading client a clean API to support segmented uploads.

To meet as many use cases as possible Swift supports two types of large
object manifests. Dynamic and static large object manifests both support the
same idea of allowing the user to upload many segments to be later
downloaded as a single file.

Dynamic large objects rely on a container listing to provide the manifest.
This has the advantage of allowing the user to add/remove segments from the
manifest at any time. It has the disadvantage of relying on eventually
consistent container listings.
All three copies of the container dbs must be updated for a complete list to be guaranteed. Also, all segments must be in a single container, which can limit concurrent upload speed. Static large objects rely on a user provided manifest file. A user can upload objects into multiple containers and then reference those objects (segments) in a self generated manifest file. Future GETs to that file will download the concatenation of the specified segments. This has the advantage of being able to immediately download the complete object once the manifest has been successfully PUT. Being able to upload segments into separate containers also improves concurrent upload speed. It has the disadvantage that the manifest is finalized once PUT. Any changes to it means it has to be replaced. Between these two methods the user has great flexibility in how (s)he chooses to upload and retrieve large objects to Swift. Swift does not, however, stop the user from harming themselves. In both cases the segments are deletable by the user at any time. If a segment was deleted by mistake, a dynamic large object, having no way of knowing it was ever there, would happily ignore the deleted file and the user will get an incomplete file. A static large object would, when failing to retrieve the object specified in the manifest, drop the connection and the user would receive partial results. swift-2.17.0/doc/source/ring.rst0000666000175100017510000000104713236061617016530 0ustar zuulzuul00000000000000.. _consistent_hashing_ring: ******************************** Partitioned Consistent Hash Ring ******************************** .. _ring: Ring ==== .. automodule:: swift.common.ring.ring :members: :undoc-members: :show-inheritance: .. _ring-builder: Ring Builder ============ .. automodule:: swift.common.ring.builder :members: :undoc-members: :show-inheritance: Composite Ring Builder ====================== .. automodule:: swift.common.ring.composite_builder :members: :undoc-members: :show-inheritance: swift-2.17.0/doc/source/overview_architecture.rst0000666000175100017510000002307213236061617022203 0ustar zuulzuul00000000000000============================ Swift Architectural Overview ============================ ------------ Proxy Server ------------ The Proxy Server is responsible for tying together the rest of the Swift architecture. For each request, it will look up the location of the account, container, or object in the ring (see below) and route the request accordingly. For Erasure Code type policies, the Proxy Server is also responsible for encoding and decoding object data. See :doc:`overview_erasure_code` for complete information on Erasure Code support. The public API is also exposed through the Proxy Server. A large number of failures are also handled in the Proxy Server. For example, if a server is unavailable for an object PUT, it will ask the ring for a handoff server and route there instead. When objects are streamed to or from an object server, they are streamed directly through the proxy server to or from the user -- the proxy server does not spool them. -------- The Ring -------- A ring represents a mapping between the names of entities stored on disk and their physical location. There are separate rings for accounts, containers, and one object ring per storage policy. When other components need to perform any operation on an object, container, or account, they need to interact with the appropriate ring to determine its location in the cluster. 
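For example, an operator can ask a ring where the replicas of a given object
live using the ``swift-get-nodes`` tool (the account, container, and object
names here are illustrative)::

    $ swift-get-nodes /etc/swift/object.ring.gz AUTH_test mycontainer myobject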
The Ring maintains this mapping using zones, devices, partitions, and
replicas. Each partition in the ring is replicated, by default, 3 times
across the cluster, and the locations for a partition are stored in the
mapping maintained by the ring. The ring is also responsible for determining
which devices are used for handoff in failure scenarios.

The replicas of each partition will be isolated onto as many distinct
regions, zones, servers and devices as the capacity of these failure domains
allows. If there are fewer failure domains at a given tier than replicas of
the partition assigned within a tier (e.g. a 3 replica cluster with 2
servers), or the available capacity across the failure domains within a tier
is not well balanced, it will not be possible to achieve both even capacity
distribution (`balance`) as well as complete isolation of replicas across
failure domains (`dispersion`). When this occurs the ring management tools
will display a warning so that the operator can evaluate the cluster
topology.

Data is evenly distributed across the capacity available in the cluster as
described by the devices' weights. Weights can be used to balance the
distribution of partitions on drives across the cluster. This can be useful,
for example, when different sized drives are used in a cluster. Device
weights can also be used when adding or removing capacity or failure domains
to control how many partitions are reassigned during a rebalance to be moved
as soon as replication bandwidth allows.

.. note::

    Prior to Swift 2.1.0 it was not possible to restrict partition movement
    by device weight when adding new failure domains, which could allow
    extremely unbalanced rings. The greedy dispersion algorithm is now
    subject to the constraints of the physical capacity in the system, but
    can be adjusted within reason via the overload option. Artificially
    unbalancing the partition assignment without respect to capacity can
    introduce unexpected full devices when a given failure domain does not
    physically support its share of the used capacity in the tier.

When partitions need to be moved around (for example if a device is added to
the cluster), the ring ensures that a minimum number of partitions are moved
at a time, and only one replica of a partition is moved at a time.

The ring is used by the Proxy server and several background processes (like
replication). See :doc:`overview_ring` for complete information on the ring.

----------------
Storage Policies
----------------

Storage Policies provide a way for object storage providers to differentiate
service levels, features and behaviors of a Swift deployment. Each Storage
Policy configured in Swift is exposed to the client via an abstract name.
Each device in the system is assigned to one or more Storage Policies. This
is accomplished through the use of multiple object rings, where each Storage
Policy has an independent object ring, which may include a subset of
hardware implementing a particular differentiation.

For example, one might have the default policy with 3x replication, and
create a second policy which, when applied to new containers only uses 2x
replication. Another might add SSDs to a set of storage nodes and create a
performance tier storage policy for certain containers to have their objects
stored there. Yet another might be the use of Erasure Coding to define a
cold-storage tier.
This mapping is then exposed on a per-container basis, where each container can be assigned a specific storage policy when it is created, which remains in effect for the lifetime of the container. Applications require minimal awareness of storage policies to use them; once a container has been created with a specific policy, all objects stored in it will be stored in accordance with that policy.

The Storage Policies feature is implemented throughout the entire code base, so it is an important concept in understanding Swift architecture. See :doc:`overview_policies` for complete information on storage policies.

-------------
Object Server
-------------

The Object Server is a very simple blob storage server that can store, retrieve and delete objects stored on local devices. Objects are stored as binary files on the filesystem with metadata stored in the file's extended attributes (xattrs). This requires that the underlying filesystem choice for object servers support xattrs on files. Some filesystems, like ext3, have xattrs turned off by default.

Each object is stored using a path derived from the object name's hash and the operation's timestamp. Last write always wins, and ensures that the latest object version will be served. A deletion is also treated as a version of the file (a 0 byte file ending with ".ts", which stands for tombstone). This ensures that deleted files are replicated correctly and older versions don't magically reappear due to failure scenarios.

----------------
Container Server
----------------

The Container Server's primary job is to handle listings of objects. It doesn't know where those objects are, just what objects are in a specific container. The listings are stored as sqlite database files, and replicated across the cluster similar to how objects are. Statistics are also tracked, including the total number of objects and the total storage usage for that container.

--------------
Account Server
--------------

The Account Server is very similar to the Container Server, except that it is responsible for listings of containers rather than objects.

-----------
Replication
-----------

Replication is designed to keep the system in a consistent state in the face of temporary error conditions like network outages or drive failures.

The replication processes compare local data with each remote copy to ensure they all contain the latest version. Object replication uses a hash list to quickly compare subsections of each partition, and container and account replication use a combination of hashes and shared high water marks.

Replication updates are push based. For object replication, updating is just a matter of rsyncing files to the peer. Account and container replication push missing records over HTTP or rsync whole database files.

The replicator also ensures that data is removed from the system. When an item (object, container, or account) is deleted, a tombstone is set as the latest version of the item. The replicator will see the tombstone and ensure that the item is removed from the entire system.

See :doc:`overview_replication` for complete information on replication.

--------------
Reconstruction
--------------

The reconstructor is used by Erasure Code policies and is analogous to the replicator for Replication type policies. See :doc:`overview_erasure_code` for complete information on both Erasure Code support and the reconstructor.

--------
Updaters
--------

There are times when container or account data cannot be immediately updated.
This usually occurs during failure scenarios or periods of high load. If an update fails, the update is queued locally on the filesystem, and the updater will process the failed updates. This is where an eventual consistency window will most likely come into play. For example, suppose a container server is under load and a new object is put into the system. The object will be immediately available for reads as soon as the proxy server responds to the client with success. However, the container server did not update the object listing, so the update is queued for a later attempt. Container listings, therefore, may not immediately contain the object.

In practice, the consistency window is only as large as the frequency at which the updater runs, and may not even be noticed, as the proxy server will route listing requests to the first container server that responds. The server under load may not be the one that serves subsequent listing requests -- one of the other two replicas may handle the listing.

--------
Auditors
--------

Auditors crawl the local server checking the integrity of the objects, containers, and accounts. If corruption is found (in the case of bit rot, for example), the file is quarantined, and replication will replace the bad file from another replica. If other errors are found they are logged (for example, an object's listing cannot be found on any of the container servers that should hold it).

swift-2.17.0/doc/source/deployment_guide.rst0000666000175100017510000044754413236061617021136 0ustar zuulzuul00000000000000
Deployment Guide
================

-----------------------
Hardware Considerations
-----------------------

Swift is designed to run on commodity hardware. At Rackspace, our storage servers are currently running fairly generic 4U servers with 24 2T SATA drives and 8 cores of processing power. RAID on the storage drives is not required and not recommended. Swift's disk usage pattern is the worst case possible for RAID, and performance degrades very quickly using RAID 5 or 6.

------------------
Deployment Options
------------------

The Swift services run completely autonomously, which provides for a lot of flexibility when architecting the hardware deployment for Swift. The 4 main services are:

#. Proxy Services
#. Object Services
#. Container Services
#. Account Services

The Proxy Services are more CPU and network I/O intensive. If you are using 10g networking to the proxy, or are terminating SSL traffic at the proxy, greater CPU power will be required.

The Object, Container, and Account Services (Storage Services) are more disk and network I/O intensive.

The easiest deployment is to install all services on each server. There is nothing wrong with doing this, as it scales each service out horizontally.

At Rackspace, we put the Proxy Services on their own servers and all of the Storage Services on the same server. This allows us to send 10g networking to the proxy and 1g to the storage servers, and keep load balancing to the proxies more manageable. Storage Services scale out horizontally as storage servers are added, and we can scale overall API throughput by adding more Proxies.

If you need more throughput to either Account or Container Services, they may each be deployed to their own servers. For example, you might use faster (but more expensive) SAS or even SSD drives to get faster disk I/O to the databases.

A high-availability (HA) deployment of Swift requires that multiple proxy servers are deployed and requests are load-balanced between them.
Each proxy server instance is stateless and able to respond to requests for the entire cluster. Load balancing and network design are left as an exercise to the reader, but this is a very important part of the cluster, so time should be spent designing the network for a Swift cluster.

---------------------
Web Front End Options
---------------------

Swift comes with an integral web front end. However, it can also be deployed as a request processor for Apache2 using mod_wsgi as described in the :doc:`Apache Deployment Guide <apache_deployment_guide>`.

.. _ring-preparing:

------------------
Preparing the Ring
------------------

The first step is to determine the number of partitions that will be in the ring. We recommend that there be a minimum of 100 partitions per drive to ensure even distribution across the drives. A good starting point might be to figure out the maximum number of drives the cluster will contain, multiply that by 100, and then round up to the nearest power of two.

For example, imagine we are building a cluster that will have no more than 5,000 drives. That would mean that we would have a total number of 500,000 partitions, which is pretty close to 2^19, rounded up.

It is also a good idea to keep the number of partitions relatively small. The more partitions there are, the more work that has to be done by the replicators and other backend jobs, and the more memory the rings consume in process. The goal is to find a good balance between small rings and maximum cluster size.

The next step is to determine the number of replicas of the data to store. Currently it is recommended to use 3 (as this is the only value that has been tested). The higher the number, the more storage that is used, but the less likely you are to lose data.

It is also important to determine how many zones the cluster should have. It is recommended to start with a minimum of 5 zones. You can start with fewer, but our testing has shown that having at least five zones is optimal when failures occur. We also recommend trying to configure the zones at as high a level as possible to create as much isolation as possible. Some example things to take into consideration can include physical location, power availability, and network connectivity. For example, in a small cluster you might decide to split the zones up by cabinet, with each cabinet having its own power and network connectivity. The zone concept is very abstract, so feel free to use it in whatever way best isolates your data from failure. Each zone exists in a region.

A region is also an abstract concept that may be used to distinguish between geographically separated areas, as well as within the same datacenter. Regions and zones are referenced by a positive integer.

You can now start building the ring with::

    swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours>

This will start the ring build process creating the <builder_file> with 2^<part_power> partitions. <min_part_hours> is the time in hours before a specific partition can be moved in succession (24 is a good value for this).
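As a worked example consistent with the sizing discussion above (the builder file name is arbitrary), a cluster planned for at most 5,000 drives, with 3 replicas and a 24 hour minimum between moves of a given partition, would be created with::

    swift-ring-builder object.builder create 19 3 24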
Devices can be added to the ring with::

    swift-ring-builder <builder_file> add r<region>z<zone>-<ip>:<port>/<device_name>_<meta> <weight>

This will add a device to the ring, where <builder_file> is the name of the builder file that was created previously, <region> is the number of the region the zone is in, <zone> is the number of the zone this device is in, <ip> is the ip address of the server the device is in, <port> is the port number that the server is running on, <device_name> is the name of the device on the server (for example: sdb1), <meta> is a string of metadata for the device (optional), and <weight> is a float weight that determines how many partitions are put on the device relative to the rest of the devices in the cluster (a good starting point is 100.0 x TB on the drive). Add each device that will be initially in the cluster.

Once all of the devices are added to the ring, run::

    swift-ring-builder <builder_file> rebalance

This will distribute the partitions across the drives in the ring. It is important, whenever making changes to the ring, to make all the changes required before running rebalance. This will ensure that the ring stays as balanced as possible, and that as few partitions as possible are moved.

The above process should be done to make a ring for each storage service (Account, Container and Object). The builder files will be needed in future changes to the ring, so it is very important that these be kept and backed up. The resulting .ring.gz ring files should be pushed to all of the servers in the cluster. For more information about building rings, running swift-ring-builder with no options will display help text with available commands and options. More information on how the ring works internally can be found in the :doc:`Ring Overview <overview_ring>`.

.. _server-per-port-configuration:

-------------------------------
Running object-servers Per Disk
-------------------------------

The lack of true asynchronous file I/O on Linux leaves the object-server workers vulnerable to misbehaving disks. Because any object-server worker can service a request for any disk, and a slow I/O request blocks the eventlet hub, a single slow disk can impair an entire storage node. This also prevents object servers from fully utilizing all their disks during heavy load.

Another way to get full I/O isolation is to give each disk on a storage node a different port in the storage policy rings. Then set the :ref:`servers_per_port <object-server-default-options>` option in the object-server config. NOTE: while the purpose of this config setting is to run one or more object-server worker processes per *disk*, the implementation just runs object-servers per unique port of local devices in the rings. The deployer must combine this option with appropriately-configured rings to benefit from this feature.

Here's an example (abbreviated) old-style ring (2 node cluster with 2 disks each)::

    Devices:    id  region  zone    ip address  port  replication ip  replication port  name
                 0       1     1       1.1.0.1  6200         1.1.0.1              6200    d1
                 1       1     1       1.1.0.1  6200         1.1.0.1              6200    d2
                 2       1     2       1.1.0.2  6200         1.1.0.2              6200    d3
                 3       1     2       1.1.0.2  6200         1.1.0.2              6200    d4

And here's the same ring set up for `servers_per_port`::

    Devices:    id  region  zone    ip address  port  replication ip  replication port  name
                 0       1     1       1.1.0.1  6200         1.1.0.1              6200    d1
                 1       1     1       1.1.0.1  6201         1.1.0.1              6201    d2
                 2       1     2       1.1.0.2  6200         1.1.0.2              6200    d3
                 3       1     2       1.1.0.2  6201         1.1.0.2              6201    d4

When migrating from normal to `servers_per_port`, perform these steps in order:

#. Upgrade Swift code to a version capable of doing `servers_per_port`.

#. Enable `servers_per_port` with a value greater than zero.

#. Restart `swift-object-server` processes with a SIGHUP.
   At this point, you will have the `servers_per_port` number of `swift-object-server` processes serving all requests for all disks on each node. This preserves availability, but you should perform the next step as quickly as possible.

#. Push out new rings that actually have different ports per disk on each server. One of the ports in the new ring should be the same as the port used in the old ring ("6200" in the example above). This will cover existing proxy-server processes that haven't loaded the new ring yet. They can still talk to any storage node regardless of whether or not that storage node has loaded the ring and started object-server processes on the new ports.

If you do not run a separate object-server for replication, then this setting must be available to the object-replicator and object-reconstructor (i.e. appear in the [DEFAULT] config section).

.. _general-service-configuration:

-----------------------------
General Service Configuration
-----------------------------

Most Swift services fall into two categories: Swift's WSGI servers and background daemons. For more information specific to the configuration of Swift's WSGI servers with paste.deploy, see :ref:`general-server-configuration`.

Configuration for servers and daemons can be expressed together in the same file for each type of server, or separately. If a section required by the service being started is missing, there will be an error; sections not used by the service are ignored.

Consider the example of an object storage node. By convention, configuration for the object-server, object-updater, object-replicator, object-auditor, and object-reconstructor exist in a single file ``/etc/swift/object-server.conf``::

    [DEFAULT]
    reclaim_age = 604800

    [pipeline:main]
    pipeline = object-server

    [app:object-server]
    use = egg:swift#object

    [object-replicator]

    [object-updater]

    [object-auditor]

Swift services expect a configuration path as the first argument::

    $ swift-object-auditor
    Usage: swift-object-auditor CONFIG [options]
    Error: missing config path argument

If you omit the object-auditor section, this file cannot be used as the configuration path when starting the ``swift-object-auditor`` daemon::

    $ swift-object-auditor /etc/swift/object-server.conf
    Unable to find object-auditor config section in /etc/swift/object-server.conf

If the configuration path is a directory instead of a file, all of the files in the directory with the file extension ".conf" will be combined to generate the configuration object which is delivered to the Swift service. This is referred to generally as "directory based configuration".

Directory based configuration leverages ConfigParser's native multi-file support. Files ending in ".conf" in the given directory are parsed in lexicographical order. Filenames starting with '.' are ignored. A mixture of file and directory configuration paths is not supported - if the configuration path is a file, only that file will be parsed.

The Swift service management tool ``swift-init`` has adopted the convention of looking for ``/etc/swift/{type}-server.conf.d/`` if the file ``/etc/swift/{type}-server.conf`` does not exist.

When using directory based configuration, if the same option under the same section appears more than once in different files, the last value parsed overrides previous occurrences.
You can ensure proper override precedence by prefixing the files in the configuration directory with numerical values::

    /etc/swift/
        default.base
        object-server.conf.d/
            000_default.conf -> ../default.base
            001_default-override.conf
            010_server.conf
            020_replicator.conf
            030_updater.conf
            040_auditor.conf

You can inspect the resulting combined configuration object using the ``swift-config`` command line tool.

.. _general-server-configuration:

----------------------------
General Server Configuration
----------------------------

Swift uses paste.deploy (http://pythonpaste.org/deploy/) to manage server configurations. Default configuration options are set in the `[DEFAULT]` section, and any options specified there can be overridden in any of the other sections BUT ONLY BY USING THE SYNTAX ``set option_name = value``. This is the unfortunate way paste.deploy works and I'll try to explain it in full.

First, here's an example paste.deploy configuration file::

    [DEFAULT]
    name1 = globalvalue
    name2 = globalvalue
    name3 = globalvalue
    set name4 = globalvalue

    [pipeline:main]
    pipeline = myapp

    [app:myapp]
    use = egg:mypkg#myapp
    name2 = localvalue
    set name3 = localvalue
    set name5 = localvalue
    name6 = localvalue

The resulting configuration that myapp receives is::

    global {'__file__': '/etc/mypkg/wsgi.conf', 'here': '/etc/mypkg',
            'name1': 'globalvalue',
            'name2': 'globalvalue',
            'name3': 'localvalue',
            'name4': 'globalvalue',
            'name5': 'localvalue',
            'set name4': 'globalvalue'}
    local {'name6': 'localvalue'}

So, `name1` got the global value, which is fine since it's only in the `DEFAULT` section anyway.

`name2` got the global value from `DEFAULT` even though it appears to be overridden in the `app:myapp` subsection. This is just the unfortunate way paste.deploy works (at least at the time of this writing).

`name3` got the local value from the `app:myapp` subsection because it is using the special paste.deploy syntax of ``set option_name = value``. So, if you want a default value for most app/filters but want to override it in one subsection, this is how you do it.

`name4` got the global value from `DEFAULT` since it's only in that section anyway. But, since we used the ``set`` syntax in the `DEFAULT` section even though we shouldn't, notice we also got a ``set name4`` variable. Weird, but probably not harmful.

`name5` got the local value from the `app:myapp` subsection since it's only there anyway, but notice that it is in the global configuration and not the local configuration. This is because we used the ``set`` syntax to set the value. Again, weird, but not harmful since Swift just treats the two sets of configuration values as one set anyway.

`name6` got the local value from the `app:myapp` subsection since it's only there, and since we didn't use the ``set`` syntax, it's only in the local configuration and not the global one. Though, as indicated above, there is no special distinction with Swift.

That's quite an explanation for something that should be so much simpler, but it might be important to know how paste.deploy interprets configuration files. The main rule to remember when working with Swift configuration files is:

.. note::

    Use the ``set option_name = value`` syntax in subsections if the option is also set in the ``[DEFAULT]`` section. Don't get in the habit of always using the ``set`` syntax or you'll probably mess up your non-paste.deploy configuration files.
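As a minimal Swift-flavored sketch of that rule (the pipeline and values here are illustrative, not a recommended configuration), the following keeps INFO logging for everything except one filter, which opts in to DEBUG via the ``set`` syntax::

    [DEFAULT]
    log_level = INFO

    [pipeline:main]
    pipeline = healthcheck proxy-server

    [filter:healthcheck]
    use = egg:swift#healthcheck
    set log_level = DEBUG

    [app:proxy-server]
    use = egg:swift#proxy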
-------------------- Common configuration -------------------- An example of common configuration file can be found at etc/swift.conf-sample The following configuration options are available: =================== ========== ============================================= Option Default Description ------------------- ---------- --------------------------------------------- max_header_size 8192 max_header_size is the max number of bytes in the utf8 encoding of each header. Using 8192 as default because eventlet use 8192 as max size of header line. This value may need to be increased when using identity v3 API tokens including more than 7 catalog entries. See also include_service_catalog in proxy-server.conf-sample (documented in overview_auth.rst). extra_header_count 0 By default the maximum number of allowed headers depends on the number of max allowed metadata settings plus a default value of 32 for regular http headers. If for some reason this is not enough (custom middleware for example) it can be increased with the extra_header_count constraint. =================== ========== ============================================= --------------------------- Object Server Configuration --------------------------- An Example Object Server configuration can be found at etc/object-server.conf-sample in the source code repository. The following configuration sections are available: * :ref:`[DEFAULT] ` * `[object-server]`_ * `[object-replicator]`_ * `[object-reconstructor]`_ * `[object-updater]`_ * `[object-auditor]`_ .. _object-server-default-options: ********* [DEFAULT] ********* ================================ ========== ============================================ Option Default Description -------------------------------- ---------- -------------------------------------------- swift_dir /etc/swift Swift configuration directory devices /srv/node Parent directory of where devices are mounted mount_check true Whether or not check if the devices are mounted to prevent accidentally writing to the root device bind_ip 0.0.0.0 IP Address for server to bind to bind_port 6200 Port for server to bind to bind_timeout 30 Seconds to attempt bind before giving up backlog 4096 Maximum number of allowed pending connections workers auto Override the number of pre-forked workers that will accept connections. If set it should be an integer, zero means no fork. If unset, it will try to default to the number of effective cpu cores and fallback to one. Increasing the number of workers helps slow filesystem operations in one request from negatively impacting other requests, but only the :ref:`servers_per_port ` option provides complete I/O isolation with no measurable overhead. servers_per_port 0 If each disk in each storage policy ring has unique port numbers for its "ip" value, you can use this setting to have each object-server worker only service requests for the single disk matching the port in the ring. The value of this setting determines how many worker processes run for each port (disk) in the ring. If you have 24 disks per server, and this setting is 4, then each storage node will have 1 + (24 * 4) = 97 total object-server processes running. This gives complete I/O isolation, drastically reducing the impact of slow disks on storage node performance. The object-replicator and object-reconstructor need to see this setting too, so it must be in the [DEFAULT] section. See :ref:`server-per-port-configuration`. 
max_clients 1024 Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. disable_fallocate false Disable "fast fail" fallocate checks if the underlying filesystem does not support it. log_name swift Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory log_max_line_length 0 Caps the length of log lines to the value given; no limit if set to 0, the default. log_custom_handlers None Comma-separated list of functions to call to setup custom log handlers. log_udp_host Override log_address log_udp_port 514 UDP log port log_statsd_host None Enables StatsD logging; IPv4/IPv6 address or a hostname. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. log_statsd_port 8125 log_statsd_default_sample_rate 1.0 log_statsd_sample_rate_factor 1.0 log_statsd_metric_prefix eventlet_debug false If true, turn on debug logging for eventlet fallocate_reserve 1% You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. This is useful for systems that behave badly when they completely run out of space; you can make the services pretend they're out of space early. conn_timeout 0.5 Time to wait while attempting to connect to another backend node. node_timeout 3 Time to wait while sending each chunk of data to another backend node. client_timeout 60 Time to wait while receiving each chunk of data from a client or another backend node network_chunk_size 65536 Size of chunks to read/write over the network disk_chunk_size 65536 Size of chunks to read/write to disk container_update_timeout 1 Time to wait while sending a container update on object update. reclaim_age 604800 Time elapsed in seconds before the tombstone file representing a deleted object can be reclaimed. This is the maximum window for your consistency engine. If a node that was disconnected from the cluster because of a fault is reintroduced into the cluster after this window without having its data purged it will result in dark data. This setting should be consistent across all object services. nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ================================ ========== ============================================ .. 
_object-server-options: *************** [object-server] *************** ================================== ====================== =============================================== Option Default Description ---------------------------------- ---------------------- ----------------------------------------------- use paste.deploy entry point for the object server. For most cases, this should be `egg:swift#object`. set log_name object-server Label used when logging set log_facility LOG_LOCAL0 Syslog log facility set log_level INFO Logging level set log_requests True Whether or not to log each request set log_address /dev/log Logging directory user swift User to run as max_upload_time 86400 Maximum time allowed to upload an object slow 0 If > 0, Minimum time in seconds for a PUT or DELETE request to complete. This is only useful to simulate slow devices during testing and development. mb_per_sync 512 On PUT requests, sync file every n MB keep_cache_size 5242880 Largest object size to keep in buffer cache keep_cache_private false Allow non-public objects to stay in kernel's buffer cache allowed_headers Content-Disposition, Comma separated list of headers Content-Encoding, that can be set in metadata on an object. X-Delete-At, This list is in addition to X-Object-Manifest, X-Object-Meta-* headers and cannot include X-Static-Large-Object Content-Type, etag, Content-Length, or deleted auto_create_account_prefix . Prefix used when automatically creating accounts. replication_server Configure parameter for creating specific server. To handle all verbs, including replication verbs, do not specify "replication_server" (this is the default). To only handle replication, set to a True value (e.g. "True" or "1"). To handle only non-replication verbs, set to "False". Unless you have a separate replication network, you should not specify any value for "replication_server". replication_concurrency 4 Set to restrict the number of concurrent incoming SSYNC requests; set to 0 for unlimited replication_concurrency_per_device 1 Set to restrict the number of concurrent incoming SSYNC requests per device; set to 0 for unlimited requests per devices. This can help control I/O to each device. This does not override replication_concurrency described above, so you may need to adjust both parameters depending on your hardware or network capacity. replication_lock_timeout 15 Number of seconds to wait for an existing replication device lock before giving up. replication_failure_threshold 100 The number of subrequest failures before the replication_failure_ratio is checked replication_failure_ratio 1.0 If the value of failures / successes of SSYNC subrequests exceeds this ratio, the overall SSYNC request will be aborted splice no Use splice() for zero-copy object GETs. This requires Linux kernel version 3.0 or greater. If you set "splice = yes" but the kernel does not support it, error messages will appear in the object server logs at startup, but your object servers should continue to function. nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. 
Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. eventlet_tpool_num_threads auto The number of threads in eventlet's thread pool. Most IO will occur in the object server's main thread, but certain "heavy" IO operations will occur in separate IO threads, managed by eventlet. The default value is auto, whose actual value is dependent on the servers_per_port value. If servers_per_port is zero then it uses eventlet's default (currently 20 threads). If the servers_per_port is nonzero then it'll only use 1 thread per process. This value can be overridden with an integer value. ================================== ====================== =============================================== ******************* [object-replicator] ******************* =========================== ======================== ================================ Option Default Description --------------------------- ------------------------ -------------------------------- log_name object-replicator Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory daemonize yes Whether or not to run replication as a daemon interval 30 Time in seconds to wait between replication passes concurrency 1 Number of replication workers to spawn sync_method rsync The sync method to use; default is rsync but you can use ssync to try the EXPERIMENTAL all-swift-code-no-rsync-callouts method. Once ssync is verified as or better than, rsync, we plan to deprecate rsync so we can move on with more features for replication. rsync_timeout 900 Max duration of a partition rsync rsync_bwlimit 0 Bandwidth limit for rsync in kB/s. 0 means unlimited. rsync_io_timeout 30 Timeout value sent to rsync --timeout and --contimeout options rsync_compress no Allow rsync to compress data which is transmitted to destination node during sync. However, this is applicable only when destination node is in a different region than the local one. NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might slow down the syncing process. stats_interval 300 Interval in seconds between logging replication statistics handoffs_first false If set to True, partitions that are not supposed to be on the node will be replicated first. The default setting should not be changed, except for extreme situations. handoff_delete auto By default handoff partitions will be removed when it has successfully replicated to all the canonical nodes. If set to an integer n, it will remove the partition if it is successfully replicated to n nodes. The default setting should not be changed, except for extreme situations. node_timeout DEFAULT or 10 Request timeout to external services. This uses what's set here, or what's set in the DEFAULT section, or 10 (though other sections use 3 as the final default). http_timeout 60 Max duration of an http request. This is for REPLICATE finalization calls and so should be longer than node_timeout. lockup_timeout 1800 Attempts to kill all workers if nothing replicates for lockup_timeout seconds rsync_module {replication_ip}::object Format of the rsync module where the replicator will send data. The configuration value can include some variables that will be extracted from the ring. 
Variables must follow the format {NAME} where NAME is one of: ip, port, replication_ip, replication_port, region, zone, device, meta. See etc/rsyncd.conf-sample for some examples. rsync_error_log_line_length 0 Limits how long rsync error log lines are ring_check_interval 15 Interval for checking new ring file recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. =========================== ======================== ================================ ********************** [object-reconstructor] ********************** =========================== ======================== ================================ Option Default Description --------------------------- ------------------------ -------------------------------- log_name object-reconstructor Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory daemonize yes Whether or not to run reconstruction as a daemon interval 30 Time in seconds to wait between reconstruction passes reconstructor_workers 0 Maximum number of worker processes to spawn. Each worker will handle a subset of devices. Devices will be assigned evenly among the workers so that workers cycle at similar intervals (which can lead to fewer workers than requested). You can not have more workers than devices. If you have no devices only a single worker is spawned. concurrency 1 Number of reconstruction threads to spawn per reconstructor process. stats_interval 300 Interval in seconds between logging reconstruction statistics handoffs_only false The handoffs_only mode option is for special case emergency situations during rebalance such as disk full in the cluster. This option SHOULD NOT BE CHANGED, except for extreme situations. When handoffs_only mode is enabled the reconstructor will *only* revert fragments from handoff nodes to primary nodes and will not sync primary nodes with neighboring primary nodes. This will force the reconstructor to sync and delete handoffs' fragments more quickly and minimize the time of the rebalance by limiting the number of rebuilds. The handoffs_only option is only for temporary use and should be disabled as soon as the emergency situation has been resolved. node_timeout DEFAULT or 10 Request timeout to external services. The value used is the value set in this section, or the value set in the DEFAULT section, or 10. http_timeout 60 Max duration of an http request. This is for REPLICATE finalization calls and so should be longer than node_timeout. lockup_timeout 1800 Attempts to kill all threads if no fragment has been reconstructed for lockup_timeout seconds. 
ring_check_interval 15 Interval for checking new ring file recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. =========================== ======================== ================================ **************** [object-updater] **************** =================== =================== ========================================== Option Default Description ------------------- ------------------- ------------------------------------------ log_name object-updater Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory interval 300 Minimum time for a pass to take concurrency 1 Number of updater workers to spawn node_timeout DEFAULT or 10 Request timeout to external services. This uses what's set here, or what's set in the DEFAULT section, or 10 (though other sections use 3 as the final default). objects_per_second 50 Maximum objects updated per second. Should be tuned according to individual system specs. 0 is unlimited. slowdown 0.01 Time in seconds to wait between objects. Deprecated in favor of objects_per_second. report_interval 300 Interval in seconds between logging statistics about the current update pass. recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. =================== =================== ========================================== **************** [object-auditor] **************** =========================== =================== ========================================== Option Default Description --------------------------- ------------------- ------------------------------------------ log_name object-auditor Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory log_time 3600 Frequency of status logs in seconds. 
interval 30 Time in seconds to wait between auditor passes disk_chunk_size 65536 Size of chunks read during auditing files_per_second 20 Maximum files audited per second per auditor process. Should be tuned according to individual system specs. 0 is unlimited. bytes_per_second 10000000 Maximum bytes audited per second per auditor process. Should be tuned according to individual system specs. 0 is unlimited. concurrency 1 The number of parallel processes to use for checksum auditing. zero_byte_files_per_second 50 object_size_stats recon_cache_path /var/cache/swift Path to recon cache rsync_tempfile_timeout auto Time elapsed in seconds before rsync tempfiles will be unlinked. Config value of "auto" try to use object-replicator's rsync_timeout + 900 or fallback to 86400 (1 day). nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. =========================== =================== ========================================== ------------------------------ Container Server Configuration ------------------------------ An example Container Server configuration can be found at etc/container-server.conf-sample in the source code repository. The following configuration sections are available: * :ref:`[DEFAULT] ` * `[container-server]`_ * `[container-replicator]`_ * `[container-updater]`_ * `[container-auditor]`_ .. _container_server_default_options: ********* [DEFAULT] ********* =============================== ========== ============================================ Option Default Description ------------------------------- ---------- -------------------------------------------- swift_dir /etc/swift Swift configuration directory devices /srv/node Parent directory of where devices are mounted mount_check true Whether or not check if the devices are mounted to prevent accidentally writing to the root device bind_ip 0.0.0.0 IP Address for server to bind to bind_port 6201 Port for server to bind to bind_timeout 30 Seconds to attempt bind before giving up backlog 4096 Maximum number of allowed pending connections workers auto Override the number of pre-forked workers that will accept connections. If set it should be an integer, zero means no fork. If unset, it will try to default to the number of effective cpu cores and fallback to one. Increasing the number of workers may reduce the possibility of slow file system operations in one request from negatively impacting other requests. See :ref:`general-service-tuning`. max_clients 1024 Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. 
user swift User to run as disable_fallocate false Disable "fast fail" fallocate checks if the underlying filesystem does not support it. log_name swift Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory log_max_line_length 0 Caps the length of log lines to the value given; no limit if set to 0, the default. log_custom_handlers None Comma-separated list of functions to call to setup custom log handlers. log_udp_host Override log_address log_udp_port 514 UDP log port log_statsd_host None Enables StatsD logging; IPv4/IPv6 address or a hostname. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. log_statsd_port 8125 log_statsd_default_sample_rate 1.0 log_statsd_sample_rate_factor 1.0 log_statsd_metric_prefix eventlet_debug false If true, turn on debug logging for eventlet fallocate_reserve 1% You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. This is useful for systems that behave badly when they completely run out of space; you can make the services pretend they're out of space early. db_preallocation off If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation. nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. =============================== ========== ============================================ ****************** [container-server] ****************** ============================== ================ ======================================== Option Default Description ------------------------------ ---------------- ---------------------------------------- use paste.deploy entry point for the container server. For most cases, this should be `egg:swift#container`. set log_name container-server Label used when logging set log_facility LOG_LOCAL0 Syslog log facility set log_level INFO Logging level set log_requests True Whether or not to log each request set log_address /dev/log Logging directory node_timeout 3 Request timeout to external services conn_timeout 0.5 Connection timeout to external services allow_versions false Enable/Disable object versioning feature auto_create_account_prefix . Prefix used when automatically replication_server Configure parameter for creating specific server. To handle all verbs, including replication verbs, do not specify "replication_server" (this is the default). To only handle replication, set to a True value (e.g. "True" or "1"). To handle only non-replication verbs, set to "False". 
Unless you have a separate replication network, you should not specify any value for "replication_server". nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ============================== ================ ======================================== ********************** [container-replicator] ********************** ================== =========================== ============================= Option Default Description ------------------ --------------------------- ----------------------------- log_name container-replicator Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory per_diff 1000 Maximum number of database rows that will be sync'd in a single HTTP replication request. Databases with less than or equal to this number of differing rows will always be sync'd using an HTTP replication request rather than using rsync. max_diffs 100 Maximum number of HTTP replication requests attempted on each replication pass for any one container. This caps how long the replicator will spend trying to sync a given database per pass so the other databases don't get starved. concurrency 8 Number of replication workers to spawn interval 30 Time in seconds to wait between replication passes node_timeout 10 Request timeout to external services conn_timeout 0.5 Connection timeout to external services reclaim_age 604800 Time elapsed in seconds before a container can be reclaimed rsync_module {replication_ip}::container Format of the rsync module where the replicator will send data. The configuration value can include some variables that will be extracted from the ring. Variables must follow the format {NAME} where NAME is one of: ip, port, replication_ip, replication_port, region, zone, device, meta. See etc/rsyncd.conf-sample for some examples. rsync_compress no Allow rsync to compress data which is transmitted to destination node during sync. However, this is applicable only when destination node is in a different region than the local one. NOTE: Objects that are already compressed (for example: .tar.gz, mp3) might slow down the syncing process. recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. 
ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ================== =========================== ============================= ******************* [container-updater] ******************* ======================== ================= ================================== Option Default Description ------------------------ ----------------- ---------------------------------- log_name container-updater Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory interval 300 Minimum time for a pass to take concurrency 4 Number of updater workers to spawn node_timeout 3 Request timeout to external services conn_timeout 0.5 Connection timeout to external services containers_per_second 50 Maximum containers updated per second. Should be tuned according to individual system specs. 0 is unlimited. slowdown 0.01 Time in seconds to wait between containers. Deprecated in favor of containers_per_second. account_suppression_time 60 Seconds to suppress updating an account that has generated an error (timeout, not yet found, etc.) recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ======================== ================= ================================== ******************* [container-auditor] ******************* ===================== ================= ======================================= Option Default Description --------------------- ----------------- --------------------------------------- log_name container-auditor Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory interval 1800 Minimum time for a pass to take containers_per_second 200 Maximum containers audited per second. Should be tuned according to individual system specs. 0 is unlimited. recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. 
I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ===================== ================= ======================================= ---------------------------- Account Server Configuration ---------------------------- An example Account Server configuration can be found at etc/account-server.conf-sample in the source code repository. The following configuration sections are available: * :ref:`[DEFAULT] ` * `[account-server]`_ * `[account-replicator]`_ * `[account-auditor]`_ * `[account-reaper]`_ .. _account_server_default_options: ********* [DEFAULT] ********* =============================== ========== ============================================= Option Default Description ------------------------------- ---------- --------------------------------------------- swift_dir /etc/swift Swift configuration directory devices /srv/node Parent directory or where devices are mounted mount_check true Whether or not check if the devices are mounted to prevent accidentally writing to the root device bind_ip 0.0.0.0 IP Address for server to bind to bind_port 6202 Port for server to bind to bind_timeout 30 Seconds to attempt bind before giving up backlog 4096 Maximum number of allowed pending connections workers auto Override the number of pre-forked workers that will accept connections. If set it should be an integer, zero means no fork. If unset, it will try to default to the number of effective cpu cores and fallback to one. Increasing the number of workers may reduce the possibility of slow file system operations in one request from negatively impacting other requests. See :ref:`general-service-tuning`. max_clients 1024 Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. user swift User to run as db_preallocation off If you don't mind the extra disk space usage in overhead, you can turn this on to preallocate disk space with SQLite databases to decrease fragmentation. disable_fallocate false Disable "fast fail" fallocate checks if the underlying filesystem does not support it. log_name swift Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory log_max_line_length 0 Caps the length of log lines to the value given; no limit if set to 0, the default. log_custom_handlers None Comma-separated list of functions to call to setup custom log handlers. log_udp_host Override log_address log_udp_port 514 UDP log port log_statsd_host None Enables StatsD logging; IPv4/IPv6 address or a hostname. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. log_statsd_port 8125 log_statsd_default_sample_rate 1.0 log_statsd_sample_rate_factor 1.0 log_statsd_metric_prefix eventlet_debug false If true, turn on debug logging for eventlet fallocate_reserve 1% You can set fallocate_reserve to the number of bytes or percentage of disk space you'd like fallocate to reserve, whether there is space for the given file size or not. Percentage will be used if the value ends with a '%'. This is useful for systems that behave badly when they completely run out of space; you can make the services pretend they're out of space early. nice_priority None Scheduling priority of server processes. 
**************** [account-server] **************** ============================= ============== ========================================== Option Default Description ----------------------------- -------------- ------------------------------------------ use Entry point for paste.deploy for the account server. For most cases, this should be `egg:swift#account`. set log_name account-server Label used when logging set log_facility LOG_LOCAL0 Syslog log facility set log_level INFO Logging level set log_requests True Whether or not to log each request set log_address /dev/log Logging directory auto_create_account_prefix . Prefix used when automatically creating accounts. replication_server Configure parameter for creating specific server. To handle all verbs, including replication verbs, do not specify "replication_server" (this is the default). To only handle replication, set to a True value (e.g. "True" or "1"). To handle only non-replication verbs, set to "False". Unless you have a separate replication network, you should not specify any value for "replication_server". nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ============================= ============== ========================================== ******************** [account-replicator] ******************** ================== ========================= =============================== Option Default Description ------------------ ------------------------- ------------------------------- log_name account-replicator Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory per_diff 1000 Maximum number of database rows that will be sync'd in a single HTTP replication request. Databases with less than or equal to this number of differing rows will always be sync'd using an HTTP replication request rather than using rsync.
max_diffs 100 Maximum number of HTTP replication requests attempted on each replication pass for any one container. This caps how long the replicator will spend trying to sync a given database per pass so the other databases don't get starved. concurrency 8 Number of replication workers to spawn interval 30 Time in seconds to wait between replication passes node_timeout 10 Request timeout to external services conn_timeout 0.5 Connection timeout to external services reclaim_age 604800 Time elapsed in seconds before an account can be reclaimed rsync_module {replication_ip}::account Format of the rsync module where the replicator will send data. The configuration value can include some variables that will be extracted from the ring. Variables must follow the format {NAME} where NAME is one of: ip, port, replication_ip, replication_port, region, zone, device, meta. See etc/rsyncd.conf-sample for some examples. rsync_compress no Allow rsync to compress data which is transmitted to destination node during sync. However, this is applicable only when destination node is in a different region than the local one. NOTE: Objects that are already compressed (for example: .tar.gz, mp3) might slow down the syncing process. recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ================== ========================= =============================== ***************** [account-auditor] ***************** ==================== ================ ======================================= Option Default Description -------------------- ---------------- --------------------------------------- log_name account-auditor Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory interval 1800 Minimum time for a pass to take accounts_per_second 200 Maximum accounts audited per second. Should be tuned according to individual system specs. 0 is unlimited. recon_cache_path /var/cache/swift Path to recon cache nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. 
Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ==================== ================ ======================================= **************** [account-reaper] **************** ================== =============== ========================================= Option Default Description ------------------ --------------- ----------------------------------------- log_name account-reaper Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_address /dev/log Logging directory concurrency 25 Number of replication workers to spawn interval 3600 Minimum time for a pass to take node_timeout 10 Request timeout to external services conn_timeout 0.5 Connection timeout to external services delay_reaping 0 Normally, the reaper begins deleting account information for deleted accounts immediately; you can set this to delay its work however. The value is in seconds, 2592000 = 30 days, for example. The sum of this value and the container-updater ``interval`` should be less than the account-replicator ``reclaim_age``. This ensures that once the account-reaper has deleted a container there is sufficient time for the container-updater to report to the account before the account DB is removed. reap_warn_after 2892000 If the account fails to be reaped due to a persistent error, the account reaper will log a message such as: Account <name> has not been reaped since <time>. You can search logs for this message if space is not being reclaimed after you delete account(s). This is in addition to any time requested by delay_reaping. nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. ================== =============== ========================================= .. _proxy-server-config: -------------------------- Proxy Server Configuration -------------------------- An example Proxy Server configuration can be found at etc/proxy-server.conf-sample in the source code repository. The following configuration sections are available: * :ref:`[DEFAULT] <proxy_server_default_options>` * `[proxy-server]`_ * Individual sections for `Proxy middlewares`_ .. _proxy_server_default_options: ********* [DEFAULT] ********* ==================================== ======================== ======================================== Option Default Description ------------------------------------ ------------------------ ---------------------------------------- bind_ip 0.0.0.0 IP Address for server to bind to bind_port 80 Port for server to bind to bind_timeout 30 Seconds to attempt bind before giving up backlog 4096 Maximum number of allowed pending connections swift_dir /etc/swift Swift configuration directory workers auto Override the number of pre-forked workers that will accept connections. If set it should be an integer, zero means no fork.
If unset, it will try to default to the number of effective cpu cores and fallback to one. See :ref:`general-service-tuning`. max_clients 1024 Maximum number of clients one worker can process simultaneously (it will actually accept(2) N + 1). Setting this to one (1) will only handle one request at a time, without accepting another request concurrently. user swift User to run as cert_file Path to the ssl .crt. This should be enabled for testing purposes only. key_file Path to the ssl .key. This should be enabled for testing purposes only. cors_allow_origin This is a list of hosts that are included with any CORS request by default and returned with the Access-Control-Allow-Origin header in addition to what the container has set. strict_cors_mode True cors_expose_headers This is a list of headers that are included in the header Access-Control-Expose-Headers in addition to what the container has set. client_timeout 60 trans_id_suffix This optional suffix (default is empty) that would be appended to the swift transaction id allows one to easily figure out from which cluster that X-Trans-Id belongs to. This is very useful when one is managing more than one swift cluster. log_name swift Label used when logging log_facility LOG_LOCAL0 Syslog log facility log_level INFO Logging level log_headers False log_address /dev/log Logging directory log_max_line_length 0 Caps the length of log lines to the value given; no limit if set to 0, the default. log_custom_handlers None Comma separated list of functions to call to setup custom log handlers. log_udp_host Override log_address log_udp_port 514 UDP log port log_statsd_host None Enables StatsD logging; IPv4/IPv6 address or a hostname. If a hostname resolves to an IPv4 and IPv6 address, the IPv4 address will be used. log_statsd_port 8125 log_statsd_default_sample_rate 1.0 log_statsd_sample_rate_factor 1.0 log_statsd_metric_prefix eventlet_debug false If true, turn on debug logging for eventlet expose_info true Enables exposing configuration settings via HTTP GET /info. admin_key Key to use for admin calls that are HMAC signed. Default is empty, which will disable admin calls to /info. disallowed_sections swift.valid_api_versions Allows the ability to withhold sections from showing up in the public calls to /info. You can withhold subsections by separating the dict level with a ".". expiring_objects_container_divisor 86400 expiring_objects_account_name expiring_objects nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. 
==================================== ======================== ======================================== ************** [proxy-server] ************** ====================================== =============== ===================================== Option Default Description -------------------------------------- --------------- ------------------------------------- use Entry point for paste.deploy for the proxy server. For most cases, this should be `egg:swift#proxy`. set log_name proxy-server Label used when logging set log_facility LOG_LOCAL0 Syslog log facility set log_level INFO Log level set log_headers True If True, log headers in each request set log_handoffs True If True, the proxy will log whenever it has to failover to a handoff node recheck_account_existence 60 Cache timeout in seconds to send memcached for account existence recheck_container_existence 60 Cache timeout in seconds to send memcached for container existence object_chunk_size 65536 Chunk size to read from object servers client_chunk_size 65536 Chunk size to read from clients memcache_servers 127.0.0.1:11211 Comma separated list of memcached servers ip:port or [ipv6addr]:port memcache_max_connections 2 Max number of connections to each memcached server per worker node_timeout 10 Request timeout to external services recoverable_node_timeout node_timeout Request timeout to external services for requests that, on failure, can be recovered from. For example, object GET. client_timeout 60 Timeout to read one chunk from a client conn_timeout 0.5 Connection timeout to external services error_suppression_interval 60 Time in seconds that must elapse since the last error for a node to be considered no longer error limited error_suppression_limit 10 Error count to consider a node error limited allow_account_management false Whether account PUTs and DELETEs are even callable account_autocreate false If set to 'true' authorized accounts that do not yet exist within the Swift cluster will be automatically created. max_containers_per_account 0 If set to a positive value, trying to create a container when the account already has at least this maximum containers will result in a 403 Forbidden. Note: This is a soft limit, meaning a user might exceed the cap for recheck_account_existence before the 403s kick in. max_containers_whitelist This is a comma separated list of account names that ignore the max_containers_per_account cap. rate_limit_after_segment 10 Rate limit the download of large object segments after this segment is downloaded. rate_limit_segments_per_sec 1 Rate limit large object downloads at this rate. request_node_count 2 * replicas Set to the number of nodes to contact for a normal request. You can use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request. swift_owner_headers <see the sample conf file for the list of default headers> These are the headers whose values will only be shown to swift_owners. The exact definition of a swift_owner is up to the auth system in use, but usually indicates administrative responsibilities. sorting_method shuffle Storage nodes can be chosen at random (shuffle), by using timing measurements (timing), or by using an explicit match (affinity). Using timing measurements may allow for lower overall latency, while using affinity allows for finer control. In both the timing and affinity cases, equally-sorting nodes are still randomly chosen to spread load. This option may be overridden in a per-policy configuration section. timing_expiry 300 If the "timing" sorting_method is used, the timings will only be valid for the number of seconds configured by timing_expiry.
concurrent_gets off Use replica count number of threads concurrently during a GET/HEAD and return with the first successful response. In the EC case, this parameter only affects an EC HEAD as an EC GET behaves differently. concurrency_timeout conn_timeout This parameter controls how long to wait before firing off the next concurrent_get thread. A value of 0 would be fully concurrent, any other number will stagger the firing of the threads. This number should be between 0 and node_timeout. The default is conn_timeout (0.5). nice_priority None Scheduling priority of server processes. Niceness values range from -20 (most favorable to the process) to 19 (least favorable to the process). The default does not modify priority. ionice_class None I/O scheduling class of server processes. I/O niceness class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort), and IOPRIO_CLASS_IDLE (idle). The default does not modify class and priority. Linux supports io scheduling priorities and classes since 2.6.13 with the CFQ io scheduler. Work only with ionice_priority. ionice_priority None I/O scheduling priority of server processes. I/O niceness priority is a number which goes from 0 to 7. The higher the value, the lower the I/O priority of the process. Work only with ionice_class. Ignored if IOPRIO_CLASS_IDLE is set. read_affinity None Specifies which backend servers to prefer on reads; used in conjunction with the sorting_method option being set to 'affinity'. Format is a comma separated list of affinity descriptors of the form <selection>=<priority>. The <selection> may be r<N> for selecting nodes in region N or r<N>z<M> for selecting nodes in region N, zone M. The <priority> value should be a whole number that represents the priority to be given to the selection; lower numbers are higher priority. Default is empty, meaning no preference. This option may be overridden in a per-policy configuration section. write_affinity None Specifies which backend servers to prefer on writes. Format is a comma separated list of affinity descriptors of the form r<N> for region N or r<N>z<M> for region N, zone M. Default is empty, meaning no preference. This option may be overridden in a per-policy configuration section. write_affinity_node_count 2 * replicas The number of local (as governed by the write_affinity setting) nodes to attempt to contact first on writes, before any non-local ones. The value should be an integer number, or use '* replicas' at the end to have it use the number given times the number of replicas for the ring being used for the request. This option may be overridden in a per-policy configuration section. write_affinity_handoff_delete_count auto The number of local (as governed by the write_affinity setting) handoff nodes to attempt to contact on deletion, in addition to primary nodes. Example: in a geographically distributed deployment, if replicas=3, there may sometimes be 1 primary node and 2 local handoff nodes in one region holding an object after upload but before the object has been replicated to its appropriate locations in other regions. In this case, sending the DELETE request to these handoff nodes as well helps the proxy make a correct decision for the response. The default value 'auto' means Swift will calculate the number automatically; the calculated value is (replicas - len(local_primary_nodes)). This option may be overridden in a per-policy configuration section. ====================================== =============== =====================================
.. _proxy_server_per_policy_config: ************************ Per policy configuration ************************ Some proxy-server configuration options may be overridden for individual :doc:`overview_policies` by including per-policy config section(s). These options are: - ``sorting_method`` - ``read_affinity`` - ``write_affinity`` - ``write_affinity_node_count`` - ``write_affinity_handoff_delete_count`` The per-policy config section name must be of the form::

    [proxy-server:policy:<N>]

.. note:: The per-policy config section name should refer to the policy index, not the policy name. .. note:: The first part of proxy-server config section name must match the name of the proxy-server config section. This is typically ``proxy-server`` as shown above, but if different then the names of any per-policy config sections must be changed accordingly. The value of an option specified in a per-policy section will override any value given in the proxy-server section for that policy only. Otherwise the value of these options will be that specified in the proxy-server section. For example, the following section provides policy-specific options for a policy with index ``3``::

    [proxy-server:policy:3]
    sorting_method = affinity
    read_affinity = r2=1
    write_affinity = r2
    write_affinity_node_count = 1 * replicas
    write_affinity_handoff_delete_count = 2

.. note:: It is recommended that per-policy config options are *not* included in the ``[DEFAULT]`` section. If they are then the following behavior applies. Per-policy config sections will inherit options in the ``[DEFAULT]`` section of the config file, and any such inheritance will take precedence over inheriting options from the proxy-server config section. Per-policy config section options will override options in the ``[DEFAULT]`` section. Unlike the behavior described under `General Server Configuration`_ for paste-deploy ``filter`` and ``app`` sections, the ``set`` keyword is not required for options to override in per-policy config sections. For example, given the following settings in a config file::

    [DEFAULT]
    sorting_method = affinity
    read_affinity = r0=100
    write_affinity = r0

    [app:proxy-server]
    use = egg:swift#proxy
    # use of set keyword here overrides [DEFAULT] option
    set read_affinity = r1=100
    # without set keyword, [DEFAULT] option overrides in a paste-deploy section
    write_affinity = r1

    [proxy-server:policy:0]
    sorting_method = affinity
    # set keyword not required here to override [DEFAULT] option
    write_affinity = r1

would result in policy with index ``0`` having settings: * ``read_affinity = r0=100`` (inherited from the ``[DEFAULT]`` section) * ``write_affinity = r1`` (specified in the policy 0 section) and any other policy would have the default settings of: * ``read_affinity = r1=100`` (set in the proxy-server section) * ``write_affinity = r0`` (inherited from the ``[DEFAULT]`` section) ***************** Proxy Middlewares ***************** Many features in Swift are implemented as middleware in the proxy-server pipeline. See :doc:`middleware` and the ``proxy-server.conf-sample`` file for more information. In particular, the use of some type of :doc:`authentication and authorization middleware <overview_auth>` is highly recommended. ------------------------ Memcached Considerations ------------------------ Several of the Services rely on Memcached for caching certain types of lookups, such as auth tokens, and container/account existence. Swift does not do any caching of actual object data. Memcached should be able to run on any servers that have available RAM and CPU. At Rackspace, we run Memcached on the proxy servers. The `memcache_servers` config option in the `proxy-server.conf` should contain all memcached servers.
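For example, a proxy whose cache lives on two dedicated hosts might configure the memcache filter as follows (the addresses are hypothetical)::

    [filter:cache]
    use = egg:swift#memcache
    memcache_servers = 10.0.0.1:11211,10.0.0.2:11211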
----------- System Time ----------- Time may be relative but it is relatively important for Swift! Swift uses timestamps to determine which is the most recent version of an object. It is very important for the system time on each server in the cluster to be synced as closely as possible (more so for the proxy server, but in general it is a good idea for all the servers). At Rackspace, we use NTP with a local NTP server to ensure that the system times are as close as possible. This should also be monitored to ensure that the times do not vary too much. .. _general-service-tuning: ---------------------- General Service Tuning ---------------------- Most services support either a `worker` or `concurrency` value in the settings. This allows the services to make effective use of the cores available. A good starting point is to set the concurrency level for the proxy and storage services to 2 times the number of cores available. If more than one service is sharing a server, then some experimentation may be needed to find the best balance. At Rackspace, our Proxy servers have dual quad core processors, giving us 8 cores. Our testing has shown 16 workers to be a pretty good balance when saturating a 10g network and gives good CPU utilization. Our Storage server processes all run together on the same servers. These servers have dual quad core processors, for 8 cores total. We run the Account, Container, and Object servers with 8 workers each. Most of the background jobs are run at a concurrency of 1, with the exception of the replicators which are run at a concurrency of 2. The `max_clients` parameter can be used to adjust the number of client requests an individual worker accepts for processing. The fewer requests being processed at one time, the less likely a request that consumes the worker's CPU time, or blocks in the OS, will negatively impact other requests. The more requests being processed at one time, the more likely one worker can utilize network and disk capacity. On systems that have more cores, and more memory, where one can afford to run more workers, raising the number of workers and lowering the maximum number of clients serviced per worker can lessen the impact of CPU intensive or stalled requests. The `nice_priority` parameter can be used to set program scheduling priority. The `ionice_class` and `ionice_priority` parameters can be used to set I/O scheduling class and priority on the systems that use an I/O scheduler that supports I/O priorities. As of kernel 2.6.17 the only such scheduler is the Completely Fair Queuing (CFQ) I/O scheduler. If you run your Storage servers all together on the same servers, you can slow down the auditors or prioritize object-server I/O via these parameters (but probably do not need to change them on the proxy). It is a new feature and the best practices are still being developed. On some systems it may be required to run the daemons as root. For more info also see setpriority(2) and ioprio_set(2). The above configuration settings should be taken as suggestions, and testing of configuration settings should be done to ensure best utilization of CPU, network connectivity, and disk I/O. ------------------------- Filesystem Considerations ------------------------- Swift is designed to be mostly filesystem agnostic--the only requirement being that the filesystem supports extended attributes (xattrs).
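One quick way to confirm xattr support on a candidate filesystem is to write a test attribute and read it back with the `setfattr`/`getfattr` tools from the attr package (the mount point below is illustrative)::

    touch /srv/node/sda/xattr_test
    setfattr -n user.swift.test -v 1 /srv/node/sda/xattr_test
    getfattr -n user.swift.test /srv/node/sda/xattr_test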
After thorough testing with our use cases and hardware configurations, XFS was the best all-around choice. If you decide to use a filesystem other than XFS, we highly recommend thorough testing. For distros with more recent kernels (for example Ubuntu 12.04 Precise), we recommend using the default settings (including the default inode size of 256 bytes) when creating the file system::

    mkfs.xfs /dev/sda1

In the last couple of years, XFS has made great improvements in how inodes are allocated and used. Using the default inode size no longer has an impact on performance. For distros with older kernels (for example Ubuntu 10.04 Lucid), some settings can dramatically impact performance. We recommend the following when creating the file system::

    mkfs.xfs -i size=1024 /dev/sda1

Setting the inode size is important, as XFS stores xattr data in the inode. If the metadata is too large to fit in the inode, a new extent is created, which can cause quite a performance problem. Upping the inode size to 1024 bytes provides enough room to write the default metadata, plus a little headroom. The following example mount options are recommended when using XFS::

    mount -t xfs -o noatime,nodiratime,nobarrier,logbufs=8 /dev/sda1 /srv/node/sda

We do not recommend running Swift on RAID, but if you are using RAID it is also important to make sure that the proper sunit and swidth settings get set so that XFS can make most efficient use of the RAID array. For a standard Swift install, all data drives are mounted directly under ``/srv/node`` (as can be seen in the above example of mounting ``/dev/sda1`` as ``/srv/node/sda``). If you choose to mount the drives in another directory, be sure to set the `devices` config option in all of the server configs to point to the correct directory. The mount points for each drive in ``/srv/node/`` should be owned by the root user (``root:root 755``). This is required to prevent rsync from syncing files into the root drive in the event a drive is unmounted. Swift uses system calls to reserve space for new objects being written into the system. If your filesystem does not support `fallocate()` or `posix_fallocate()`, be sure to set the `disable_fallocate = true` config parameter in account, container, and object server configs. Most current Linux distributions ship with a default installation of updatedb. This tool runs periodically and updates the file name database that is used by the GNU locate tool. However, including Swift object and container database files is most likely not required and the periodic update affects the performance quite a bit. To disable the inclusion of these files add the path where Swift stores its data to the setting PRUNEPATHS in `/etc/updatedb.conf`::

    PRUNEPATHS="... /tmp ... /var/spool ... /srv/node"

--------------------- General System Tuning --------------------- Rackspace currently runs Swift on Ubuntu Server 10.04, and the following changes have been found to be useful for our use cases. The following settings should be in `/etc/sysctl.conf`::

    # disable TIME_WAIT.. wait..
    net.ipv4.tcp_tw_recycle=1
    net.ipv4.tcp_tw_reuse=1

    # disable syn cookies
    net.ipv4.tcp_syncookies = 0

    # double amount of allowed conntrack
    net.ipv4.netfilter.ip_conntrack_max = 262144

To load the updated sysctl settings, run ``sudo sysctl -p``.
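To gauge whether TIME_WAIT pressure is actually a problem on a busy proxy before changing these values, counting lingering sockets can help; for example, with the iproute2 `ss` tool::

    ss -tan state time-wait | wc -l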
A note about changing the TIME_WAIT values. By default the OS will hold a port open for 60 seconds to ensure that any remaining packets can be received. During high usage, and with the number of connections that are created, it is easy to run out of ports. We can change this since we are in control of the network. If you are not in control of the network, or do not expect high loads, then you may not want to adjust those values. ---------------------- Logging Considerations ---------------------- Swift is set up to log directly to syslog. Every service can be configured with the `log_facility` option to set the syslog log facility destination. We recommend using syslog-ng to route the logs to specific log files locally on the server and also to remote log collecting servers. Additionally, custom log handlers can be used via the custom_log_handlers setting. swift-2.17.0/doc/source/development_middleware.rst0000666000175100017510000003161413236061617022313 0ustar zuulzuul00000000000000======================= Middleware and Metadata ======================= ---------------- Using Middleware ---------------- `Python WSGI Middleware`_ (or just "middleware") can be used to "wrap" the request and response of a Python WSGI application (i.e. a webapp, or REST/HTTP API), like Swift's WSGI servers (proxy-server, account-server, container-server, object-server). Swift uses middleware to add (sometimes optional) behaviors to the Swift WSGI servers. .. _Python WSGI Middleware: http://www.python.org/dev/peps/pep-0333/#middleware-components-that-play-both-sides Middleware can be added to the Swift WSGI servers by modifying their `paste`_ configuration file. The majority of Swift middleware is applied to the :ref:`proxy-server`. .. _paste: http://pythonpaste.org/ Given the following basic configuration::

    [DEFAULT]
    log_level = DEBUG
    user = <your-user-name>

    [pipeline:main]
    pipeline = proxy-server

    [app:proxy-server]
    use = egg:swift#proxy

You could add the :ref:`healthcheck` middleware by adding a section for that filter and adding it to the pipeline::

    [DEFAULT]
    log_level = DEBUG
    user = <your-user-name>

    [pipeline:main]
    pipeline = healthcheck proxy-server

    [filter:healthcheck]
    use = egg:swift#healthcheck

    [app:proxy-server]
    use = egg:swift#proxy

Some middleware is required and will be inserted into your pipeline automatically by core swift code (e.g. the proxy-server will insert :ref:`catch_errors` and :ref:`gatekeeper` at the start of the pipeline if they are not already present). You can see which features are available on a given Swift endpoint (including middleware) using the :ref:`discoverability` interface. ---------------------------- Creating Your Own Middleware ---------------------------- The best way to see how to write middleware is to look at examples. Many optional features in Swift are implemented as :ref:`common_middleware` and provided in ``swift.common.middleware``, but Swift middleware may be packaged and distributed as a separate project. Some examples are listed on the :ref:`associated_projects` page.
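Before the fuller example below, it may help to see the smallest possible shape of a WSGI middleware; this sketch (the names are made up) simply wraps the app and passes every request through unchanged::

    class PassthroughMiddleware(object):
        def __init__(self, app):
            self.app = app

        def __call__(self, env, start_response):
            # inspect or annotate the WSGI environ here,
            # then delegate to the wrapped application
            return self.app(env, start_response)


    def filter_factory(global_conf, **local_conf):
        def passthrough_filter(app):
            return PassthroughMiddleware(app)
        return passthrough_filter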
A contrived middleware example that modifies request behavior by inspecting custom HTTP headers (e.g. X-Webhook) and uses :ref:`sysmeta` to persist data to backend storage as well as common patterns like a :func:`.get_container_info` cache/query and :func:`.wsgify` decorator is presented below::

    from swift.common.http import is_success
    from swift.common.swob import wsgify
    from swift.common.utils import split_path, get_logger
    from swift.common.request_helpers import get_sys_meta_prefix
    from swift.proxy.controllers.base import get_container_info

    from eventlet import Timeout
    import six

    if six.PY3:
        from eventlet.green.urllib import request as urllib2
    else:
        from eventlet.green import urllib2

    # x-container-sysmeta-webhook
    SYSMETA_WEBHOOK = get_sys_meta_prefix('container') + 'webhook'


    class WebhookMiddleware(object):

        def __init__(self, app, conf):
            self.app = app
            self.logger = get_logger(conf, log_route='webhook')

        @wsgify
        def __call__(self, req):
            obj = None
            try:
                (version, account, container, obj) = \
                    split_path(req.path_info, 4, 4, True)
            except ValueError:
                # not an object request
                pass
            if 'x-webhook' in req.headers:
                # translate user's request header to sysmeta
                req.headers[SYSMETA_WEBHOOK] = \
                    req.headers['x-webhook']
            if 'x-remove-webhook' in req.headers:
                # empty value will tombstone sysmeta
                req.headers[SYSMETA_WEBHOOK] = ''
            # account and object storage will ignore x-container-sysmeta-*
            resp = req.get_response(self.app)
            if obj and is_success(resp.status_int) and req.method == 'PUT':
                container_info = get_container_info(req.environ, self.app)
                # container_info may have our new sysmeta key
                webhook = container_info['sysmeta'].get('webhook')
                if webhook:
                    # create a POST request with obj name as body
                    webhook_req = urllib2.Request(webhook, data=obj)
                    with Timeout(20):
                        try:
                            urllib2.urlopen(webhook_req).read()
                        except (Exception, Timeout):
                            self.logger.exception(
                                'failed POST to webhook %s' % webhook)
                        else:
                            self.logger.info(
                                'successfully called webhook %s' % webhook)
            if 'x-container-sysmeta-webhook' in resp.headers:
                # translate sysmeta from the backend resp to
                # user-visible client resp header
                resp.headers['x-webhook'] = resp.headers[SYSMETA_WEBHOOK]
            return resp


    def webhook_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def webhook_filter(app):
            # pass the merged paste-deploy conf through to the middleware
            return WebhookMiddleware(app, conf)

        return webhook_filter

In practice this middleware will call the URL stored on the container as X-Webhook on all successful object uploads. If this example was at ``<swift-repo>/swift/common/middleware/webhook.py`` - you could add it to your proxy by creating a new filter section and adding it to the pipeline::

    [DEFAULT]
    log_level = DEBUG
    user = <your-user-name>

    [pipeline:main]
    pipeline = healthcheck webhook proxy-server

    [filter:webhook]
    paste.filter_factory = swift.common.middleware.webhook:webhook_factory

    [filter:healthcheck]
    use = egg:swift#healthcheck

    [app:proxy-server]
    use = egg:swift#proxy

Most python packages expose middleware as entrypoints. See `PasteDeploy`_ documentation for more information about the syntax of the ``use`` option. All middleware included with Swift is installed to support the ``egg:swift`` syntax.
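Assuming the filter section above is in place, the middleware could be exercised end-to-end roughly like this (the host, account, object, and token are hypothetical)::

    # store a webhook URL on the container
    curl -X POST -H 'X-Auth-Token: <token>' \
        -H 'X-Webhook: http://hook.example.com/notify' \
        http://127.0.0.1:8080/v1/AUTH_test/pics

    # a successful object PUT should now trigger a POST to the webhook
    curl -X PUT -H 'X-Auth-Token: <token>' --data-binary @kitten.jpg \
        http://127.0.0.1:8080/v1/AUTH_test/pics/kitten.jpg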
.. _PasteDeploy: http://pythonpaste.org/deploy/#egg-uris Middleware may advertise its availability and capabilities via Swift's :ref:`discoverability` support by using :func:`.register_swift_info`::

    from swift.common.utils import register_swift_info

    def webhook_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)
        register_swift_info('webhook')

        def webhook_filter(app):
            return WebhookMiddleware(app, conf)

        return webhook_filter

-------------- Swift Metadata -------------- Generally speaking metadata is information about a resource that is associated with the resource but is not the data contained in the resource itself - which is set and retrieved via HTTP headers. (e.g. the "Content-Type" of a Swift object that is returned in HTTP response headers) All user resources in Swift (i.e. account, container, objects) can have user metadata associated with them. Middleware may also persist custom metadata to accounts and containers safely using System Metadata. Some core Swift features which predate sysmeta have added exceptions for custom non-user metadata headers (e.g. :ref:`acls`, :ref:`large-objects`) .. _usermeta: ^^^^^^^^^^^^^ User Metadata ^^^^^^^^^^^^^ User metadata takes the form of ``X-<type>-Meta-<key>: <value>``, where ``<type>`` depends on the resource type (i.e. Account, Container, Object) and ``<key>`` and ``<value>`` are set by the client. User metadata should generally be reserved for use by the client or client applications. A perfect example use-case for user metadata is `python-swiftclient`_'s ``X-Object-Meta-Mtime`` which it stores on objects it uploads to implement its ``--changed`` option which will only upload files that have changed since the last upload. .. _python-swiftclient: https://github.com/openstack/python-swiftclient New middleware should avoid storing metadata within the User Metadata namespace to avoid potential conflict with existing user metadata when introducing new metadata keys. An example of legacy middleware that borrows the user metadata namespace is :ref:`tempurl`. An example of middleware which uses custom non-user metadata to avoid the user metadata namespace is :ref:`slo-doc`. User metadata that is stored by a PUT or POST request to a container or account resource persists until it is explicitly removed by a subsequent PUT or POST request that includes a header ``X-<type>-Meta-<key>`` with no value or a header ``X-Remove-<type>-Meta-<key>: <ignored-value>``. In the latter case the ``<ignored-value>`` is not stored. All user metadata stored with an account or container resource is deleted when the account or container is deleted. User metadata that is stored with an object resource has a different semantic; object user metadata persists until any subsequent PUT or POST request is made to the same object, at which point all user metadata stored with that object is deleted en-masse and replaced with any user metadata included with the PUT or POST request. As a result, it is not possible to update a subset of the user metadata items stored with an object while leaving some items unchanged. .. _sysmeta: ^^^^^^^^^^^^^^^ System Metadata ^^^^^^^^^^^^^^^ System metadata takes the form of ``X-<type>-Sysmeta-<key>: <value>``, where ``<type>`` depends on the resource type (i.e. Account, Container, Object) and ``<key>`` and ``<value>`` are set by trusted code running in a Swift WSGI Server. All headers on client requests in the form of ``X-<type>-Sysmeta-<key>`` will be dropped from the request before being processed by any middleware. All headers on responses from back-end systems in the form of ``X-<type>-Sysmeta-<key>`` will be removed after all middlewares have processed the response but before the response is sent to the client. See :ref:`gatekeeper` middleware for more information.
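The practical effect is that a client cannot set or spoof sysmeta directly; a request such as the following (token and names hypothetical) will reach the backend with the sysmeta header silently stripped by :ref:`gatekeeper`::

    curl -X POST -H 'X-Auth-Token: <token>' \
        -H 'X-Container-Sysmeta-Webhook: http://attacker.example.com/' \
        http://127.0.0.1:8080/v1/AUTH_test/pics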
System metadata provides a means to store potentially private custom metadata with associated Swift resources in a safe and secure fashion without actually having to plumb custom metadata through the core swift servers. The incoming filtering ensures that the namespace cannot be modified directly by client requests, and the outgoing filter ensures that removing middleware that uses a specific system metadata key renders it benign. New middleware should take advantage of system metadata. System metadata may be set on accounts and containers by including headers with a PUT or POST request. Where a header name matches the name of an existing item of system metadata, the value of the existing item will be updated. Otherwise existing items are preserved. A system metadata header with an empty value will cause any existing item with the same name to be deleted. System metadata may be set on objects using only PUT requests. All items of existing system metadata will be deleted and replaced en-masse by any system metadata headers included with the PUT request. System metadata is neither updated nor deleted by a POST request: updating individual items of system metadata with a POST request is not yet supported in the same way that updating individual items of user metadata is not supported. In cases where middleware needs to store its own metadata with a POST request, it may use Object Transient Sysmeta. .. _transient_sysmeta: ^^^^^^^^^^^^^^^^^^^^^^^^ Object Transient-Sysmeta ^^^^^^^^^^^^^^^^^^^^^^^^ If middleware needs to store object metadata with a POST request it may do so using headers of the form ``X-Object-Transient-Sysmeta-<key>: <value>``. All headers on client requests in the form of ``X-Object-Transient-Sysmeta-<key>`` will be dropped from the request before being processed by any middleware. All headers on responses from back-end systems in the form of ``X-Object-Transient-Sysmeta-<key>`` will be removed after all middlewares have processed the response but before the response is sent to the client. See :ref:`gatekeeper` middleware for more information. Transient-sysmeta updates on an object have the same semantic as user metadata updates on an object (see :ref:`usermeta`) i.e. whenever any PUT or POST request is made to an object, all existing items of transient-sysmeta are deleted en-masse and replaced with any transient-sysmeta included with the PUT or POST request. Transient-sysmeta set by a middleware is therefore prone to deletion by a subsequent client-generated POST request unless the middleware is careful to include its transient-sysmeta with every POST. Likewise, user metadata set by a client is prone to deletion by a subsequent middleware-generated POST request, and for that reason middleware should avoid generating POST requests that are independent of any client request. Transient-sysmeta deliberately uses a different header prefix from user metadata so that middlewares can avoid potential conflict with user metadata keys. Transient-sysmeta deliberately uses a different header prefix from system metadata to emphasize the fact that the data is only persisted until a subsequent POST. swift-2.17.0/doc/source/container.rst0000666000175100017510000000222613236061617017553 0ustar zuulzuul00000000000000.. _Container: ********* Container ********* ..
_container-auditor: Container Auditor ================= .. automodule:: swift.container.auditor :members: :undoc-members: :show-inheritance: .. _container-backend: Container Backend ================= .. automodule:: swift.container.backend :members: :undoc-members: :show-inheritance: .. _container-server: Container Server ================ .. automodule:: swift.container.server :members: :undoc-members: :show-inheritance: .. _container-reconciler: Container Reconciler ==================== .. automodule:: swift.container.reconciler :members: :undoc-members: :show-inheritance: .. _container-replicator: Container Replicator ==================== .. automodule:: swift.container.replicator :members: :undoc-members: :show-inheritance: .. _container-sync-daemon: Container Sync ============== .. automodule:: swift.container.sync :members: :undoc-members: :show-inheritance: .. _container-updater: Container Updater ================= .. automodule:: swift.container.updater :members: :undoc-members: :show-inheritance: swift-2.17.0/swift.egg-info/0000775000175100017510000000000013236061751015613 5ustar zuulzuul00000000000000swift-2.17.0/swift.egg-info/requires.txt0000664000175100017510000000035213236061747020220 0ustar zuulzuul00000000000000dnspython>=1.14.0 eventlet>=0.17.4 greenlet>=0.3.1 netifaces!=0.10.0,!=0.10.1,>=0.5 pastedeploy>=1.3.3 six>=1.9.0 xattr>=0.4 PyECLib>=1.3.1 cryptography!=2.0,>=1.6 [kms_keymaster] oslo.config!=4.3.0,!=4.4.0,>=4.0.0 castellan>=0.13.0 swift-2.17.0/swift.egg-info/not-zip-safe0000664000175100017510000000000113236061733020041 0ustar zuulzuul00000000000000 swift-2.17.0/swift.egg-info/top_level.txt0000664000175100017510000000000613236061747020346 0ustar zuulzuul00000000000000swift swift-2.17.0/swift.egg-info/dependency_links.txt0000664000175100017510000000000113236061747021666 0ustar zuulzuul00000000000000 swift-2.17.0/swift.egg-info/entry_points.txt0000664000175100017510000000424713236061747021125 0ustar zuulzuul00000000000000[paste.app_factory] account = swift.account.server:app_factory container = swift.container.server:app_factory mem_object = swift.obj.mem_server:app_factory object = swift.obj.server:app_factory proxy = swift.proxy.server:app_factory [paste.filter_factory] account_quotas = swift.common.middleware.account_quotas:filter_factory bulk = swift.common.middleware.bulk:filter_factory catch_errors = swift.common.middleware.catch_errors:filter_factory cname_lookup = swift.common.middleware.cname_lookup:filter_factory container_quotas = swift.common.middleware.container_quotas:filter_factory container_sync = swift.common.middleware.container_sync:filter_factory copy = swift.common.middleware.copy:filter_factory crossdomain = swift.common.middleware.crossdomain:filter_factory dlo = swift.common.middleware.dlo:filter_factory domain_remap = swift.common.middleware.domain_remap:filter_factory encryption = swift.common.middleware.crypto:filter_factory formpost = swift.common.middleware.formpost:filter_factory gatekeeper = swift.common.middleware.gatekeeper:filter_factory healthcheck = swift.common.middleware.healthcheck:filter_factory keymaster = swift.common.middleware.crypto.keymaster:filter_factory keystoneauth = swift.common.middleware.keystoneauth:filter_factory kms_keymaster = swift.common.middleware.crypto.kms_keymaster:filter_factory list_endpoints = swift.common.middleware.list_endpoints:filter_factory listing_formats = swift.common.middleware.listing_formats:filter_factory memcache = swift.common.middleware.memcache:filter_factory name_check = 
swift.common.middleware.name_check:filter_factory proxy_logging = swift.common.middleware.proxy_logging:filter_factory ratelimit = swift.common.middleware.ratelimit:filter_factory recon = swift.common.middleware.recon:filter_factory slo = swift.common.middleware.slo:filter_factory staticweb = swift.common.middleware.staticweb:filter_factory symlink = swift.common.middleware.symlink:filter_factory tempauth = swift.common.middleware.tempauth:filter_factory tempurl = swift.common.middleware.tempurl:filter_factory versioned_writes = swift.common.middleware.versioned_writes:filter_factory xprofile = swift.common.middleware.xprofile:filter_factory swift-2.17.0/swift.egg-info/pbr.json0000664000175100017510000000005613236061747017277 0ustar zuulzuul00000000000000{"git_version": "32d1b32", "is_release": true}swift-2.17.0/swift.egg-info/PKG-INFO0000664000175100017510000001727713236061747016733 0ustar zuulzuul00000000000000Metadata-Version: 1.1 Name: swift Version: 2.17.0 Summary: OpenStack Object Storage Home-page: https://docs.openstack.org/swift/latest/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description-Content-Type: UNKNOWN Description: ======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/badges/swift.svg :target: https://governance.openstack.org/reference/tags/index.html .. Change things from this point on Swift ===== A distributed object storage system designed to scale from a single machine to thousands of servers. Swift is optimized for multi-tenancy and high concurrency. Swift is ideal for backups, web and mobile content, and any other unstructured data that can grow without bound. Swift provides a simple, REST-based API fully documented at https://docs.openstack.org/. Swift was originally developed as the basis for Rackspace's Cloud Files and was open-sourced in 2010 as part of the OpenStack project. It has since grown to include contributions from many companies and has spawned a thriving ecosystem of 3rd party tools. Swift's contributors are listed in the AUTHORS file. Docs ---- To build documentation install sphinx (``pip install sphinx``), run ``python setup.py build_sphinx``, and then browse to /doc/build/html/index.html. These docs are auto-generated after every commit and available online at https://docs.openstack.org/swift/latest/. For Developers -------------- Getting Started ~~~~~~~~~~~~~~~ Swift is part of OpenStack and follows the code contribution, review, and testing processes common to all OpenStack projects. If you would like to start contributing, check out these `notes `__ to help you get started. The best place to get started is the `"SAIO - Swift All In One" `__. This document will walk you through setting up a development cluster of Swift in a VM. The SAIO environment is ideal for running small-scale tests against swift and trying out new features and bug fixes. Tests ~~~~~ There are three types of tests included in Swift's source tree. #. Unit tests #. Functional tests #. Probe tests Unit tests check that small sections of the code behave properly. For example, a unit test may test a single function to ensure that various input gives the expected output. This validates that the code is correct and regressions are not introduced. Functional tests check that the client API is working as expected. These can be run against any endpoint claiming to support the Swift API (although some tests require multiple accounts with different privilege levels). 
These are "black box" tests that ensure that client apps written against Swift will continue to work. Probe tests are "white box" tests that validate the internal workings of a Swift cluster. They are written to work against the `"SAIO - Swift All In One" `__ dev environment. For example, a probe test may create an object, delete one replica, and ensure that the background consistency processes find and correct the error. You can run unit tests with ``.unittests``, functional tests with ``.functests``, and probe tests with ``.probetests``. There is an additional ``.alltests`` script that wraps the other three. To fully run the tests, the target environment must use a filesystem that supports large xattrs. XFS is strongly recommended. For unit tests and in- process functional tests, either mount ``/tmp`` with XFS or provide another XFS filesystem via the ``TMPDIR`` environment variable. Without this setting, tests should still pass, but a very large number will be skipped. Code Organization ~~~~~~~~~~~~~~~~~ - bin/: Executable scripts that are the processes run by the deployer - doc/: Documentation - etc/: Sample config files - examples/: Config snippets used in the docs - swift/: Core code - account/: account server - cli/: code that backs some of the CLI tools in bin/ - common/: code shared by different modules - middleware/: "standard", officially-supported middleware - ring/: code implementing Swift's ring - container/: container server - locale/: internationalization (translation) data - obj/: object server - proxy/: proxy server - test/: Unit, functional, and probe tests Data Flow ~~~~~~~~~ Swift is a WSGI application and uses eventlet's WSGI server. After the processes are running, the entry point for new requests is the ``Application`` class in ``swift/proxy/server.py``. From there, a controller is chosen, and the request is processed. The proxy may choose to forward the request to a back-end server. For example, the entry point for requests to the object server is the ``ObjectController`` class in ``swift/obj/server.py``. For Deployers ------------- Deployer docs are also available at https://docs.openstack.org/swift/latest/. A good starting point is at https://docs.openstack.org/swift/latest/deployment_guide.html There is an `ops runbook `__ that gives information about how to diagnose and troubleshoot common issues when running a Swift cluster. You can run functional tests against a swift cluster with ``.functests``. These functional tests require ``/etc/swift/test.conf`` to run. A sample config file can be found in this source tree in ``test/sample.conf``. For Client Apps --------------- For client applications, official Python language bindings are provided at https://github.com/openstack/python-swiftclient. Complete API documentation at https://developer.openstack.org/api-ref/object-store/ There is a large ecosystem of applications and libraries that support and work with OpenStack Swift. Several are listed on the `associated projects `__ page. -------------- For more information come hang out in #openstack-swift on freenode. 
Thanks, The Swift Development Team Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 swift-2.17.0/swift.egg-info/SOURCES.txt0000664000175100017510000005146013236061751017505 0ustar zuulzuul00000000000000.alltests .coveragerc .functests .mailmap .manpages .probetests .testr.conf .unittests .zuul.yaml AUTHORS CHANGELOG CONTRIBUTING.rst LICENSE MANIFEST.in README.rst REVIEW_GUIDELINES.rst babel.cfg bandit.yaml bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/metadata_header_encoding.inc api-ref/source/metadata_header_syntax.inc api-ref/source/parameters.yaml api-ref/source/storage-account-services.inc api-ref/source/storage-container-services.inc api-ref/source/storage-object-services.inc api-ref/source/storage_endpoints.inc api-ref/source/storage_info.inc api-ref/source/samples/account-containers-list-http-request-json.txt api-ref/source/samples/account-containers-list-http-request-xml.txt api-ref/source/samples/account-containers-list-http-response-json.txt api-ref/source/samples/account-containers-list-http-response-xml.txt api-ref/source/samples/account-containers-list-response.json api-ref/source/samples/account-containers-list-response.xml api-ref/source/samples/capabilities-list-response.json api-ref/source/samples/containers-list-http-request.txt api-ref/source/samples/containers-list-http-response.txt api-ref/source/samples/endpoints-list-response-headers.json api-ref/source/samples/endpoints-list-response.json api-ref/source/samples/goodbyeworld.txt api-ref/source/samples/helloworld.txt api-ref/source/samples/objects-list-http-response-json.txt api-ref/source/samples/objects-list-http-response-xml.txt api-ref/source/samples/objects-list-response.json api-ref/source/samples/objects-list-response.xml bin/swift-account-audit bin/swift-account-auditor bin/swift-account-info bin/swift-account-reaper bin/swift-account-replicator bin/swift-account-server bin/swift-config bin/swift-container-auditor bin/swift-container-info bin/swift-container-reconciler bin/swift-container-replicator bin/swift-container-server bin/swift-container-sync bin/swift-container-updater bin/swift-dispersion-populate bin/swift-dispersion-report bin/swift-drive-audit bin/swift-form-signature bin/swift-get-nodes bin/swift-init bin/swift-object-auditor bin/swift-object-expirer bin/swift-object-info bin/swift-object-reconstructor bin/swift-object-relinker bin/swift-object-replicator bin/swift-object-server bin/swift-object-updater bin/swift-oldies bin/swift-orphans bin/swift-proxy-server bin/swift-recon bin/swift-recon-cron bin/swift-reconciler-enqueue bin/swift-ring-builder bin/swift-ring-builder-analyzer doc/manpages/account-server.conf.5 doc/manpages/container-reconciler.conf.5 doc/manpages/container-server.conf.5 doc/manpages/container-sync-realms.conf.5 doc/manpages/dispersion.conf.5 doc/manpages/object-expirer.conf.5 doc/manpages/object-server.conf.5 doc/manpages/proxy-server.conf.5 doc/manpages/swift-account-audit.1 doc/manpages/swift-account-auditor.1 doc/manpages/swift-account-info.1 
doc/manpages/swift-account-reaper.1 doc/manpages/swift-account-replicator.1 doc/manpages/swift-account-server.1 doc/manpages/swift-config.1 doc/manpages/swift-container-auditor.1 doc/manpages/swift-container-info.1 doc/manpages/swift-container-reconciler.1 doc/manpages/swift-container-replicator.1 doc/manpages/swift-container-server.1 doc/manpages/swift-container-sync.1 doc/manpages/swift-container-updater.1 doc/manpages/swift-dispersion-populate.1 doc/manpages/swift-dispersion-report.1 doc/manpages/swift-drive-audit.1 doc/manpages/swift-form-signature.1 doc/manpages/swift-get-nodes.1 doc/manpages/swift-init.1 doc/manpages/swift-object-auditor.1 doc/manpages/swift-object-expirer.1 doc/manpages/swift-object-info.1 doc/manpages/swift-object-reconstructor.1 doc/manpages/swift-object-replicator.1 doc/manpages/swift-object-server.1 doc/manpages/swift-object-updater.1 doc/manpages/swift-oldies.1 doc/manpages/swift-orphans.1 doc/manpages/swift-proxy-server.1 doc/manpages/swift-recon-cron.1 doc/manpages/swift-recon.1 doc/manpages/swift-reconciler-enqueue.1 doc/manpages/swift-ring-builder-analyzer.1 doc/manpages/swift-ring-builder.1 doc/manpages/swift.conf.5 doc/saio/rsyncd.conf doc/saio/bin/remakerings doc/saio/bin/resetswift doc/saio/bin/startmain doc/saio/bin/startrest doc/saio/rsyslog.d/10-swift.conf doc/saio/swift/container-reconciler.conf doc/saio/swift/container-sync-realms.conf doc/saio/swift/object-expirer.conf doc/saio/swift/proxy-server.conf doc/saio/swift/swift.conf doc/saio/swift/account-server/1.conf doc/saio/swift/account-server/2.conf doc/saio/swift/account-server/3.conf doc/saio/swift/account-server/4.conf doc/saio/swift/container-server/1.conf doc/saio/swift/container-server/2.conf doc/saio/swift/container-server/3.conf doc/saio/swift/container-server/4.conf doc/saio/swift/object-server/1.conf doc/saio/swift/object-server/2.conf doc/saio/swift/object-server/3.conf doc/saio/swift/object-server/4.conf doc/source/account.rst doc/source/admin_guide.rst doc/source/apache_deployment_guide.rst doc/source/associated_projects.rst doc/source/conf.py doc/source/container.rst doc/source/cors.rst doc/source/crossdomain.rst doc/source/db.rst doc/source/deployment_guide.rst doc/source/development_auth.rst doc/source/development_guidelines.rst doc/source/development_middleware.rst doc/source/development_ondisk_backends.rst doc/source/development_saio.rst doc/source/first_contribution_swift.rst doc/source/getting_started.rst doc/source/howto_installmultinode.rst doc/source/index.rst doc/source/logs.rst doc/source/middleware.rst doc/source/misc.rst doc/source/object.rst doc/source/overview_acl.rst doc/source/overview_architecture.rst doc/source/overview_auth.rst doc/source/overview_backing_store.rst doc/source/overview_container_sync.rst doc/source/overview_encryption.rst doc/source/overview_erasure_code.rst doc/source/overview_expiring_objects.rst doc/source/overview_global_cluster.rst doc/source/overview_large_objects.rst doc/source/overview_object_versioning.rst doc/source/overview_policies.rst doc/source/overview_reaper.rst doc/source/overview_replication.rst doc/source/overview_ring.rst doc/source/policies_saio.rst doc/source/proxy.rst doc/source/ratelimit.rst doc/source/replication_network.rst doc/source/ring.rst doc/source/ring_background.rst doc/source/ring_partpower.rst doc/source/test-cors.html doc/source/_extra/.htaccess doc/source/admin/index.rst doc/source/admin/objectstorage-EC.rst doc/source/admin/objectstorage-account-reaper.rst doc/source/admin/objectstorage-admin.rst 
doc/source/admin/objectstorage-arch.rst doc/source/admin/objectstorage-auditors.rst doc/source/admin/objectstorage-characteristics.rst doc/source/admin/objectstorage-components.rst doc/source/admin/objectstorage-features.rst doc/source/admin/objectstorage-intro.rst doc/source/admin/objectstorage-large-objects.rst doc/source/admin/objectstorage-monitoring.rst doc/source/admin/objectstorage-replication.rst doc/source/admin/objectstorage-ringbuilder.rst doc/source/admin/objectstorage-tenant-specific-image-storage.rst doc/source/admin/objectstorage-troubleshoot.rst doc/source/admin/figures/objectstorage-accountscontainers.png doc/source/admin/figures/objectstorage-arch.png doc/source/admin/figures/objectstorage-buildingblocks.png doc/source/admin/figures/objectstorage-nodes.png doc/source/admin/figures/objectstorage-partitions.png doc/source/admin/figures/objectstorage-replication.png doc/source/admin/figures/objectstorage-ring.png doc/source/admin/figures/objectstorage-usecase.png doc/source/admin/figures/objectstorage-zones.png doc/source/admin/figures/objectstorage.png doc/source/api/authentication.rst doc/source/api/container_quotas.rst doc/source/api/discoverability.rst doc/source/api/form_post_middleware.rst doc/source/api/large_objects.rst doc/source/api/object_api_v1_overview.rst doc/source/api/object_versioning.rst doc/source/api/temporary_url_middleware.rst doc/source/api/use_content-encoding_metadata.rst doc/source/api/use_the_content-disposition_metadata.rst doc/source/images/ec_overview.png doc/source/install/controller-common_prerequisites.txt doc/source/install/controller-include.txt doc/source/install/controller-install-debian.rst doc/source/install/controller-install-obs.rst doc/source/install/controller-install-rdo.rst doc/source/install/controller-install-ubuntu.rst doc/source/install/controller-install.rst doc/source/install/edit_hosts_file.txt doc/source/install/environment-networking.rst doc/source/install/finalize-installation-obs.rst doc/source/install/finalize-installation-rdo.rst doc/source/install/finalize-installation-ubuntu-debian.rst doc/source/install/finalize-installation.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/initial-rings.rst doc/source/install/next-steps.rst doc/source/install/storage-include1.txt doc/source/install/storage-include2.txt doc/source/install/storage-include3.txt doc/source/install/storage-install-obs.rst doc/source/install/storage-install-rdo.rst doc/source/install/storage-install-ubuntu-debian.rst doc/source/install/storage-install.rst doc/source/install/verify.rst doc/source/ops_runbook/diagnose.rst doc/source/ops_runbook/index.rst doc/source/ops_runbook/maintenance.rst doc/source/ops_runbook/procedures.rst doc/source/ops_runbook/troubleshooting.rst etc/account-server.conf-sample etc/container-reconciler.conf-sample etc/container-server.conf-sample etc/container-sync-realms.conf-sample etc/dispersion.conf-sample etc/drive-audit.conf-sample etc/internal-client.conf-sample etc/keymaster.conf-sample etc/memcache.conf-sample etc/mime.types-sample etc/object-expirer.conf-sample etc/object-server.conf-sample etc/proxy-server.conf-sample etc/rsyncd.conf-sample etc/swift-rsyslog.conf-sample etc/swift.conf-sample examples/apache2/account-server.template examples/apache2/container-server.template examples/apache2/object-server.template examples/apache2/proxy-server.template examples/wsgi/account-server.wsgi.template examples/wsgi/container-server.wsgi.template examples/wsgi/object-server.wsgi.template 
examples/wsgi/proxy-server.wsgi.template releasenotes/notes/2_10_0_release-666a76f4975657a5.yaml releasenotes/notes/2_11_0_release-ac1d256e455d347e.yaml releasenotes/notes/2_12_0_release-06af226abc7b91ef.yaml releasenotes/notes/2_13_0_release-875e1fb1ef59f015.yaml releasenotes/notes/2_14_0_release-7c3ef515ebded888.yaml releasenotes/notes/2_15_0_release-0a05a011fb85a9c9.yaml releasenotes/notes/2_15_1_release-be25e67bfc5e886a.yaml releasenotes/notes/2_16_0_release-d48cb9b2629df8ab.yaml releasenotes/notes/2_17_0_release-bd35f18c41c5ef18.yaml releasenotes/source/conf.py releasenotes/source/current.rst releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po releasenotes/source/locale/ja/LC_MESSAGES/releasenotes.po swift/__init__.py swift.egg-info/PKG-INFO swift.egg-info/SOURCES.txt swift.egg-info/dependency_links.txt swift.egg-info/entry_points.txt swift.egg-info/not-zip-safe swift.egg-info/pbr.json swift.egg-info/requires.txt swift.egg-info/top_level.txt swift/account/__init__.py swift/account/auditor.py swift/account/backend.py swift/account/reaper.py swift/account/replicator.py swift/account/server.py swift/account/utils.py swift/cli/__init__.py swift/cli/dispersion_report.py swift/cli/form_signature.py swift/cli/info.py swift/cli/recon.py swift/cli/relinker.py swift/cli/ring_builder_analyzer.py swift/cli/ringbuilder.py swift/common/__init__.py swift/common/base_storage_server.py swift/common/bufferedhttp.py swift/common/constraints.py swift/common/container_sync_realms.py swift/common/daemon.py swift/common/db.py swift/common/db_replicator.py swift/common/direct_client.py swift/common/exceptions.py swift/common/header_key_dict.py swift/common/http.py swift/common/internal_client.py swift/common/linkat.py swift/common/manager.py swift/common/memcached.py swift/common/request_helpers.py swift/common/splice.py swift/common/storage_policy.py swift/common/swob.py swift/common/utils.py swift/common/wsgi.py swift/common/middleware/__init__.py swift/common/middleware/account_quotas.py swift/common/middleware/acl.py swift/common/middleware/bulk.py swift/common/middleware/catch_errors.py swift/common/middleware/cname_lookup.py swift/common/middleware/container_quotas.py swift/common/middleware/container_sync.py swift/common/middleware/copy.py swift/common/middleware/crossdomain.py swift/common/middleware/dlo.py swift/common/middleware/domain_remap.py swift/common/middleware/formpost.py swift/common/middleware/gatekeeper.py swift/common/middleware/healthcheck.py swift/common/middleware/keystoneauth.py swift/common/middleware/list_endpoints.py swift/common/middleware/listing_formats.py swift/common/middleware/memcache.py swift/common/middleware/name_check.py swift/common/middleware/proxy_logging.py swift/common/middleware/ratelimit.py swift/common/middleware/recon.py swift/common/middleware/slo.py swift/common/middleware/staticweb.py swift/common/middleware/symlink.py swift/common/middleware/tempauth.py swift/common/middleware/tempurl.py swift/common/middleware/versioned_writes.py swift/common/middleware/xprofile.py swift/common/middleware/crypto/__init__.py swift/common/middleware/crypto/crypto_utils.py swift/common/middleware/crypto/decrypter.py swift/common/middleware/crypto/encrypter.py swift/common/middleware/crypto/keymaster.py swift/common/middleware/crypto/kms_keymaster.py swift/common/middleware/x_profile/__init__.py swift/common/middleware/x_profile/exceptions.py 
swift/common/middleware/x_profile/html_viewer.py swift/common/middleware/x_profile/profile_model.py swift/common/ring/__init__.py swift/common/ring/builder.py swift/common/ring/composite_builder.py swift/common/ring/ring.py swift/common/ring/utils.py swift/container/__init__.py swift/container/auditor.py swift/container/backend.py swift/container/reconciler.py swift/container/replicator.py swift/container/server.py swift/container/sync.py swift/container/sync_store.py swift/container/updater.py swift/locale/de/LC_MESSAGES/swift.po swift/locale/en_GB/LC_MESSAGES/swift.po swift/locale/es/LC_MESSAGES/swift.po swift/locale/fr/LC_MESSAGES/swift.po swift/locale/it/LC_MESSAGES/swift.po swift/locale/ja/LC_MESSAGES/swift.po swift/locale/ko_KR/LC_MESSAGES/swift.po swift/locale/pt_BR/LC_MESSAGES/swift.po swift/locale/ru/LC_MESSAGES/swift.po swift/locale/tr_TR/LC_MESSAGES/swift.po swift/locale/zh_CN/LC_MESSAGES/swift.po swift/locale/zh_TW/LC_MESSAGES/swift.po swift/obj/__init__.py swift/obj/auditor.py swift/obj/diskfile.py swift/obj/expirer.py swift/obj/mem_diskfile.py swift/obj/mem_server.py swift/obj/reconstructor.py swift/obj/replicator.py swift/obj/server.py swift/obj/ssync_receiver.py swift/obj/ssync_sender.py swift/obj/updater.py swift/proxy/__init__.py swift/proxy/server.py swift/proxy/controllers/__init__.py swift/proxy/controllers/account.py swift/proxy/controllers/base.py swift/proxy/controllers/container.py swift/proxy/controllers/info.py swift/proxy/controllers/obj.py test/__init__.py test/sample.conf test/functional/__init__.py test/functional/mock_swift_key_manager.py test/functional/swift_test_client.py test/functional/test_access_control.py test/functional/test_account.py test/functional/test_container.py test/functional/test_dlo.py test/functional/test_object.py test/functional/test_slo.py test/functional/test_symlink.py test/functional/test_tempurl.py test/functional/test_versioned_writes.py test/functional/tests.py test/probe/__init__.py test/probe/brain.py test/probe/common.py test/probe/test_account_failures.py test/probe/test_account_get_fake_responses_match.py test/probe/test_account_reaper.py test/probe/test_container_failures.py test/probe/test_container_merge_policy_index.py test/probe/test_container_sync.py test/probe/test_db_replicator.py test/probe/test_empty_device_handoff.py test/probe/test_object_async_update.py test/probe/test_object_expirer.py test/probe/test_object_failures.py test/probe/test_object_handoff.py test/probe/test_object_metadata_replication.py test/probe/test_object_partpower_increase.py test/probe/test_reconstructor_rebuild.py test/probe/test_reconstructor_revert.py test/probe/test_replication_servers_working.py test/probe/test_signals.py test/unit/__init__.py test/unit/helpers.py test/unit/account/__init__.py test/unit/account/test_auditor.py test/unit/account/test_backend.py test/unit/account/test_reaper.py test/unit/account/test_replicator.py test/unit/account/test_server.py test/unit/account/test_utils.py test/unit/cli/__init__.py test/unit/cli/test_default_output.stub test/unit/cli/test_default_output_id_assigned.stub test/unit/cli/test_default_sorted_output.stub test/unit/cli/test_dispersion_report.py test/unit/cli/test_form_signature.py test/unit/cli/test_info.py test/unit/cli/test_ipv6_output.stub test/unit/cli/test_recon.py test/unit/cli/test_relinker.py test/unit/cli/test_ring_builder_analyzer.py test/unit/cli/test_ringbuilder.py test/unit/common/__init__.py test/unit/common/corrupted_example.db test/unit/common/malformed_example.db 
test/unit/common/malformed_schema_example.db test/unit/common/test_base_storage_server.py test/unit/common/test_bufferedhttp.py test/unit/common/test_constraints.py test/unit/common/test_container_sync_realms.py test/unit/common/test_daemon.py test/unit/common/test_db.py test/unit/common/test_db_replicator.py test/unit/common/test_direct_client.py test/unit/common/test_exceptions.py test/unit/common/test_header_key_dict.py test/unit/common/test_internal_client.py test/unit/common/test_linkat.py test/unit/common/test_manager.py test/unit/common/test_memcached.py test/unit/common/test_request_helpers.py test/unit/common/test_splice.py test/unit/common/test_storage_policy.py test/unit/common/test_swob.py test/unit/common/test_utils.py test/unit/common/test_wsgi.py test/unit/common/middleware/__init__.py test/unit/common/middleware/helpers.py test/unit/common/middleware/test_account_quotas.py test/unit/common/middleware/test_acl.py test/unit/common/middleware/test_bulk.py test/unit/common/middleware/test_cname_lookup.py test/unit/common/middleware/test_container_sync.py test/unit/common/middleware/test_copy.py test/unit/common/middleware/test_crossdomain.py test/unit/common/middleware/test_dlo.py test/unit/common/middleware/test_domain_remap.py test/unit/common/middleware/test_except.py test/unit/common/middleware/test_formpost.py test/unit/common/middleware/test_gatekeeper.py test/unit/common/middleware/test_healthcheck.py test/unit/common/middleware/test_keystoneauth.py test/unit/common/middleware/test_list_endpoints.py test/unit/common/middleware/test_listing_formats.py test/unit/common/middleware/test_memcache.py test/unit/common/middleware/test_name_check.py test/unit/common/middleware/test_proxy_logging.py test/unit/common/middleware/test_quotas.py test/unit/common/middleware/test_ratelimit.py test/unit/common/middleware/test_recon.py test/unit/common/middleware/test_slo.py test/unit/common/middleware/test_staticweb.py test/unit/common/middleware/test_subrequest_logging.py test/unit/common/middleware/test_symlink.py test/unit/common/middleware/test_tempauth.py test/unit/common/middleware/test_tempurl.py test/unit/common/middleware/test_versioned_writes.py test/unit/common/middleware/test_xprofile.py test/unit/common/middleware/crypto/__init__.py test/unit/common/middleware/crypto/crypto_helpers.py test/unit/common/middleware/crypto/test_crypto.py test/unit/common/middleware/crypto/test_crypto_utils.py test/unit/common/middleware/crypto/test_decrypter.py test/unit/common/middleware/crypto/test_encrypter.py test/unit/common/middleware/crypto/test_encryption.py test/unit/common/middleware/crypto/test_keymaster.py test/unit/common/middleware/crypto/test_kms_keymaster.py test/unit/common/ring/__init__.py test/unit/common/ring/test_builder.py test/unit/common/ring/test_composite_builder.py test/unit/common/ring/test_ring.py test/unit/common/ring/test_utils.py test/unit/container/__init__.py test/unit/container/test_auditor.py test/unit/container/test_backend.py test/unit/container/test_reconciler.py test/unit/container/test_replicator.py test/unit/container/test_server.py test/unit/container/test_sync.py test/unit/container/test_sync_store.py test/unit/container/test_updater.py test/unit/obj/__init__.py test/unit/obj/common.py test/unit/obj/test_auditor.py test/unit/obj/test_diskfile.py test/unit/obj/test_expirer.py test/unit/obj/test_reconstructor.py test/unit/obj/test_replicator.py test/unit/obj/test_server.py test/unit/obj/test_ssync.py test/unit/obj/test_ssync_receiver.py 
test/unit/obj/test_ssync_sender.py test/unit/obj/test_updater.py test/unit/proxy/__init__.py test/unit/proxy/test_mem_server.py test/unit/proxy/test_server.py test/unit/proxy/test_sysmeta.py test/unit/proxy/controllers/__init__.py test/unit/proxy/controllers/test_account.py test/unit/proxy/controllers/test_base.py test/unit/proxy/controllers/test_container.py test/unit/proxy/controllers/test_info.py test/unit/proxy/controllers/test_obj.py test/unit/test_locale/README test/unit/test_locale/__init__.py test/unit/test_locale/eo.po test/unit/test_locale/messages.mo test/unit/test_locale/test_locale.py test/unit/test_locale/eo/LC_MESSAGES/swift.mo tools/test-setup.shswift-2.17.0/.alltests0000777000175100017510000000052213236061617014626 0ustar zuulzuul00000000000000#!/bin/bash set -e TOP_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))") echo "==== Unit tests ====" resetswift $TOP_DIR/.unittests $@ echo "==== Func tests ====" resetswift startmain $TOP_DIR/.functests $@ echo "==== Probe tests ====" resetswift $TOP_DIR/.probetests $@ echo "All tests ran fine" exit 0 swift-2.17.0/requirements.txt0000666000175100017510000000074213236061617016257 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order # of appearance. Changing the order has an impact on the overall integration # process, which may cause wedges in the gate later. dnspython>=1.14.0 # http://www.dnspython.org/LICENSE eventlet>=0.17.4 # MIT greenlet>=0.3.1 netifaces>=0.5,!=0.10.0,!=0.10.1 pastedeploy>=1.3.3 six>=1.9.0 xattr>=0.4 PyECLib>=1.3.1 # BSD cryptography!=2.0,>=1.6 # BSD/Apache-2.0 swift-2.17.0/.functests0000777000175100017510000000051313236061617015011 0ustar zuulzuul00000000000000#!/bin/bash # How-To debug functional tests: # SWIFT_TEST_IN_PROCESS=1 tox -e func -- --pdb test.functional.tests.TestFile.testCopy SRC_DIR=$(python -c "import os; print os.path.dirname(os.path.realpath('$0'))") cd ${SRC_DIR} export TESTS_DIR=${SRC_DIR}/test/functional ostestr --serial --pretty $@ rvalue=$? cd - exit $rvalue swift-2.17.0/README.rst0000666000175100017510000001332213236061617014460 0ustar zuulzuul00000000000000======================== Team and repository tags ======================== .. image:: https://governance.openstack.org/badges/swift.svg :target: https://governance.openstack.org/reference/tags/index.html .. Change things from this point on Swift ===== A distributed object storage system designed to scale from a single machine to thousands of servers. Swift is optimized for multi-tenancy and high concurrency. Swift is ideal for backups, web and mobile content, and any other unstructured data that can grow without bound. Swift provides a simple, REST-based API fully documented at https://docs.openstack.org/. Swift was originally developed as the basis for Rackspace's Cloud Files and was open-sourced in 2010 as part of the OpenStack project. It has since grown to include contributions from many companies and has spawned a thriving ecosystem of 3rd party tools. Swift's contributors are listed in the AUTHORS file. Docs ---- To build documentation, install sphinx (``pip install sphinx``), run ``python setup.py build_sphinx``, and then browse to /doc/build/html/index.html. These docs are auto-generated after every commit and available online at https://docs.openstack.org/swift/latest/.
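For example, a local documentation build is a short session like this (a sketch assuming a checkout of this source tree and a working ``pip``)::

    # install the documentation toolchain
    pip install sphinx
    # generate the HTML docs into doc/build/html/
    python setup.py build_sphinx
    # then open doc/build/html/index.html in a browser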
For Developers -------------- Getting Started ~~~~~~~~~~~~~~~ Swift is part of OpenStack and follows the code contribution, review, and testing processes common to all OpenStack projects. If you would like to start contributing, check out these `notes `__ to help you get started. The best place to get started is the `"SAIO - Swift All In One" `__. This document will walk you through setting up a development cluster of Swift in a VM. The SAIO environment is ideal for running small-scale tests against Swift and trying out new features and bug fixes. Tests ~~~~~ There are three types of tests included in Swift's source tree. #. Unit tests #. Functional tests #. Probe tests Unit tests check that small sections of the code behave properly. For example, a unit test may test a single function to ensure that various inputs give the expected output. This validates that the code is correct and regressions are not introduced. Functional tests check that the client API is working as expected. These can be run against any endpoint claiming to support the Swift API (although some tests require multiple accounts with different privilege levels). These are "black box" tests that ensure that client apps written against Swift will continue to work. Probe tests are "white box" tests that validate the internal workings of a Swift cluster. They are written to work against the `"SAIO - Swift All In One" `__ dev environment. For example, a probe test may create an object, delete one replica, and ensure that the background consistency processes find and correct the error. You can run unit tests with ``.unittests``, functional tests with ``.functests``, and probe tests with ``.probetests``. There is an additional ``.alltests`` script that wraps the other three. To fully run the tests, the target environment must use a filesystem that supports large xattrs. XFS is strongly recommended. For unit tests and in-process functional tests, either mount ``/tmp`` with XFS or provide another XFS filesystem via the ``TMPDIR`` environment variable. Without this setting, tests should still pass, but a very large number will be skipped. Code Organization ~~~~~~~~~~~~~~~~~ - bin/: Executable scripts that are the processes run by the deployer - doc/: Documentation - etc/: Sample config files - examples/: Config snippets used in the docs - swift/: Core code - account/: account server - cli/: code that backs some of the CLI tools in bin/ - common/: code shared by different modules - middleware/: "standard", officially-supported middleware - ring/: code implementing Swift's ring - container/: container server - locale/: internationalization (translation) data - obj/: object server - proxy/: proxy server - test/: Unit, functional, and probe tests Data Flow ~~~~~~~~~ Swift is a WSGI application and uses eventlet's WSGI server. After the processes are running, the entry point for new requests is the ``Application`` class in ``swift/proxy/server.py``. From there, a controller is chosen, and the request is processed. The proxy may choose to forward the request to a back-end server. For example, the entry point for requests to the object server is the ``ObjectController`` class in ``swift/obj/server.py``. For Deployers ------------- Deployer docs are also available at https://docs.openstack.org/swift/latest/. A good starting point is at https://docs.openstack.org/swift/latest/deployment_guide.html. There is an `ops runbook `__ that gives information about how to diagnose and troubleshoot common issues when running a Swift cluster.
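To give a flavor of the day-to-day health checks the runbook covers, here are a few hedged examples using the ``swift-recon`` tool shipped in ``bin/`` (these assume the ``recon`` middleware is enabled in the server pipelines, as it is in the SAIO sample configs)::

    # summarize object replication status across the cluster
    swift-recon object -r
    # list drives that servers report as unmounted
    swift-recon -u
    # verify that every node agrees on the ring and swift.conf md5sums
    swift-recon --md5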
You can run functional tests against a Swift cluster with ``.functests``. These functional tests require ``/etc/swift/test.conf`` to run. A sample config file can be found in this source tree in ``test/sample.conf``. For Client Apps --------------- For client applications, official Python language bindings are provided at https://github.com/openstack/python-swiftclient. Complete API documentation is available at https://developer.openstack.org/api-ref/object-store/. There is a large ecosystem of applications and libraries that support and work with OpenStack Swift. Several are listed on the `associated projects `__ page. -------------- For more information, come hang out in #openstack-swift on freenode. Thanks, The Swift Development Team swift-2.17.0/swift/0000775000175100017510000000000013236061751014121 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/0000775000175100017510000000000013236061751015360 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/fr/0000775000175100017510000000000013236061751015767 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/fr/LC_MESSAGES/0000775000175100017510000000000013236061751017554 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/fr/LC_MESSAGES/swift.po0000666000175100017510000007365513236061620021255 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Maxime COQUEREL , 2014 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:42+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: French\n" msgid "" "\n" "user quit" msgstr "" "\n" "l'utilisateur quitte le programme" #, python-format msgid " - %s" msgstr "- %s" #, python-format msgid " - parallel, %s" msgstr "- parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffixe(s) vérifié(s) - %(hashed).2f%% haché(s), %(synced).2f%% " "synchronisé(s)" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions répliquées en " "%(time).2fs (%(rate).2f/sec ; %(remaining)s restante(s))" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s succès, %(failure)s échec(s)" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s : renvoi de l'erreur 503 pour %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s déjà démarré..."
#, python-format msgid "%s does not exist" msgstr "%s n'existe pas" #, python-format msgid "%s is not mounted" msgstr "%s n'est pas monté" #, python-format msgid "%s responded as unmounted" msgstr "%s ont été identifié(es) comme étant démonté(es)" #, python-format msgid "%s: Connection reset by peer" msgstr "%s : Connexion réinitialisée par l'homologue" #, python-format msgid ", %s containers deleted" msgstr ", %s containers supprimés" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s conteneur(s) restant(s), le cas échéant" #, python-format msgid ", %s containers remaining" msgstr ", %s conteneur(s) restant(s)" #, python-format msgid ", %s objects deleted" msgstr ", %s objets supprimés" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objet(s) restant(s), le cas échéant" #, python-format msgid ", %s objects remaining" msgstr ", %s objet(s) restant(s)" #, python-format msgid ", elapsed: %.02fs" msgstr ", temps écoulé : %.02fs" msgid ", return codes: " msgstr ", return codes: " msgid "Account" msgstr "Compte" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Audit de compte en mode \"Once\" terminé : %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Session d'audit de compte terminée : %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Tentative de réplication de %(count)d bases de données en %(time).5f " "secondes (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Code retour Rsync non valide : %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Démarrer l'audit de compte en mode \"Once\" (une fois)" msgid "Begin account audit pass." msgstr "Démarrer la session d'audit de compte." msgid "Begin container audit \"once\" mode" msgstr "Démarrer l'audit de conteneur en mode \"Once\" (une fois)" msgid "Begin container audit pass." msgstr "Démarrer la session d'audit de conteneur." msgid "Begin container sync \"once\" mode" msgstr "Démarrer la synchronisation de conteneurs en mode \"Once\" (une fois)" msgid "Begin container update single threaded sweep" msgstr "" "Démarrer le balayage des mises à jour du conteneur (unité d'exécution unique)" msgid "Begin container update sweep" msgstr "Démarrer le balayage des mises à jour du conteneur" msgid "Begin object update single threaded sweep" msgstr "" "Démarrer le balayage des mises à jour d'objet (unité d'exécution unique)" msgid "Begin object update sweep" msgstr "Démarrer le balayage des mises à jour d'objet" #, python-format msgid "Beginning pass on account %s" msgstr "Démarrage de la session d'audit sur le compte %s" msgid "Beginning replication run" msgstr "Démarrage du cycle de réplication" msgid "Broker error trying to rollback locked connection" msgstr "" "Erreur de courtier lors d'une tentative d'annulation d'une connexion " "verrouillée" #, python-format msgid "Can not access the file %s." msgstr "Ne peut pas accéder au fichier %s." #, python-format msgid "Can not load profile data from %s." msgstr "Impossible de charger des données de profil depuis %s." 
#, python-format msgid "Client did not read from proxy within %ss" msgstr "Le client n'a pas lu les données du proxy en %s s" msgid "Client disconnected on read" msgstr "Client déconnecté lors de la lecture" msgid "Client disconnected without sending enough data" msgstr "Client déconnecté avant l'envoi de toutes les données requises" msgid "Client disconnected without sending last chunk" msgstr "Le client a été déconnecté avant l'envoi du dernier bloc" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Le chemin d'accès au client %(client)s ne correspond pas au chemin stocké " "dans les métadonnées d'objet %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "L'option de configuration internal_client_conf_path n'a pas été définie. La " "configuration par défaut est utilisée. Consultez les options dans internal-" "client.conf-sample." msgid "Connection refused" msgstr "Connexion refusée" msgid "Connection timeout" msgstr "Dépassement du délai d'attente de connexion" msgid "Container" msgstr "Conteneur" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Audit de conteneur en mode \"Once\" terminé : %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Session d'audit de conteneur terminée : %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Synchronisation de conteneurs en mode \"Once\" terminée : %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Le balayage des mises à jour du conteneur (unité d'exécution unique) est " "terminé : %(elapsed).02fs, %(success)s succès, %(fail)s échec(s), " "%(no_change)s inchangé(s)" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Le balayage des mises à jour du conteneur est terminé : %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Le balayage des mises à jour du conteneur (%(path)s) est terminé : " "%(elapsed).02fs, %(success)s succès, %(fail)s échec(s), %(no_change)s " "inchangé(s)" #, python-format msgid "Data download error: %s" msgstr "Erreur de téléchargement des données: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Session d'audit d'unité terminée : %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERREUR %(status)d %(body)s depuis le serveur %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERREUR %(status)d %(body)s depuis le serveur d'objets. Réf. : %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" "ERREUR %(status)d Attendu(s) : 100 - poursuivre depuis le serveur d'objets" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement). 
Réponse %(status)s " "%(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERREUR Réponse incorrecte %(status)s de %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERREUR Dépassement du délai de lecture du client (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERREUR Echec de la mise à jour du conteneur (sauvegarde pour mise à jour " "asynchrone ultérieure) : réponse %(status)d renvoyée par %(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERREUR Impossible d'obtenir les infos de compte %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERREUR Impossible d'obtenir les infos de conteneur %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "ERREUR Incident de fermeture du fichier disque %(data_file)s : %(exc)s : " "%(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERREUR Exception entraînant la déconnexion du client" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ERREUR Exception lors du transfert de données vers des serveurs d'objets %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERREUR Obtention impossible de mes propres adresses IP ?" msgid "ERROR Insufficient Storage" msgstr "ERREUR Stockage insuffisant" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERREUR L'objet %(obj)s a échoué à l'audit et a été en quarantaine : %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERREUR Problème lié à Pickle. Mise en quarantaine de %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERREUR Unité distante %s non montée" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERREUR lors de la synchronisation de %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERREUR lors de la synchronisation de %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERREUR lors de la tentative d'audit de %s" msgid "ERROR Unhandled exception in request" msgstr "ERREUR Exception non gérée dans la demande" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ error sur %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement) : " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "ERREUR Le fichier des mises à jour asynchrones en attente porte un nom " "inattendu %s" msgid "ERROR auditing" msgstr "Erreur d'audit" #, python-format msgid "ERROR auditing: %s" msgstr "ERREUR d'audit : %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERREUR Echec de la mise à jour du conteneur avec %(ip)s:%(port)s/%(dev)s " "(sauvegarde pour mise à jour asynchrone ultérieure)" #, python-format msgid "ERROR reading HTTP 
response from %s" msgstr "Erreur de lecture de la réponse HTTP depuis %s" #, python-format msgid "ERROR reading db %s" msgstr "ERREUR de lecture de db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERREUR Echec de Rsync avec %(code)s : %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERREUR de synchronisation de %(file)s avec le noeud %(node)s" msgid "ERROR trying to replicate" msgstr "ERREUR lors de la tentative de réplication" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERREUR pendant le nettoyage %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "ERREUR liée au serveur %(type)s %(ip)s:%(port)s/%(device)s. Réf. : %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERREUR de chargement des suppressions de %s : " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERREUR liée au serveur distant %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "ERREUR : Echec de l'obtention des chemins d'accès aux partitions d'unité : %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERREUR : Impossible d'accéder à %(path)s : %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERREUR : Impossible d'exécuter l'audit : %s" msgid "Error hashing suffix" msgstr "Erreur suffixe hashing" msgid "Error listing devices" msgstr "Erreur lors du listage des unités" #, python-format msgid "Error on render profiling results: %s" msgstr "Erreur de rendu des résultats de profilage : %s" msgid "Error parsing recon cache file" msgstr "Erreur lors de l'analyse syntaxique du fichier cache Recon" msgid "Error reading recon cache file" msgstr "Erreur de lecture du fichier cache Recon" msgid "Error reading ringfile" msgstr "Erreur de lecture du fichier Ring" msgid "Error reading swift.conf" msgstr "Erreur de lecture de swift.conf" msgid "Error retrieving recon data" msgstr "Erreur lors de l'extraction des données Recon" msgid "Error syncing handoff partition" msgstr "Erreur lors de la synchronisation de la partition de transfert" msgid "Error syncing partition" msgstr "Erreur de synchronisation de la partition" #, python-format msgid "Error syncing with node: %s" msgstr "Erreur de synchronisation avec le noeud : %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Une erreur est survenue lors de la tentative de régénération de %(path)s " "policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Erreur : une erreur s'est produite" msgid "Error: missing config path argument" msgstr "Erreur: Manque argument de configuration du chemin" #, python-format msgid "Error: unable to locate %s" msgstr "Erreur: impossible de localiser %s" msgid "Exception dumping recon cache" msgstr "Exception lors du vidage de cache Recon" msgid "Exception in top-level account reaper loop" msgstr "Exception dans la boucle de collecteur de compte de niveau supérieur" msgid "Exception in top-level replication loop" msgstr "Exception dans la boucle de réplication de niveau supérieur" msgid "Exception in top-levelreconstruction loop" msgstr "Exception dans la boucle de reconstruction de niveau supérieur" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exception liée à %(ip)s:%(port)s/%(device)s" #, python-format msgid 
"Exception with account %s" msgstr "Exception avec le compte %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exception avec les containers pour le compte %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exception liée aux objets pour le conteneur %(container)s et le compte " "%(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Attendus(s) : 100 - poursuivre sur %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" "Suivi de la chaîne CNAME pour %(given_domain)s jusqu'à %(found_domain)s" msgid "Found configs:" msgstr "Configurations trouvées :" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Le premier mode de transferts contient d'autres transferts. Abandon de la " "session de réplication en cours." msgid "Host unreachable" msgstr "Hôte inaccessible" #, python-format msgid "Incomplete pass on account %s" msgstr "Session d'audit incomplète sur le compte %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Non valide X-Container-Sync-To format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Hôte %r non valide dans X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrée en attente non valide %(file)s : %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Réponse %(resp)s non valide de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Réponse %(resp)s non valide de %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Schéma %r non valide dans X-Container-Sync-To. Doit être \"//\", \"http\" ou " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Arrêt de l'opération Rsync à exécution longue : %s" msgid "Lockup detected.. killing live coros." msgstr "Blocage détecté. Arrêt des coroutines actives." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s mappé avec %(found_domain)s" #, python-format msgid "No %s running" msgstr "Non démarré %s" #, python-format msgid "No permission to signal PID %d" msgstr "Aucun droit pour signaler le PID %d" #, python-format msgid "No policy with index %s" msgstr "Aucune statégie avec un index de type %s" #, python-format msgid "No realm key for %r" msgstr "Aucune clé de domaine pour %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" "Noeud marqué avec limite d'erreurs (error_limited) %(ip)s:%(port)s " "(%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" "Le nombre de serveurs d'objets reconnus n'est pas suffisant (%d obtenus)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Introuvable : %(sync_from)r => %(sync_to)r - objet " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Aucun élément reconstruit pendant %s secondes." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Aucun élément répliqué pendant %s secondes." 
msgid "Object" msgstr "Objet" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 202 pour 409 : " "%(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" "L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 412. %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "L'audit d'objet (%(type)s) en mode \"%(mode)s\" est terminé : " "%(elapsed).02fs. Nombre total mis en quarantaine : %(quars)d. Nombre total " "d'erreurs : %(errors)d. Nombre total de fichiers/sec : %(frate).2f. Nombre " "total d'octets/sec : %(brate).2f. Durée d'audit : %(audit).2f. Taux : " "%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Audit d'objet (%(type)s). Depuis %(start_time)s, localement : %(passes)d " "succès. %(quars)d en quarantaine. %(errors)d erreurs. Fichiers/sec : " "%(frate).2f. Octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée " "d'audit : %(audit).2f. Taux : %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Statistiques de l'audit d'objet : %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "" "La reconstruction d'objet en mode Once (une fois) est terminée. (%.02f " "minutes)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstruction d'objet terminée. (%.02f minutes)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" "La réplication d'objet en mode Once (une fois) est terminée. (%.02f minutes)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Réplication d'objet terminée. 
(%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Des serveurs d'objets ont renvoyé %s en-têtes Etag non concordants" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Le balayage des mises à jour d'objet est terminé : %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Paramètres, requêtes et fragments interdits dans X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Temps de partition : maximum %(max).4fs, minimum %(min).4fs, moyenne " "%(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Chemin requis dans X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problème lors du nettoyage de %s" #, python-format msgid "Profiling Error: %s" msgstr "Erreur de profilage : %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s n'est pas un répertoire et a donc été mis en quarantaine dans " "%(quar_path)s" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s n'est pas un répertoire et a donc été mis en quarantaine " "dans %(quar_path)s" #, python-format msgid "Quarantining DB %s" msgstr "Mise en quarantaine de la base de données %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Journal de mise en veille Ratelimit : %(sleep)s pour %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d bases de données ont été retirées" #, python-format msgid "Removing %s objects" msgstr "Suppression de %s objets" #, python-format msgid "Removing partition: %s" msgstr "Suppression partition: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "" "Supression du fichier PID %(pid_file)s, comportant un PID incorrect %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Suppression du fichier pid %s comportant un pid non valide" #, python-format msgid "Removing stale pid file %s" msgstr "Suppression du fichier PID %s périmé" msgid "Replication run OVER" msgstr "Le cycle de réplication est terminé" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Renvoi de 497 en raison du placement sur liste noire : %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Renvoi de 498 pour %(meth)s jusqu'à %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(Max Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Changement d'anneau détecté. Abandon de la session de reconstruction en " "cours." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Changement d'anneau détecté. Abandon de la session de réplication en cours." #, python-format msgid "Running %s once" msgstr "Exécution unique de %s" msgid "Running object reconstructor in script mode." msgstr "Exécution du reconstructeur d'objet en mode script." msgid "Running object replicator in script mode." msgstr "Exécution du réplicateur d'objet en mode script." 
#, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Depuis %(time)s : %(sync)s synchronisé(s) [%(delete)s suppression(s), " "%(put)s insertion(s)], %(skip)s ignoré(s), %(fail)s échec(s)" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Depuis %(time)s : audits de compte : %(passed)s succès, %(failed)s échec(s)" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Depuis %(time)s : audits de conteneur : %(pass)s succès, %(fail)s échec(s)" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s est ignoré car il n'est pas monté" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s est ignoré car il n'est pas monté" #, python-format msgid "Starting %s" msgstr "Démarrage %s" msgid "Starting object reconstruction pass." msgstr "Démarrage de la session de reconstruction d'objet." msgid "Starting object reconstructor in daemon mode." msgstr "Démarrage du reconstructeur d'objet en mode démon." msgid "Starting object replication pass." msgstr "Démarrage de la session de réplication d'objet." msgid "Starting object replicator in daemon mode." msgstr "Démarrage du réplicateur d'objet en mode démon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Succès de Rsync pour %(src)s dans %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Accès interdit au type de fichier" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Le total %(key)s du conteneur (%(total)s) ne correspond pas à la somme des " "clés %(key)s des différentes règles (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "" "Exception liée à un dépassement de délai concernant %(ip)s:%(port)s/" "%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentative d'exécution de %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentative de lecture de %(full_path)s" msgid "Trying to read during GET" msgstr "Tentative de lecture pendant une opération GET" msgid "Trying to read during GET (retrying)" msgstr "Tentative de lecture pendant une opération GET (nouvelle tentative)" msgid "Trying to send to client" msgstr "Tentative d'envoi au client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentative de synchronisation de suffixes à l'aide de %s" #, python-format msgid "Trying to write to %s" msgstr "Tentative d'écriture sur %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEPTION NON INTERCEPTEE" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" "Impossible de localiser %s dans libc. Laissé comme action nulle (no-op)." #, python-format msgid "Unable to locate config for %s" msgstr "Impossible de trouver la configuration pour %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossible de localiser fallocate, posix_fallocate dans libc. Laissé comme " "action nulle (no-op)." 
#, python-format msgid "Unable to read config from %s" msgstr "Impossible de lire le fichier de configuration depuis %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Non autorisé : %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "Exception non prise en charge" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Une exception inconnue s'est produite pendant une opération GET: %(account)r " "%(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Echec du rapport de mise à jour pour %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Rapport de mise à jour envoyé pour %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVERTISSEMENT : SSL ne doit être activé qu'à des fins de test. Utilisez la " "terminaison SSL externe pour un déploiement en production." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite de descripteur de fichier. " "Exécution en tant que non root ?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite maximale de processus. " "Exécution en tant que non root ?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite de mémoire. Exécution en " "tant que non root ?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avertissement : impossible d'appliquer Ratelimit sans client memcached" #, python-format msgid "method %s is not allowed." msgstr "Méthode %s interdite." msgid "no log file found" msgstr "Pas de fichier log trouvé" msgid "odfpy not installed." msgstr "odfpy n'est pas installé." #, python-format msgid "plotting results failed due to %s" msgstr "Echec du traçage des résultats. Cause : %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib non installé." swift-2.17.0/swift/locale/ru/0000775000175100017510000000000013236061751016006 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/ru/LC_MESSAGES/0000775000175100017510000000000013236061751017573 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/ru/LC_MESSAGES/swift.po0000666000175100017510000011031713236061620021267 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 
2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Russian\n" msgid "" "\n" "user quit" msgstr "" "\n" "Завершение работы пользователÑ" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - параллельно, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "Проверено ÑуффикÑов: %(checked)d - Ñ…Ñшировано: %(hashed).2f%%, " "Ñинхронизировано: %(synced).2f%%" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "Реплицировано разделов: %(replicated)d/%(total)d (%(percentage).2f%%) за " "Ð²Ñ€ÐµÐ¼Ñ %(time).2f Ñ (%(rate).2f/Ñ, оÑталоÑÑŒ: %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s уÑпешно, %(failure)s Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°Ð¼Ð¸" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s возвратил 503 Ð´Ð»Ñ %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s уже запущен..." #, python-format msgid "%s does not exist" msgstr "%s не ÑущеÑтвует" #, python-format msgid "%s is not mounted" msgstr "%s не Ñмонтирован" #, python-format msgid "%s responded as unmounted" msgstr "%s ответил как размонтированный" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Ñоединение Ñброшено на другой Ñтороне" #, python-format msgid ", %s containers deleted" msgstr ", удалено контейнеров: %s" #, python-format msgid ", %s containers possibly remaining" msgstr ", оÑталоÑÑŒ контейнеров (возможно): %s" #, python-format msgid ", %s containers remaining" msgstr ", оÑталоÑÑŒ контейнеров: %s" #, python-format msgid ", %s objects deleted" msgstr ", удалено объектов: %s" #, python-format msgid ", %s objects possibly remaining" msgstr ", оÑталоÑÑŒ объектов (возможно): %s" #, python-format msgid ", %s objects remaining" msgstr ", оÑталоÑÑŒ объектов: %s" #, python-format msgid ", elapsed: %.02fs" msgstr ", прошло: %.02fs" msgid ", return codes: " msgstr ", коды возврата: " msgid "Account" msgstr "Ð£Ñ‡ÐµÑ‚Ð½Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Проверка учетной запиÑи в \"однократном\" режиме завершена: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Проход ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ ÑƒÑ‡ÐµÑ‚Ð½Ð¾Ð¹ запиÑи выполнен: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Попытка репликации %(count)d баз данных за %(time).5f Ñекунд (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Ðеправильный код возврата rsync: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Ðачать проверку учетной запиÑи в \"однократном\" режиме" msgid "Begin account audit pass." msgstr "Ðачать проход проверки учетной запиÑи." msgid "Begin container audit \"once\" mode" msgstr "Ðачать проверку контейнера в \"однократном\" режиме" msgid "Begin container audit pass." msgstr "Ðачать проход проверки контейнера." 
msgid "Begin container sync \"once\" mode" msgstr "Ðачать Ñинхронизацию контейнера в \"однократном\" режиме" msgid "Begin container update single threaded sweep" msgstr "Ðачать однонитевую Ñплошную проверку обновлений контейнера" msgid "Begin container update sweep" msgstr "Ðачать Ñплошную проверку обновлений контейнера" msgid "Begin object update single threaded sweep" msgstr "Ðачать однонитевую Ñплошную проверку обновлений объекта" msgid "Begin object update sweep" msgstr "Ðачать Ñплошную проверку обновлений объекта" #, python-format msgid "Beginning pass on account %s" msgstr "ÐачинаетÑÑ Ð¿Ñ€Ð¾Ñ…Ð¾Ð´ Ð´Ð»Ñ ÑƒÑ‡ÐµÑ‚Ð½Ð¾Ð¹ запиÑи %s" msgid "Beginning replication run" msgstr "ЗапуÑк репликации" msgid "Broker error trying to rollback locked connection" msgstr "Ошибка поÑредника при попытке отката заблокированного ÑоединениÑ" #, python-format msgid "Can not access the file %s." msgstr "ОтÑутÑтвует доÑтуп к файлу %s." #, python-format msgid "Can not load profile data from %s." msgstr "Ðе удаетÑÑ Ð·Ð°Ð³Ñ€ÑƒÐ·Ð¸Ñ‚ÑŒ данные профайла из %s." #, python-format msgid "Client did not read from proxy within %ss" msgstr "Клиент не прочитал данные из proxy в %ss" msgid "Client disconnected on read" msgstr "Клиент отключен во Ð²Ñ€ÐµÐ¼Ñ Ñ‡Ñ‚ÐµÐ½Ð¸Ñ" msgid "Client disconnected without sending enough data" msgstr "Клиент отключен без отправки данных" msgid "Client disconnected without sending last chunk" msgstr "Клиент отключилÑÑ, не отправив поÑледний фрагмент данных" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Путь клиента %(client)s не ÑоответÑтвует пути в метаданных объекта %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "ÐžÐ¿Ñ†Ð¸Ñ internal_client_conf_path конфигурации не определена. ИÑпользуетÑÑ " "ÐºÐ¾Ð½Ñ„Ð¸Ð³ÑƒÑ€Ð°Ñ†Ð¸Ñ Ð¿Ð¾ умолчанию. 
ИÑпользуйте intenal-client.conf-sample Ð´Ð»Ñ " "информации об опциÑÑ…" msgid "Connection refused" msgstr "Соединение отклонено" msgid "Connection timeout" msgstr "Тайм-аут ÑоединениÑ" msgid "Container" msgstr "контейнер" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Проверка контейнера в \"однократном\" режиме завершена: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Проход проверки контейнера завершен: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Ð¡Ð¸Ð½Ñ…Ñ€Ð¾Ð½Ð¸Ð·Ð°Ñ†Ð¸Ñ ÐºÐ¾Ð½Ñ‚ÐµÐ¹Ð½ÐµÑ€Ð° в \"однократном\" режиме завершена: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Ð¡Ð¿Ð»Ð¾ÑˆÐ½Ð°Ñ Ð¾Ð´Ð½Ð¾Ð½Ð¸Ñ‚ÐµÐ²Ð°Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° обновлений контейнера завершена: " "%(elapsed).02fs, уÑпешно: %(success)s, Ñбоев: %(fail)s, без изменений: " "%(no_change)s" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Ð¡Ð¿Ð»Ð¾ÑˆÐ½Ð°Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° обновлений контейнера завершена: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Ð¡Ð¿Ð»Ð¾ÑˆÐ½Ð°Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° обновлений контейнера в %(path)s завершена: " "%(elapsed).02fs, уÑпешно: %(success)s, Ñбоев: %(fail)s, без изменений: " "%(no_change)s" #, python-format msgid "Data download error: %s" msgstr "Ошибка загрузки данных: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Проход уÑтройÑтв выполнен: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "Ошибка %(status)d %(body)s из Ñервера %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "Ошибка %(status)d %(body)s, ответ от Ñервера объекта: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "Ошибка %(status)d. Ожидаемое значение от Ñервера объекта: 100-continue" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "Ошибка: обновление учетной запиÑи не выполнено Ð´Ð»Ñ %(ip)s:%(port)s/" "%(device)s (Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÐ´ÐµÑ‚ повторена позднее): Ответ: %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "Ошибка: Ðеправильный Ð·Ð°Ð¿Ñ€Ð¾Ñ %(status)s из %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "Ошибка: тайм-аут Ñ‡Ñ‚ÐµÐ½Ð¸Ñ ÐºÐ»Ð¸ÐµÐ½Ñ‚Ð° (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "Ошибка. 
Обновление контейнера не выполнено (Ñохранение аÑинхронных " "обновлений будет выполнено позднее): %(status)d ответ от %(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "Ошибка: не удалоÑÑŒ получить ÑÐ²ÐµÐ´ÐµÐ½Ð¸Ñ Ð¾Ð± учетной запиÑи %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "Ошибка: не удалоÑÑŒ получить информацию о контейнере %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "Ошибка: ошибка Ð·Ð°ÐºÑ€Ñ‹Ñ‚Ð¸Ñ DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "Ошибка. ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð¿Ñ€Ð¸ отключении клиента" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ОШИБКÐ. ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð¿Ñ€Ð¸ передаче данных на Ñерверы объектов %s" msgid "ERROR Failed to get my own IPs?" msgstr "Ошибка: не удалоÑÑŒ получить ÑобÑтвенные IP-адреÑа?" msgid "ERROR Insufficient Storage" msgstr "Ошибка - недоÑтаточно памÑти" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "Ошибка: контроль объекта %(obj)s не выполнен, объект помещен в карантин: " "%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "Ошибка Pickle, %s помещаетÑÑ Ð² карантин" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "Ошибка: удаленный накопитель не Ñмонтирован %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "Ошибка Ñинхронизации %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "Ошибка Ñинхронизации %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "Ошибка при попытке ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ %s" msgid "ERROR Unhandled exception in request" msgstr "Ошибка. ÐÐµÐ¾Ð±Ñ€Ð°Ð±Ð°Ñ‚Ñ‹Ð²Ð°ÐµÐ¼Ð°Ñ Ð¸ÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² запроÑе" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "Ошибка: ошибка __call__ в %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "Ошибка: обновление учетной запиÑи не выполнено Ð´Ð»Ñ %(ip)s:%(port)s/" "%(device)s (Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÐ´ÐµÑ‚ повторена позднее)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "Ошибка: обновление учетной запиÑи не выполнено Ð´Ð»Ñ %(ip)s:%(port)s/" "%(device)s (Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ñ Ð±ÑƒÐ´ÐµÑ‚ повторена позднее): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "Ошибка Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð°Ñинхронной передачи ожидающего файла Ñ Ð½ÐµÐ¿Ñ€ÐµÐ´Ð²Ð¸Ð´ÐµÐ½Ð½Ñ‹Ð¼ " "именем %s" msgid "ERROR auditing" msgstr "ОШИБКРконтролÑ" #, python-format msgid "ERROR auditing: %s" msgstr "Ошибка контролÑ: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "Ошибка. 
Обновление контейнера не выполнена Ñ %(ip)s:%(port)s/%(dev)s " "(Ñохранение аÑинхронного Ð¾Ð±Ð½Ð¾Ð²Ð»ÐµÐ½Ð¸Ñ Ð±ÑƒÐ´ÐµÑ‚ выполнено позднее)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð¾Ñ‚Ð²ÐµÑ‚Ð° HTTP из %s" #, python-format msgid "ERROR reading db %s" msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð±Ð°Ð·Ñ‹ данных %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "Ошибка: команда rsync не выполнена Ñ ÐºÐ¾Ð´Ð¾Ð¼ %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "Ошибка Ñинхронизации %(file)s Ñ ÑƒÐ·Ð»Ð¾Ð¼ %(node)s" msgid "ERROR trying to replicate" msgstr "Ошибка при попытке репликации" #, python-format msgid "ERROR while trying to clean up %s" msgstr "Ошибка при попытке очиÑтки %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "Ошибка Ñ Ñервером %(type)s %(ip)s:%(port)s/%(device)s, возврат: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "Ошибка при загрузки Ñкрытых объектов из %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "Ошибка Ñ ÑƒÐ´Ð°Ð»ÐµÐ½Ð½Ñ‹Ð¼ Ñервером %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "Ошибка: не удалоÑÑŒ получить пути к разделам накопителей: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "Ошибка: не удалоÑÑŒ получить доÑтуп к %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "Ошибка: не удалоÑÑŒ запуÑтить процеÑÑ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ: %s" msgid "Error hashing suffix" msgstr "Ошибка Ñ…ÑÑˆÐ¸Ñ€Ð¾Ð²Ð°Ð½Ð¸Ñ ÑуффикÑа" msgid "Error listing devices" msgstr "Ошибка при выводе ÑпиÑка уÑтройÑтв" #, python-format msgid "Error on render profiling results: %s" msgstr "Ошибка при выводе результатов профилированиÑ: %s" msgid "Error parsing recon cache file" msgstr "Ошибка анализа файла кÑша recon" msgid "Error reading recon cache file" msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ñ„Ð°Ð¹Ð»Ð° кÑша recon" msgid "Error reading ringfile" msgstr "Ошибка при чтении ringfile" msgid "Error reading swift.conf" msgstr "Ошибка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ swift.conf" msgid "Error retrieving recon data" msgstr "Ошибка при получении данных recon" msgid "Error syncing handoff partition" msgstr "Ошибка при Ñинхронизации раздела передачи управлениÑ" msgid "Error syncing partition" msgstr "Ошибка Ñинхронизации раздела" #, python-format msgid "Error syncing with node: %s" msgstr "Ошибка Ñинхронизации Ñ ÑƒÐ·Ð»Ð¾Ð¼ %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Ошибка при попытке перекомпоновки Ñтратегии %(path)s: номер#%(policy)d " "фрагмент#%(frag_index)s" msgid "Error: An error occurred" msgstr "Ошибка: произошла ошибка" msgid "Error: missing config path argument" msgstr "Ошибка: отÑутÑтвует аргумент пути конфигурации" #, python-format msgid "Error: unable to locate %s" msgstr "Ошибка: не удалоÑÑŒ найти %s" msgid "Exception dumping recon cache" msgstr "ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð¿Ñ€Ð¸ Ñоздании кÑша recon" msgid "Exception in top-level account reaper loop" msgstr "" "ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² цикле чиÑтильщика учетных запиÑей верхнего уровнÑ" msgid "Exception in top-level replication loop" msgstr "ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² цикле репликации верхнего уровнÑ" msgid "Exception in top-levelreconstruction loop" msgstr "ИÑключение в цикле реконÑтрукции верхнего уровнÑ" #, 
python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² учетной запиÑи %s" #, python-format msgid "Exception with containers for account %s" msgstr "ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² контейнерах Ð´Ð»Ñ ÑƒÑ‡ÐµÑ‚Ð½Ð¾Ð¹ запиÑи %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "ИÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑÐ¸Ñ‚ÑƒÐ°Ñ†Ð¸Ñ Ð² объектах Ð´Ð»Ñ ÐºÐ¾Ð½Ñ‚ÐµÐ¹Ð½ÐµÑ€Ð° %(container)s Ð´Ð»Ñ ÑƒÑ‡ÐµÑ‚Ð½Ð¾Ð¹ " "запиÑи %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Ожидаемое значение: 100-continue в %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Ð¡Ð»ÐµÐ´ÑƒÑŽÑ‰Ð°Ñ Ñ†ÐµÐ¿Ð¾Ñ‡ÐºÐ° CNAME Ð´Ð»Ñ %(given_domain)s в %(found_domain)s" msgid "Found configs:" msgstr "Обнаружены конфигурации:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Ð’ режиме передачи ÑƒÐ¿Ñ€Ð°Ð²Ð»ÐµÐ½Ð¸Ñ Ð½Ðµ вÑе операции завершены. Принудительное " "завершение текущего прохода репликации." msgid "Host unreachable" msgstr "ХоÑÑ‚ недоÑтупен" #, python-format msgid "Incomplete pass on account %s" msgstr "Ðе завершен проход Ð´Ð»Ñ ÑƒÑ‡ÐµÑ‚Ð½Ð¾Ð¹ запиÑи %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "ÐедопуÑтимый формат X-Container-Sync-To %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "ÐедопуÑтимый хоÑÑ‚ %r в X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ð¾Ð¶Ð¸Ð´Ð°ÑŽÑ‰Ð°Ñ Ð·Ð°Ð¿Ð¸ÑÑŒ %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "ÐедопуÑтимый ответ %(resp)s от %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "ÐедопуÑтимый ответ %(resp)s от %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "ÐедопуÑÑ‚Ð¸Ð¼Ð°Ñ Ñхема %r в X-Container-Sync-To, допуÑтимые значениÑ: \"//\", " "\"http\" или \"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Принудительное завершение долго выполнÑющегоÑÑ rsync: %s" msgid "Lockup detected.. killing live coros." msgstr "Обнаружена блокировка.. принудительное завершение работающих модулей." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Преобразовано %(given_domain)s в %(found_domain)s" #, python-format msgid "No %s running" msgstr "%s не выполнÑетÑÑ" #, python-format msgid "No permission to signal PID %d" msgstr "Ðет прав доÑтупа Ð´Ð»Ñ Ð¾Ñ‚Ð¿Ñ€Ð°Ð²ÐºÐ¸ Ñигнала в PID %d" #, python-format msgid "No policy with index %s" msgstr "Ðе найдено Ñтратегии Ñ Ð¸Ð½Ð´ÐµÐºÑом %s" #, python-format msgid "No realm key for %r" msgstr "ОтÑутÑтвует ключ облаÑти Ð´Ð»Ñ %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "ÐžÐ³Ñ€Ð°Ð½Ð¸Ñ‡ÐµÐ½Ð½Ð°Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ° узла %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "ÐедоÑтаточное чиÑло подтверждений Ñ Ñерверов объектов (получено %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Ðе найдено: %(sync_from)r => %(sync_to)r - объект " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." 
msgstr "Ðичего не реконÑтруировано за %s Ñ." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Ðичего не реплицировано за %s Ñ." msgid "Object" msgstr "Объект" msgid "Object PUT" msgstr "Ð¤ÑƒÐ½ÐºÑ†Ð¸Ñ PUT объекта" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Ð¤ÑƒÐ½ÐºÑ†Ð¸Ñ PUT объекта возвратила 202 Ð´Ð»Ñ 409: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Ð¤ÑƒÐ½ÐºÑ†Ð¸Ñ PUT объекта возвратила 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Контроль объекта (%(type)s) в режиме \"%(mode)s\" завершен: %(elapsed).02fs. " "Ð’Ñего в карантине: %(quars)d, вÑего ошибок: %(errors)d, вÑего файлов/Ñ: " "%(frate).2f, вÑего байт/Ñ: %(brate).2f, Ð²Ñ€ÐµÐ¼Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ: %(audit).2f, " "ÑкороÑть: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Проверка объекта (%(type)s). ПоÑле %(start_time)s: локально: уÑпешно - " "%(passes)d, в карантине - %(quars)d, файлов Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°Ð¼Ð¸ %(errors)d в Ñекунду: " "%(frate).2f , байт/Ñ: %(brate).2f, общее времÑ: %(total).2f, Ð²Ñ€ÐµÐ¼Ñ ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ: " "%(audit).2f, ÑкороÑть: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "СоÑтоÑние ÐºÐ¾Ð½Ñ‚Ñ€Ð¾Ð»Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð°: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "РеконÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° выполнена (однократно). (%.02f мин.)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "РеконÑÑ‚Ñ€ÑƒÐºÑ†Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° выполнена. (%.02f мин.)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Ð ÐµÐ¿Ð»Ð¸ÐºÐ°Ñ†Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° выполнена (однократно). (%.02f мин.)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Ð ÐµÐ¿Ð»Ð¸ÐºÐ°Ñ†Ð¸Ñ Ð¾Ð±ÑŠÐµÐºÑ‚Ð° выполнена. 
(%.02f мин.)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Серверы объектов вернули неÑоответÑтвующие etag: %s" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Ð¡Ð¿Ð»Ð¾ÑˆÐ½Ð°Ñ Ð¿Ñ€Ð¾Ð²ÐµÑ€ÐºÐ° обновлений объекта завершена: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Ð’ X-Container-Sync-To не разрешены параметры, запроÑÑ‹ и фрагменты" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Ð’Ñ€ÐµÐ¼Ñ Ñ€Ð°Ð·Ð´ÐµÐ»Ð°: макÑимум: %(max).4fs, минимум: %(min).4fs, Ñреднее: %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "ТребуетÑÑ Ð¿ÑƒÑ‚ÑŒ в X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Ðеполадка при очиÑтке %s" #, python-format msgid "Profiling Error: %s" msgstr "Ошибка профилированиÑ: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s помещен в карантин в %(quar_path)s, так как не ÑвлÑетÑÑ " "каталогом" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s помещен в карантин в %(quar_path)s, так как не ÑвлÑетÑÑ " "каталогом" #, python-format msgid "Quarantining DB %s" msgstr "БД %s помещена в карантин" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Протокол тайм-аута при ограничении ÑкороÑти %(sleep)s Ð´Ð»Ñ %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Удалено баз данных: %(remove)d" #, python-format msgid "Removing %s objects" msgstr "Удаление объектов %s" #, python-format msgid "Removing partition: %s" msgstr "Удаление раздела: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Удаление файла pid %(pid_file)s Ñ Ð¾ÑˆÐ¸Ð±Ð¾Ñ‡Ð½Ñ‹Ð¼ pid %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Удаление pid файла %s Ñ Ð½ÐµÐ²ÐµÑ€Ð½Ñ‹Ð¼ pid-ом" #, python-format msgid "Removing stale pid file %s" msgstr "Удаление уÑтаревшего файла pid %s" msgid "Replication run OVER" msgstr "Ð ÐµÐ¿Ð»Ð¸ÐºÐ°Ñ†Ð¸Ñ Ð·Ð°Ð¿ÑƒÑ‰ÐµÐ½Ð° поверх" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Возвращено 497 из-за черного ÑпиÑка: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Возвращено 498 Ð´Ð»Ñ %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(макÑÐ¸Ð¼Ð°Ð»ÑŒÐ½Ð°Ñ Ð·Ð°Ð´ÐµÑ€Ð¶ÐºÐ°): %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Обнаружено изменение кольца. Принудительное завершение текущего прохода " "реконÑтрукции." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Обнаружено кольцевое изменение. Принудительное завершение текущего прохода " "репликации." #, python-format msgid "Running %s once" msgstr "Однократное выполнение %s" msgid "Running object reconstructor in script mode." msgstr "ЗапуÑк утилиты реконÑтрукции объектов в режиме Ñкрипта." msgid "Running object replicator in script mode." msgstr "ЗапуÑк утилиты репликации объектов в режиме Ñценариев." 
#, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "За %(time)s операций Ñинхронизировано %(sync)s [удалено: %(delete)s, " "добавлено: %(put)s], пропущено: %(skip)s, ошибки: %(fail)s" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Выполнено проверок учетной запиÑи: %(time)s, из них уÑпешно: %(passed)s, Ñ " "ошибками: %(failed)s " #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Выполнено проверок контейнера: %(time)s, из них уÑпешно: %(pass)s, Ñ " "ошибками: %(fail)s " #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s будет пропущен, так как он не Ñмонтирован" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s будет пропущен, так как он не Ñмонтирован" #, python-format msgid "Starting %s" msgstr "ЗапуÑк %s" msgid "Starting object reconstruction pass." msgstr "ЗапуÑк прохода реконÑтрукции объектов." msgid "Starting object reconstructor in daemon mode." msgstr "ЗапуÑк утилиты реконÑтрукции объектов в режиме демона." msgid "Starting object replication pass." msgstr "ЗапуÑк прохода репликации объектов." msgid "Starting object replicator in daemon mode." msgstr "ЗапуÑк утилиты репликации объектов в режиме демона." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "УÑпешное выполнение rsync Ð´Ð»Ñ %(src)s на %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Запрещен доÑтуп к Ñтому типу файла!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Общее чиÑло %(key)s Ð´Ð»Ñ ÐºÐ¾Ð½Ñ‚ÐµÐ¹Ð½ÐµÑ€Ð° (%(total)s) не ÑоответÑтвует Ñумме " "%(key)s в ÑтратегиÑÑ… (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "ИÑключение по таймауту %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Попытка Ð²Ñ‹Ð¿Ð¾Ð»Ð½ÐµÐ½Ð¸Ñ Ð¼ÐµÑ‚Ð¾Ð´Ð° %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Попытка GET-запроÑа %(full_path)s" msgid "Trying to read during GET" msgstr "Попытка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð²Ð¾ Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¸ GET" msgid "Trying to read during GET (retrying)" msgstr "Попытка Ñ‡Ñ‚ÐµÐ½Ð¸Ñ Ð²Ð¾ Ð²Ñ€ÐµÐ¼Ñ Ð¾Ð¿ÐµÑ€Ð°Ñ†Ð¸Ð¸ GET (выполнÑетÑÑ Ð¿Ð¾Ð²Ñ‚Ð¾Ñ€)" msgid "Trying to send to client" msgstr "Попытка отправки клиенту" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Попытка Ñинхронизации ÑуффикÑов Ñ %s" #, python-format msgid "Trying to write to %s" msgstr "Попытка запиÑи в %s" msgid "UNCAUGHT EXCEPTION" msgstr "ÐÐµÐ¾Ð±Ñ€Ð°Ð±Ð°Ñ‚Ñ‹Ð²Ð°ÐµÐ¼Ð°Ñ Ð¸ÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑитуациÑ" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Ðе удалоÑÑŒ найти %s в libc. ОÑтавлено как no-op." #, python-format msgid "Unable to locate config for %s" msgstr "Ðе удалоÑÑŒ найти конфигурационный файл Ð´Ð»Ñ %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Ðе удалоÑÑŒ найти fallocate, posix_fallocate в libc. ОÑтавлено как no-op." 
#, python-format msgid "Unable to read config from %s" msgstr "Ðе удалоÑÑŒ прочитать конфигурацию из %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Ð¡Ð¸Ð½Ñ…Ñ€Ð¾Ð½Ð¸Ð·Ð°Ñ†Ð¸Ñ %(sync_from)r => %(sync_to)r без прав доÑтупа" msgid "Unhandled exception" msgstr "ÐÐµÐ¾Ð±Ñ€Ð°Ð±Ð¾Ñ‚Ð°Ð½Ð½Ð°Ñ Ð¸ÑÐºÐ»ÑŽÑ‡Ð¸Ñ‚ÐµÐ»ÑŒÐ½Ð°Ñ ÑитуациÑ" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "ÐеизвеÑтное иÑключение в GET-запроÑе: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Отчет об обновлении Ð´Ð»Ñ %(container)s %(dbfile)s не выполнен" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Отчет об обновлении отправлен Ð´Ð»Ñ %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "Предупреждение: SSL должен быть включен только в целÑÑ… теÑтированиÑ. " "ИÑпользуйте внешнее завершение SSL Ð´Ð»Ñ Ñ€Ð°Ð·Ð²ÐµÑ€Ñ‚Ñ‹Ð²Ð°Ð½Ð¸Ñ Ð² рабочем режиме." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "Предупреждение: не удалоÑÑŒ изменить предельное значение Ð´Ð»Ñ Ð´ÐµÑкриптора " "файла. Запущен без прав доÑтупа root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "Предупреждение: не удалоÑÑŒ изменить предельное значение Ð´Ð»Ñ Ñ‡Ð¸Ñла процеÑÑов. " "Запущен без прав доÑтупа root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "Предупреждение: не удалоÑÑŒ изменить предельное значение Ð´Ð»Ñ Ð¿Ð°Ð¼Ñти. Запущен " "без прав доÑтупа root?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Предупреждение: не удаетÑÑ Ð¾Ð³Ñ€Ð°Ð½Ð¸Ñ‡Ð¸Ñ‚ÑŒ ÑкороÑть без клиента Ñ ÐºÑшированием " "памÑти" #, python-format msgid "method %s is not allowed." msgstr "Метод %s не разрешен." msgid "no log file found" msgstr "Ðе найден файл протокола" msgid "odfpy not installed." msgstr "Библиотека odfpy не уÑтановлена." #, python-format msgid "plotting results failed due to %s" msgstr "Ошибка в результатах plotting из-за %s" msgid "python-matplotlib not installed." msgstr "Библиотека python-matplotlib не уÑтановлена." swift-2.17.0/swift/locale/ko_KR/0000775000175100017510000000000013236061751016365 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/ko_KR/LC_MESSAGES/0000775000175100017510000000000013236061751020152 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/ko_KR/LC_MESSAGES/swift.po0000666000175100017510000007212713236061620021654 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Mario Cho , 2014 # Ying Chun Guo , 2015 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Korean (South Korea)\n" msgid "" "\n" "user quit" msgstr "" "\n" "ì‚¬ìš©ìž ì¢…ë£Œ" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - 병렬, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)dê°œ 접미부를 검사함 - %(hashed).2f%%ê°œ 해시ë¨, %(synced).2f%%ê°œ ë™" "기화ë¨" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d(%(percentage).2f%%)ê°œ íŒŒí‹°ì…˜ì´ %(time).2fì´ˆ" "(%(rate).2f/ì´ˆ, %(remaining)s 남ìŒ) ì•ˆì— ë³µì œë¨" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)sê°œ 성공, %(failure)sê°œ 실패" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)sì—서 %(statuses)sì— ëŒ€í•´ 503ì„ ë¦¬í„´í•¨" #, python-format msgid "%s already started..." msgstr "%sì´(ê°€) ì´ë¯¸ 시작ë˜ì—ˆìŒ..." #, python-format msgid "%s does not exist" msgstr "%sì´(ê°€) 존재하지 않ìŒ" #, python-format msgid "%s is not mounted" msgstr "%sì´(ê°€) 마운트ë˜ì§€ 않ìŒ" #, python-format msgid "%s responded as unmounted" msgstr "%sì´(ê°€) 마운트 í•´ì œëœ ê²ƒìœ¼ë¡œ ì‘답" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 피어ì—서 ì—°ê²° 재설정" #, python-format msgid ", %s containers deleted" msgstr ", %s 지워진 컨테ì´ë„ˆ" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s ì—¬ë¶„ì˜ ì»¨í…Œì´ë„ˆ" #, python-format msgid ", %s containers remaining" msgstr ", %s ë‚¨ì€ ì»¨í…Œì´ë„ˆ" #, python-format msgid ", %s objects deleted" msgstr ", %s 지워진 오브ì íЏ" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s oì—¬ë¶„ì˜ ì˜¤ë¸Œì íЏ" #, python-format msgid ", %s objects remaining" msgstr ", %s ë‚¨ì€ ì˜¤ë¸Œì íЏ" #, python-format msgid ", elapsed: %.02fs" msgstr ", 경과ë¨: %.02fs" msgid ", return codes: " msgstr ", 반환 코드들:" msgid "Account" msgstr "계정" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "계정 ê°ì‚¬ \"한 번\"모드가 완료: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "ì •ìƒìœ¼ë¡œ íŒì •난 계정: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "%(time).5fì´ˆ(%(rate).5f/s)ì— %(count)dê°œì˜ ë°ì´í„°ë² ì´ìŠ¤ë¥¼ 복제하려고 함" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "ìž˜ëª»ëœ rsync 리턴 코드: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "계정 ê°ì‚¬ \"한 번\"모드로 시작" msgid "Begin account audit pass." msgstr "계정 검사 시작." msgid "Begin container audit \"once\" mode" msgstr "컨테ì´ë„ˆ ê°ì‚¬ \"ì¼ íšŒ\" 모드 시작" msgid "Begin container audit pass." msgstr "컨테ì´ë„ˆ ê°ì‚¬ ì „ë‹¬ì´ ì‹œìž‘ë©ë‹ˆë‹¤." 
msgid "Begin container sync \"once\" mode" msgstr "컨테ì´ë„ˆ ë™ê¸°í™” \"ì¼ íšŒ\" 모드 시작" msgid "Begin container update single threaded sweep" msgstr "컨테ì´ë„ˆ ì—…ë°ì´íЏ ë‹¨ì¼ ìŠ¤ë ˆë“œ 스윕 시작" msgid "Begin container update sweep" msgstr "컨테ì´ë„ˆ ì—…ë°ì´íЏ 스윕 시작" msgid "Begin object update single threaded sweep" msgstr "오브ì íЏ ì—…ë°ì´íЏ ë‹¨ì¼ ìŠ¤ë ˆë“œ 스윕 시작" msgid "Begin object update sweep" msgstr "오브ì íЏ ì—…ë°ì´íЏ 스윕 시작" #, python-format msgid "Beginning pass on account %s" msgstr "계정 패스 시작 %s" msgid "Beginning replication run" msgstr "복제 실행 시작" msgid "Broker error trying to rollback locked connection" msgstr "잠긴 ì—°ê²°ì„ ë¡¤ë°±í•˜ëŠ” 중 브로커 오류 ë°œìƒ" #, python-format msgid "Can not access the file %s." msgstr "íŒŒì¼ %sì— ì•¡ì„¸ìŠ¤í•  수 없습니다." #, python-format msgid "Can not load profile data from %s." msgstr "%sì—서 í”„ë¡œíŒŒì¼ ë°ì´í„°ë¥¼ 로드할 수 없습니다." #, python-format msgid "Client did not read from proxy within %ss" msgstr "í´ë¼ì´ì–¸íЏì—서 %ss ë‚´ì— í”„ë¡ì‹œë¥¼ ì½ì„ 수 없었ìŒ" msgid "Client disconnected on read" msgstr "ì½ê¸° 시 í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" msgid "Client disconnected without sending enough data" msgstr "ë°ì´í„°ë¥¼ ëª¨ë‘ ì „ì†¡í•˜ê¸° ì „ì— í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" msgid "Client disconnected without sending last chunk" msgstr "마지막 ì²­í¬ë¥¼ 전송하기 ì „ì— í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "í´ë¼ì´ì–¸íЏ 경로 %(client)sì´(ê°€) 오브ì íЏ 메타ë°ì´í„° %(meta)sì— ì €ìž¥ëœ ê²½ë¡œ" "와 ì¼ì¹˜í•˜ì§€ 않ìŒ" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "구성 옵션 internal_client_conf_pathê°€ ì •ì˜ë˜ì§€ 않았습니다. 기본 구성 사용 시 " "internal-client.conf-sampleì—서 ì˜µì…˜ì„ ì°¸ì¡°í•˜ì‹­ì‹œì˜¤." 
msgid "Connection refused" msgstr "ì—°ê²°ì´ ê±°ë¶€ë¨" msgid "Connection timeout" msgstr "ì—°ê²° 제한시간 초과" msgid "Container" msgstr "컨테ì´ë„ˆ" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "컨테ì´ë„ˆ ê°ì‚¬ \"ì¼ íšŒ\" 모드 완료: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "컨테ì´ë„ˆ ê°ì‚¬ 전달 완료: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "컨테ì´ë„ˆ ë™ê¸°í™” \"ì¼ íšŒ\" 모드 완료: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "컨테ì´ë„ˆ ì—…ë°ì´íЏ ë‹¨ì¼ ìŠ¤ë ˆë“œ 스윕 완료: %(elapsed).02fs, %(success)sê°œ 성" "ê³µ, %(fail)sê°œ 실패, %(no_change)sê°œ 변경 ì—†ìŒ" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "컨테ì´ë„ˆ ì—…ë°ì´íЏ 스윕 완료: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)sì˜ ì»¨í…Œì´ë„ˆ ì—…ë°ì´íЏ 스윕 완료: %(elapsed).02fs, %(success)sê°œ 성공, " "%(fail)sê°œ 실패, %(no_change)sê°œ 변경 ì—†ìŒ" #, python-format msgid "Data download error: %s" msgstr "ë°ì´í„° 다운로드 오류: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "장치 패스 완료 : %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "오류 %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "오류 %(status)d %(body)s, %(type)s 서버 발신" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "오류 %(status)d %(body)s, 오브ì íЏ 서버 발신, 회신: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "오류 %(status)d. 예ìƒ: 100-continue, 오브ì íЏ 서버 발신" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 ì—…ë°ì´íЏ 실패(ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„): " "ì‘답 %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "오류. %(host)sì˜ ìž˜ëª»ëœ ì‘답 %(status)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR í´ë¼ì´ì–¸íЏ ì½ê¸° 시간 초과 (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "오류. 컨테ì´ë„ˆ ì—…ë°ì´íЏ 실패(ì´í›„ 비ë™ê¸° ì—…ë°ì´íŠ¸ìš©ìœ¼ë¡œ 저장): %(status)dì‘" "답. 출처: %(ip)s:%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "오류는 %sì˜ ê³„ì • 정보를 ì–»ì„ ìˆ˜ 없습니다" #, python-format msgid "ERROR Could not get container info %s" msgstr "오류. 컨테ì´ë„ˆ ì •ë³´ %sì„(를) 가져올 수 ì—†ìŒ" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "오류. ë””ìŠ¤í¬ íŒŒì¼ %(data_file)s 닫기 실패: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "오류. 예외로 ì¸í•´ í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "ERROR 오브ì íЏ 서버 %sì— ë°ì´í„°ë¥¼ 전송하는 ì¤‘ì— ì˜ˆì™¸ ë°œìƒ" msgid "ERROR Failed to get my own IPs?" msgstr "오류. ìžì²´ IP를 가져오는 중 오류 ë°œìƒ ì—¬ë¶€" msgid "ERROR Insufficient Storage" msgstr "오류. 스토리지 ê³µê°„ì´ ì¶©ë¶„í•˜ì§€ 않ìŒ" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "오류. 
오브ì íЏ %(obj)sì˜ ê°ì‚¬ê°€ 실패하여 격리ë¨: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "오류. 문제가 ë°œìƒí•¨, %s 격리 중" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "오류. ì›ê²© 드ë¼ì´ë¸Œê°€ 마운트ë˜ì§€ 않ìŒ. %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "%(db_file)s %(row)s ë™ê¸°í™” 오류" #, python-format msgid "ERROR Syncing %s" msgstr "%s ë™ê¸°í™” 오류" #, python-format msgid "ERROR Trying to audit %s" msgstr "%s ê°ì‚¬ 중 오류 ë°œìƒ" msgid "ERROR Unhandled exception in request" msgstr "오류. ìš”ì²­ì— ì²˜ë¦¬ë˜ì§€ ì•Šì€ ì˜ˆì™¸ê°€ 있ìŒ" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "오류. %(method)s %(path)sì— __call__ 오류 ë°œìƒ" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 ì—…ë°ì´íЏ 실패(ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 ì—…ë°ì´íЏ 실패(ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "오류. 비ë™ê¸° 보류 파ì¼ì— 예ìƒì¹˜ 못한 ì´ë¦„ %sì„(를) 사용함" msgid "ERROR auditing" msgstr "검사 오류" #, python-format msgid "ERROR auditing: %s" msgstr "ê°ì‚¬ 오류: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "오류. %(ip)s:%(port)s/%(dev)s(으)로 컨테ì´ë„ˆ ì—…ë°ì´íЏ 실패(ì´í›„ 비ë™ê¸° ì—…ë°ì´" "트용으로 저장)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "%sì—서 HTTP ì‘ë‹µì„ ì½ëŠ” 중 오류 ë°œìƒ" #, python-format msgid "ERROR reading db %s" msgstr "ë°ì´í„°ë² ì´ìФ %sì„(를) ì½ëŠ” 중 오류 ë°œìƒ" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "오류. 
%(code)sì˜ rsyncê°€ 실패함: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "%(file)sì„(를) 노드 %(node)sê³¼(와) ë™ê¸°í™”하는 중 오류 ë°œìƒ" msgid "ERROR trying to replicate" msgstr "복제 중 오류 ë°œìƒ" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s 정리 중 오류 ë°œìƒ" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s 서버 %(ip)s:%(port)s/%(device)s 오류, 회신: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "%sì—서 억제를 로드하는 중 오류 ë°œìƒ: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ì›ê²© 서버 %(ip)s:%(port)s/%(device)sì— ì˜¤ë¥˜ ë°œìƒ" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "오류: 드ë¼ì´ë¸Œ íŒŒí‹°ì…˜ì— ëŒ€í•œ 경로를 가져오지 못함: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "오류: %(path)sì— ì•¡ì„¸ìŠ¤í•  수 ì—†ìŒ: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "오류: ê°ì‚¬ë¥¼ 실행할 수 ì—†ìŒ: %s" msgid "Error hashing suffix" msgstr "접미부를 해싱하는 중 오류 ë°œìƒ" msgid "Error listing devices" msgstr "디바ì´ìФ 나열 중 오류 ë°œìƒ" #, python-format msgid "Error on render profiling results: %s" msgstr "프로파ì¼ë§ 결과를 ë Œë”ë§í•˜ëŠ” 중 오류 ë°œìƒ: %s" msgid "Error parsing recon cache file" msgstr "ì¡°ì • ìºì‹œ 파ì¼ì„ 구문 ë¶„ì„하는 중 오류 ë°œìƒ" msgid "Error reading recon cache file" msgstr "ì¡°ì • ìºì‹œ 파ì¼ì„ ì½ëŠ” 중 오류 ë°œìƒ" msgid "Error reading ringfile" msgstr "ë§ íŒŒì¼ì„ ì½ëŠ” 중 오류 ë°œìƒ" msgid "Error reading swift.conf" msgstr "swift.conf를 ì½ëŠ” 중 오류 ë°œìƒ" msgid "Error retrieving recon data" msgstr "ì¡°ì • ë°ì´í„°ë¥¼ 검색하는 ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ" msgid "Error syncing handoff partition" msgstr "핸드오프 파티션 ë™ê¸°í™” 중 오류 ë°œìƒ" msgid "Error syncing partition" msgstr "파티션 ë™ê¸° 오류 " #, python-format msgid "Error syncing with node: %s" msgstr "노드 ë™ê¸° 오류: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "%(path)s policy#%(policy)d frag#%(frag_index)sì„(를) 다시 빌드하려는 중 오류 " "ë°œìƒ" msgid "Error: An error occurred" msgstr "오류: 오류 ë°œìƒ" msgid "Error: missing config path argument" msgstr "오류: 구성 경로 ì¸ìˆ˜ 누ë½" #, python-format msgid "Error: unable to locate %s" msgstr "오류: %sì„(를) ì°¾ì„ ìˆ˜ ì—†ìŒ" msgid "Exception dumping recon cache" msgstr "ì¡°ì • ìºì‹œ ë¤í”„ 중 예외 ë°œìƒ" msgid "Exception in top-level account reaper loop" msgstr "최ìƒìœ„ 계정 ë£¨í”„ì˜ ì˜ˆì™¸ " msgid "Exception in top-level replication loop" msgstr "최ìƒìœ„ 레벨 복제 루프ì—서 예외 ë°œìƒ" msgid "Exception in top-levelreconstruction loop" msgstr "최ìƒìœ„ 레벨 재구성 루프ì—서 예외 ë°œìƒ" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 예외" #, python-format msgid "Exception with account %s" msgstr "예외 계정 %s" #, python-format msgid "Exception with containers for account %s" msgstr "계정 콘테ì´ë„ˆì˜ 예외 %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "계정 %(account)sì˜ ì»¨í…Œì´ë„ˆ %(container)sì— ëŒ€í•œ 오브ì íŠ¸ì— ì˜ˆì™¸ ë°œìƒ" #, python-format msgid "Expect: 100-continue on %s" msgstr "%sì—서 100-continue 예ìƒ" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)sì—서 %(found_domain)s(으)ë¡œì˜ ë‹¤ìŒ CNAME ì²´ì¸" msgid "Found configs:" msgstr "구성 발견:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "핸드오프 첫 ëª¨ë“œì— ì—¬ì „ížˆ 핸드오프가 남아 있습니다. 
현재 복제 ì „ë‹¬ì„ ì¤‘ë‹¨í•©ë‹ˆ" "다." msgid "Host unreachable" msgstr "호스트 ë„달 불가능" #, python-format msgid "Incomplete pass on account %s" msgstr "계정 패스 미완료 %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "올바르지 ì•Šì€ X-Container-Sync-To í˜•ì‹ %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-Toì— ì˜¬ë°”ë¥´ì§€ ì•Šì€ í˜¸ìŠ¤íŠ¸ %rì´(ê°€) 있ìŒ" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "올바르지 ì•Šì€ ë³´ë¥˜ 항목 %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "%(full_path)sì—서 올바르지 ì•Šì€ ì‘답 %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)sì˜ ì˜¬ë°”ë¥´ì§€ ì•Šì€ ì‘답 %(resp)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To 올바르지 ì•Šì€ ìŠ¤í‚¤ë§ˆ %rì´(ê°€) 있습니다. \"//\", \"http\" " "ë˜ëŠ” \"https\"여야 합니다." #, python-format msgid "Killing long-running rsync: %s" msgstr "장기 실행 ì¤‘ì¸ rsync ê°•ì œ 종료: %s" msgid "Lockup detected.. killing live coros." msgstr "잠금 발견.. 활성 coros를 ê°•ì œ 종료합니다." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)sì„(를) %(found_domain)s(으)로 맵핑함" #, python-format msgid "No %s running" msgstr "%sì´(ê°€) 실행ë˜ì§€ 않ìŒ" #, python-format msgid "No permission to signal PID %d" msgstr "PID %dì„(를) 표시할 ê¶Œí•œì´ ì—†ìŒ" #, python-format msgid "No policy with index %s" msgstr "ì¸ë±ìŠ¤ê°€ %sì¸ ì •ì±…ì´ ì—†ìŒ" #, python-format msgid "No realm key for %r" msgstr "%rì— ëŒ€í•œ ì˜ì—­ 키가 ì—†ìŒ" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "노드 오류로 %(ip)s:%(port)s(%(device)s)ì´(ê°€) 제한ë¨" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "승ì¸ëœ 오브ì íЏ 서버가 부족함(%dì„(를) ë°›ìŒ)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "ì°¾ì„ ìˆ˜ ì—†ìŒ %(sync_from)r => %(sync_to)r - 오브ì " "트%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%sì´ˆ ë™ì•ˆ ìž¬êµ¬ì„±ëœ ê²ƒì´ ì—†ìŠµë‹ˆë‹¤." #, python-format msgid "Nothing replicated for %s seconds." msgstr "%sì´ˆ ë™ì•ˆ ë³µì œëœ ê²ƒì´ ì—†ìŠµë‹ˆë‹¤." msgid "Object" msgstr "오브ì íЏ" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Object PUTì—서 409ì— ëŒ€í•´ 202를 리턴함: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Object PUTì—서 412를 리턴함, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "오브ì íЏ ê°ì‚¬(%(type)s) \"%(mode)s\" 모드 완료: %(elapsed).02fs. ì´ ê²©ë¦¬ í•­" "목: %(quars)d, ì´ ì˜¤ë¥˜ 수: %(errors)d, ì´ íŒŒì¼/ì´ˆ: %(frate).2f, ì´ ë°”ì´íЏ/" "ì´ˆ: %(brate).2f, ê°ì‚¬ 시간: %(audit).2f, ì†ë„: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "오브ì íЏ ê°ì‚¬(%(type)s). 
%(start_time)s ì´í›„: 로컬: %(passes)dê°œ 통과, " "%(quars)dê°œ 격리, %(errors)dê°œ 오류, 파ì¼/ì´ˆ: %(frate).2f, ë°”ì´íЏ/ì´ˆ: " "%(brate).2f, ì´ ì‹œê°„: %(total).2f, ê°ì‚¬ 시간: %(audit).2f, ì†ë„: " "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "오브ì íЏ ê°ì‚¬ 통계: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "오브ì íЏ 재구성 완료(ì¼ íšŒ). (%.02fë¶„)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "오브ì íЏ 재구성 완료. (%.02fë¶„)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "오브ì íЏ 복제 완료(ì¼ íšŒ). (%.02fë¶„)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "오브ì íЏ 복제 완료. (%.02fë¶„)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "오브ì íЏ 서버ì—서 %sê°œì˜ ë¶ˆì¼ì¹˜ etag를 리턴함" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "오브ì íЏ ì—…ë°ì´íЏ 스윕 완료: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-Toì— ë§¤ê°œë³€ìˆ˜, 조회, ë‹¨íŽ¸ì´ í—ˆìš©ë˜ì§€ 않ìŒ" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "파티션 시간: 최대 %(max).4fì´ˆ, 최소 %(min).4fì´ˆ, 중간 %(med).4fì´ˆ" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-Toì— ê²½ë¡œê°€ 필요함" #, python-format msgid "Problem cleaning up %s" msgstr "%s 정리 문제 ë°œìƒ" #, python-format msgid "Profiling Error: %s" msgstr "프로파ì¼ë§ 오류: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "디렉토리가 아니어서 %(hsh_path)sì„(를) %(quar_path)sì— ê²©ë¦¬í•¨" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "디렉토리가 아니어서 %(object_path)sì„(를) %(quar_path)sì— ê²©ë¦¬í•¨" #, python-format msgid "Quarantining DB %s" msgstr "ë°ì´í„°ë² ì´ìФ %s 격리" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "%(account)s/%(container)s/%(object)sì— ëŒ€í•œ Ratelimit 휴면 로그: %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d ë°ì´í„°ë² ì´ìŠ¤ë¥¼ 제거함" #, python-format msgid "Removing %s objects" msgstr "%s 오브ì íЏ 제거 중" #, python-format msgid "Removing partition: %s" msgstr "파티션 제거: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "ìž˜ëª»ëœ pid %(pid)dì˜ pid íŒŒì¼ %(pid_file)s 제거" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "pidê°€ 올바르지 ì•Šì€ pid íŒŒì¼ %s 제거" #, python-format msgid "Removing stale pid file %s" msgstr "ì‹œê°„ì´ ê²½ê³¼ëœ pid íŒŒì¼ %sì„(를) 제거하는 중 " msgid "Replication run OVER" msgstr "복제 실행 대ìƒ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "블랙리스트 지정으로 ì¸í•´ 497ì´ ë¦¬í„´ë¨: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s(으)로 %(meth)sì— ëŒ€í•œ 498ì„ ë¦¬í„´í•©ë‹ˆë‹¤. 전송률 제한" "(최대 휴면) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "ë§ ë³€ê²½ì´ ë°œê²¬ë˜ì—ˆìŠµë‹ˆë‹¤. 현재 재구성 ì „ë‹¬ì„ ì¤‘ë‹¨í•©ë‹ˆë‹¤." msgid "Ring change detected. Aborting current replication pass." msgstr "ë§ ë³€ê²½ì´ ë°œê²¬ë˜ì—ˆìŠµë‹ˆë‹¤. 현재 복제 ì „ë‹¬ì„ ì¤‘ë‹¨í•©ë‹ˆë‹¤." #, python-format msgid "Running %s once" msgstr "%sì„(를) 한 번 실행" msgid "Running object reconstructor in script mode." msgstr "오브ì íЏ 재구성ìžë¥¼ 스í¬ë¦½íЏ 모드로 실행 중입니다." msgid "Running object replicator in script mode." 
msgstr "오브ì íЏ 복제ìžë¥¼ 스í¬ë¦½íЏ 모드로 실행 중입니다." #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s ì´í›„: %(sync)s ë™ê¸°í™”ë¨ [%(delete)s ì‚­ì œ, %(put)s 배치], %(skip)s ê±´" "너뜀, %(fail)s 실패" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "검사 경과 시간 %(time)s: 계정 검사A: %(passed)s ì •ìƒ ,%(failed)s 실패" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "%(time)s ì´í›„: 컨테ì´ë„ˆ ê°ì‚¬: %(pass)s ê°ì‚¬ 전달, %(fail)s ê°ì‚¬ 실패" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "마운트ë˜ì§€ 않았으므로 %(device)sì„(를) 건너뜀" #, python-format msgid "Skipping %s as it is not mounted" msgstr "마운트ë˜ì§€ 않는 %s를 건너 뛰기" #, python-format msgid "Starting %s" msgstr "%s 시작 중" msgid "Starting object reconstruction pass." msgstr "오브ì íЏ 재구성 ì „ë‹¬ì„ ì‹œìž‘í•©ë‹ˆë‹¤." msgid "Starting object reconstructor in daemon mode." msgstr "오브ì íЏ 재구성ìžë¥¼ 디먼 모드로 시작합니다." msgid "Starting object replication pass." msgstr "오브ì íЏ 복제 ì „ë‹¬ì„ ì‹œìž‘í•©ë‹ˆë‹¤." msgid "Starting object replicator in daemon mode." msgstr "오브ì íЏ 복제ìžë¥¼ 디먼 모드로 시작합니다." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s(%(time).03f)ì—서 %(src)sì˜ rsync 성공" msgid "The file type are forbidden to access!" msgstr "ì´ íŒŒì¼ ìœ í˜•ì— ëŒ€í•œ 액세스가 금지ë˜ì—ˆìŠµë‹ˆë‹¤!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "컨테ì´ë„ˆì˜ ì´ %(key)sê°€ (%(total)s) ê³¼ %(key)sì˜ ì´í•© (%(sum)s)ê°€ ì¼ì¹˜í•˜ì§€ " "않습니다." #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)sì—서 제한시간 초과 예외 ë°œìƒ" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s ì‹œë„ ì¤‘" #, python-format msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s ì‹œë„ ì¤‘" msgid "Trying to read during GET" msgstr "가져오기 중 ì½ê¸°ë¥¼ 시ë„함" msgid "Trying to read during GET (retrying)" msgstr "가져오기(재시ë„) 중 ì½ê¸°ë¥¼ 시ë„함" msgid "Trying to send to client" msgstr "í´ë¼ì´ì–¸íŠ¸ë¡œ 전송 ì‹œë„ ì¤‘" #, python-format msgid "Trying to sync suffixes with %s" msgstr "%sê³¼(와) 접미사를 ë™ê¸°í™”하려고 시ë„" #, python-format msgid "Trying to write to %s" msgstr "%sì— ì“°ê¸° ì‹œë„ ì¤‘" msgid "UNCAUGHT EXCEPTION" msgstr "미발견 예외" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "libcì—서 %sì„(를) ì°¾ì„ ìˆ˜ 없습니다. no-op로 남겨 둡니다." #, python-format msgid "Unable to locate config for %s" msgstr "%sì˜ êµ¬ì„±ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "libcì—서 fallocate, posix_fallocate를 ì°¾ì„ ìˆ˜ 없습니다. no-op로 남겨 둡니다." 
#, python-format msgid "Unable to read config from %s" msgstr "%sì—서 êµ¬ì„±ì„ ì½ì„ 수 ì—†ìŒ" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "권한 부여 í•´ì œ %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "처리ë˜ì§€ ì•Šì€ ì˜ˆì™¸" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "GETì„ ì‹œë„하는 중 알 수 없는 예외 ë°œìƒ: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)sì˜ ì—…ë°ì´íЏ 보고서 실패" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)sì˜ ì—…ë°ì´íЏ 보고서를 발송함" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "경고: SSLì€ í…ŒìŠ¤íŠ¸ìš©ìœ¼ë¡œë§Œ 사용해야 합니다. 프로ë•ì…˜ 배치ì—는 외부 SSL 종료" "를 사용하십시오." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "경고: íŒŒì¼ ë””ìŠ¤í¬ë¦½í„° 한계를 수정할 수 없습니다. 비루트로 실행 중ì¸ì§€ 확ì¸í•˜" "십시오." msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "경고: 최대 프로세스 한계를 수정할 수 없습니다. 비루트로 실행 중ì¸ì§€ 확ì¸í•˜ì‹­" "시오." msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "경고: 메모리 한계를 수정할 수 없습니다. 비루트로 실행 중ì¸ì§€ 확ì¸í•˜ì‹­ì‹œì˜¤." msgid "Warning: Cannot ratelimit without a memcached client" msgstr "경고: memcached í´ë¼ì´ì–¸íЏ ì—†ì´ ì „ì†¡ë¥ ì„ ì œí•œí•  수 ì—†ìŒ" #, python-format msgid "method %s is not allowed." msgstr "메소드 %sì´(ê°€) 허용ë˜ì§€ 않습니다." msgid "no log file found" msgstr "로그 파ì¼ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ" msgid "odfpy not installed." msgstr "odfpyê°€ 설치ë˜ì–´ 있지 않습니다." #, python-format msgid "plotting results failed due to %s" msgstr "%s(으)로 ì¸í•´ ê²°ê³¼ 표시 실패" msgid "python-matplotlib not installed." msgstr "python-matplotlibê°€ 설치ë˜ì–´ 있지 않습니다." swift-2.17.0/swift/locale/es/0000775000175100017510000000000013236061751015767 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/es/LC_MESSAGES/0000775000175100017510000000000013236061751017554 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/es/LC_MESSAGES/swift.po0000666000175100017510000011123413236061620021247 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. #zanata # Pablo Caruana , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-07-18 10:57+0000\n" "Last-Translator: Pablo Caruana \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Spanish\n" msgid "" "\n" "user quit" msgstr "" "\n" "salida del usuario" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sufijos comprobados - %(hashed).2f%% con hash, %(synced).2f%% " "sincronizados" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) particiones replicadas en " "%(time).2fs (%(rate).2f/segundo, %(remaining)s restantes)" #, python-format msgid "%(server)s #%(number)d not running (%(conf)s)" msgstr "%(server)s #%(number)d not running (%(conf)s)" #, python-format msgid "%(server)s (%(pid)s) appears to have stopped" msgstr "%(server)s (%(pid)s) parece haberse detenido" #, python-format msgid "%(server)s running (%(pid)s - %(conf)s)" msgstr "%(server)s running (%(pid)s - %(conf)s)" #, python-format msgid "%(server)s running (%(pid)s - %(pid_file)s)" msgstr "%(server)s corriendo (%(pid)s - %(pid_file)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s éxitos, %(failure)s fallos" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s devuelve 503 para %(statuses)s" #, python-format msgid "%(type)s: %(value)s" msgstr "%(type)s: %(value)s" #, python-format msgid "%s already started..." msgstr "%s ya está iniciado..." 
#, python-format msgid "%s does not exist" msgstr "%s no existe" #, python-format msgid "%s is not mounted" msgstr "%s no está montado" #, python-format msgid "%s responded as unmounted" msgstr "%s ha respondido como desmontado" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Restablecimiento de conexión por igual" #, python-format msgid ", %s containers deleted" msgstr ", %s contenedores suprimidos" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s contenedores posiblemente restantes" #, python-format msgid ", %s containers remaining" msgstr ", %s contenedores restantes" #, python-format msgid ", %s objects deleted" msgstr ", %s objetos suprimidos" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objetos posiblemente restantes" #, python-format msgid ", %s objects remaining" msgstr ", %s objectos restantes" #, python-format msgid ", elapsed: %.02fs" msgstr ", transcurrido: %.02fs" msgid ", return codes: " msgstr ", códigos de retorno:" msgid "Account" msgstr "Cuenta" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "La cuenta %(account)s no se ha cosechado desde %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoría de cuenta en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Paso de auditoría de cuenta finalizado: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Se han intentado replicar %(count)d bases de datos en %(time).5f segundos " "(%(rate).5f/s)" #, python-format msgid "Audit Failed for %(path)s: %(err)s" msgstr "Ha fallado la auditoría para %(path)s: %(err)s" #, python-format msgid "Bad key for %(name)r: %(err)s" msgstr "Clave errónea para %(name)r: %(err)s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de resincronización erróneo: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Comenzar auditoría de cuenta en modalidad de \"una vez\"" msgid "Begin account audit pass." msgstr "Comenzar a pasar la auditoría de cuenta." msgid "Begin container audit \"once\" mode" msgstr "Comenzar auditoría de contenedor en modalidad de \"una vez\"" msgid "Begin container audit pass." msgstr "Comenzar a pasar la auditoría de contenedor." msgid "Begin container sync \"once\" mode" msgstr "Comenzar sincronización de contenedor en modalidad de \"una vez\"" msgid "Begin container update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del contenedor" msgid "Begin container update sweep" msgstr "Comenzar el barrido de actualización del contenedor" #, python-format msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" msgstr "" "Comenzar auditoría de objetos en modalidad \"%(mode)s\" mode (%(audi_type)s" "%(description)s)" msgid "Begin object update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del objeto" msgid "Begin object update sweep" msgstr "Comenzar el barrido de actualización del objeto" #, python-format msgid "Beginning pass on account %s" msgstr "Iniciando el paso en la cuenta %s" msgid "Beginning replication run" msgstr "Iniciando la ejecución de la replicación" msgid "Broker error trying to rollback locked connection" msgstr "Error de intermediario al intentar retrotraer una conexión bloqueada" #, python-format msgid "Can not access the file %s." 
msgstr "No se puede acceder al archivo %s." #, python-format msgid "Can not load profile data from %s." msgstr "No se pueden cargar los datos de perfil desde %s." #, python-format msgid "Cannot read %(auditor_status)s (%(err)s)" msgstr "No se puede leer %(auditor_status)s (%(err)s)" #, python-format msgid "Cannot write %(auditor_status)s (%(err)s)" msgstr "No se puede escribir %(auditor_status)s (%(err)s)" #, python-format msgid "Client did not read from proxy within %ss" msgstr "El cliente pudo realizar la lectura desde el proxy en %ss" msgid "Client disconnected on read" msgstr "El cliente se ha desconectado de la lectura" msgid "Client disconnected without sending enough data" msgstr "El cliente se ha desconectado sin enviar suficientes datos" msgid "Client disconnected without sending last chunk" msgstr "El cliente se ha desconectado sin enviar el último fragmento" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "La vía de acceso de cliente %(client)s no coincide con la vía de acceso " "almacenada en los metadatos de objeto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "La opción de configuración internal_client_conf_path no está definida. Se " "utilizará la configuración predeterminada, Consulte internal-client.conf-" "sample para ver las opciones" msgid "Connection refused" msgstr "Conexión rechazada" msgid "Connection timeout" msgstr "Tiempo de espera de conexión agotado" msgid "Container" msgstr "Contenedor" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Auditoría de contenedor en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Paso de auditoría de contenedor finalizado: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" "Sincronización de contenedor en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "" "Container sync report: %(container)s, time window start: %(start)s, time " "window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_rows: %(total)s" msgstr "" "Informe de sincronización de contenedores: %(container)s, inicio de la " "ventana de tiempo: %(start)s, extremo ventana de tiempo: %(end)s, " "colocaciones: %(puts)s, publicaciones:: %(posts)s, eliminados: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_filas: %(total)s" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Barrido de hebra única de actualización del contenedor finalizado: " "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin " "cambios" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Barrido de actualización del contenedor finalizado: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Barrido de actualización del contenedor de %(path)s finalizado: " "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin " "cambios" #, python-format msgid "" "Could not bind to %(addr)s:%(port)s after trying for 
%(timeout)s seconds" msgstr "" "No se ha podido enlazar a %(addr)s:%(port)s después de intentarlo durante " "%(timeout)ssegundos" #, python-format msgid "Could not load %(conf)r: %(error)s" msgstr "No se ha podido cargar %(conf)r: %(error)s" #, python-format msgid "Data download error: %s" msgstr "Error de descarga de datos: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Paso de dispositivos finalizado: %.02fs" msgid "Did not get a keys dict" msgstr "No tuvimos un diccionario de claves" #, python-format msgid "Directory %(directory)r does not map to a valid policy (%(error)s)" msgstr "" "El directorio %(directory)r no está correlacionado con una política válida " "(%(error)s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERROR %(status)d %(body)s Desde el servidor %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERROR %(status)d %(body)s Desde el servidor de objeto re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERROR %(status)d Esperado: 100-continuo Desde el servidor de objeto" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server" msgstr "" "ERROR %(status)d Intentando %(method)s %(path)s Desde %(type)s de " "Servidor" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde): Respuesta %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR La actualización de la cuenta ha fallado: hay números distintos de " "hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERROR Respuesta errónea %(status)s desde %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR Tiempo de espera de lectura de cliente agotado (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERROR La actualización del contenedor ha fallado (guardando para una " "actualización asíncrona posterior): %(status)d respuesta desde %(ip)s:" "%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR La actualización de la cuenta ha fallado: hay números distintos de " "hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR No se ha podido obtener la información de cuenta %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERROR No se ha podido obtener la información de contenedor %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "ERROR Fallo al cerrar el archivo de disco %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERROR Excepción que provoca la desconexión del cliente" #, python-format msgid "ERROR Exception transferring 
data to object servers %s" msgstr "ERROR Excepción al transferir datos a los servidores de objetos %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERROR ¿No puedo obtener mis propias IP?" msgid "ERROR Insufficient Storage" msgstr "ERROR No hay suficiente almacenamiento" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERROR La auditoría del objeto %(obj)s ha fallado y se ha puesto en " "cuarentena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERROR Problema de desorden, poniendo %s en cuarentena" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERROR Unidad remota no montada %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERROR al sincronizar %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERROR al sincronizar %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERROR al intentar la auditoría de %s" msgid "ERROR Unhandled exception in request" msgstr "ERROR Excepción no controlada en la solicitud" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR Error de __call__ con %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "ERROR Archivo pendiente de sincronización asíncrona con nombre inesperado %s" msgid "ERROR auditing" msgstr "ERROR de auditoría" #, python-format msgid "ERROR auditing: %s" msgstr "ERROR en la auditoría: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERROR La actualización del contenedor ha fallado con %(ip)s:%(port)s/%(dev)s " "(guardando para una actualización asíncrona posterior)" msgid "ERROR get_keys() missing callback" msgstr "ERROR get_keys() No se proporciona devolución de llamada " #, python-format msgid "ERROR get_keys(): from callback: %s" msgstr "ERROR get_keys() No se proporciona devolución de llamada: %s" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERROR al leer la respuesta HTTP desde %s" #, python-format msgid "ERROR reading db %s" msgstr "ERROR al leer la base de datos %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERROR La resincronización ha fallado con %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERROR al sincronizar %(file)s con el nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERROR al intentar la replicación" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERROR al intentar limpiar %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERROR con el servidor %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERROR con las supresiones de carga desde %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERROR con el servidor remoto %(ip)s:%(port)s/%(device)s" 
#, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "ERROR: No se han podido obtener las vías de acceso a las particiones de " "unidad: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERROR: no se ha podido acceder a %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERROR: no se ha podido ejecutar la auditoría: %s" msgid "Error hashing suffix" msgstr "Error en el hash del sufijo" #, python-format msgid "Error in %(conf)r with mtime_check_interval: %(error)s" msgstr "Error en %(conf)r con mtime_check_interval: %(error)s" msgid "Error listing devices" msgstr "Error al mostrar los dispositivos" #, python-format msgid "Error on render profiling results: %s" msgstr "Error al representar los resultados de perfil: %s" msgid "Error parsing recon cache file" msgstr "Error al analizar el archivo de memoria caché de recon" msgid "Error reading recon cache file" msgstr "Error al leer el archivo de memoria caché de recon" msgid "Error reading ringfile" msgstr "Error al leer el ringfile" msgid "Error reading swift.conf" msgstr "Error al leer swift.conf" msgid "Error retrieving recon data" msgstr "Error al recuperar los datos de recon" msgid "Error syncing handoff partition" msgstr "Error al sincronizar la partición de transferencia" msgid "Error syncing partition" msgstr "Error al sincronizar la partición" #, python-format msgid "Error syncing with node: %s" msgstr "Error en la sincronización con el nodo: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Error al intentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Error: se ha producido un error" msgid "Error: missing config path argument" msgstr "Error: falta el argumento de vía de acceso de configuración" #, python-format msgid "Error: unable to locate %s" msgstr "Error: no se ha podido localizar %s" msgid "Exception dumping recon cache" msgstr "Excepción al volcar la memoria caché de recon" msgid "Exception in top-level account reaper loop" msgstr "Excepción en el bucle cosechador de cuenta de nivel superior" msgid "Exception in top-level replication loop" msgstr "Excepción en el bucle de réplica de nivel superior" msgid "Exception in top-levelreconstruction loop" msgstr "Excepción en el bucle de reconstrucción de nivel superior" #, python-format msgid "Exception while deleting container %(container)s %(err)s" msgstr "Excepción al suprimir el contenedor %(container)s %(err)s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Excepción con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Excepción con la cuenta %s" #, python-format msgid "Exception with containers for account %s" msgstr "Excepción con los contenedores para la cuenta %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Excepción con objetos para el contenedor %(container)s para la cuenta " "%(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Esperado: 100-continuo en %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Siguiente cadena CNAME de %(given_domain)s a %(found_domain)s" msgid "Found configs:" msgstr "Configuraciones encontradas:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." 
msgstr "" "El modo de transferencias primero aún tiene transferencias restantes. " "Abortando el pase de réplica actual." msgid "Host unreachable" msgstr "Host no alcanzable" #, python-format msgid "Incomplete pass on account %s" msgstr "Paso incompleto en la cuenta %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato de X-Container-Sync-To no válido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host no válido %r en X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendiente no válida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Respuesta no válida %(resp)s de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Respuesta no válida %(resp)s desde %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Esquema no válido %r en X-Container-Sync-To, debe ser \"//\", \"http\" o " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Interrumpiendo resincronización (rsync) de larga duración: %s" #, python-format msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" msgstr "Error al cargar JSON desde %(auditor_status)s falla (%(err)s)" msgid "Lockup detected.. killing live coros." msgstr "Bloqueo detectado. Interrumpiendo coros activos." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Se ha correlacionado %(given_domain)s con %(found_domain)s" #, python-format msgid "Missing key for %r" msgstr "Falta una clave en %r" #, python-format msgid "No %s running" msgstr "Ningún %s en ejecución" #, python-format msgid "No cluster endpoint for %(realm)r %(cluster)r" msgstr "No hay ningún punto final %(realm)r %(cluster)r" #, python-format msgid "No permission to signal PID %d" msgstr "No hay permiso para señalar el PID %d" #, python-format msgid "No policy with index %s" msgstr "No hay ninguna política que tenga el índice %s" #, python-format msgid "No realm key for %r" msgstr "No hay clave de dominio para %r" #, python-format msgid "No space left on device for %(file)s (%(err)s)" msgstr "No queda espacio libre en el dispositivo para %(file)s (%(err)s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Error de nodo limitado %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "No hay suficientes servidores de objetos reconocidos (constan %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "No se ha encontrado %(sync_from)r => %(sync_to)r - " "objeto %(obj_name)rd" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "No se ha reconstruido nada durante %s segundos." #, python-format msgid "Nothing replicated for %s seconds." msgstr "No se ha replicado nada durante %s segundos." 
msgid "Object" msgstr "Objeto" msgid "Object PUT" msgstr "Objeto PUT" #, python-format msgid "" "Object PUT exceptions after last send, %(conns)s/%(nodes)s required " "connections" msgstr "" "excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s " "conexiones requeridas" #, python-format msgid "" "Object PUT exceptions during send, %(conns)s/%(nodes)s required connections" msgstr "" "excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s " "conexiones requeridas" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "El objeto PUT devuelve 202 para 409: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "El objeto PUT devuelve 412, %(statuses)r" #, python-format msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections" msgstr "Retorno de objecto PUT 503, %(conns)s/%(nodes)s conexiones requeridas" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoría de objetos (%(type)s) en modalidad \"%(mode)s\" finalizada: " "%(elapsed).02fs. Total en cuarentena: %(quars)d, Errores totales: " "%(errors)d, Archivos totales por segundo: %(frate).2f, Bytes totales por " "segundo: %(brate).2f, Tiempo de auditoría: %(audit).2f, Velocidad: " "%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoría de objetos (%(type)s). Desde %(start_time)s: Localmente: " "%(passes)d han pasado, %(quars)d en cuarentena, %(errors)d errores, archivos " "por segundo: %(frate).2f , bytes por segundo: %(brate).2f, Tiempo total: " "%(total).2f, Tiempo de auditoría: %(audit).2f, Velocidad: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Estadísticas de auditoría de objetos: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Reconstrucción de objeto finalizada (una vez). (%.02f minutos)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstrucción de objeto finalizada. (%.02f minutos)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Réplica de objeto finalizada (una vez). (%.02f minutos)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Réplica de objeto finalizada. 
(%.02f minutos)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" "Los servidores de objeto han devuelvo %s etiquetas (etags) no coincidentes" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Barrido de actualización del objeto finalizado: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parámetros, consultas y fragmentos no permitidos en X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Tiempos de partición: máximo %(max).4fs, mínimo %(min).4fs, medio %(med).4fs" #, python-format msgid "" "Pass beginning; %(containers)s possible containers; %(objects)s possible " "objects" msgstr "" "Inicio del paso; %(containers)s posibles contenedores; %(objects)s posibles " "objetos" #, python-format msgid "Pass completed in %(time)ds; %(objects)d objects expired" msgstr "Paso completado en %(time)ds; %(objects)d objetos caducados" #, python-format msgid "Pass so far %(time)ds; %(objects)d objects expired" msgstr "Paso hasta ahora%(time)ds; %(objects)d objetos caducados" msgid "Path required in X-Container-Sync-To" msgstr "Vía de acceso necesaria en X-Container-Sync-To" #, python-format msgid "Problem cleaning up %(datadir)s (%(err)s)" msgstr "Problema al limpiar %(datadir)s (%(err)s)" #, python-format msgid "Problem cleaning up %s" msgstr "Problema al limpiar %s" #, python-format msgid "Profiling Error: %s" msgstr "Error de perfil: %s" #, python-format msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(exc_hint)s database" msgstr "" "En cuarentena%(db_dir)s hasta %(quar_path)s debido a %(exc_hint)s database" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Se ha puesto en cuarentena %(hsh_path)s en %(quar_path)s debido a que no es " "un directorio" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Se ha puesto en cuarentena %(object_path)s en %(quar_path)s debido a que no " "es un directorio" #, python-format msgid "Quarantining DB %s" msgstr "Poniendo en cuarentena la base de datos %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ajuste de límite de registro de suspensión: %(sleep)s para %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Se han eliminado %(remove)d bases de datos" #, python-format msgid "Removing %s objects" msgstr "Eliminando %s objetos" #, python-format msgid "Removing partition: %s" msgstr "Eliminando partición: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "" "Eliminando el archivo PID %(pid_file)s que tiene el PID no válido %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Eliminando el archivo PID %s, que tiene un PID no válido" #, python-format msgid "Removing stale pid file %s" msgstr "Eliminando fichero de identificación positiva obsoleto %s" msgid "Replication run OVER" msgstr "Ejecución de la replicación finalizada" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Se devuelven 497 debido a las listas negras: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Se devuelven 498 de %(meth)s a %(acc)s/%(cont)s/%(obj)s. Ajuste de límite " "(suspensión máxima) %(e)s" msgid "Ring change detected. 
Aborting current reconstruction pass." msgstr "" "Cambio de anillo detectado. Abortando el pase de reconstrucción actual." msgid "Ring change detected. Aborting current replication pass." msgstr "Cambio de anillo detectado. Abortando el pase de réplica actual." #, python-format msgid "Running %s once" msgstr "Ejecutando %s una vez" msgid "Running object reconstructor in script mode." msgstr "Ejecutando reconstructor de objeto en modo script." msgid "Running object replicator in script mode." msgstr "Ejecutando replicador de objeto en modalidad de script." #, python-format msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "Señal %(server)s pid: %(pid)s Señal : %(signal)s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Desde %(time)s: %(sync)s se han sincronizado [%(delete)s supresiones, " "%(put)s colocaciones], %(skip)s se han omitido, %(fail)s han fallado" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Desde %(time)s: Auditorías de cuenta: %(passed)s han pasado la auditoría," "%(failed)s han fallado la auditoría" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Desde %(time)s: Auditorías de contenedor: %(pass)s han pasado la auditoría," "%(fail)s han fallado la auditoría" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Omitiendo %(device)s, ya que no está montado" #, python-format msgid "Skipping %(dir)s: %(err)s" msgstr "Omitiendo %(dir)s: %(err)s" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Omitiendo %s, ya que no está montado" #, python-format msgid "Starting %s" msgstr "Iniciando %s" msgid "Starting object reconstruction pass." msgstr "Iniciando el paso de reconstrucción de objeto." msgid "Starting object reconstructor in daemon mode." msgstr "Iniciando reconstructor de objeto en modo daemon." msgid "Starting object replication pass." msgstr "Iniciando el paso de réplica de objeto." msgid "Starting object replicator in daemon mode." msgstr "Iniciando replicador de objeto en modalidad de daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" "Resincronización de %(src)s realizada con éxito en %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "El acceso al tipo de archivo está prohibido." 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "El total de %(key)s del contenedor (%(total)s) no coincide con la suma de " "%(key)s en las políticas (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Excepción de tiempo de espera superado con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Intentando %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Intentando hacer un GET de %(full_path)s" #, python-format msgid "Trying to get %(status_type)s status of PUT to %(path)s" msgstr "Intentando obtener %(status_type)s el estado de PUT a %(path)s" msgid "Trying to read during GET" msgstr "Intentado leer durante GET" msgid "Trying to read during GET (retrying)" msgstr "Intentando leer durante GET (reintento)" msgid "Trying to send to client" msgstr "Intentando enviar al cliente" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Intentando sincronizar los sufijos con %s" #, python-format msgid "Trying to write to %s" msgstr "Intentando escribir en %s" msgid "UNCAUGHT EXCEPTION" msgstr "UNCAUGHT EXCEPTION" #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "No se ha podido encontrar %(section)s de la configuración en %(conf)s" #, python-format msgid "Unable to load internal client from config: %(conf)r (%(error)s)" msgstr "" "No se puede cargar el cliente interno a partir de la configuración: %(conf)r " "(%(error)s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo." #, python-format msgid "Unable to locate config for %s" msgstr "No se ha podido encontrar el número de configuración de %s" #, python-format msgid "Unable to locate config number %(number)s for %(server)s" msgstr "" "No se ha podido encontrar el número de configuración %(number)s de %(server)s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "No se ha podido localizar fallocate, posix_fallocate en libc. Se dejará como " "no operativo." #, python-format msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "No se puede realizar fsync() en el directorio %(dir)s: %(err)s" #, python-format msgid "Unable to read config from %s" msgstr "No se ha podido leer la configuración de %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r sin autorización" msgid "Unhandled exception" msgstr "Excepción no controlada" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Se ha producido una excepción desconocida al intentar hacer un GET de: " "%(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Informe de actualización fallido para %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Informe de actualización enviado para %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVISO: SSL sólo se debe habilitar con fines de prueba. Utilice la " "terminación de SSL externa para un despliegue de producción." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" 
msgstr "" "AVISO: no se ha podido modificar el límite del descriptor de archivos. ¿Está " "en ejecución como no root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVISO: no se ha podido modificar el límite máximo de procesos. ¿Está en " "ejecución como no root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVISO: no se ha podido modificar el límite de memoria. ¿Está en ejecución " "como no root?" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "" "Se han esperado %(kill_wait)s segundos a que terminara %(server)s; " "abandonando" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "" "Se han esperado %(kill_wait)s segundos a que terminara %(server)s ; " "terminando" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Aviso: no se puede ajustar el límite sin un cliente almacenado en memoria " "caché" #, python-format msgid "method %s is not allowed." msgstr "el método %s no está permitido." msgid "no log file found" msgstr "no se ha encontrado ningún archivo de registro" msgid "odfpy not installed." msgstr "odfpy no está instalado." #, python-format msgid "plotting results failed due to %s" msgstr "error en el trazado de resultados debido a %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib no está instalado." swift-2.17.0/swift/locale/pt_BR/0000775000175100017510000000000013236061751016366 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/pt_BR/LC_MESSAGES/0000775000175100017510000000000013236061751020153 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/pt_BR/LC_MESSAGES/swift.po0000666000175100017510000007135713236061620021661 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andre Campos Bezerra , 2015 # Lucas Ribeiro , 2014 # thiagol , 2015 # Volmar Oliveira Junior , 2014 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Portuguese (Brazil)\n" msgid "" "\n" "user quit" msgstr "" "\n" "encerramento do usuário" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sufixos verificados – %(hashed).2f%% de hash, %(synced).2f%% " "sincronizados" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partições replicadas em " "%(time).2fs (%(rate).2f/seg, %(remaining)s restantes)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s sucessos, %(failure)s falhas" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s retornando 503 para %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s já iniciado..." #, python-format msgid "%s does not exist" msgstr "%s não existe" #, python-format msgid "%s is not mounted" msgstr "%s não está montado" #, python-format msgid "%s responded as unmounted" msgstr "%s respondeu como não montado" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Reconfiguração da conexão por peer" #, python-format msgid ", %s containers deleted" msgstr ", %s containers apagados" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s containers possivelmente restando" #, python-format msgid ", %s containers remaining" msgstr ", %s containers restando" #, python-format msgid ", %s objects deleted" msgstr ", %s objetos apagados" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objetos possivelmente restando" #, python-format msgid ", %s objects remaining" msgstr ", %s objetos restando" #, python-format msgid ", elapsed: %.02fs" msgstr ", passados: %.02fs" msgid ", return codes: " msgstr ", códigos de retorno:" msgid "Account" msgstr "Conta" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoria de conta em modo \"único\" finalizado: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Passo de auditoria de conta finalizado: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Tentativa de replicação do %(count)d dbs em%(time).5f segundos (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de ressincronização inválido: %(ret)d <-%(args)s" msgid "Begin account audit \"once\" mode" msgstr "Iniciar auditoria de conta em modo \"único\"" msgid "Begin account audit pass." msgstr "Iniciando passo de auditoria de conta." msgid "Begin container audit \"once\" mode" msgstr "Inicie o modo \"único\" da auditoria do contêiner" msgid "Begin container audit pass." 
msgstr "Inicie a aprovação da auditoria do contêiner." msgid "Begin container sync \"once\" mode" msgstr "Inicie o modo \"único\" de sincronização do contêiner" msgid "Begin container update single threaded sweep" msgstr "Inicie a varredura de encadeamento único da atualização do contêiner" msgid "Begin container update sweep" msgstr "Inicie a varredura de atualização do contêiner" msgid "Begin object update single threaded sweep" msgstr "Inicie a varredura de encadeamento único da atualização do objeto" msgid "Begin object update sweep" msgstr "Inicie a varredura da atualização do objeto" #, python-format msgid "Beginning pass on account %s" msgstr "Iniciando a estapa nas contas %s" msgid "Beginning replication run" msgstr "Começando execução de replicação" msgid "Broker error trying to rollback locked connection" msgstr "Erro do Broker ao tentar retroceder a conexão bloqueada" #, python-format msgid "Can not access the file %s." msgstr "Não é possível acessar o arquivo %s." #, python-format msgid "Can not load profile data from %s." msgstr "Não é possível carregar dados do perfil a partir de %s." #, python-format msgid "Client did not read from proxy within %ss" msgstr "O cliente não leu no proxy dentro de %ss" msgid "Client disconnected on read" msgstr "Cliente desconectado durante leitura" msgid "Client disconnected without sending enough data" msgstr "Cliente desconecatdo sem ter enviado dados suficientes" msgid "Client disconnected without sending last chunk" msgstr "Cliente desconectado sem ter enviado o último chunk" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Caminho do cliente %(client)s não corresponde ao caminho armazenado nos " "metadados do objeto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Opção de configuração internal_client_conf_path não definida. Usando a " "configuração padrão. 
Consulte internal-client.conf-sample para obter opções" msgid "Connection refused" msgstr "Conexão recusada" msgid "Connection timeout" msgstr "Tempo limite de conexão" msgid "Container" msgstr "Contêiner" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Modo \"único\" da auditoria do contêiner concluído: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Aprovação da auditoria do contêiner concluída: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Modo \"único\" de sincronização do contêiner concluído: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Varredura de encadeamento único da atualização do contêiner concluída: " "%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s " "sem alterações" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Varredura da atualização do contêiner concluída: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Varredura da atualização do contêiner de %(path)s concluída: " "%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s " "sem alterações" #, python-format msgid "Data download error: %s" msgstr "Erro ao fazer download de dados: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Dispositivos finalizados: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERRO %(status)d %(body)s Do Servidor %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERRO %(status)d %(body)s No Servidor de Objetos re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRO %(status)d Expectativa: 100-continuar Do Servidor de Objeto" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde): Resposta %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERRO Resposta inválida %(status)s a partir de %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERRO Tempo limite de leitura do cliente (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERRO A atualização do contêiner falhou (salvando para atualização assíncrona " "posterior): %(status)d resposta do %(ip)s:%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERRO Não foi possível recuperar as informações da conta %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERRO Não foi possível obter informações do contêiner %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERROR DiskFile %(data_file)s falha ao fechar: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERRO Exceção causando clientes a desconectar" #, python-format msgid 
"ERROR Exception transferring data to object servers %s" msgstr "ERRO Exceção ao transferir dados para os servidores de objeto %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERRO Falha ao pegar meu próprio IPs?" msgid "ERROR Insufficient Storage" msgstr "ERRO Capacidade insuficiente" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "ERRO O objeto %(obj)s falhou ao auditar e ficou em quarentena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERRO Problema de seleção, em quarentena %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERRO Drive remoto não montado %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERRO Sincronizando %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERRO Sincronizando %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERRO Tentando auditar %s" msgid "ERROR Unhandled exception in request" msgstr "ERRO Exceção não manipulada na solicitação" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ erro com %(method)s %(path)s" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERRO arquivo pendente assíncrono com nome inesperado %s" msgid "ERROR auditing" msgstr "Erro auditando" #, python-format msgid "ERROR auditing: %s" msgstr "ERRO auditoria: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERRO A atualização de contêiner falhou com %(ip)s:%(port)s/%(dev)s (salvando " "para atualização assíncrona posterior)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERRO lendo resposta HTTP de %s" #, python-format msgid "ERROR reading db %s" msgstr "ERRO lendo db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERRO rsync falhou com %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERRO sincronizando %(file)s com nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERRO tentando replicar" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERRO enquanto tentaava limpar %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERRO com %(type)s do servidor %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERRO com as supressões de carregamento a partir de %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERRO com o servidor remoto %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERRO: Falha ao obter caminhos para partições de unidade: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERRO: Não é possível acessar %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERRO: Não é possível executar a auditoria: %s" msgid "Error hashing suffix" 
msgstr "Erro ao efetuar hash do sufixo" msgid "Error listing devices" msgstr "Erro ao listar dispositivos" #, python-format msgid "Error on render profiling results: %s" msgstr "Erro na renderização de resultados de criação de perfil: %s" msgid "Error parsing recon cache file" msgstr "Erro ao analisar o arquivo de cache de reconhecimento" msgid "Error reading recon cache file" msgstr "Erro ao ler o arquivo de cache de reconhecimento" msgid "Error reading ringfile" msgstr "Erro na leitura do ringfile" msgid "Error reading swift.conf" msgstr "Erro ao ler swift.conf" msgid "Error retrieving recon data" msgstr "Erro ao recuperar dados de reconhecimento" msgid "Error syncing handoff partition" msgstr "Erro ao sincronizar a partição de handoff" msgid "Error syncing partition" msgstr "Erro ao sincronizar partição" #, python-format msgid "Error syncing with node: %s" msgstr "Erro ao sincronizar com o nó: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Erro ao tentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Erro: Ocorreu um erro" msgid "Error: missing config path argument" msgstr "Erro: argumento do caminho de configuração ausente" #, python-format msgid "Error: unable to locate %s" msgstr "Erro: não é possível localizar %s" msgid "Exception dumping recon cache" msgstr "Exceção dump de cache de reconhecimento" msgid "Exception in top-level account reaper loop" msgstr "Exceção no loop do removedor da conta de nível superior" msgid "Exception in top-level replication loop" msgstr "Exceção no loop de replicação de nível superior" msgid "Exception in top-levelreconstruction loop" msgstr "Exceção no loop de reconstrução de nível superior" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exceção com %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Exceção com a conta %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exceção com os containers para a conta %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exceção com objetos para o container %(container)s para conta %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Expectativa: 100-continuar em %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Cadeia CNAME a seguir para %(given_domain)s para%(found_domain)s" msgid "Found configs:" msgstr "Localizados arquivos de configuração:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "O primeiro modo de handoffs ainda possui handoffs. Interrompendo a aprovação " "da replicação atual." 
msgid "Host unreachable" msgstr "Destino inalcançável" #, python-format msgid "Incomplete pass on account %s" msgstr "Estapa incompleta nas contas %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato X-Container-Sync-To inválido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host inválido %r em X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendente inválida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Resposta inválida %(resp)s a partir de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Resposta inválida %(resp)s a partir de %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Esquema inválido %r em X-Container-Sync-To, deve ser \" // \", \"http\" ou " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Eliminando a ressincronização de longa execução: %s" msgid "Lockup detected.. killing live coros." msgstr "Bloqueio detectado... eliminando núcleos em tempo real." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s mapeado para %(found_domain)s" #, python-format msgid "No %s running" msgstr "Nenhum %s rodando" #, python-format msgid "No permission to signal PID %d" msgstr "Nenhuma permissão para PID do sinal %d" #, python-format msgid "No policy with index %s" msgstr "Nenhuma política com índice %s" #, python-format msgid "No realm key for %r" msgstr "Nenhuma chave do domínio para %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Erro de nó limitado %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" "Insuficiente número de servidores de objeto confirmaram (%d confirmados)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Não localizado %(sync_from)r => %(sync_to)r – objeto " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nada foi reconstruído durante %s segundos." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nada foi replicado para %s segundos." msgid "Object" msgstr "Objeto" msgid "Object PUT" msgstr "PUT de objeto" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Objeto PUT retornando 202 para a versão 409: %(req_timestamp)s < = " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "PUT de objeto retornando 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Modo \"%(mode)s\" da auditoria de objeto (%(type)s) concluído: " "%(elapsed).02fs. Total em quarentena: %(quars)d, Total de erros: %(errors)d, " "Total de arquivos/seg: %(frate).2f, Total de bytes/seg: %(brate).2f, Tempo " "de auditoria: %(audit).2f, Taxa: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoria de objeto (%(type)s). Desde %(start_time)s: Localmente: %(passes)d " "aprovado, %(quars)d em quarentena, %(errors)d erros, arquivos/s: " "%(frate).2f, bytes/seg: %(brate).2f, Tempo total: %(total).2f, Tempo de " "auditoria: %(audit).2f, Taxa: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Estatísticas de auditoria do objeto: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Reconstrução do objeto concluída (única). (%.02f minutos)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstrução do objeto concluída. (%.02f minutos)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replicação completa do objeto (única). (%.02f minutos)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Replicação completa do objeto. (%.02f minutos)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Servidores de objeto retornaram %s etags incompatíveis" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Varredura da atualização de objeto concluída: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parâmetros, consultas e fragmentos não permitidos em X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Tempos de partição: máximo %(max).4fs, mínimo %(min).4fs, médio %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Caminho necessário em X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problema ao limpar %s" #, python-format msgid "Profiling Error: %s" msgstr "Erro da Criação de Perfil: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Em quarentena %(hsh_path)s para %(quar_path)s porque ele não é um diretório" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Em quarentena %(object_path)s para %(quar_path)s porque ele não é um " "diretório" #, python-format msgid "Quarantining DB %s" msgstr "Quarentenando BD %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Log de suspensão do limite de taxa: %(sleep)s para %(account)s/%(container)s/" "%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Dbs %(remove)d removido" #, python-format msgid "Removing %s objects" msgstr "Removendo %s objetos" #, python-format msgid "Removing partition: %s" msgstr "Removendo partição: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Removendo arquivo pid %(pid_file)s com pid errado %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Removendo o arquivo pid %s com pid inválido" #, python-format msgid "Removing stale pid file %s" msgstr "Removendo o arquivo pid %s antigo" msgid "Replication run OVER" msgstr "Execução de replicação TERMINADA" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Retornando 497 por causa da listagem negra: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . 
Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Retornando 498 para %(meth)s para %(acc)s/%(cont)s/%(obj)s. Limite de taxa " "(Suspensão Máxima) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Mudança no anel detectada. Interrompendo a aprovação da recosntrução atual." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Alteração do anel detectada. Interrompendo a aprovação da replicação atual." #, python-format msgid "Running %s once" msgstr "Executando %s uma vez," msgid "Running object reconstructor in script mode." msgstr "Executando o reconstrutor do objeto no modo de script." msgid "Running object replicator in script mode." msgstr "Executando replicador do objeto no modo de script." #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Desde %(time)s: %(sync)s sincronizados [%(delete)s exclui, %(put)s coloca], " "%(skip)s ignorados, %(fail)s com falha" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Desde %(time)s: Auditoria de contas: %(passed)s auditorias passaram," "%(failed)s auditorias falharam" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Desde %(time)s: As auditorias do contêiner: %(pass)s de auditoria aprovada, " "%(fail)s com falha auditoria" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Pulando %(device)s porque não está montado" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Pulando %s porque não está montado" #, python-format msgid "Starting %s" msgstr "Iniciando %s" msgid "Starting object reconstruction pass." msgstr "Iniciando a aprovação da reconstrução de objeto." msgid "Starting object reconstructor in daemon mode." msgstr "Iniciando o reconstrutor do objeto no modo daemon." msgid "Starting object replication pass." msgstr "Iniciando a aprovação da replicação de objeto." msgid "Starting object replicator in daemon mode." msgstr "Iniciando o replicador do objeto no modo daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Ressincronização bem-sucedida de %(src)s em %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "O tipo de arquivo é de acesso proibido!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "O total %(key)s para o container (%(total)s) não confere com a soma %(key)s " "pelas politicas (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exceção de tempo limite com %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentando %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentando GET %(full_path)s" msgid "Trying to read during GET" msgstr "Tentando ler durante GET" msgid "Trying to read during GET (retrying)" msgstr "Tentando ler durante GET (tentando novamente)" msgid "Trying to send to client" msgstr "Tentando enviar para o cliente" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentando sincronizar sufixos com %s" #, python-format msgid "Trying to write to %s" msgstr "Tentando escrever para %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEÇÃO NÃO CAPTURADA" #, python-format msgid "Unable to locate %s in libc. 
Leaving as a no-op." msgstr "Não é possível localizar %s em libc. Saindo como um não operacional." #, python-format msgid "Unable to locate config for %s" msgstr "Não é possível localizar configuração para %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Não é possível localizar fallocate, posix_fallocate em libc. Saindo como um " "não operacional." #, python-format msgid "Unable to read config from %s" msgstr "Não é possível ler a configuração a partir de %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Não autorizado %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "Exceção não-tratada" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "Exceção inesperada ao tentar GET: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Atualize o relatório com falha para %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Atualize o relatório enviado para %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVISO: SSL deve ser ativada somente para fins de teste. Use rescisão SSL " "externa para uma implementação de produção." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite do descritor de arquivo. Executar " "como não raiz?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite máximo do processo. Executar como " "não raiz?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite de memória. Executar como não raiz?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Aviso: Não é possível um limite de taxa sem um cliente memcached" #, python-format msgid "method %s is not allowed." msgstr "o método %s não é permitido." msgid "no log file found" msgstr "Nenhum arquivo de log encontrado" msgid "odfpy not installed." msgstr "odfpy não está instalado." #, python-format msgid "plotting results failed due to %s" msgstr "plotar resultados falhou devido a %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib não instalado." swift-2.17.0/swift/locale/zh_CN/0000775000175100017510000000000013236061751016361 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/zh_CN/LC_MESSAGES/0000775000175100017510000000000013236061751020146 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/zh_CN/LC_MESSAGES/swift.po0000666000175100017510000006364413236061620021654 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Pearl Yajing Tan(Seagate Tech) , 2014 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Chinese (China)\n" msgid "" "\n" "user quit" msgstr "" "\n" "用户退出" #, python-format msgid " - %s" msgstr "- %s" #, python-format msgid " - parallel, %s" msgstr "ï¼å¹³è¡Œï¼Œ%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "%(checked)dåŽç¼€å·²è¢«æ£€æŸ¥ %(hashed).2f%% hashed, %(synced).2f%% synced" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) 分区被å¤åˆ¶ æŒç»­æ—¶é—´ä¸º \"\n" "\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)sæˆåŠŸï¼Œ%(failure)s失败" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 返回 503 在 %(statuses)s" #, python-format msgid "%s already started..." msgstr "%så·²å¯åЍ..." #, python-format msgid "%s does not exist" msgstr "%sä¸å­˜åœ¨" #, python-format msgid "%s is not mounted" msgstr "%s未挂载" #, python-format msgid "%s responded as unmounted" msgstr "%s å“应为未安装" #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由åŒçº§é‡ç½®è¿žæŽ¥" #, python-format msgid ", %s containers deleted" msgstr ",删除容器%s" #, python-format msgid ", %s containers possibly remaining" msgstr ",å¯èƒ½å‰©ä½™å®¹å™¨%s" #, python-format msgid ", %s containers remaining" msgstr ",剩余容器%s" #, python-format msgid ", %s objects deleted" msgstr ",删除对象%s" #, python-format msgid ", %s objects possibly remaining" msgstr ",å¯èƒ½å‰©ä½™å¯¹è±¡%s" #, python-format msgid ", %s objects remaining" msgstr ",剩余对象%s" #, python-format msgid ", elapsed: %.02fs" msgstr ",耗时:%.02fs" msgid ", return codes: " msgstr ",返回代ç ï¼š" msgid "Account" msgstr "è´¦å·" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "è´¦å·å®¡è®¡\"once\"模å¼å®Œæˆï¼š %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "è´¦å·å®¡è®¡å®Œæˆï¼š%.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f seconds (%(rate).5f/s)å°è¯•å¤åˆ¶%(count)d dbs" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Bad rsync返还代ç ï¼š%(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "开始账å·å®¡è®¡\"once\"模å¼" msgid "Begin account audit pass." msgstr "开始账å·å®¡è®¡é€šè¿‡" msgid "Begin container audit \"once\" mode" msgstr "开始容器审计\"once\" 模å¼" msgid "Begin container audit pass." 
msgstr "开始通过容器审计" msgid "Begin container sync \"once\" mode" msgstr "å¼€å§‹å®¹å™¨åŒæ­¥\"once\"模å¼" msgid "Begin container update single threaded sweep" msgstr "开始容器更新å•线程扫除" msgid "Begin container update sweep" msgstr "开始容器更新扫除" msgid "Begin object update single threaded sweep" msgstr "开始对象更新å•线程扫除" msgid "Begin object update sweep" msgstr "开始对象更新扫除" #, python-format msgid "Beginning pass on account %s" msgstr "è´¦å·%s开始通过" msgid "Beginning replication run" msgstr "开始è¿è¡Œå¤åˆ¶" msgid "Broker error trying to rollback locked connection" msgstr "æœåŠ¡å™¨é”™è¯¯å¹¶å°è¯•去回滚已ç»é”ä½çš„链接" #, python-format msgid "Can not access the file %s." msgstr "无法访问文件%s" #, python-format msgid "Can not load profile data from %s." msgstr "无法从%sä¸‹è½½åˆ†æžæ•°æ®" #, python-format msgid "Client did not read from proxy within %ss" msgstr "客户尚未从代ç†å¤„读å–%ss" msgid "Client disconnected on read" msgstr "å®¢æˆ·è¯»å–æ—¶ä¸­æ–­" msgid "Client disconnected without sending enough data" msgstr "客户中断 尚未å‘é€è¶³å¤Ÿ" msgid "Client disconnected without sending last chunk" msgstr "客户机已断开连接而未å‘逿œ€åŽä¸€ä¸ªæ•°æ®å—" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "客户路径%(client)s与对象元数æ®ä¸­å­˜å‚¨çš„路径%(meta)sä¸ç¬¦" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "未定义é…置选项 internal_client_conf_path。正在使用缺çœé…置。请å‚阅 internal-" "client.conf-sample 以了解å„个选项" msgid "Connection refused" msgstr "连接被拒ç»" msgid "Connection timeout" msgstr "连接超时" msgid "Container" msgstr "容器" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "容器审计\"once\"模å¼å®Œæˆï¼š%.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "容器审计通过完æˆï¼š %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "å®¹å™¨åŒæ­¥\"once\"模å¼å®Œæˆï¼š%.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "容器更新å•线程扫除完æˆï¼š%(elapsed).02fs, %(success)s æˆåŠŸ, %(fail)s 失败, " "%(no_change)s 无更改" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "容器更新扫除完æˆï¼š%.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "通过路径%(path)s容器更新扫除完æˆï¼š%(elapsed).02fs, %(success)s æˆåŠŸ, " "%(fail)s 失败, %(no_change)s 无更改" #, python-format msgid "Data download error: %s" msgstr "æ•°æ®ä¸‹è½½é”™è¯¯ï¼š%s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "设备通过完æˆï¼š %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "错误 %(status)d %(body)s æ¥è‡ª %(type)s æœåС噍" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "错误 %(status)d %(body)s æ¥è‡ª 对象æœåС噍 re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "å‘生 %(status)d é”™è¯¯ï¼Œéœ€è¦ 100 - 从对象æœåŠ¡å™¨ç»§ç»­" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "出现错误 è´¦å·æ›´æ–°å¤±è´¥ï¼š %(ip)s:%(port)s/%(device)s (ç¨åŽå°è¯•): 回应 " "%(status)s %(reason)s" #, python-format msgid "ERROR Bad 
response %(status)s from %(host)s" msgstr "失败å“应错误%(status)sæ¥è‡ª%(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "错误 客户读å–è¶…æ—¶(%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "错误 容器更新失败(正在ä¿å­˜ ç¨åŽåŒæ­¥æ›´æ–°):%(status)d回应æ¥è‡ª%(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "错误:无法获å–è´¦å·ä¿¡æ¯%s" #, python-format msgid "ERROR Could not get container info %s" msgstr "错误:无法获å–容器%sä¿¡æ¯" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ç£ç›˜æ–‡ä»¶é”™è¯¯%(data_file)s关闭失败: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "错误 异常导致客户端中断连接" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "错误:å‘对象æœåС噍 %s ä¼ è¾“æ•°æ®æ—¶å‘生异常" msgid "ERROR Failed to get my own IPs?" msgstr "错误 无法获得我方IPs?" msgid "ERROR Insufficient Storage" msgstr "错误 存储空间ä¸è¶³" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "错误 Pickle问题 隔离%s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "错误 远程驱动器无法挂载 %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "åŒæ­¥é”™è¯¯ %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "åŒæ­¥æ—¶å‘生错误%s" #, python-format msgid "ERROR Trying to audit %s" msgstr "错误 å°è¯•开始审计%s" msgid "ERROR Unhandled exception in request" msgstr "错误 未处ç†çš„异常å‘出请求" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "%(method)s %(path)s出现错误__call__ error" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "错误 è´¦å·æ›´æ–°å¤±è´¥ %(ip)s:%(port)s/%(device)s (ç¨åŽå°è¯•)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "错误 è´¦å·æ›´æ–°å¤±è´¥%(ip)s:%(port)s/%(device)s (ç¨åŽå°è¯•):" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "æ‰§è¡ŒåŒæ­¥ç­‰å¾…文件 文件åä¸å¯çŸ¥%s" msgid "ERROR auditing" msgstr "错误 审计" #, python-format msgid "ERROR auditing: %s" msgstr "审计错误:%s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在ä¿å­˜ ç¨åŽåŒæ­¥æ›´æ–°)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "读å–HTTP错误 å“åº”æ¥æº%s" #, python-format msgid "ERROR reading db %s" msgstr "错误 读å–db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "错误 rsync失败 %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "错误 åŒæ­¥ %(file)s å’Œ 节点%(node)s" msgid "ERROR trying to replicate" msgstr "å°è¯•å¤åˆ¶æ—¶å‘生错误" #, python-format msgid "ERROR while trying to clean up %s" msgstr "æ¸…ç†æ—¶å‡ºçŽ°é”™è¯¯%s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)sæœåС噍å‘生错误 %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "执行下载压缩时å‘生错误%s" #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "远程æœåС噍å‘生错误 %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "%s未挂载" #, 
python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "出错,无法访问 %(path)s:%(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "错误:无法执行审计:%s" msgid "Error hashing suffix" msgstr "执行HashingåŽç¼€æ—¶å‘生错误" msgid "Error listing devices" msgstr "设备列表时出现错误" #, python-format msgid "Error on render profiling results: %s" msgstr "给予分æžç»“果时å‘生错误:%s" msgid "Error parsing recon cache file" msgstr "è§£æžrecon cache file时出现错误" msgid "Error reading recon cache file" msgstr "读å–recon cache file时出现错误" msgid "Error reading ringfile" msgstr "读å–ringfile时出现错误" msgid "Error reading swift.conf" msgstr "读å–swift.conf时出现错误" msgid "Error retrieving recon data" msgstr "检索recon data时出现错误" msgid "Error syncing handoff partition" msgstr "æ‰§è¡ŒåŒæ­¥åˆ‡æ¢åˆ†åŒºæ—¶å‘生错误" msgid "Error syncing partition" msgstr "æ‰§è¡ŒåŒæ­¥åˆ†åŒºæ—¶å‘生错误" #, python-format msgid "Error syncing with node: %s" msgstr "æ‰§è¡ŒåŒæ­¥æ—¶èŠ‚ç‚¹%så‘生错误" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "å°è¯•é‡å»º %(path)s 策略时出错:#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "错误:一个错误å‘生了" msgid "Error: missing config path argument" msgstr "错误:设置路径信æ¯ä¸¢å¤±" #, python-format msgid "Error: unable to locate %s" msgstr "错误:无法查询到 %s" msgid "Exception dumping recon cache" msgstr "执行dump recon的时候出现异常" msgid "Exception in top-level account reaper loop" msgstr "异常出现在top-levelè´¦å·reaper环" msgid "Exception in top-level replication loop" msgstr "top-levelå¤åˆ¶åœˆå‡ºçް异叏" msgid "Exception in top-levelreconstruction loop" msgstr " top-levelreconstruction 环中å‘生异常" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s出现异常" #, python-format msgid "Exception with account %s" msgstr "è´¦å·%s出现异常" #, python-format msgid "Exception with containers for account %s" msgstr "è´¦å·%s内容器出现异常" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "è´¦å·%(account)s容器%(container)s的对象出现异常" #, python-format msgid "Expect: 100-continue on %s" msgstr "已知:100-continue on %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "è·ŸéšCNAME链从%(given_domain)s到%(found_domain)s" msgid "Found configs:" msgstr "找到é…ç½®" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "Handoffs 优先方å¼ä»æœ‰ handoffs。正在中止当å‰å¤åˆ¶è¿‡ç¨‹ã€‚" msgid "Host unreachable" msgstr "无法连接到主机" #, python-format msgid "Incomplete pass on account %s" msgstr "è´¦å·%s未完æˆé€šè¿‡" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "无效的X-Container-Sync-Toæ ¼å¼%r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To中无效主机%r" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "ä¸å¯ç”¨çš„等待输入%(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "从 %(full_path)s 返回了无效å“应 %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "无效的回应%(resp)sæ¥è‡ª%(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" #, python-format msgid "Killing long-running rsync: %s" msgstr "终止long-runningåŒæ­¥: %s" msgid "Lockup detected.. killing live coros." 
msgstr "检测到lockup。终止正在执行的coros" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "集åˆ%(given_domain)s到%(found_domain)s" #, python-format msgid "No %s running" msgstr "æ— %sè´¦å·è¿è¡Œ" #, python-format msgid "No permission to signal PID %d" msgstr "æ— æƒé™å‘é€ä¿¡å·PID%d" #, python-format msgid "No policy with index %s" msgstr "没有具备索引 %s 的策略" #, python-format msgid "No realm key for %r" msgstr "%ræƒé™keyä¸å­˜åœ¨" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "节点错误æžé™ %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "没有足够的对象æœåŠ¡å™¨åº”ç­”ï¼ˆæ”¶åˆ° %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "过去 %s ç§’æœªé‡æž„任何对象。" #, python-format msgid "Nothing replicated for %s seconds." msgstr "%sç§’æ— å¤åˆ¶" msgid "Object" msgstr "对象" msgid "Object PUT" msgstr "对象上传" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "对象 PUT 正在返回 202(对于 409):%(req_timestamp)s å°äºŽæˆ–等于 " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "对象PUT返还 412,%(statuses)r " #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "对象审计 (%(type)s) \\\"%(mode)s\\\"模å¼å®Œæˆ: %(elapsed).02fs 隔离总数: " "%(quars)d, 错误总数: %(errors)d, 文件ï¼ç§’总和:%(frate).2f, bytes/sec总和: " "%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "对象审计 (%(type)s). 自 %(start_time)s 开始: 本地:%(passes)d 通" "过,%(quars)d 隔离,%(errors)d 错误,文件/秒:%(frate).2f,字节/秒:" "%(brate).2f,总时间:%(total).2f,审计时间:%(audit).2f,速率:" "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "对象审计统计:%s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "å¯¹è±¡é‡æž„完æˆï¼ˆä¸€æ¬¡ï¼‰ã€‚(%.02f 分钟)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "å¯¹è±¡é‡æž„完æˆã€‚(%.02f 分钟)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "对象å¤åˆ¶å®Œæˆ(一次)。(%.02f minutes)" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "对象å¤åˆ¶å®Œæˆã€‚(%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "对象æœåŠ¡å™¨è¿”è¿˜%sä¸åŒ¹é…etags" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "对象更新扫除完æˆï¼š%.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "在X-Container-Sync-To中,å˜é‡ï¼ŒæŸ¥è¯¢å’Œç¢Žç‰‡ä¸è¢«å…许" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "在X-Container-Sync-To中路径是必须的" #, python-format msgid "Problem cleaning up %s" msgstr "问题清除%s" #, python-format msgid "Profiling Error: %s" msgstr "分æžä»£ç æ—¶å‡ºçŽ°é”™è¯¯ï¼š%s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(hsh_path)så’Œ%(quar_path)s因为éžç›®å½•" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(object_path)så’Œ%(quar_path)s因为éžç›®å½•" #, python-format msgid "Quarantining DB %s" msgstr "隔离DB%s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "æµé‡æŽ§åˆ¶ä¼‘眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "删除%(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "正在移除 %s 个对象" #, python-format msgid "Removing partition: %s" msgstr "移除分区:%s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "移除 pid 文件 %(pid_file)s 失败,pid %(pid)d 䏿­£ç¡®" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "正在移除带有无效 pid çš„ pid 文件 %s" #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" msgid "Replication run OVER" msgstr "å¤åˆ¶è¿è¡Œç»“æŸ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "返回497因为黑åå•:%s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,æµé‡æŽ§åˆ¶(Max \"\n" "\"Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "检测到环更改。正在中止当å‰é‡æž„过程。" msgid "Ring change detected. Aborting current replication pass." msgstr "Ring改å˜è¢«æ£€æµ‹åˆ°ã€‚退出现有的å¤åˆ¶é€šè¿‡" #, python-format msgid "Running %s once" msgstr "è¿è¡Œ%s一次" msgid "Running object reconstructor in script mode." msgstr "正以脚本方å¼è¿è¡Œå¯¹è±¡é‡æž„程åºã€‚" msgid "Running object replicator in script mode." msgstr "在加密模å¼ä¸‹æ‰§è¡Œå¯¹è±¡å¤åˆ¶" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "自%(time)s起:%(sync)s完æˆåŒæ­¥ [%(delete)s 删除, %(put)s 上传], \"\n" "\"%(skip)s 跳过, %(fail)s 失败" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "自%(time)s开始:账å·å®¡è®¡ï¼š%(passed)s 通过审计,%(failed)s 失败" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "因无法挂载跳过%(device)s" #, python-format msgid "Skipping %s as it is not mounted" msgstr "挂载失败 跳过%s" #, python-format msgid "Starting %s" msgstr "å¯åЍ%s" msgid "Starting object reconstruction pass." msgstr "正在å¯åŠ¨å¯¹è±¡é‡æž„过程。" msgid "Starting object reconstructor in daemon mode." 
msgstr "æ­£ä»¥å®ˆæŠ¤ç¨‹åºæ–¹å¼å¯åŠ¨å¯¹è±¡é‡æž„程åºã€‚" msgid "Starting object replication pass." msgstr "开始通过对象å¤åˆ¶" msgid "Starting object replicator in daemon mode." msgstr "在守护模å¼ä¸‹å¼€å§‹å¯¹è±¡å¤åˆ¶" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "æˆåŠŸçš„rsync %(src)s at %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "è¯¥æ–‡ä»¶ç±»åž‹è¢«ç¦æ­¢è®¿é—®ï¼" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "容器(%(total)s)内%(key)s总数ä¸ç¬¦åˆåè®®%(key)s总数(%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s å‘生超时异常" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "å°è¯•执行%(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "æ­£å°è¯•èŽ·å– %(full_path)s" msgid "Trying to read during GET" msgstr "执行GETæ—¶å°è¯•读å–" msgid "Trying to read during GET (retrying)" msgstr "执行GETæ—¶å°è¯•读å–(釿–°å°è¯•)" msgid "Trying to send to client" msgstr "å°è¯•å‘é€åˆ°å®¢æˆ·ç«¯" #, python-format msgid "Trying to sync suffixes with %s" msgstr "æ­£å°è¯•使åŽç¼€ä¸Ž %s åŒæ­¥" #, python-format msgid "Trying to write to %s" msgstr "å°è¯•执行书写%s" msgid "UNCAUGHT EXCEPTION" msgstr "未æ•获的异常" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "无法查询到%s ä¿ç•™ä¸ºno-op" #, python-format msgid "Unable to locate config for %s" msgstr "找ä¸åˆ° %s çš„é…ç½®" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "无法查询到fallocate, posix_fallocate。ä¿å­˜ä¸ºno-op" #, python-format msgid "Unable to read config from %s" msgstr "无法从%s读å–设置" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未授æƒ%(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "未处ç†çš„异常" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "å°è¯•èŽ·å– %(account)r %(container)r %(object)r æ—¶å‘生未知异常" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s更新报告失败" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "更新报告å‘至%(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "警告:SSLä»…å¯ä»¥åšæµ‹è¯•ä½¿ç”¨ã€‚äº§å“部署时请使用外连SSL终端" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:无法修改文件æè¿°é™åˆ¶ã€‚æ˜¯å¦æŒ‰éžrootè¿è¡Œï¼Ÿ" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:无法修改最大è¿è¡Œæžé™ï¼Œæ˜¯å¦æŒ‰éžrootè¿è¡Œï¼Ÿ" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:无法修改内存æžé™ï¼Œæ˜¯å¦æŒ‰éžrootè¿è¡Œï¼Ÿ" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:缺失缓存客户端 无法控制æµé‡ " #, python-format msgid "method %s is not allowed." msgstr "方法%sä¸è¢«å…许" msgid "no log file found" msgstr "日志文件丢失" msgid "odfpy not installed." msgstr "odfpy未安装" #, python-format msgid "plotting results failed due to %s" msgstr "绘制结果图标时失败因为%s" msgid "python-matplotlib not installed." 
msgstr "python-matplotlib未安装" swift-2.17.0/swift/locale/ja/0000775000175100017510000000000013236061751015752 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/ja/LC_MESSAGES/0000775000175100017510000000000013236061751017537 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/ja/LC_MESSAGES/swift.po0000666000175100017510000007654413236061620021250 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Sasuke(Kyohei MORIYAMA) <>, 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Japanese\n" msgid "" "\n" "user quit" msgstr "" "\n" "ユーザー終了" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - パラレルã€%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d ã‚µãƒ•ã‚£ãƒƒã‚¯ã‚¹ãŒæ¤œæŸ»ã•れã¾ã—㟠- ãƒãƒƒã‚·ãƒ¥æ¸ˆã¿ %(hashed).2f%%ã€åŒæœŸ" "済㿠%(synced).2f%%" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) パーティションãŒ%(time).2fs ã§" "複製ã•れã¾ã—㟠(%(rate).2f/ç§’ã€æ®‹ã‚Š %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "æˆåŠŸ %(success)sã€å¤±æ•— %(failure)s" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s ㌠%(statuses)s ã«ã¤ã„㦠503 ã‚’è¿”ã—ã¦ã„ã¾ã™" #, python-format msgid "%s already started..." msgstr "%s ã¯æ—¢ã«é–‹å§‹ã•れã¦ã„ã¾ã™..." 
#, python-format msgid "%s does not exist" msgstr "%s ãŒå­˜åœ¨ã—ã¾ã›ã‚“" #, python-format msgid "%s is not mounted" msgstr "%s ãŒãƒžã‚¦ãƒ³ãƒˆã•れã¦ã„ã¾ã›ã‚“" #, python-format msgid "%s responded as unmounted" msgstr "%s ã¯ã‚¢ãƒ³ãƒžã‚¦ãƒ³ãƒˆã¨ã—ã¦å¿œç­”ã—ã¾ã—ãŸ" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 接続ãŒãƒ”ã‚¢ã«ã‚ˆã£ã¦ãƒªã‚»ãƒƒãƒˆã•れã¾ã—ãŸ" #, python-format msgid ", %s containers deleted" msgstr "ã€%s コンテナーãŒå‰Šé™¤ã•れã¾ã—ãŸ" #, python-format msgid ", %s containers possibly remaining" msgstr "ã€%s ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ãŒæ®‹ã£ã¦ã„ã‚‹ã¨æ€ã‚れã¾ã™" #, python-format msgid ", %s containers remaining" msgstr "ã€%s ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ãŒæ®‹ã£ã¦ã„ã¾ã™" #, python-format msgid ", %s objects deleted" msgstr "ã€%s オブジェクトãŒå‰Šé™¤ã•れã¾ã—ãŸ" #, python-format msgid ", %s objects possibly remaining" msgstr "ã€%s ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆãŒæ®‹ã£ã¦ã„ã‚‹ã¨æ€ã‚れã¾ã™" #, python-format msgid ", %s objects remaining" msgstr "ã€%s ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆãŒæ®‹ã£ã¦ã„ã¾ã™" #, fuzzy, python-format msgid ", elapsed: %.02fs" msgstr "ã€çµŒéŽæ™‚é–“: %.02fs" msgid ", return codes: " msgstr "ã€æˆ»ã‚Šã‚³ãƒ¼ãƒ‰: " msgid "Account" msgstr "アカウント" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "アカウント監査 \"once\" モードãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "アカウント監査ã®å‡¦ç†ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f ç§’ã§ %(count)d 個㮠DB ã®è¤‡è£½ã‚’試行ã—ã¾ã—㟠(%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "æ­£ã—ããªã„å†åŒæœŸæˆ»ã‚Šã‚³ãƒ¼ãƒ‰: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "アカウント監査 \"once\" モードã®é–‹å§‹" msgid "Begin account audit pass." msgstr "アカウント監査パスを開始ã—ã¾ã™ã€‚" msgid "Begin container audit \"once\" mode" msgstr "コンテナー監査「onceã€ãƒ¢ãƒ¼ãƒ‰ã®é–‹å§‹" msgid "Begin container audit pass." msgstr "コンテナー監査パスを開始ã—ã¾ã™ã€‚" msgid "Begin container sync \"once\" mode" msgstr "ã‚³ãƒ³ãƒ†ãƒŠãƒ¼åŒæœŸã€Œonceã€ãƒ¢ãƒ¼ãƒ‰ã®é–‹å§‹" msgid "Begin container update single threaded sweep" msgstr "コンテナー更新å˜ä¸€ã‚¹ãƒ¬ãƒƒãƒ‰åŒ–スイープã®é–‹å§‹" msgid "Begin container update sweep" msgstr "コンテナー更新スイープã®é–‹å§‹" msgid "Begin object update single threaded sweep" msgstr "オブジェクト更新å˜ä¸€ã‚¹ãƒ¬ãƒƒãƒ‰åŒ–スイープã®é–‹å§‹" msgid "Begin object update sweep" msgstr "オブジェクト更新スイープã®é–‹å§‹" #, python-format msgid "Beginning pass on account %s" msgstr "アカウント %s ã§ãƒ‘スを開始中" msgid "Beginning replication run" msgstr "複製ã®å®Ÿè¡Œã‚’開始中" msgid "Broker error trying to rollback locked connection" msgstr "ãƒ­ãƒƒã‚¯æ¸ˆã¿æŽ¥ç¶šã®ãƒ­ãƒ¼ãƒ«ãƒãƒƒã‚¯ã‚’試行中ã®ãƒ–ローカーエラー" #, python-format msgid "Can not access the file %s." msgstr "ファイル %s ã«ã‚¢ã‚¯ã‚»ã‚¹ã§ãã¾ã›ã‚“。" #, python-format msgid "Can not load profile data from %s." 
msgstr "プロファイルデータを %s ã‹ã‚‰ãƒ­ãƒ¼ãƒ‰ã§ãã¾ã›ã‚“。" #, python-format msgid "Client did not read from proxy within %ss" msgstr "クライアント㯠%s 内ã®ãƒ—ロキシーã‹ã‚‰ã®èª­ã¿å–りを行ã„ã¾ã›ã‚“ã§ã—ãŸ" msgid "Client disconnected on read" msgstr "クライアントãŒèª­ã¿å–り時ã«åˆ‡æ–­ã•れã¾ã—ãŸ" msgid "Client disconnected without sending enough data" msgstr "å分ãªãƒ‡ãƒ¼ã‚¿ã‚’é€ä¿¡ã›ãšã«ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆãŒåˆ‡æ–­ã•れã¾ã—ãŸ" msgid "Client disconnected without sending last chunk" msgstr "最後ã®ãƒãƒ£ãƒ³ã‚¯ã‚’é€ä¿¡ã›ãšã«ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆãŒåˆ‡æ–­ã•れã¾ã—ãŸ" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "クライアントパス %(client)s ã¯ã‚ªãƒ–ジェクトメタデータ %(meta)s ã«ä¿ç®¡ã•れãŸãƒ‘" "スã«ä¸€è‡´ã—ã¾ã›ã‚“" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "設定オプション internal_client_conf_path ãŒå®šç¾©ã•れã¦ã„ã¾ã›ã‚“。デフォルト設定" "を使用ã—ã¦ã„ã¾ã™ã€‚オプションã«ã¤ã„ã¦ã¯ internal-client.conf-sample ã‚’å‚ç…§ã—ã¦" "ãã ã•ã„" msgid "Connection refused" msgstr "æŽ¥ç¶šãŒæ‹’å¦ã•れã¾ã—ãŸ" msgid "Connection timeout" msgstr "接続ãŒã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸ" msgid "Container" msgstr "コンテナー" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "コンテナー監査「onceã€ãƒ¢ãƒ¼ãƒ‰ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "コンテナー監査ã®å‡¦ç†ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "ã‚³ãƒ³ãƒ†ãƒŠãƒ¼åŒæœŸã€Œonceã€ãƒ¢ãƒ¼ãƒ‰ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "コンテナー更新å˜ä¸€ã‚¹ãƒ¬ãƒƒãƒ‰åŒ–スイープãŒå®Œäº†ã—ã¾ã—ãŸ: %(elapsed).02fsã€æˆåŠŸ " "%(success)sã€å¤±æ•— %(fail)sã€æœªå¤‰æ›´ %(no_change)s" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "コンテナー更新スイープãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼æ›´æ–°ã‚¹ã‚¤ãƒ¼ãƒ—ãŒå®Œäº†ã—ã¾ã—ãŸ: %(elapsed).02fsã€æˆåŠŸ " "%(success)sã€å¤±æ•— %(fail)sã€æœªå¤‰æ›´ %(no_change)s" #, python-format msgid "Data download error: %s" msgstr "データダウンロードエラー: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "デãƒã‚¤ã‚¹ã®å‡¦ç†ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "エラー %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "エラー %(status)d: %(type)s サーãƒãƒ¼ã‹ã‚‰ã® %(body)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "エラー %(status)d: オブジェクトサーãƒãƒ¼ã‹ã‚‰ã® %(body)sã€re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "エラー %(status)d: 予期: オブジェクトサーãƒãƒ¼ã‹ã‚‰ã® 100-continue" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "エラー: アカウント更新㌠%(ip)s:%(port)s/%(device)s ã§å¤±æ•—ã—ã¾ã—ãŸ(後ã§å†è©¦è¡Œ" "ã•れã¾ã™): 応答 %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "エラー: ホスト %(host)s ã‹ã‚‰ã®å¿œç­” %(status)s ãŒæ­£ã—ãã‚りã¾ã›ã‚“" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "エラー: クライアント読ã¿å–りãŒã‚¿ã‚¤ãƒ 
ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—㟠(%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "エラー: コンテナー更新ã«å¤±æ•—ã—ã¾ã—㟠(後ã®éžåŒæœŸæ›´æ–°ã®ãŸã‚ã«ä¿å­˜ä¸­): %(ip)s:" "%(port)s/%(dev)s ã‹ã‚‰ã® %(status)d 応答" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR アカウント情報 %s ãŒå–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" #, python-format msgid "ERROR Could not get container info %s" msgstr "エラー: コンテナー情報 %s ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "エラー: DiskFile %(data_file)s ã‚’é–‰ã˜ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "エラー: 例外ã«ã‚ˆã‚Šã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆãŒåˆ‡æ–­ã•れã¦ã„ã¾ã™" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "エラー: オブジェクトサーãƒãƒ¼ %s ã¸ã®ãƒ‡ãƒ¼ã‚¿è»¢é€ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "ERROR Failed to get my own IPs?" msgstr "エラー: 自分㮠IP ã®å–å¾—ã«å¤±æ•—?" msgid "ERROR Insufficient Storage" msgstr "エラー: ストレージãŒä¸è¶³ã—ã¦ã„ã¾ã™" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "エラー: オブジェクト %(obj)s ã¯ç›£æŸ»ã«å¤±æ•—ã—ã€æ¤œç–«ã•れã¾ã—ãŸ: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "エラー: ピックルã®å•題ã€%s を検疫ã—ã¾ã™" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "エラー: リモートドライブ㫠%s ãŒãƒžã‚¦ãƒ³ãƒˆã•れã¦ã„ã¾ã›ã‚“" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "%(db_file)s %(row)s ã®åŒæœŸã‚¨ãƒ©ãƒ¼" #, python-format msgid "ERROR Syncing %s" msgstr "%s ã®åŒæœŸã‚¨ãƒ©ãƒ¼" #, python-format msgid "ERROR Trying to audit %s" msgstr "%s ã®ç›£æŸ»ã‚’試行中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "ERROR Unhandled exception in request" msgstr "エラー: è¦æ±‚ã§æœªå‡¦ç†ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "エラー: %(method)s %(path)s ã§ã® __call__ エラー" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "エラー: アカウント更新㌠%(ip)s:%(port)s/%(device)s ã§å¤±æ•—ã—ã¾ã—ãŸ(後ã§å†è©¦è¡Œ" "ã•れã¾ã™)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "エラー: アカウント更新㌠%(ip)s:%(port)s/%(device)s ã§å¤±æ•—ã—ã¾ã—ãŸ(後ã§å†è©¦è¡Œ" "ã•れã¾ã™): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "エラー: 予期ã—ãªã„åå‰ %s ã‚’æŒã¤ãƒ•ァイルをéžåŒæœŸä¿ç•™ä¸­" msgid "ERROR auditing" msgstr "監査エラー" #, python-format msgid "ERROR auditing: %s" msgstr "監査エラー: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "エラー: コンテナー更新㌠%(ip)s:%(port)s/%(dev)s ã§å¤±æ•—ã—ã¾ã—㟠(後ã®éžåŒæœŸæ›´" "æ–°ã®ãŸã‚ã«ä¿å­˜ä¸­)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "%s ã‹ã‚‰ã® HTTP 応答ã®èª­ã¿å–りエラー" #, python-format msgid "ERROR reading db %s" msgstr "DB %s ã®èª­ã¿å–りエラー" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "エラー: %(code)s ã¨ã®å†åŒæœŸã«å¤±æ•—ã—ã¾ã—ãŸ: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ノード %(node)s ã¨ã® %(file)s ã®åŒæœŸã‚¨ãƒ©ãƒ¼" msgid "ERROR trying to replicate" msgstr "複製ã®è©¦è¡Œã‚¨ãƒ©ãƒ¼" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s ã®ã‚¯ãƒªãƒ¼ãƒ³ã‚¢ãƒƒãƒ—を試行中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: 
%(info)s" msgstr "" "%(type)s サーãƒãƒ¼ %(ip)s:%(port)s/%(device)s ã§ã®ã‚¨ãƒ©ãƒ¼ã€è¿”ã•れãŸå€¤: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "%s ã‹ã‚‰ã®æŠ‘æ­¢ã®ãƒ­ãƒ¼ãƒ‰ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "リモートサーãƒãƒ¼ %(ip)s:%(port)s/%(device)s ã§ã®ã‚¨ãƒ©ãƒ¼" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "エラー: ドライブパーティションã«å¯¾ã™ã‚‹ãƒ‘スã®å–å¾—ã«å¤±æ•—ã—ã¾ã—ãŸ: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "エラー: %(path)s ã«ã‚¢ã‚¯ã‚»ã‚¹ã§ãã¾ã›ã‚“: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "エラー: 監査を実行ã§ãã¾ã›ã‚“: %s" msgid "Error hashing suffix" msgstr "サフィックスã®ãƒãƒƒã‚·ãƒ¥ã‚¨ãƒ©ãƒ¼" msgid "Error listing devices" msgstr "デãƒã‚¤ã‚¹ã®ãƒªã‚¹ãƒˆã‚¨ãƒ©ãƒ¼" #, python-format msgid "Error on render profiling results: %s" msgstr "ãƒ¬ãƒ³ãƒ€ãƒªãƒ³ã‚°ãƒ—ãƒ­ãƒ•ã‚¡ã‚¤ãƒ«çµæžœã§ã®ã‚¨ãƒ©ãƒ¼: %s" msgid "Error parsing recon cache file" msgstr "冿§‹æˆã‚­ãƒ£ãƒƒã‚·ãƒ¥ãƒ•ã‚¡ã‚¤ãƒ«ã®æ§‹æ–‡è§£æžã‚¨ãƒ©ãƒ¼" msgid "Error reading recon cache file" msgstr "冿§‹æˆã‚­ãƒ£ãƒƒã‚·ãƒ¥ãƒ•ァイルã®èª­ã¿å–りエラー" msgid "Error reading ringfile" msgstr "リングファイルã®èª­ã¿å–りエラー" msgid "Error reading swift.conf" msgstr "swift.conf ã®èª­ã¿å–りエラー" msgid "Error retrieving recon data" msgstr "冿§‹æˆãƒ‡ãƒ¼ã‚¿ã®å–得エラー" msgid "Error syncing handoff partition" msgstr "ãƒãƒ³ãƒ‰ã‚ªãƒ•パーティションã®åŒæœŸã‚¨ãƒ©ãƒ¼" msgid "Error syncing partition" msgstr "パーティションã¨ã®åŒæœŸã‚¨ãƒ©ãƒ¼" #, python-format msgid "Error syncing with node: %s" msgstr "ノードã¨ã®åŒæœŸã‚¨ãƒ©ãƒ¼: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "%(path)s ã®å†æ§‹ç¯‰ã‚’試行中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚ãƒãƒªã‚·ãƒ¼ #%(policy)d フラグ" "メント #%(frag_index)s" msgid "Error: An error occurred" msgstr "エラー: エラーãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Error: missing config path argument" msgstr "エラー: æ§‹æˆãƒ‘ス引数ãŒã‚りã¾ã›ã‚“" #, python-format msgid "Error: unable to locate %s" msgstr "エラー: %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" msgid "Exception dumping recon cache" msgstr "冿§‹æˆã‚­ãƒ£ãƒƒã‚·ãƒ¥ã®ãƒ€ãƒ³ãƒ—ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Exception in top-level account reaper loop" msgstr "最上ä½ã‚¢ã‚«ã‚¦ãƒ³ãƒˆãƒªãƒ¼ãƒ‘ーループã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Exception in top-level replication loop" msgstr "最上ä½è¤‡è£½ãƒ«ãƒ¼ãƒ—ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Exception in top-levelreconstruction loop" msgstr "最上ä½å†æ§‹æˆãƒ«ãƒ¼ãƒ—ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Exception with account %s" msgstr "アカウント %s ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Exception with containers for account %s" msgstr "アカウント %s ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "アカウント %(account)s ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ %(container)s ã®ã‚ªãƒ–ジェクトã§ä¾‹å¤–ãŒç™ºç”Ÿ" "ã—ã¾ã—ãŸ" #, python-format msgid "Expect: 100-continue on %s" msgstr "予期: %s ã§ã® 100-continue" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s ã‹ã‚‰ %(found_domain)s 㸠CNAME ãƒã‚§ãƒ¼ãƒ³ã‚’フォロー中" msgid "Found configs:" msgstr "æ§‹æˆãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸ:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." 
msgstr "" "ãƒãƒ³ãƒ‰ã‚ªãƒ•ã®ãƒ•ァーストモードã«ãƒãƒ³ãƒ‰ã‚ªãƒ•ãŒæ®‹ã£ã¦ã„ã¾ã™ã€‚ç¾è¡Œè¤‡è£½ãƒ‘スを打ã¡åˆ‡" "りã¾ã™ã€‚" msgid "Host unreachable" msgstr "ホストãŒåˆ°é”ä¸èƒ½ã§ã™" #, python-format msgid "Incomplete pass on account %s" msgstr "アカウント %s ã§ã®ä¸å®Œå…¨ãªãƒ‘ス" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "X-Container-Sync-To å½¢å¼ %r ãŒç„¡åйã§ã™" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "無効ãªãƒ›ã‚¹ãƒˆ %r ㌠X-Container-Sync-To ã«ã‚りã¾ã™" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無効ãªä¿ç•™ä¸­é …ç›® %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "%(full_path)s ã‹ã‚‰ã®å¿œç­” %(resp)s ãŒç„¡åйã§ã™" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s ã‹ã‚‰ã®å¿œç­” %(resp)s ãŒç„¡åйã§ã™" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "無効ãªã‚¹ã‚­ãƒ¼ãƒ  %r ㌠X-Container-Sync-To ã«ã‚りã¾ã™ã€‚「//ã€ã€ã€Œhttpã€ã€" "「httpsã€ã®ã„ãšã‚Œã‹ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" #, python-format msgid "Killing long-running rsync: %s" msgstr "長期実行ã®å†åŒæœŸã‚’強制終了中: %s" msgid "Lockup detected.. killing live coros." msgstr "ãƒ­ãƒƒã‚¯ãŒæ¤œå‡ºã•れã¾ã—ãŸ.. ライブ coros を強制終了中" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s ㌠%(found_domain)s ã«ãƒžãƒƒãƒ—ã•れã¾ã—ãŸ" #, python-format msgid "No %s running" msgstr "%s ãŒå®Ÿè¡Œã•れã¦ã„ã¾ã›ã‚“" #, python-format msgid "No permission to signal PID %d" msgstr "PID %d ã«ã‚·ã‚°ãƒŠãƒ«é€šçŸ¥ã™ã‚‹è¨±å¯ãŒã‚りã¾ã›ã‚“" #, python-format msgid "No policy with index %s" msgstr "インデックス %s ã®ãƒãƒªã‚·ãƒ¼ã¯ã‚りã¾ã›ã‚“" #, python-format msgid "No realm key for %r" msgstr "%r ã®ãƒ¬ãƒ«ãƒ ã‚­ãƒ¼ãŒã‚りã¾ã›ã‚“" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "ãƒŽãƒ¼ãƒ‰ã‚¨ãƒ©ãƒ¼åˆ¶é™ %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "肯定応答を返ã—ãŸã‚ªãƒ–ジェクト・サーãƒãƒ¼ãŒä¸å分ã§ã™ (%d å–å¾—)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "䏿¤œå‡º %(sync_from)r => %(sync_to)r - オブジェクト " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%s ç§’é–“ã§ä½•ã‚‚å†æ§‹æˆã•れã¾ã›ã‚“ã§ã—ãŸã€‚" #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s ç§’é–“ã§ä½•も複製ã•れã¾ã›ã‚“ã§ã—ãŸã€‚" msgid "Object" msgstr "オブジェクト" msgid "Object PUT" msgstr "オブジェクト PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "オブジェクト PUT ㌠409 ã«å¯¾ã—㦠202 ã‚’è¿”ã—ã¦ã„ã¾ã™: %(req_timestamp)s<= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "オブジェクト PUT ㌠412 ã‚’è¿”ã—ã¦ã„ã¾ã™ã€‚%(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "オブジェクト監査 (%(type)s) 「%(mode)sã€ãƒ¢ãƒ¼ãƒ‰å®Œäº†: %(elapsed).02fs。åˆè¨ˆæ¤œç–«" "済ã¿: %(quars)dã€åˆè¨ˆã‚¨ãƒ©ãƒ¼: %(errors)dã€åˆè¨ˆãƒ•ァイル/ç§’: %(frate).2fã€åˆè¨ˆãƒ" "イト/ç§’: %(brate).2fã€ç›£æŸ»æ™‚é–“: %(audit).2fã€çއ: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "オブジェクト監査 (%(type)s)。%(start_time)s 以é™: ローカル: åˆæ ¼ã—ãŸç›£æŸ» " "%(passes)dã€æ¤œç–«æ¸ˆã¿ %(quars)dã€ã‚¨ãƒ©ãƒ¼ %(errors)dã€ãƒ•ァイル/ç§’: %(frate).2fã€" "ãƒã‚¤ãƒˆ/ç§’: %(brate).2fã€åˆè¨ˆæ™‚é–“: %(total).2fã€ç›£æŸ»æ™‚é–“: %(audit).2fã€çއ: " "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "オブジェクト監査統計: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆå†æ§‹æˆãŒå®Œäº†ã—ã¾ã—㟠(1 回)。(%.02f 分)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆå†æ§‹æˆãŒå®Œäº†ã—ã¾ã—ãŸã€‚(%.02f 分)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "オブジェクト複製ãŒå®Œäº†ã—ã¾ã—㟠(1 回)。(%.02f 分)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "オブジェクト複製ãŒå®Œäº†ã—ã¾ã—ãŸã€‚(%.02f 分)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "オブジェクトサーãƒãƒ¼ãŒ %s 個ã®ä¸ä¸€è‡´ etag ã‚’è¿”ã—ã¾ã—ãŸ" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "オブジェクト更新スイープãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "パラメーターã€ç…§ä¼šã€ãŠã‚ˆã³ãƒ•ラグメント㯠X-Container-Sync-To ã§è¨±å¯ã•れã¦ã„ã¾" "ã›ã‚“" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "パーティション時間: 最大 %(max).4fsã€æœ€å° %(min).4fsã€ä¸­é–“ %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To ã«ãƒ‘スãŒå¿…è¦ã§ã™" #, python-format msgid "Problem cleaning up %s" msgstr "%s ã®ã‚¯ãƒªãƒ¼ãƒ³ã‚¢ãƒƒãƒ—中ã«å•題ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Profiling Error: %s" msgstr "プロファイル作æˆã‚¨ãƒ©ãƒ¼: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "ディレクトリーã§ã¯ãªã„ãŸã‚ã€%(hsh_path)s 㯠%(quar_path)s ã¸æ¤œç–«ã•れã¾ã—ãŸ" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "ディレクトリーã§ã¯ãªã„ãŸã‚ã€%(object_path)s 㯠%(quar_path)s ã¸æ¤œç–«ã•れã¾ã—ãŸ" #, python-format msgid "Quarantining DB %s" msgstr "DB %s ã®æ¤œç–«ä¸­" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ratelimit スリープログ: %(account)s/%(container)s/%(object)s ã® %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d 個㮠DB ãŒå‰Šé™¤ã•れã¾ã—ãŸ" #, python-format msgid "Removing %s objects" msgstr "%s オブジェクトã®å‰Šé™¤ä¸­" #, python-format msgid "Removing partition: %s" msgstr "パーティションã®å‰Šé™¤ä¸­: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "æ­£ã—ããªã„ pid %(pid)d ã® pid ファイル %(pid_file)s を削除中" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "無効㪠pid ã® pid ファイル %s を削除中" #, python-format msgid "Removing stale pid file %s" msgstr "失効ã—㟠pid ファイル %s を削除中" msgid "Replication run OVER" msgstr "複製ã®å®Ÿè¡ŒãŒçµ‚了ã—ã¾ã—ãŸ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "ブラックリスティングã®ãŸã‚ 497 ã‚’è¿”ã—ã¦ã„ã¾ã™: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s ã«å¯¾ã™ã‚‹ %(meth)s ã«é–¢ã—㦠498 ã‚’è¿”ã—ã¦ã„ã¾ã™ã€‚" "Ratelimit (最大スリープ) %(e)s" msgid "Ring change detected. 
Aborting current reconstruction pass." msgstr "ãƒªãƒ³ã‚°å¤‰æ›´ãŒæ¤œå‡ºã•れã¾ã—ãŸã€‚ç¾è¡Œå†æ§‹æˆãƒ‘スを打ã¡åˆ‡ã‚Šã¾ã™ã€‚" msgid "Ring change detected. Aborting current replication pass." msgstr "ãƒªãƒ³ã‚°å¤‰æ›´ãŒæ¤œå‡ºã•れã¾ã—ãŸã€‚ç¾è¡Œè¤‡è£½ãƒ‘スを打ã¡åˆ‡ã‚Šã¾ã™ã€‚" #, python-format msgid "Running %s once" msgstr "%s ã‚’ 1 回実行中" msgid "Running object reconstructor in script mode." msgstr "スクリプトモードã§ã‚ªãƒ–ジェクトリコンストラクターを実行中ã§ã™ã€‚" msgid "Running object replicator in script mode." msgstr "スクリプトモードã§ã‚ªãƒ–ジェクトレプリケーターを実行中ã§ã™ã€‚" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s 以é™: åŒæœŸæ¸ˆã¿ %(sync)s [削除 %(delete)sã€æ›¸ã込㿠%(put)s]ã€ã‚¹ã‚­ãƒƒ" "プ %(skip)sã€å¤±æ•— %(fail)s" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "%(time)s 以é™: アカウント監査: åˆæ ¼ã—ãŸç›£æŸ» %(passed)sã€ä¸åˆæ ¼ã®ç›£" "査%(failed)s" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "%(time)s 以é™: コンテナー監査: åˆæ ¼ã—ãŸç›£æŸ» %(pass)sã€ä¸åˆæ ¼ã®ç›£æŸ»%(fail)s" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s ã¯ãƒžã‚¦ãƒ³ãƒˆã•れã¦ã„ãªã„ãŸã‚ã€ã‚¹ã‚­ãƒƒãƒ—ã•れã¾ã™" #, python-format msgid "Skipping %s as it is not mounted" msgstr "マウントã•れã¦ã„ãªã„ãŸã‚〠%s をスキップã—ã¾ã™" #, python-format msgid "Starting %s" msgstr "%s ã‚’é–‹å§‹ã—ã¦ã„ã¾ã™" msgid "Starting object reconstruction pass." msgstr "ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆå†æ§‹æˆãƒ‘スを開始中ã§ã™ã€‚" msgid "Starting object reconstructor in daemon mode." msgstr "オブジェクトリコンストラクターをデーモンモードã§é–‹å§‹ä¸­ã§ã™ã€‚" msgid "Starting object replication pass." msgstr "オブジェクト複製パスを開始中ã§ã™ã€‚" msgid "Starting object replicator in daemon mode." msgstr "オブジェクトレプリケーターをデーモンモードã§é–‹å§‹ä¸­ã§ã™ã€‚" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s ã§ã® %(src)s ã®å†åŒæœŸãŒæˆåŠŸã—ã¾ã—㟠(%(time).03f)" msgid "The file type are forbidden to access!" msgstr "ã“ã®ãƒ•ァイルタイプã«ã¯ã‚¢ã‚¯ã‚»ã‚¹ãŒç¦æ­¢ã•れã¦ã„ã¾ã™" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "コンテナーã®åˆè¨ˆ %(key)s (%(total)s) ãŒãƒãƒªã‚·ãƒ¼å…¨ä½“ã®åˆè¨ˆ %(key)s(%(sum)s) ã«" "一致ã—ã¾ã›ã‚“" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ã®ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆä¾‹å¤–" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s を試行中" #, python-format msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s を試行中" msgid "Trying to read during GET" msgstr "GET 時ã«èª­ã¿å–りを試行中" msgid "Trying to read during GET (retrying)" msgstr "GET 時ã«èª­ã¿å–りを試行中 (å†è©¦è¡Œä¸­)" msgid "Trying to send to client" msgstr "クライアントã¸ã®é€ä¿¡ã‚’試行中" #, python-format msgid "Trying to sync suffixes with %s" msgstr "%s ã§ã‚µãƒ•ィックスã®åŒæœŸã‚’試行中" #, python-format msgid "Trying to write to %s" msgstr "%s ã¸ã®æ›¸ãè¾¼ã¿ã‚’試行中" msgid "UNCAUGHT EXCEPTION" msgstr "キャッãƒã•れã¦ã„ãªã„例外" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "%s ㌠libc ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。no-op ã¨ã—ã¦çµ‚了ã—ã¾ã™ã€‚" #, python-format msgid "Unable to locate config for %s" msgstr "%s ã®è¨­å®šãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "" "fallocateã€posix_fallocate ㌠libc ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。no-op ã¨ã—ã¦çµ‚了ã—ã¾ã™ã€‚" #, python-format msgid "Unable to read config from %s" msgstr "æ§‹æˆã‚’ %s ã‹ã‚‰èª­ã¿å–ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "éžèªè¨¼ %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "未処ç†ä¾‹å¤–" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "GET を試行中ã«ä¸æ˜Žãªä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s ã«é–¢ã™ã‚‹æ›´æ–°ãƒ¬ãƒãƒ¼ãƒˆãŒå¤±æ•—ã—ã¾ã—ãŸ" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s ã«é–¢ã™ã‚‹æ›´æ–°ãƒ¬ãƒãƒ¼ãƒˆãŒé€ä¿¡ã•れã¾ã—ãŸ" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "警告: SSL を有効ã«ã™ã‚‹ã®ã¯ãƒ†ã‚¹ãƒˆç›®çš„ã®ã¿ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。製å“ã®ãƒ‡ãƒ—ロイ" "ã«ã¯å¤–部 SSL 終端を使用ã—ã¦ãã ã•ã„。" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告: ファイル記述å­åˆ¶é™ã‚’変更ã§ãã¾ã›ã‚“。éžãƒ«ãƒ¼ãƒˆã¨ã—ã¦å®Ÿè¡Œã—ã¾ã™ã‹?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告: 最大処ç†é™ç•Œã‚’変更ã§ãã¾ã›ã‚“。éžãƒ«ãƒ¼ãƒˆã¨ã—ã¦å®Ÿè¡Œã—ã¾ã™ã‹?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告: メモリー制é™ã‚’変更ã§ãã¾ã›ã‚“。éžãƒ«ãƒ¼ãƒˆã¨ã—ã¦å®Ÿè¡Œã—ã¾ã™ã‹?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告: memcached クライアントãªã—ã§ ratelimit を行ã†ã“ã¨ã¯ã§ãã¾ã›ã‚“" #, python-format msgid "method %s is not allowed." msgstr "メソッド %s ã¯è¨±å¯ã•れã¦ã„ã¾ã›ã‚“。" msgid "no log file found" msgstr "ログファイルãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" msgid "odfpy not installed." msgstr "odfpy ãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れã¦ã„ã¾ã›ã‚“。" #, python-format msgid "plotting results failed due to %s" msgstr "%s ãŒåŽŸå› ã§çµæžœã®ãƒ—ロットã«å¤±æ•—ã—ã¾ã—ãŸ" msgid "python-matplotlib not installed." msgstr "python-matplotlib ãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れã¦ã„ã¾ã›ã‚“。" swift-2.17.0/swift/locale/de/0000775000175100017510000000000013236061751015750 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/de/LC_MESSAGES/0000775000175100017510000000000013236061751017535 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/de/LC_MESSAGES/swift.po0000666000175100017510000010167013236061620021233 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2014 # Ettore Atalan , 2014-2015 # Jonas John , 2015 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-02 07:02+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: German\n" msgid "" "\n" "user quit" msgstr "" "\n" "Durch Benutzer beendet" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d Suffixe überprüft - %(hashed).2f%% hashverschlüsselt, " "%(synced).2f%% synchronisiert" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) Partitionen repliziert in " "%(time).2fs (%(rate).2f/s, %(remaining)s verbleibend)" #, python-format msgid "%(server)s (%(pid)s) appears to have stopped" msgstr "%(server)s (%(pid)s) scheinbar gestoppt" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s Erfolge, %(failure)s Fehlschläge" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s gab 503 für %(statuses)s zurück" #, python-format msgid "%(type)s: %(value)s" msgstr "%(type)s: %(value)s" #, python-format msgid "%s already started..." msgstr "%s bereits gestartet..." 
#, python-format msgid "%s does not exist" msgstr "%s existiert nicht" #, python-format msgid "%s is not mounted" msgstr "%s ist nicht eingehängt" #, python-format msgid "%s responded as unmounted" msgstr "%s zurückgemeldet als ausgehängt" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Verbindung zurückgesetzt durch Peer" #, python-format msgid ", %s containers deleted" msgstr ", %s Container gelöscht" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s Container möglicherweise verbleibend" #, python-format msgid ", %s containers remaining" msgstr ", %s Container verbleibend" #, python-format msgid ", %s objects deleted" msgstr ", %s Objekte gelöscht" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s Objekte möglicherweise verbleibend" #, python-format msgid ", %s objects remaining" msgstr ", %s Objekte verbleibend" #, python-format msgid ", elapsed: %.02fs" msgstr ", vergangen: %.02fs" msgid ", return codes: " msgstr ", Rückgabecodes: " msgid "Account" msgstr "Konto" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "Konto %(account)s wurde nicht aufgeräumt seit %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Kontoprüfungsdurchlauf abgeschlossen: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Versuch, %(count)d Datenbanken in %(time).5f Sekunden zu replizieren " "(%(rate).5f/s)" #, python-format msgid "Audit Failed for %(path)s: %(err)s" msgstr "Prüfung fehlgeschlagen für %(path)s: %(err)s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Falscher rsync-Rückgabecode: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Kontoprüfungsmodus \"once\" wird gestartet" msgid "Begin account audit pass." msgstr "Kontoprüfungsdurchlauf wird gestartet." msgid "Begin container audit \"once\" mode" msgstr "Containerprüfungsmodus \"once\" wird gestartet" msgid "Begin container audit pass." msgstr "Containerprüfungsdurchlauf wird gestartet." msgid "Begin container sync \"once\" mode" msgstr "Containersynchronisationsmodus \"once\" wird gestartet" msgid "Begin container update single threaded sweep" msgstr "Einzelthread-Scanvorgang für Containeraktualisierung wird gestartet" msgid "Begin container update sweep" msgstr "Scanvorgang für Containeraktualisierung wird gestartet" msgid "Begin object update single threaded sweep" msgstr "Einzelthread-Scanvorgang für Objektaktualisierung wird gestartet" msgid "Begin object update sweep" msgstr "Scanvorgang für Objektaktualisierung wird gestartet" #, python-format msgid "Beginning pass on account %s" msgstr "Durchlauf für Konto %s wird gestartet" msgid "Beginning replication run" msgstr "Replizierungsdurchlauf wird gestartet" msgid "Broker error trying to rollback locked connection" msgstr "" "Brokerfehler beim Versuch, für eine gesperrte Verbindung ein Rollback " "durchzuführen" #, python-format msgid "Can not access the file %s." msgstr "Kann nicht auf die Datei %s zugreifen." #, python-format msgid "Can not load profile data from %s." msgstr "Die Profildaten von %s können nicht geladen werden." #, python-format msgid "Cannot read %(auditor_status)s (%(err)s)" msgstr "%(auditor_status)s (%(err)s) kann nicht gelesen werden." 
#, python-format msgid "Cannot write %(auditor_status)s (%(err)s)" msgstr "Schreiben von %(auditor_status)s (%(err)s) nicht möglich." #, python-format msgid "Client did not read from proxy within %ss" msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen" msgid "Client disconnected on read" msgstr "Client beim Lesen getrennt" msgid "Client disconnected without sending enough data" msgstr "Client getrennt ohne dem Senden von genügend Daten" msgid "Client disconnected without sending last chunk" msgstr "" "Die Verbindung zum Client wurde getrennt, bevor der letzte Chunk gesendet " "wurde. " #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Clientpfad %(client)s entspricht nicht dem in den Objektmetadaten " "gespeicherten Pfad %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Konfigurationsoption internal_client_conf_path nicht definiert. " "Standardkonfiguration wird verwendet. Informationen zu den Optionen finden " "Sie in internal-client.conf-sample." msgid "Connection refused" msgstr "Verbindung abgelehnt" msgid "Connection timeout" msgstr "Verbindungszeitüberschreitung" msgid "Container" msgstr "Container" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Containerprüfungsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Containerprüfungsdurchlauf abgeschlossen: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Containersynchronisationsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Einzelthread-Scanvorgang für Containeraktualisierung abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne " "Änderungen" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Scanvorgang für Containeraktualisierung abgeschlossen: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Scanvorgang für Containeraktualisierung von %(path)s abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne " "Änderungen" #, python-format msgid "" "Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds" msgstr "" "Keine Bindung an %(addr)s:%(port)s möglich nach Versuch über %(timeout)s " "Sekunden" #, python-format msgid "Could not load %(conf)r: %(error)s" msgstr "%(conf)r konnte nicht geladen werden: %(error)s" #, python-format msgid "Data download error: %s" msgstr "Fehler beim Downloaden von Daten: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Gerätedurchgang abgeschlossen: %.02fs" #, python-format msgid "Directory %(directory)r does not map to a valid policy (%(error)s)" msgstr "" "Das Verzeichnis %(directory)r kann keiner gültigen Richtlinie (%(error)s) " "zugeordnet werden." 
#, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "FEHLER %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "FEHLER %(status)d %(body)s von %(type)s Server" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "FEHLER %(status)d %(body)s Vom Objektserver bezüglich: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "FEHLER %(status)d Erwartet: 100-continue von Objektserver" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird zu einem späteren Zeitpunkt erneut versucht): Antwort %(status)s " "%(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen: Unterschiedliche Anzahl von Hosts " "und Einheiten in der Anforderung: \"%(hosts)s\" contra \"%(devices)s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "FEHLER Falsche Rückmeldung %(status)s von %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "FEHLER Client-Lesezeitüberschreitung (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen (wird für asynchrone " "Aktualisierung zu einem späteren Zeitpunkt gespeichert): %(status)d Antwort " "von %(ip)s:%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "FEHLER Kontoinfo %s konnte nicht abgerufen werden" #, python-format msgid "ERROR Could not get container info %s" msgstr "FEHLER Containerinformation %s konnte nicht geholt werden" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "FEHLER Fehler beim Schließen von DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "" "FEHLER Ausnahme, die zu einer Unterbrechung der Verbindung zum Client führt" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "FEHLER: Ausnahme bei der Übertragung von Daten an die Ojektserver %s" msgid "ERROR Failed to get my own IPs?" msgstr "FEHLER Eigene IPs konnten nicht abgerufen werden?" 
msgid "ERROR Insufficient Storage" msgstr "FEHLER Nicht genügend Speicher" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "FEHLER Objekt %(obj)s hat die Prüfung nicht bestanden und wurde unter " "Quarantäne gestellt: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "FEHLER Pickle-Problem, %s wird unter Quarantäne gestellt" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "FEHLER Entferntes Laufwerk nicht eingehängt %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "FEHLER beim Synchronisieren %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "FEHLER beim Synchronisieren %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "FEHLER beim Versuch, %s zu prüfen" msgid "ERROR Unhandled exception in request" msgstr "FEHLER Nicht behandelte Ausnahme in Anforderung" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "FEHLER __call__-Fehler mit %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird zu einem späteren Zeitpunkt erneut versucht)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird später erneut versucht): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "FEHLER asynchrone anstehende Datei mit unerwartetem Namen %s" msgid "ERROR auditing" msgstr "FEHLER bei der Prüfung" #, python-format msgid "ERROR auditing: %s" msgstr "FEHLER bei der Prüfung: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(dev)s " "(wird für asynchrone Aktualisierung zu einem späteren Zeitpunkt gespeichert)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "FEHLER beim Lesen der HTTP-Antwort von %s" #, python-format msgid "ERROR reading db %s" msgstr "FEHLER beim Lesen der Datenbank %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "FEHLER rsync fehlgeschlagen mit %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "" "FEHLER beim Synchronisieren von %(file)s Dateien mit dem Knoten %(node)s" msgid "ERROR trying to replicate" msgstr "FEHLER beim Versuch zu replizieren" #, python-format msgid "ERROR while trying to clean up %s" msgstr "FEHLER beim Versuch, %s zu bereinigen" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "FEHLER mit %(type)s Server %(ip)s:%(port)s/%(device)s AW: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "FEHLER beim Laden von Unterdrückungen von %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "FEHLER mit entferntem Server %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "FEHLER: Pfade zu Laufwerkpartitionen konnten nicht abgerufen werden: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "FEHLER: Auf %(path)s kann nicht zugegriffen werden: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" 
msgstr "FEHLER: Prüfung konnte nicht durchgeführt werden: %s" msgid "Error hashing suffix" msgstr "Fehler beim Hashing des Suffix" #, python-format msgid "Error in %(conf)r with mtime_check_interval: %(error)s" msgstr "Fehler in %(conf)r mit mtime_check_interval: %(error)s" msgid "Error listing devices" msgstr "Fehler beim Auflisten der Geräte" #, python-format msgid "Error on render profiling results: %s" msgstr "Fehler beim Wiedergeben der Profilerstellungsergebnisse: %s" msgid "Error parsing recon cache file" msgstr "Fehler beim Analysieren von recon-Zwischenspeicherdatei" msgid "Error reading recon cache file" msgstr "Fehler beim Lesen von recon-Zwischenspeicherdatei" msgid "Error reading ringfile" msgstr "Fehler beim Lesen der Ringdatei" msgid "Error reading swift.conf" msgstr "Fehler beim Lesen der swift.conf" msgid "Error retrieving recon data" msgstr "Fehler beim Abrufen der recon-Daten" msgid "Error syncing handoff partition" msgstr "Fehler bei der Synchronisierung der Übergabepartition" msgid "Error syncing partition" msgstr "Fehler beim Syncen der Partition" #, python-format msgid "Error syncing with node: %s" msgstr "Fehler beim Synchronisieren mit Knoten: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Fehler bei Versuch, erneuten Build zu erstellen für %(path)s policy#" "%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Fehler: Ein Fehler ist aufgetreten" msgid "Error: missing config path argument" msgstr "Fehler: fehlendes Konfigurationspfadargument" #, python-format msgid "Error: unable to locate %s" msgstr "Fehler: %s kann nicht lokalisiert werden" msgid "Exception dumping recon cache" msgstr "Ausnahme beim Löschen von recon-Cache" msgid "Exception in top-level account reaper loop" msgstr "Ausnahme in Reaper-Loop für Konto der höchsten Ebene" msgid "Exception in top-level replication loop" msgstr "Ausnahme in Replizierungsloop der höchsten Ebene" msgid "Exception in top-levelreconstruction loop" msgstr "Ausnahme in Rekonstruktionsloop der höchsten Ebene" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Ausnahme bei %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Ausnahme mit Account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Ausnahme bei Containern für Konto %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Ausnahme bei Objekten für Container %(container)s für Konto %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Erwartet: 100-continue auf %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "CNAME-Kette für %(given_domain)s bis %(found_domain)s wird gefolgt" msgid "Found configs:" msgstr "Gefundene Konfigurationen:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der aktuelle " "Replikationsdurchgang wird abgebrochen." 
msgid "Host unreachable" msgstr "Host nicht erreichbar" #, python-format msgid "Incomplete pass on account %s" msgstr "Unvollständiger Durchgang auf Konto %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Ungültiges X-Container-Sync-To-Format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Ungültiger Host %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Ungültiger ausstehender Eintrag %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Ungültige Rückmeldung %(resp)s von %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Ungültige Rückmeldung %(resp)s von %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Ungültiges Schema %r in X-Container-Sync-To, muss \"//\", \"http\" oder " "\"https\" sein." #, python-format msgid "Killing long-running rsync: %s" msgstr "Lange laufendes rsync wird gekillt: %s" #, python-format msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" msgstr "Laden von JSON aus %(auditor_status)s fehlgeschlagen: (%(err)s)" msgid "Lockup detected.. killing live coros." msgstr "Suche erkannt. Live-Coros werden gelöscht." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s zugeordnet zu %(found_domain)s" #, python-format msgid "No %s running" msgstr "Kein %s läuft" #, python-format msgid "No cluster endpoint for %(realm)r %(cluster)r" msgstr "Kein Cluster-Endpunkt für %(realm)r %(cluster)r" #, python-format msgid "No permission to signal PID %d" msgstr "Keine Berechtigung zu Signal-Programmkennung %d" #, python-format msgid "No policy with index %s" msgstr "Keine Richtlinie mit Index %s" #, python-format msgid "No realm key for %r" msgstr "Kein Bereichsschlüssel für %r" #, python-format msgid "No space left on device for %(file)s (%(err)s)" msgstr "Kein freier Speicherplatz im Gerät für %(file)s (%(err)s) vorhanden." #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Es wurden nicht genügend Objektserver bestätigt (got %d)." #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Nicht gefunden %(sync_from)r => %(sync_to)r - Objekt " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Für %s Sekunden nichts rekonstruiert." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Für %s Sekunden nichts repliziert." msgid "Object" msgstr "Objekt" msgid "Object PUT" msgstr "Objekt PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "PUT-Operation für ein Objekt gibt 202 für 409 zurück: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Objekt PUT Rückgabe 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Objektprüfung (%(type)s) \"%(mode)s\" Modus abgeschlossen: %(elapsed).02fs. 
" "Unter Quarantäne gestellt insgesamt: %(quars)d, Fehler insgesamt: " "%(errors)d, Dateien/s insgesamt: %(frate).2f, Bytes/s insgesamt: " "%(brate).2f, Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Objektprüfung (%(type)s). Seit %(start_time)s: Lokal: %(passes)d übergeben, " "%(quars)d unter Quarantäne gestellt, %(errors)d Fehler, Dateien/s: " "%(frate).2f, Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, " "Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Objektprüfungsstatistik: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Objektrekonstruktion vollständig (einmal). (%.02f Minuten)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Objektrekonstruktion vollständig. (%.02f Minuten)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Objektreplizierung abgeschlossen (einmal). (%.02f Minuten)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Objektreplikation vollständig. (%.02f Minuten)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Objektserver haben %s nicht übereinstimmende Etags zurückgegeben" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Scanvorgang für Objektaktualisierung abgeschlossen: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parameter, Abfragen und Fragmente nicht zulässig in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Partitionszeiten: max. %(max).4fs, min. %(min).4fs, durchschnittl. 
%(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Pfad in X-Container-Sync-To ist erforderlich" #, python-format msgid "Problem cleaning up %(datadir)s (%(err)s)" msgstr "Problem bei der Bereinigung von %(datadir)s (%(err)s)" #, python-format msgid "Problem cleaning up %s" msgstr "Problem bei der Bereinigung von %s" #, python-format msgid "Profiling Error: %s" msgstr "Fehler bei der Profilerstellung: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es sich " "nicht um ein Verzeichnis handelt" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es " "sich nicht um ein Verzeichnis handelt" #, python-format msgid "Quarantining DB %s" msgstr "Datenbank %s wird unter Quarantäne gestellt" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Inaktivitätsprotokoll für Geschwindigkeitsbegrenzung: %(sleep)s für " "%(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d Datenbanken entfernt" #, python-format msgid "Removing %s objects" msgstr "%s Objekte werden entfernt" #, python-format msgid "Removing partition: %s" msgstr "Partition wird entfernt: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "PID-Datei %(pid_file)s mit falscher PID %(pid)d wird entfernt" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "PID-Datei %s mit ungültiger PID wird entfernt." #, python-format msgid "Removing stale pid file %s" msgstr "Veraltete PID-Datei %s wird entfernt" msgid "Replication run OVER" msgstr "Replizierungsdurchlauf ABGESCHLOSSEN" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "497 wird aufgrund von Blacklisting zurückgegeben: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "498 wird für %(meth)s auf %(acc)s/%(cont)s/%(obj)s zurückgegeben. " "Geschwindigkeitsbegrenzung (Max. Inaktivität) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Ringänderung erkannt. Aktueller Rekonstruktionsdurchgang wird abgebrochen." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Ringänderung erkannt. Aktueller Replizierungsdurchlauf wird abgebrochen." #, python-format msgid "Running %s once" msgstr "%s läuft einmal" msgid "Running object reconstructor in script mode." msgstr "Objektrekonstruktor läuft im Skriptmodus." msgid "Running object replicator in script mode." msgstr "Objektreplikator läuft im Skriptmodus." 
#, python-format msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "Signal %(server)s PID: %(pid)s Signal: %(signal)s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Seit %(time)s: %(sync)s synchronisiert [%(delete)s Löschungen, %(put)s " "Puts], %(skip)s übersprungen, %(fail)s fehlgeschlagen" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Seit %(time)s: Kontoprüfungen: %(passed)s bestandene Prüfung,%(failed)s " "nicht bestandene Prüfung" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Seit %(time)s: Containerprüfungen: %(pass)s bestandene Prüfung, %(fail)s " "nicht bestandene Prüfung" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s wird übersprungen, da nicht angehängt" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s wird übersprungen, weil es nicht eingehängt ist" #, python-format msgid "Starting %s" msgstr "%s wird gestartet" msgid "Starting object reconstruction pass." msgstr "Objektrekonstruktionsdurchgang wird gestartet." msgid "Starting object reconstructor in daemon mode." msgstr "Objektrekonstruktor wird im Daemon-Modus gestartet." msgid "Starting object replication pass." msgstr "Objektreplikationsdurchgang wird gestartet." msgid "Starting object replicator in daemon mode." msgstr "Objektreplikator wird im Dämonmodus gestartet." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Erfolgreiches rsync von %(src)s um %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Auf den Dateityp darf nicht zugegriffen werden!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Die Gesamtsumme an %(key)s für den Container (%(total)s) entspricht nicht " "der Summe der %(key)s für alle Richtlinien (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Zeitüberschreitungsausnahme bei %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Versuch, %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Versuch, %(full_path)s mit GET abzurufen" msgid "Trying to read during GET" msgstr "Versuch, während des GET-Vorgangs zu lesen" msgid "Trying to read during GET (retrying)" msgstr "Versuch, während des GET-Vorgangs zu lesen (Wiederholung)" msgid "Trying to send to client" msgstr "Versuch, an den Client zu senden" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Es wird versucht, Suffixe mit %s zu synchronisieren." #, python-format msgid "Trying to write to %s" msgstr "Versuch, an %s zu schreiben" msgid "UNCAUGHT EXCEPTION" msgstr "NICHT ABGEFANGENE AUSNAHME" #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "" "%(section)s-Konfigurationsabschnitt in %(conf)s kann nicht gefunden werden" #, python-format msgid "Unable to load internal client from config: %(conf)r (%(error)s)" msgstr "" "Interner Client konnte nicht aus der Konfiguration geladen werden: %(conf)r " "(%(error)s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" "%s konnte nicht in libc gefunden werden. Wird als Nullbefehl verlassen." 
#, python-format msgid "Unable to locate config for %s" msgstr "Konfiguration für %s wurde nicht gefunden." #, python-format msgid "Unable to locate config number %(number)s for %(server)s" msgstr "Konfigurationsnummer %(number)s für %(server)s wurde nicht gefunden." msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate konnte nicht in libc gefunden werden. Wird als " "Nullbefehl verlassen." #, python-format msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "fsync() kann für Verzeichnis %(dir)s nicht ausgeführt werden: %(err)s" #, python-format msgid "Unable to read config from %s" msgstr "Konfiguration aus %s kann nicht gelesen werden" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Nicht genehmigte %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "Nicht behandelte Exception" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Unbekannte Ausnahme bei GET-Versuch: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Aktualisierungsbericht fehlgeschlagen für %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Aktualisierungsbericht gesendet für %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "WARNUNG: SSL sollte nur zu Testzwecken aktiviert werden. Verwenden Sie die " "externe SSL-Beendigung für eine Implementierung in der Produktionsumgebung." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für Dateideskriptoren kann nicht geändert werden. Wird " "nicht als Root ausgeführt?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für maximale Verarbeitung kann nicht geändert werden. " "Wird nicht als Root ausgeführt?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für Speicher kann nicht geändert werden. Wird nicht als " "Root ausgeführt?" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "" "Hat %(kill_wait)s Sekunden für %(server)s zum Erlöschen gewartet; Gibt auf" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "" "Hat %(kill_wait)s Sekunden für %(server)s zum Erlöschen gewartet. Wird " "abgebrochen." msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client " "durchgeführt werden" #, python-format msgid "method %s is not allowed." msgstr "Methode %s ist nicht erlaubt." msgid "no log file found" msgstr "keine Protokolldatei gefunden" msgid "odfpy not installed." msgstr "odfpy ist nicht installiert." #, python-format msgid "plotting results failed due to %s" msgstr "" "Die grafische Darstellung der Ergebnisse ist fehlgeschlagen aufgrund von %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib ist nicht installiert." 
swift-2.17.0/swift/locale/zh_TW/0000775000175100017510000000000013236061751016413 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/zh_TW/LC_MESSAGES/0000775000175100017510000000000013236061751020200 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/zh_TW/LC_MESSAGES/swift.po0000666000175100017510000006640713236061620021706 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Chinese (Taiwan)\n" msgid "" "\n" "user quit" msgstr "" "\n" "使用者退出" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - 平行,%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "已檢查 %(checked)d 個字尾 - %(hashed).2f%% 個已雜湊,%(synced).2f%% 個已同步" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "已抄寫 %(replicated)d/%(total)d (%(percentage).2f%%) 個分割區(在 " "%(time).2fs 內,%(rate).2f/秒,剩餘 %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s 個成功,%(failure)s 個失敗" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 針對 %(statuses)s 正在傳回 503" #, python-format msgid "%s already started..." msgstr "%s 已啟動..." 
#, python-format msgid "%s does not exist" msgstr "%s ä¸å­˜åœ¨" #, python-format msgid "%s is not mounted" msgstr "未è£è¼‰ %s" #, python-format msgid "%s responded as unmounted" msgstr "%s 已回應為未è£è¼‰" #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由å°ç­‰é …ç›®é‡è¨­é€£ç·š" #, python-format msgid ", %s containers deleted" msgstr ",已刪除 %s 個儲存器" #, python-format msgid ", %s containers possibly remaining" msgstr ",å¯èƒ½å‰©é¤˜ %s 個儲存器" #, python-format msgid ", %s containers remaining" msgstr ",剩餘 %s 個儲存器" #, python-format msgid ", %s objects deleted" msgstr ",已刪除 %s 個物件" #, python-format msgid ", %s objects possibly remaining" msgstr ",å¯èƒ½å‰©é¤˜ %s 個物件" #, python-format msgid ", %s objects remaining" msgstr ",剩餘 %s 個物件" #, python-format msgid ", elapsed: %.02fs" msgstr ",經歷時間:%.02fs" msgid ", return codes: " msgstr ",回覆碼:" msgid "Account" msgstr "帳戶" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "å¸³æˆ¶å¯©æ ¸ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼å·²å®Œæˆï¼š%.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "帳戶審核通éŽå·²å®Œæˆï¼š%.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "已嘗試在 %(time).5f 秒內抄寫 %(count)d 個資料庫 (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "ä¸ç•¶çš„é ç«¯åŒæ­¥å›žè¦†ç¢¼ï¼š%(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "é–‹å§‹å¸³æˆ¶å¯©æ ¸ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼" msgid "Begin account audit pass." msgstr "開始帳戶審核通éŽã€‚" msgid "Begin container audit \"once\" mode" msgstr "é–‹å§‹å„²å­˜å™¨å¯©æ ¸ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼" msgid "Begin container audit pass." msgstr "開始儲存器審核通éŽã€‚" msgid "Begin container sync \"once\" mode" msgstr "é–‹å§‹å„²å­˜å™¨åŒæ­¥ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼" msgid "Begin container update single threaded sweep" msgstr "開始儲存器更新單一執行緒清ç†" msgid "Begin container update sweep" msgstr "開始儲存器更新清ç†" msgid "Begin object update single threaded sweep" msgstr "開始物件更新單一執行緒清ç†" msgid "Begin object update sweep" msgstr "開始物件更新清ç†" #, python-format msgid "Beginning pass on account %s" msgstr "正在開始帳戶 %s 上的通éŽ" msgid "Beginning replication run" msgstr "正在開始抄寫執行" msgid "Broker error trying to rollback locked connection" msgstr "嘗試回復已鎖定的連線時發生分é…管ç†ç³»çµ±éŒ¯èª¤" #, python-format msgid "Can not access the file %s." msgstr "ç„¡æ³•å­˜å–æª”案 %s。" #, python-format msgid "Can not load profile data from %s." msgstr "無法從 %s 中載入設定檔資料。" #, python-format msgid "Client did not read from proxy within %ss" msgstr "用戶端未在 %s 秒內從 Proxy 中讀å–" msgid "Client disconnected on read" msgstr "ç”¨æˆ¶ç«¯åœ¨è®€å–æ™‚中斷連線" msgid "Client disconnected without sending enough data" msgstr "用戶端已中斷連線,未傳é€è¶³å¤ çš„資料" msgid "Client disconnected without sending last chunk" msgstr "ç”¨æˆ¶ç«¯å·²ä¸­æ–·é€£ç·šï¼Œæœªå‚³é€æœ€å¾Œä¸€å€‹ç‰‡æ®µ" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "用戶端路徑 %(client)s ä¸ç¬¦åˆç‰©ä»¶ meta 資料%(meta)s 中儲存的路徑" msgid "" "Configuration option internal_client_conf_path not defined. 
Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "未定義配置選項 internal_client_conf_path。將使用預設配置,請參閱 internal-" "client.conf-sample 以取得選項" msgid "Connection refused" msgstr "連線遭拒" msgid "Connection timeout" msgstr "連線逾時" msgid "Container" msgstr "容器" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "儲存器審核「一次性」模式已完成:%.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "儲存器審核通過已完成:%.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "儲存器同步「一次性」模式已完成:%.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "儲存器更新單一執行緒清理已完成:%(elapsed).02fs,%(success)s 個成" "功,%(fail)s 個失敗,%(no_change)s 個無變更" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "儲存器更新清理已完成:%.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s 的儲存器更新清理已完成:%(elapsed).02fs,%(success)s 個成" "功,%(fail)s 個失敗,%(no_change)s 個無變更" #, python-format msgid "Data download error: %s" msgstr "資料下載錯誤:%s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "裝置通過已完成:%.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "錯誤:%(db_file)s:%(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "錯誤:%(status)d %(body)s 來自 %(type)s 伺服器" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "錯誤:%(status)d %(body)s 來自物件伺服器 re:%(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "錯誤:%(status)d 預期:100 繼續自物件伺服器" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將稍後重試):回應 " "%(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "錯誤:來自 %(host)s 的回應 %(status)s 不當" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "錯誤:用戶端讀取逾時 (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "錯誤:儲存器更新失敗(儲存以稍後進行非同步更新):%(status)d回應(來自 " "%(ip)s:%(port)s/%(dev)s)" #, python-format msgid "ERROR Could not get account info %s" msgstr "錯誤:無法取得帳戶資訊 %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "錯誤:無法取得儲存器資訊 %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "錯誤:磁碟檔 %(data_file)s 關閉失敗:%(exc)s:%(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "錯誤:異常狀況造成用戶端中斷連線" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "錯誤:將資料轉送至物件伺服器 %s 時發生異常狀況" msgid "ERROR Failed to get my own IPs?" msgstr "錯誤:無法取得我自己的 IP?" 
msgid "ERROR Insufficient Storage" msgstr "錯誤:儲存體ä¸è¶³" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "錯誤:物件 %(obj)s 審核失敗,已隔離:%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "錯誤:挑é¸å•題,正在隔離 %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "錯誤:未è£è¼‰é ç«¯ç£ç¢Ÿæ©Ÿ %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "åŒæ­¥ %(db_file)s %(row)s 時發生錯誤" #, python-format msgid "ERROR Syncing %s" msgstr "åŒæ­¥ %s 時發生錯誤" #, python-format msgid "ERROR Trying to audit %s" msgstr "嘗試審核 %s 時發生錯誤" msgid "ERROR Unhandled exception in request" msgstr "éŒ¯èª¤ï¼šè¦æ±‚中有無法處ç†çš„異常狀æ³" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "錯誤:%(method)s %(path)s 發生呼å«éŒ¯èª¤" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將ç¨å¾Œé‡è©¦ï¼‰" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將ç¨å¾Œé‡è©¦ï¼‰ï¼š" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "錯誤:具有éžé æœŸå稱 %s çš„éžåŒæ­¥æ“±ç½®æª”案" msgid "ERROR auditing" msgstr "審核時發生錯誤" #, python-format msgid "ERROR auditing: %s" msgstr "審核時發生錯誤:%s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "錯誤:%(ip)s:%(port)s/%(dev)s 的儲存器更新失敗(儲存以ç¨å¾Œé€²è¡ŒéžåŒæ­¥æ›´æ–°ï¼‰" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "從 %s è®€å– HTTP 回應時發生錯誤" #, python-format msgid "ERROR reading db %s" msgstr "讀å–資料庫 %s 時發生錯誤" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "錯誤:é ç«¯åŒæ­¥å¤±æ•—,%(code)s:%(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "åŒæ­¥ %(file)s 與節點 %(node)s 時發生錯誤" msgid "ERROR trying to replicate" msgstr "嘗試抄寫時發生錯誤" #, python-format msgid "ERROR while trying to clean up %s" msgstr "嘗試清除 %s 時發生錯誤" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s 伺æœå™¨ç™¼ç”ŸéŒ¯èª¤ï¼š%(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "從 %s 載入抑制時發生錯誤:" #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "é ç«¯ä¼ºæœå™¨ç™¼ç”ŸéŒ¯èª¤ï¼š%(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "錯誤:無法å–å¾—ç£ç¢Ÿæ©Ÿåˆ†å‰²å€çš„路徑:%s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "éŒ¯èª¤ï¼šç„¡æ³•å­˜å– %(path)s:%(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "錯誤:無法執行審核:%s" msgid "Error hashing suffix" msgstr "æ··åˆå­—尾時發生錯誤" msgid "Error listing devices" msgstr "列出è£ç½®æ™‚發生錯誤" #, python-format msgid "Error on render profiling results: %s" msgstr "呈ç¾å´å¯«çµæžœæ™‚發生錯誤:%s" msgid "Error parsing recon cache file" msgstr "å‰–æž recon å¿«å–æª”案時發生錯誤" msgid "Error reading recon cache file" msgstr "è®€å– recon å¿«å–æª”案時發生錯誤" msgid "Error reading ringfile" msgstr "è®€å– ringfile 時發生錯誤" msgid "Error reading swift.conf" msgstr "è®€å– swift.conf 時發生錯誤" msgid "Error retrieving recon data" msgstr "æ“·å– recon 資料時發生錯誤" msgid "Error syncing handoff partition" msgstr "åŒæ­¥éžäº¤åˆ†å‰²å€æ™‚發生錯誤" msgid "Error syncing partition" msgstr "åŒæ­¥åˆ†å‰²å€æ™‚發生錯誤" #, python-format msgid "Error syncing with 
node: %s" msgstr "èˆ‡ç¯€é»žåŒæ­¥æ™‚發生錯誤:%s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "嘗試é‡å»º %(path)s 原則 #%(policy)d 分段 #%(frag_index)s 時發生錯誤" msgid "Error: An error occurred" msgstr "錯誤:發生錯誤" msgid "Error: missing config path argument" msgstr "éŒ¯èª¤ï¼šéºæ¼é…置路徑引數" #, python-format msgid "Error: unable to locate %s" msgstr "錯誤:找ä¸åˆ° %s" msgid "Exception dumping recon cache" msgstr "傾出 recon å¿«å–æ™‚發生異常狀æ³" msgid "Exception in top-level account reaper loop" msgstr "最上層帳戶 Reaper 迴圈發生異常狀æ³" msgid "Exception in top-level replication loop" msgstr "最上層抄寫迴圈中發生異常狀æ³" msgid "Exception in top-levelreconstruction loop" msgstr "æœ€ä¸Šå±¤é‡æ–°å»ºæ§‹è¿´åœˆä¸­ç™¼ç”Ÿç•°å¸¸ç‹€æ³" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 發生異常狀æ³" #, python-format msgid "Exception with account %s" msgstr "帳戶 %s 發生異常狀æ³" #, python-format msgid "Exception with containers for account %s" msgstr "帳戶 %s 的儲存器發生異常狀æ³" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "帳戶 %(account)s 儲存器 %(container)s 的物件發生異常狀æ³" #, python-format msgid "Expect: 100-continue on %s" msgstr "é æœŸ 100 - 在 %s 上繼續" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "éµå¾ª %(given_domain)s 到 %(found_domain)s çš„ CNAME éˆ" msgid "Found configs:" msgstr "找到é…置:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "「éžäº¤ä½œæ¥­æœ€å…ˆã€æ¨¡å¼ä»æœ‰å‰©é¤˜çš„éžäº¤ä½œæ¥­ã€‚正在中斷ç¾è¡ŒæŠ„寫傳éžã€‚" msgid "Host unreachable" msgstr "無法抵é”主機" #, python-format msgid "Incomplete pass on account %s" msgstr "帳戶 %s ä¸Šçš„é€šéŽæœªå®Œæˆ" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "無效的 X-Container-Sync-To æ ¼å¼ %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To 中的主機 %r 無效" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無效的擱置項目 %(file)s:%(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "來自 %(full_path)s 的回應 %(resp)s 無效" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "來自 %(ip)s 的回應 %(resp)s 無效" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To 中的架構 %r 無效,必須是 \"//\"ã€\"http\" 或\"https\"。" #, python-format msgid "Killing long-running rsync: %s" msgstr "æ­£åœ¨çµæŸé•·æ™‚間執行的é ç«¯åŒæ­¥ï¼š%s" msgid "Lockup detected.. killing live coros." msgstr "嵿¸¬åˆ°éŽ–å®šã€‚æ­£åœ¨çµæŸå³æ™‚ coro。" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "已將 %(given_domain)s å°æ˜ è‡³ %(found_domain)s" #, python-format msgid "No %s running" msgstr "沒有 %s 在執行中" #, python-format msgid "No permission to signal PID %d" msgstr "沒有信號 PID %d çš„è¨±å¯æ¬Š" #, python-format msgid "No policy with index %s" msgstr "沒有具有索引 %s 的原則" #, python-format msgid "No realm key for %r" msgstr "沒有 %r 的範åœé‡‘é‘°" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "節點錯誤é™åˆ¶ %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "未確èªè¶³å¤ çš„物件伺æœå™¨ï¼ˆå·²å–å¾— %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "找ä¸åˆ° %(sync_from)r => %(sync_to)r - 物件%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." 
msgstr "%s ç§’æœªé‡æ–°å»ºæ§‹ä»»ä½•內容。" #, python-format msgid "Nothing replicated for %s seconds." msgstr "æœªæŠ„å¯«ä»»ä½•é …ç›®é” %s 秒。" msgid "Object" msgstr "物件" msgid "Object PUT" msgstr "物件 PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "物件 PUT é‡å° 409 正在傳回 202:%(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "物件 PUT 正在傳回 412,%(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "物件審核 (%(type)s) \"%(mode)s\" 模å¼å·²å®Œæˆï¼š%(elapsed).02fs。已隔離總計:" "%(quars)d,錯誤總計:%(errors)d,檔案/秒總計:%(frate).2f,ä½å…ƒçµ„/秒總計:" "%(brate).2f,審核時間:%(audit).2f,速率:%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "物件審核 (%(type)s)。自 %(start_time)s 以來:本端:%(passes)d 個已通" "éŽï¼Œ%(quars)d 個已隔離,%(errors)d 個錯誤,檔案/秒:%(frate).2f,ä½å…ƒçµ„數/" "秒:%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:" "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "物件審核統計資料:%s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "ç‰©ä»¶é‡æ–°å»ºæ§‹å®Œæˆï¼ˆä¸€æ¬¡æ€§ï¼‰ã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "ç‰©ä»¶é‡æ–°å»ºæ§‹å®Œæˆã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "物件抄寫完æˆï¼ˆä¸€æ¬¡æ€§ï¼‰ã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "物件抄寫完æˆã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "物件伺æœå™¨å·²å‚³å›ž %s 個ä¸ç¬¦ etag" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "物件更新清ç†å·²å®Œæˆï¼š%.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-To 中ä¸å®¹è¨±åƒæ•¸ã€æŸ¥è©¢åŠç‰‡æ®µ" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "åˆ†å‰²å€æ™‚間:最大 %(max).4fsï¼Œæœ€å° %(min).4fs,中間 %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To 中需è¦è·¯å¾‘" #, python-format msgid "Problem cleaning up %s" msgstr "清除 %s 時發生å•題" #, python-format msgid "Profiling Error: %s" msgstr "å´å¯«éŒ¯èª¤ï¼š%s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "已將 %(hsh_path)s 隔離至 %(quar_path)sï¼ŒåŽŸå› æ˜¯å®ƒä¸æ˜¯ç›®éŒ„" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "已將 %(object_path)s 隔離至 %(quar_path)sï¼ŒåŽŸå› æ˜¯å®ƒä¸æ˜¯ç›®éŒ„" #, python-format msgid "Quarantining DB %s" msgstr "正在隔離資料庫 %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "%(account)s/%(container)s/%(object)s çš„ ratelimit 休眠日誌:%(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "已移除 %(remove)d 個資料庫" #, python-format msgid "Removing %s objects" msgstr "正在移除 %s 物件" #, python-format msgid "Removing partition: %s" msgstr "正在移除分割å€ï¼š%s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "正在移除具有錯誤 PID %(pid)d çš„ PID 檔 %(pid_file)s" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "正在移除具有無效 PID çš„ PID 檔 %s" #, python-format msgid "Removing stale pid file %s" msgstr "æ­£åœ¨ç§»é™¤éŽæ™‚ PID 檔案 %s" msgid "Replication run OVER" msgstr "æŠ„å¯«åŸ·è¡ŒçµæŸ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "由於黑å單,正在傳回 497:%s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "正在將 %(meth)s çš„ 498 傳回至 %(acc)s/%(cont)s/%(obj)s。Ratelimit(休眠上" "é™ï¼‰%(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "嵿¸¬åˆ°ç’°è®Šæ›´ã€‚正在中斷ç¾è¡Œé‡æ–°å»ºæ§‹å‚³éžã€‚" msgid "Ring change detected. Aborting current replication pass." msgstr "嵿¸¬åˆ°ç’°è®Šæ›´ã€‚正在中斷ç¾è¡ŒæŠ„寫傳éžã€‚" #, python-format msgid "Running %s once" msgstr "正在執行 %s 一次" msgid "Running object reconstructor in script mode." msgstr "正在 Script 模å¼ä¸‹åŸ·è¡Œç‰©ä»¶é‡æ–°å»ºæ§‹å™¨ã€‚" msgid "Running object replicator in script mode." 
msgstr "正在 Script 模å¼ä¸‹åŸ·è¡Œç‰©ä»¶æŠ„寫器" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "自 %(time)s ä»¥ä¾†ï¼šå·²åŒæ­¥ %(sync)s 個 [已刪除 [%(delete)s 個,已放置 %(put)s " "個]ï¼Œå·²è·³éŽ %(skip)s 個,%(fail)s 個失敗" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "自 %(time)s 以來:帳戶審核:%(passed)s 個已通éŽå¯©æ ¸ï¼Œ%(failed)s 個失敗審核" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "自 %(time)s 以來:儲存器審核:%(pass)s 個已通éŽå¯©æ ¸ï¼Œ%(fail)s 個失敗審核" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "æ­£åœ¨è·³éŽ %(device)s,原因是它未è£è¼‰" #, python-format msgid "Skipping %s as it is not mounted" msgstr "æ­£åœ¨è·³éŽ %s,原因是它未è£è¼‰" #, python-format msgid "Starting %s" msgstr "正在啟動 %s" msgid "Starting object reconstruction pass." msgstr "æ­£åœ¨å•Ÿå‹•ç‰©ä»¶é‡æ–°å»ºæ§‹å‚³éžã€‚" msgid "Starting object reconstructor in daemon mode." msgstr "正在常é§ç¨‹å¼æ¨¡å¼ä¸‹å•Ÿå‹•ç‰©ä»¶é‡æ–°å»ºæ§‹å™¨ã€‚" msgid "Starting object replication pass." msgstr "正在啟動物件抄寫傳éžã€‚" msgid "Starting object replicator in daemon mode." msgstr "正在常é§ç¨‹å¼æ¨¡å¼ä¸‹å•Ÿå‹•物件抄寫器。" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "已順利é ç«¯åŒæ­¥ %(dst)s 中的 %(src)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "æ­¤æª”æ¡ˆé¡žåž‹ç¦æ­¢å­˜å–ï¼" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "儲存器的 %(key)s 總計 (%(total)s) ä¸ç¬¦åˆåŽŸå‰‡ä¸­çš„ %(key)s 總和 (%(sum)s) " #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 發生逾時異常狀æ³" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "正在嘗試 %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "æ­£åœ¨å˜—è©¦å° %(full_path)s 執行 GET 動作" msgid "Trying to read during GET" msgstr "正在嘗試於 GET 期間讀å–" msgid "Trying to read during GET (retrying)" msgstr "正在嘗試於 GET 期間讀å–(正在é‡è©¦ï¼‰" msgid "Trying to send to client" msgstr "正在嘗試傳é€è‡³ç”¨æˆ¶ç«¯" #, python-format msgid "Trying to sync suffixes with %s" msgstr "正在嘗試與 %s åŒæ­¥å­—å°¾" #, python-format msgid "Trying to write to %s" msgstr "正在嘗試寫入至 %s" msgid "UNCAUGHT EXCEPTION" msgstr "æœªæ•æ‰çš„異常狀æ³" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "在 libc 中找ä¸åˆ° %s。ä¿ç•™ç‚º no-op。" #, python-format msgid "Unable to locate config for %s" msgstr "找ä¸åˆ° %s çš„é…ç½®" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "在 libc 中找ä¸åˆ° fallocateã€posix_fallocate。ä¿ç•™ç‚º no-op。" #, python-format msgid "Unable to read config from %s" msgstr "無法從 %s 讀å–é…ç½®" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未鑑別 %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "無法處ç†çš„異常狀æ³" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "嘗試執行 GET å‹•ä½œæ™‚ç™¼ç”Ÿä¸æ˜Žç•°å¸¸ç‹€æ³ï¼š%(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s 的更新報告失敗" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "å·²å‚³é€ %(container)s %(dbfile)s 的更新報告" msgid "" "WARNING: SSL should only be enabled for testing purposes. 
Use external SSL " "termination for a production deployment." msgstr "" "警告:應該僅啟用 SSL 以用於測試目的。使用外部SSL 終止以進行正式作業部署。" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:無法修改檔案描述子限制。以非 root 使用者身分執行?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:無法修改處理程序數上限限制。以非 root 使用者身分執行?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:無法修改記憶體限制。以非 root 使用者身分執行?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:無法在沒有 memcached 用戶端的情況下限制速率" #, python-format msgid "method %s is not allowed." msgstr "不容許方法 %s。" msgid "no log file found" msgstr "找不到日誌檔" msgid "odfpy not installed." msgstr "未安裝 odfpy。" #, python-format msgid "plotting results failed due to %s" msgstr "由於 %s,繪製結果失敗" msgid "python-matplotlib not installed." msgstr "未安裝 python-matplotlib。" swift-2.17.0/swift/locale/en_GB/0000775000175100017510000000000013236061751016332 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/en_GB/LC_MESSAGES/0000775000175100017510000000000013236061751020117 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/en_GB/LC_MESSAGES/swift.po0000666000175100017510000011354413236061620021620 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andi Chandler , 2016. #zanata # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2018. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-01-27 09:17+0000\n" "Last-Translator: Andi Chandler \n" "Language: en-GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: English (United Kingdom)\n" msgid "" "\n" "user quit" msgstr "" "\n" "user quit" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "%(replication_ip)s/%(device)s responded as unmounted" msgstr "%(replication_ip)s/%(device)s responded as unmounted" #, python-format msgid "%(server)s #%(number)d not running (%(conf)s)" msgstr "%(server)s #%(number)d not running (%(conf)s)" #, python-format msgid "%(server)s (%(pid)s) appears 
to have stopped" #, python-format msgid "%(server)s running (%(pid)s - %(conf)s)" msgstr "%(server)s running (%(pid)s - %(conf)s)" #, python-format msgid "%(server)s running (%(pid)s - %(pid_file)s)" msgstr "%(server)s running (%(pid)s - %(pid_file)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s successes, %(failure)s failures" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s returning 503 for %(statuses)s" #, python-format msgid "%(type)s: %(value)s" msgstr "%(type)s: %(value)s" #, python-format msgid "%s already started..." msgstr "%s already started..." #, python-format msgid "%s does not exist" msgstr "%s does not exist" #, python-format msgid "%s is not mounted" msgstr "%s is not mounted" #, python-format msgid "%s responded as unmounted" msgstr "%s responded as unmounted" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Connection reset by peer" #, python-format msgid ", %s containers deleted" msgstr ", %s containers deleted" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s containers possibly remaining" #, python-format msgid ", %s containers remaining" msgstr ", %s containers remaining" #, python-format msgid ", %s objects deleted" msgstr ", %s objects deleted" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objects possibly remaining" #, python-format msgid ", %s objects remaining" msgstr ", %s objects remaining" #, python-format msgid ", elapsed: %.02fs" msgstr ", elapsed: %.02fs" msgid ", return codes: " msgstr ", return codes: " msgid "Account" msgstr "Account" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "Account %(account)s has not been reaped since %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Account audit \"once\" mode completed: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Account audit pass completed: %.02fs" #, python-format msgid "" "Adding required filter %(filter_name)s to pipeline at position %(insert_at)d" msgstr "" "Adding required filter %(filter_name)s to pipeline at position %(insert_at)d" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" #, python-format msgid "Audit Failed for %(path)s: %(err)s" msgstr "Audit Failed for %(path)s: %(err)s" #, python-format msgid "Audit passed for %s" msgstr "Audit passed for %s" #, python-format msgid "Bad key for %(name)r: %(err)s" msgstr "Bad key for %(name)r: %(err)s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Bad rsync return code: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Begin account audit \"once\" mode" msgid "Begin account audit pass." msgstr "Begin account audit pass." msgid "Begin container audit \"once\" mode" msgstr "Begin container audit \"once\" mode" msgid "Begin container audit pass." msgstr "Begin container audit pass." 
msgid "Begin container sync \"once\" mode" msgstr "Begin container sync \"once\" mode" msgid "Begin container update single threaded sweep" msgstr "Begin container update single threaded sweep" msgid "Begin container update sweep" msgstr "Begin container update sweep" #, python-format msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" msgstr "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" msgid "Begin object update single threaded sweep" msgstr "Begin object update single threaded sweep" msgid "Begin object update sweep" msgstr "Begin object update sweep" #, python-format msgid "Beginning pass on account %s" msgstr "Beginning pass on account %s" msgid "Beginning replication run" msgstr "Beginning replication run" msgid "Broker error trying to rollback locked connection" msgstr "Broker error trying to rollback locked connection" #, python-format msgid "Can not access the file %s." msgstr "Can not access the file %s." #, python-format msgid "Can not load profile data from %s." msgstr "Can not load profile data from %s." #, python-format msgid "Cannot read %(auditor_status)s (%(err)s)" msgstr "Cannot read %(auditor_status)s (%(err)s)" #, python-format msgid "Cannot write %(auditor_status)s (%(err)s)" msgstr "Cannot write %(auditor_status)s (%(err)s)" #, python-format msgid "Client did not read from proxy within %ss" msgstr "Client did not read from proxy within %ss" msgid "Client disconnected on read" msgstr "Client disconnected on read" msgid "Client disconnected without sending enough data" msgstr "Client disconnected without sending enough data" msgid "Client disconnected without sending last chunk" msgstr "Client disconnected without sending last chunk" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Configuration option internal_client_conf_path not defined. 
Using default " "configuration, See internal-client.conf-sample for options" msgid "Connection refused" msgstr "Connection refused" msgid "Connection timeout" msgstr "Connection timeout" msgid "Container" msgstr "Container" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Container audit \"once\" mode completed: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Container audit pass completed: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Container sync \"once\" mode completed: %.02fs" #, python-format msgid "" "Container sync report: %(container)s, time window start: %(start)s, time " "window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_rows: %(total)s" msgstr "" "Container sync report: %(container)s, time window start: %(start)s, time " "window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_rows: %(total)s" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Container update sweep completed: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" #, python-format msgid "Could not autocreate account %r" msgstr "Could not autocreate account %r" #, python-format msgid "" "Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds" msgstr "" "Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds" #, python-format msgid "Could not load %(conf)r: %(error)s" msgstr "Could not load %(conf)r: %(error)s" #, python-format msgid "Data download error: %s" msgstr "Data download error: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Devices pass completed: %.02fs" msgid "Did not get a keys dict" msgstr "Did not get a keys dict" #, python-format msgid "Directory %(directory)r does not map to a valid policy (%(error)s)" msgstr "Directory %(directory)r does not map to a valid policy (%(error)s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERROR %(status)d %(body)s From %(type)s Server" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERROR %(status)d %(body)s From Object Server re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERROR %(status)d Expect: 100-continue From Object Server" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server" msgstr "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr 
"" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERROR Bad response %(status)s from %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR Client read timeout (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR Could not get account info %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERROR Could not get container info %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERROR Exception causing client disconnect" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "ERROR Exception transferring data to object servers %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERROR Failed to get my own IPs?" 
msgid "ERROR Insufficient Storage" msgstr "ERROR Insufficient Storage" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERROR Pickle problem, quarantining %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERROR Remote drive not mounted %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERROR Syncing %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERROR Syncing %s" #, python-format msgid "" "ERROR There are not enough handoff nodes to reach replica count for " "partition %s" msgstr "" "ERROR There are not enough hand-off nodes to reach replica count for " "partition %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERROR Trying to audit %s" msgid "ERROR Unhandled exception in request" msgstr "ERROR Unhandled exception in request" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ error with %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERROR async pending file with unexpected name %s" msgid "ERROR auditing" msgstr "ERROR auditing" #, python-format msgid "ERROR auditing: %s" msgstr "ERROR auditing: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgid "ERROR get_keys() missing callback" msgstr "ERROR get_keys() missing callback" #, python-format msgid "ERROR get_keys(): from callback: %s" msgstr "ERROR get_keys(): from callback: %s" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERROR reading HTTP response from %s" #, python-format msgid "ERROR reading db %s" msgstr "ERROR reading db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERROR rsync failed with %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERROR syncing %(file)s with node %(node)s" msgid "ERROR trying to replicate" msgstr "ERROR trying to replicate" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERROR while trying to clean up %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERROR with loading suppressions from %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERROR with remote server %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERROR: Failed to get paths to drive partitions: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERROR: Unable to access %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERROR: 
Unable to run auditing: %s" #, python-format msgid "" "Error code %(status)d is returned from remote server %(ip)s: %(port)s / " "%(device)s" msgstr "" "Error code %(status)d is returned from remote server %(ip)s: %(port)s / " "%(device)s" #, python-format msgid "Error decoding fragments for %r" msgstr "Error decoding fragments for %r" #, python-format msgid "Error decrypting %(resp_type)s: %(reason)s" msgstr "Error decrypting %(resp_type)s: %(reason)s" #, python-format msgid "Error decrypting %(resp_type)s: Missing %(key)s" msgstr "Error decrypting %(resp_type)s: Missing %(key)s" #, python-format msgid "Error decrypting container listing: %s" msgstr "Error decrypting container listing: %s" #, python-format msgid "Error decrypting header %(header)s: %(error)s" msgstr "Error decrypting header %(header)s: %(error)s" #, python-format msgid "Error decrypting object: %s" msgstr "Error decrypting object: %s" msgid "Error hashing suffix" msgstr "Error hashing suffix" #, python-format msgid "Error in %(conf)r with mtime_check_interval: %(error)s" msgstr "Error in %(conf)r with mtime_check_interval: %(error)s" msgid "Error listing devices" msgstr "Error listing devices" #, python-format msgid "Error on render profiling results: %s" msgstr "Error on render profiling results: %s" msgid "Error parsing recon cache file" msgstr "Error parsing recon cache file" msgid "Error reading recon cache file" msgstr "Error reading recon cache file" msgid "Error reading ringfile" msgstr "Error reading ringfile" msgid "Error reading swift.conf" msgstr "Error reading swift.conf" msgid "Error retrieving recon data" msgstr "Error retrieving recon data" #, python-format msgid "Error sending UDP message to %(target)r: %(err)s" msgstr "Error sending UDP message to %(target)r: %(err)s" msgid "Error syncing handoff partition" msgstr "Error syncing hand-off partition" msgid "Error syncing partition" msgstr "Error syncing partition" #, python-format msgid "Error syncing with node: %s" msgstr "Error syncing with node: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Error: An error occurred" msgid "Error: missing config path argument" msgstr "Error: missing config path argument" #, python-format msgid "Error: unable to locate %s" msgstr "Error: unable to locate %s" msgid "Exception dumping recon cache" msgstr "Exception dumping recon cache" #, python-format msgid "Exception fetching fragments for %r" msgstr "Exception fetching fragments for %r" msgid "Exception in top-level account reaper loop" msgstr "Exception in top-level account reaper loop" msgid "Exception in top-level replication loop" msgstr "Exception in top-level replication loop" msgid "Exception in top-levelreconstruction loop" msgstr "Exception in top-level reconstruction loop" #, python-format msgid "Exception while deleting container %(container)s %(err)s" msgstr "Exception while deleting container %(container)s %(err)s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exception with %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Exception with account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exception with containers for account %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exception with objects for container 
%(container)s for account %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Expect: 100-continue on %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgid "Found configs:" msgstr "Found configs:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Hand-offs first mode still has hand-offs remaining. Aborting current " "replication pass." msgid "" "Handoffs only mode found no handoffs remaining. You should disable " "handoffs_only once all nodes are reporting no handoffs remaining." msgstr "" "Hand-offs only mode found no hand-offs remaining. You should disable " "handoffs_only once all nodes are reporting no hand-offs remaining." msgid "" "Handoffs only mode still has handoffs remaining. Next pass will continue to " "revert handoffs." msgstr "" "Hand-offs only mode still has hand-offs remaining. Next pass will continue " "to revert hand-offs." msgid "Host unreachable" msgstr "Host unreachable" #, python-format msgid "Incomplete pass on account %s" msgstr "Incomplete pass on account %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Invalid X-Container-Sync-To format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Invalid host %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Invalid pending entry %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Invalid response %(resp)s from %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Invalid response %(resp)s from %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgid "Invalid swift_bytes" msgstr "Invalid swift_bytes" #, python-format msgid "Killing long-running rsync: %s" msgstr "Killing long-running rsync: %s" #, python-format msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" msgstr "Loading JSON from %(auditor_status)s failed (%(err)s)" msgid "Lockup detected.. killing live coros." msgstr "Lockup detected.. killing live coros." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Mapped %(given_domain)s to %(found_domain)s" #, python-format msgid "Missing key for %r" msgstr "Missing key for %r" msgid "More than one part in a single-part response?" msgstr "More than one part in a single-part response?" 
#, python-format msgid "No %s running" msgstr "No %s running" #, python-format msgid "No cluster endpoint for %(realm)r %(cluster)r" msgstr "No cluster endpoint for %(realm)r %(cluster)r" #, python-format msgid "No permission to signal PID %d" msgstr "No permission to signal PID %d" #, python-format msgid "No policy with index %s" msgstr "No policy with index %s" #, python-format msgid "No realm key for %r" msgstr "No realm key for %r" #, python-format msgid "No space left on device for %(file)s (%(err)s)" msgstr "No space left on device for %(file)s (%(err)s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Node error limited %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Not enough object servers ack'ed (got %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nothing reconstructed for %s seconds." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nothing replicated for %s seconds." msgid "Object" msgstr "Object" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "" "Object PUT exceptions after last send, %(conns)s/%(nodes)s required " "connections" msgstr "" "Object PUT exceptions after last send, %(conns)s/%(nodes)s required " "connections" #, python-format msgid "" "Object PUT exceptions during send, %(conns)s/%(nodes)s required connections" msgstr "" "Object PUT exceptions during send, %(conns)s/%(nodes)s required connections" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Object PUT returning 412, %(statuses)r" #, python-format msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections" msgstr "Object PUT returning 503, %(conns)s/%(nodes)s required connections" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Object audit stats: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Object reconstruction complete (once). (%.02f minutes)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Object reconstruction complete. 
(%.02f minutes)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Object replication complete (once). (%.02f minutes)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Object replication complete. (%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Object servers returned %s mismatched etags" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Object update sweep completed: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Params, queries, and fragments not allowed in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" #, python-format msgid "" "Pass beginning; %(containers)s possible containers; %(objects)s possible " "objects" msgstr "" "Pass beginning; %(containers)s possible containers; %(objects)s possible " "objects" #, python-format msgid "Pass completed in %(time)ds; %(objects)d objects expired" msgstr "Pass completed in %(time)ds; %(objects)d objects expired" #, python-format msgid "Pass so far %(time)ds; %(objects)d objects expired" msgstr "Pass so far %(time)ds; %(objects)d objects expired" msgid "Path required in X-Container-Sync-To" msgstr "Path required in X-Container-Sync-To" #, python-format msgid "Pipeline is \"%s\"" msgstr "Pipeline is \"%s\"" #, python-format msgid "Pipeline was modified. New pipeline is \"%s\"." msgstr "Pipeline was modified. New pipeline is \"%s\"." #, python-format msgid "Problem checking EC fragment %(datadir)s: %(err)s" msgstr "Problem checking EC fragment %(datadir)s: %(err)s" #, python-format msgid "Problem cleaning up %(datadir)s (%(err)s)" msgstr "Problem cleaning up %(datadir)s (%(err)s)" #, python-format msgid "Problem cleaning up %s" msgstr "Problem cleaning up %s" #, python-format msgid "Problem making data file durable %(file)s (%(err)s)" msgstr "Problem making data file durable %(file)s (%(err)s)" #, python-format msgid "Problem with fragment response: %s" msgstr "Problem with fragment response: %s" #, python-format msgid "Profiling Error: %s" msgstr "Profiling Error: %s" #, python-format msgid "Quarantined %(db_dir)s to %(quar_path)s due to %(exc_hint)s database" msgstr "Quarantined %(db_dir)s to %(quar_path)s due to %(exc_hint)s database" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" #, python-format msgid "Quarantining DB %s" msgstr "Quarantining DB %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Removed %(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "Removing %s objects" #, python-format msgid "Removing partition: %s" msgstr "Removing partition: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Removing pid file %(pid_file)s with wrong pid %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Removing pid file %s with invalid pid" #, python-format 
msgid "Removing stale pid file %s" msgstr "Removing stale pid file %s" msgid "Replication run OVER" msgstr "Replication run OVER" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Returning 497 because of blacklisting: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "Ring change detected. Aborting current reconstruction pass." msgid "Ring change detected. Aborting current replication pass." msgstr "Ring change detected. Aborting current replication pass." #, python-format msgid "Running %s once" msgstr "Running %s once" msgid "Running object reconstructor in script mode." msgstr "Running object reconstructor in script mode." msgid "Running object replicator in script mode." msgstr "Running object replicator in script mode." #, python-format msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "Signal %(server)s pid: %(pid)s signal: %(signal)s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" #, python-format msgid "Skipping %(datadir)s because %(err)s" msgstr "Skipping %(datadir)s because %(err)s" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Skipping %(device)s as it is not mounted" #, python-format msgid "Skipping %(dir)s: %(err)s" msgstr "Skipping %(dir)s: %(err)s" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Skipping %s as it is not mounted" #, python-format msgid "Starting %s" msgstr "Starting %s" msgid "Starting object reconstruction pass." msgstr "Starting object reconstruction pass." msgid "Starting object reconstructor in daemon mode." msgstr "Starting object reconstructor in daemon mode." msgid "Starting object replication pass." msgstr "Starting object replication pass." msgid "Starting object replicator in daemon mode." msgstr "Starting object replicator in daemon mode." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "The file type are forbidden to access!" 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Timeout Exception with %(ip)s:%(port)s/%(device)s" #, python-format msgid "Timeout fetching fragments for %r" msgstr "Timeout fetching fragments for %r" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Trying to %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Trying to GET %(full_path)s" #, python-format msgid "Trying to get %(status_type)s status of PUT to %(path)s" msgstr "Trying to get %(status_type)s status of PUT to %(path)s" msgid "Trying to read during GET" msgstr "Trying to read during GET" msgid "Trying to read during GET (retrying)" msgstr "Trying to read during GET (retrying)" msgid "Trying to send to client" msgstr "Trying to send to client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Trying to sync suffixes with %s" #, python-format msgid "Trying to write to %s" msgstr "Trying to write to %s" msgid "UNCAUGHT EXCEPTION" msgstr "UNCAUGHT EXCEPTION" #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "Unable to find %(section)s config section in %(conf)s" #, python-format msgid "Unable to load internal client from config: %(conf)r (%(error)s)" msgstr "Unable to load internal client from config: %(conf)r (%(error)s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Unable to locate %s in libc. Leaving as a no-op." #, python-format msgid "Unable to locate config for %s" msgstr "Unable to locate config for %s" #, python-format msgid "Unable to locate config number %(number)s for %(server)s" msgstr "Unable to locate config number %(number)s for %(server)s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." #, python-format msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "Unable to perform fsync() on directory %(dir)s: %(err)s" #, python-format msgid "Unable to read config from %s" msgstr "Unable to read config from %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Unauth %(sync_from)r => %(sync_to)r" #, python-format msgid "" "Unexpected fragment data type (not quarantined)%(datadir)s: %(type)s at " "offset 0x%(offset)x" msgstr "" "Unexpected fragment data type (not quarantined)%(datadir)s: %(type)s at " "offset 0x%(offset)x" msgid "Unhandled exception" msgstr "Unhandled exception" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "Unknown exception trying to GET: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Update report failed for %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Update report sent for %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgid "" "WARNING: Unable to modify I/O scheduling class and priority of process. 
" "Keeping unchanged! Check logs for more info." msgstr "" "WARNING: Unable to modify I/O scheduling class and priority of process. " "Keeping unchanged! Check logs for more info." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "WARNING: Unable to modify max process limit. Running as non-root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "WARNING: Unable to modify memory limit. Running as non-root?" msgid "" "WARNING: Unable to modify scheduling priority of process. Keeping unchanged! " "Check logs for more info. " msgstr "" "WARNING: Unable to modify scheduling priority of process. Keeping unchanged! " "Check logs for more info. " #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "Waited %(kill_wait)s seconds for %(server)s to die; giving up" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Warning: Cannot ratelimit without a memcached client" #, python-format msgid "autocreate account %r" msgstr "autocreate account %r" #, python-format msgid "method %s is not allowed." msgstr "method %s is not allowed." #, python-format msgid "next_part_power set in policy '%s'. Skipping" msgstr "next_part_power set in policy '%s'. Skipping" msgid "no log file found" msgstr "no log file found" msgid "odfpy not installed." msgstr "odfpy not installed." #, python-format msgid "plotting results failed due to %s" msgstr "plotting results failed due to %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib not installed." #, python-format msgid "" "sorting_method is set to '%(method)s', not 'affinity'; %(label)s " "read_affinity setting will have no effect." msgstr "" "sorting_method is set to '%(method)s', not 'affinity'; %(label)s " "read_affinity setting will have no effect." swift-2.17.0/swift/locale/it/0000775000175100017510000000000013236061751015774 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/it/LC_MESSAGES/0000775000175100017510000000000013236061751017561 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/it/LC_MESSAGES/swift.po0000666000175100017510000007311013236061620021254 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:42+0000\n" "Last-Translator: Copied by Zanata \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Italian\n" msgid "" "\n" "user quit" msgstr "" "\n" "l'utente è uscito" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffissi controllati - %(hashed).2f%% con hash, %(synced).2f%% " "sincronizzati" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partizioni replicate in " "%(time).2fs (%(rate).2f/sec, %(remaining)s rimanenti)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s operazioni con esito positivo, %(failure)s errori" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s restituisce 503 per %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s già avviato..." #, python-format msgid "%s does not exist" msgstr "%s non esiste" #, python-format msgid "%s is not mounted" msgstr "%s non è montato" #, python-format msgid "%s responded as unmounted" msgstr "%s ha risposto come smontato" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Connessione reimpostata dal peer" #, python-format msgid ", %s containers deleted" msgstr ", %s contenitori eliminati" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s contenitori probabilmente rimanenti" #, python-format msgid ", %s containers remaining" msgstr ", %s contenitori rimanenti" #, python-format msgid ", %s objects deleted" msgstr ", %s oggetti eliminati" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s oggetti probabilmente rimanenti" #, python-format msgid ", %s objects remaining" msgstr ", %s oggetti rimanenti" #, python-format msgid ", elapsed: %.02fs" msgstr ", trascorso: %.02fs" msgid ", return codes: " msgstr ", codici di ritorno: " msgid "Account" msgstr "Conto" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "Il conto %(account)s non è stato verificato dal %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Modalità \"once\" verifica account completata: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Trasmissione verifica account completata: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "È stato eseguito un tentativo di replicare %(count)d dbs in %(time).5f " "secondi (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Codice di ritorno rsync errato: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Avvio modalità \"once\" verifica account" msgid "Begin account audit pass." 
msgstr "Avvio trasmissione verifica account." msgid "Begin container audit \"once\" mode" msgstr "Avvio modalità \"once\" verifica contenitore" msgid "Begin container audit pass." msgstr "Avvio trasmissione verifica contenitore." msgid "Begin container sync \"once\" mode" msgstr "Avvio della modalità \"once\" di sincronizzazione contenitore" msgid "Begin container update single threaded sweep" msgstr "Avvio pulizia a singolo thread aggiornamento contenitore" msgid "Begin container update sweep" msgstr "Avvio pulizia aggiornamento contenitore" msgid "Begin object update single threaded sweep" msgstr "Avvio pulizia a singolo thread aggiornamento oggetto" msgid "Begin object update sweep" msgstr "Avvio pulizia aggiornamento oggetto" #, python-format msgid "Beginning pass on account %s" msgstr "Avvio della trasmissione sull'account %s" msgid "Beginning replication run" msgstr "Avvio replica" msgid "Broker error trying to rollback locked connection" msgstr "" "Errore del broker durante il tentativo di eseguire il rollback della " "connessione bloccata" #, python-format msgid "Can not access the file %s." msgstr "Impossibile accedere al file %s." #, python-format msgid "Can not load profile data from %s." msgstr "Impossibile caricare i dati del profilo da %s." #, python-format msgid "Client did not read from proxy within %ss" msgstr "Il client non ha eseguito la lettura dal proxy in %ss" msgid "Client disconnected on read" msgstr "Client scollegato alla lettura" msgid "Client disconnected without sending enough data" msgstr "Client disconnesso senza inviare dati sufficienti" msgid "Client disconnected without sending last chunk" msgstr "Client disconnesso senza inviare l'ultima porzione" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Il percorso del client %(client)s non corrisponde al percorso memorizzato " "nei metadati dell'oggetto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Opzione di configurazione internal_client_conf_path non definita. Viene " "utilizzata la configurazione predefinita, vedere l'esempio internal-client." 
"conf-sample per le opzioni" msgid "Connection refused" msgstr "Connessione rifiutata" msgid "Connection timeout" msgstr "Timeout della connessione" msgid "Container" msgstr "Contenitore" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Modalità \"once\" verifica contenitore completata: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Trasmissione verifica contenitore completata: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" "Modalità \"once\" di sincronizzazione del contenitore completata: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Pulizia a singolo thread aggiornamento contenitore completata: " "%(elapsed).02fs, %(success)s operazioni con esito positivo, %(fail)s errori, " "%(no_change)s senza modifiche" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Pulizia aggiornamento contenitore completata: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Pulizia aggiornamento contenitore di %(path)s completata: %(elapsed).02fs, " "%(success)s operazioni con esito positivo, %(fail)s errori, %(no_change)s " "senza modifiche" #, python-format msgid "Data download error: %s" msgstr "Errore di download dei dati: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Trasmissione dei dispositivi completata: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERRORE %(status)d %(body)s dal server %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERRORE %(status)d %(body)s Dal server degli oggetti re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRORE %(status)d Previsto: 100-continue dal server degli oggetti" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERRORE Aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente): Risposta " "%(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERRORE Risposta errata %(status)s da %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERRORE Timeout di lettura del client (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERRORE Aggiornamento del contenitore non riuscito (salvataggio per " "l'aggiornamento asincrono successivamente): %(status)d risposta da %(ip)s:" "%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERRORE Impossibile ottenere le informazioni sull'account %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERRORE Impossibile ottenere le informazioni sul contenitore %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERRORE Errore di chiusura DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing 
client disconnect" msgstr "ERRORE Eccezione che causa la disconnessione del client" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ERRORE Eccezione durante il trasferimento di dati nel server degli oggetti %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERRORE Impossibile ottenere i propri IP?" msgid "ERROR Insufficient Storage" msgstr "ERRORE Memoria insufficiente" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERRORE L'oggetto %(obj)s non ha superato la verifica ed è stato inserito " "nella quarantena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERRORE Problema relativo a pickle, inserimento di %s nella quarantena" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERRORE Unità remota non montata %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERRORE durante la sincronizzazione di %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERRORE durante la sincronizzazione di %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERRORE durante il tentativo di eseguire la verifica %s" msgid "ERROR Unhandled exception in request" msgstr "ERRORE Eccezione non gestita nella richiesta" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERRORE errore __call__ con %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERRORE file in sospeso asincrono con nome non previsto %s" msgid "ERROR auditing" msgstr "ERRORE durante la verifica" #, python-format msgid "ERROR auditing: %s" msgstr "ERRORE durante la verifica: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERRORE aggiornamento del contenitore non riuscito con %(ip)s:%(port)s/" "%(dev)s (salvataggio per aggiornamento asincrono successivamente)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERRORE durante la lettura della risposta HTTP da %s" #, python-format msgid "ERROR reading db %s" msgstr "ERRORE durante la lettura del db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERRORE rsync non riuscito con %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERRORE durante la sincronizzazione di %(file)s con il nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERRORE durante il tentativo di eseguire la replica" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERRORE durante il tentativo di ripulire %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "ERRORE relativo al server %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERRORE relativo al caricamento delle eliminazioni da %s: " #, python-format msgid "ERROR with remote server 
%(ip)s:%(port)s/%(device)s" msgstr "ERRORE relativo al server remoto %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERRORE: Impossibile ottenere i percorsi per gestire le partizioni: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERRORE: Impossibile accedere a %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERRORE: Impossibile eseguire la verifica: %s" msgid "Error hashing suffix" msgstr "Errore durante l'hash del suffisso" msgid "Error listing devices" msgstr "Errore durante l'elenco dei dispositivi" #, python-format msgid "Error on render profiling results: %s" msgstr "" "Errore durante la visualizzazione dei risultati della creazione dei profili: " "%s" msgid "Error parsing recon cache file" msgstr "Errore durante l'analisi del file della cache di riconoscimento" msgid "Error reading recon cache file" msgstr "Errore durante la lettura del file della cache di riconoscimento" msgid "Error reading ringfile" msgstr "Errore durante la lettura del ringfile" msgid "Error reading swift.conf" msgstr "Errore durante la lettura di swift.conf" msgid "Error retrieving recon data" msgstr "Errore durante il richiamo dei dati di riconoscimento" msgid "Error syncing handoff partition" msgstr "Errore durante la sincronizzazione della partizione di passaggio" msgid "Error syncing partition" msgstr "Errore durante la sincronizzazione della partizione" #, python-format msgid "Error syncing with node: %s" msgstr "Errore durante la sincronizzazione con il nodo: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Errore nel tentativo di ricreare %(path)s policy#%(policy)d frag#" "%(frag_index)s" msgid "Error: An error occurred" msgstr "Errore: Si è verificato un errore" msgid "Error: missing config path argument" msgstr "Errore: Argomento path della configurazione mancante" #, python-format msgid "Error: unable to locate %s" msgstr "Errore: impossibile individuare %s" msgid "Exception dumping recon cache" msgstr "Eccezione durante il dump della cache di recon" msgid "Exception in top-level account reaper loop" msgstr "Eccezione nel loop reaper dell'account di livello superiore" msgid "Exception in top-level replication loop" msgstr "Eccezione nel loop di replica di livello superiore" msgid "Exception in top-levelreconstruction loop" msgstr "Eccezione nel loop di ricostruzione di livello superiore" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Eccezione relativa a %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Eccezione relativa all'account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Eccezione relativa ai contenitori per l'account %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Eccezione relativa agli oggetti per il contenitore %(container)s per " "l'account %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Previsto: 100-continue su %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" "Viene seguita la catena CNAME per %(given_domain)s verso %(found_domain)s" msgid "Found configs:" msgstr "Configurazioni trovate:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." 
msgstr "" "Nella prima modalità di passaggio ci sono ancora passaggi restanti. " "Interruzione del passaggio di replica corrente." msgid "Host unreachable" msgstr "Host non raggiungibile" #, python-format msgid "Incomplete pass on account %s" msgstr "Trasmissione non completa sull'account %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato X-Container-Sync-To non valido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host non valido %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Voce in sospeso non valida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Risposta non valida %(resp)s da %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Risposta non valida %(resp)s da %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Schema non valido %r in X-Container-Sync-To, deve essere \"//\", \"http\" " "oppure \"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Chiusura rsync ad elaborazione prolungata: %s" msgid "Lockup detected.. killing live coros." msgstr "Blocco rilevato... chiusura dei coros attivi." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s associato a %(found_domain)s" #, python-format msgid "No %s running" msgstr "Nessun %s in esecuzione" #, python-format msgid "No permission to signal PID %d" msgstr "Nessuna autorizzazione per la segnalazione del PID %d" #, python-format msgid "No policy with index %s" msgstr "Nessuna politica con indice %s" #, python-format msgid "No realm key for %r" msgstr "Nessuna chiave dell'area di autenticazione per %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Errore del nodo limitato %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Server degli oggetti riconosciuti non sufficienti (got %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "%(sync_from)r => %(sync_to)r non trovato - oggetto %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nessun elemento ricostruito per %s secondi." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nessun elemento replicato per %s secondi." msgid "Object" msgstr "Oggetto" msgid "Object PUT" msgstr "PUT dell'oggetto" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Il PUT dell'oggetto ha restituito 202 per 409: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Il PUT dell'oggetto ha restituito 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Modalità \"%(mode)s\" (%(type)s) verifica oggetto completata: " "%(elapsed).02fs. Totale in quarantena: %(quars)d, Totale errori: %(errors)d, " "Totale file/sec: %(frate).2f, Totale byte/sec: %(brate).2f, Tempo verifica: " "%(audit).2f, Velocità: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Verifica oggetto (%(type)s). A partire da %(start_time)s: In locale: " "%(passes)d passati, %(quars)d in quarantena, %(errors)d errori file/sec: " "%(frate).2f , byte/sec: %(brate).2f, Tempo totale: %(total).2f, Tempo " "verifica: %(audit).2f, Velocità: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Statistiche verifica oggetto: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Ricostruzione dell'oggetto completata (una volta). (%.02f minuti)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Ricostruzione dell'oggetto completata. (%.02f minuti)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replica dell'oggetto completata (una volta). (%.02f minuti)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Replica dell'oggetto completata. (%.02f minuti)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "I server dell'oggetto hanno restituito %s etag senza corrispondenza" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Pulizia aggiornamento oggetto completata: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Parametri, query e frammenti non consentiti in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "Tempi partizione: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Percorso richiesto in X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problema durante la ripulitura di %s" #, python-format msgid "Profiling Error: %s" msgstr "Errore di creazione dei profili: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s inserito in quarantena in %(quar_path)s perché non è una " "directory" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s inserito in quarantena in %(quar_path)s perché non è una " "directory" #, python-format msgid "Quarantining DB %s" msgstr "Inserimento in quarantena del DB %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Log di sospensione Ratelimit: %(sleep)s per %(account)s/%(container)s/" "%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Rimossi %(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "Rimozione di oggetti %s" #, python-format msgid "Removing partition: %s" msgstr "Rimozione della partizione: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Rimozione del file pid %(pid_file)s con pid non valido %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Rimozione del file pid %s con pid non valido" #, python-format msgid "Removing stale pid file %s" msgstr "Rimozione del file pid %s obsoleto in corso" msgid "Replication run OVER" msgstr "Esecuzione della replica TERMINATA" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Viene restituito il codice 497 a causa della blacklist: %s" #, python-format msgid "" 
"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Viene restituito 498 per %(meth)s a %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(numero massimo sospensioni) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Modifica ring rilevata. Interruzione della trasmissione della ricostruzione " "corrente." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Modifica ring rilevata. Interruzione della trasmissione della replica " "corrente." #, python-format msgid "Running %s once" msgstr "Esecuzione di %s una volta" msgid "Running object reconstructor in script mode." msgstr "" "Esecuzione del programma di ricostruzione dell'oggetto in modalità script." msgid "Running object replicator in script mode." msgstr "Esecuzione del programma di replica dell'oggetto in modalità script." #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "A partire da %(time)s: %(sync)s sincronizzati [%(delete)s eliminazioni, " "%(put)s inserimenti], %(skip)s ignorati, %(fail)s non riusciti" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "A partire da %(time)s: Verifiche account: %(passed)s verifiche superate, " "%(failed)s verifiche non superate" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "A partire da %(time)s: Verifiche contenitore: %(pass)s verifiche superate, " "%(fail)s verifiche non superate" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s viene ignorato perché non è montato" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s viene ignorato perché non è montato" #, python-format msgid "Starting %s" msgstr "Avvio di %s" msgid "Starting object reconstruction pass." msgstr "Avvio della trasmissione della ricostruzione dell'oggetto." msgid "Starting object reconstructor in daemon mode." msgstr "Avvio del programma di ricostruzione dell'oggetto in modalità daemon." msgid "Starting object replication pass." msgstr "Avvio della trasmissione della replica dell'oggetto." msgid "Starting object replicator in daemon mode." msgstr "Avvio del programma di replica dell'oggetto in modalità daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Rsync di %(src)s eseguito correttamente su %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Non è consentito l'accesso a questo tipo di file!" 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Il numero totale di %(key)s per il contenitore (%(total)s) non corrisponde " "alla somma di %(key)s tra le politiche (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Eccezione di timeout con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentativo di %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentativo di eseguire GET %(full_path)s" msgid "Trying to read during GET" msgstr "Tentativo di lettura durante GET" msgid "Trying to read during GET (retrying)" msgstr "Tentativo di lettura durante GET (nuovo tentativo)" msgid "Trying to send to client" msgstr "Tentativo di invio al client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentativo di sincronizzazione dei suffissi con %s" #, python-format msgid "Trying to write to %s" msgstr "Tentativo di scrittura in %s" msgid "UNCAUGHT EXCEPTION" msgstr "ECCEZIONE NON RILEVATA" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Impossibile individuare %s in libc. Lasciato come no-op." #, python-format msgid "Unable to locate config for %s" msgstr "Impossibile individuare la configurazione per %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossibile individuare fallocate, posix_fallocate in libc. Lasciato come " "no-op." #, python-format msgid "Unable to read config from %s" msgstr "Impossibile leggere la configurazione da %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r non autorizzato" msgid "Unhandled exception" msgstr "Eccezione non gestita" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Eccezione imprevista nel tentativo di eseguire GET: %(account)r " "%(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Report di aggiornamento non riuscito per %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Report di aggiornamento inviato per %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVVERTENZA: SSL deve essere abilitato solo per scopi di test. Utilizzare la " "terminazione SSL esterna per una distribuzione di produzione." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite del descrittore del file. " "Eseguire come non-root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite del numero massimo di processi. " "Eseguire come non-root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite di memoria. Eseguire come non-" "root?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avvertenza: impossibile eseguire ratelimit senza un client memcached" #, python-format msgid "method %s is not allowed." msgstr "il metodo %s non è consentito." msgid "no log file found" msgstr "nessun file di log trovato" msgid "odfpy not installed." msgstr "odfpy non installato." 
#, python-format msgid "plotting results failed due to %s" msgstr "tracciamento dei risultati non riuscito a causa di %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib non installato." swift-2.17.0/swift/locale/tr_TR/0000775000175100017510000000000013236061751016412 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/tr_TR/LC_MESSAGES/0000775000175100017510000000000013236061751020177 5ustar zuulzuul00000000000000swift-2.17.0/swift/locale/tr_TR/LC_MESSAGES/swift.po0000666000175100017510000006540013236061620021675 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # İşbaran Akçayır , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Turkish (Turkey)\n" msgid "" "\n" "user quit" msgstr "" "\n" "kullanıcı çıktı" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sonek kontrol edildi - %(hashed).2f%% özetlenen, %(synced).2f%% " "eÅŸzamanlanan" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) bölüm %(time).2fs (%(rate).2f/" "sn, %(remaining)s kalan) içinde çoÄŸaltıldı" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s baÅŸarı, %(failure)s baÅŸarısızlık" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s %(statuses)s için 503 döndürüyor" #, python-format msgid "%s already started..." msgstr "%s zaten baÅŸlatıldı..." 
#, python-format msgid "%s does not exist" msgstr "%s mevcut deÄŸil" #, python-format msgid "%s is not mounted" msgstr "%s baÄŸlı deÄŸil" #, python-format msgid "%s responded as unmounted" msgstr "%s baÄŸlı deÄŸil olarak yanıt verdi" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: BaÄŸlantı eÅŸ tarafından sıfırlandı" #, python-format msgid ", %s containers deleted" msgstr ", %s kap silindi" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s kap kaldı muhtemelen" #, python-format msgid ", %s containers remaining" msgstr ", %s kap kaldı" #, python-format msgid ", %s objects deleted" msgstr ", %s nesne silindi" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s nesne kaldı muhtemelen" #, python-format msgid ", %s objects remaining" msgstr ", %s nesne kaldı" #, python-format msgid ", elapsed: %.02fs" msgstr ", geçen süre: %.02fs" msgid ", return codes: " msgstr ", dönen kodlar: " msgid "Account" msgstr "Hesap" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Hesap denetimi \"bir kere\" kipi tamamlandı: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Hesap denetimi geçiÅŸi tamamlandı: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(count)d db %(time).5f saniyede çoÄŸaltılmaya çalışıldı (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Kötü rsync dönüş kodu: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Hesap denetimi \"bir kere\" kipini baÅŸlat" msgid "Begin account audit pass." msgstr "Hesap denetimi baÅŸlatma geçildi." msgid "Begin container audit \"once\" mode" msgstr "Kap denetimine \"bir kere\" kipinde baÅŸla" msgid "Begin container audit pass." msgstr "Kap denetimi geçiÅŸini baÅŸlat." msgid "Begin container sync \"once\" mode" msgstr "Kap eÅŸzamanlamayı \"bir kere\" kipinde baÅŸlat" msgid "Begin container update single threaded sweep" msgstr "Kap güncelleme tek iÅŸ iplikli süpürmeye baÅŸla" msgid "Begin container update sweep" msgstr "Kap güncelleme süpürmesine baÅŸla" msgid "Begin object update single threaded sweep" msgstr "Nesne güncelleme tek iÅŸ iplikli süpürmeye baÅŸla" msgid "Begin object update sweep" msgstr "Nesne güncelleme süpürmesine baÅŸla" #, python-format msgid "Beginning pass on account %s" msgstr "%s hesabı üzerinde geçiÅŸ baÅŸlatılıyor" msgid "Beginning replication run" msgstr "ÇoÄŸaltmanın çalıştırılmasına baÅŸlanıyor" msgid "Broker error trying to rollback locked connection" msgstr "Kilitli baÄŸlantı geri alınmaya çalışılırken vekil hatası" #, python-format msgid "Can not access the file %s." msgstr "%s dosyasına eriÅŸilemiyor." #, python-format msgid "Can not load profile data from %s." msgstr "%s'den profil verisi yüklenemiyor." #, python-format msgid "Client did not read from proxy within %ss" msgstr "İstemci %ss içinde vekilden okumadı" msgid "Client disconnected on read" msgstr "İstemci okuma sırasında baÄŸlantıyı kesti" msgid "Client disconnected without sending enough data" msgstr "İstemci yeterli veri göndermeden baÄŸlantıyı kesti" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "İstemci yolu %(client)s nesne metadata'sında kayıtlı yol ile eÅŸleÅŸmiyor " "%(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. 
Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Yapılandırma seçeneÄŸi internal_client_conf_path belirtilmemiÅŸ. Varsayılan " "yapılandırma kullanılıyor, seçenekleri çin internal-client.conf-sample'a " "bakın" msgid "Connection refused" msgstr "BaÄŸlantı reddedildi" msgid "Connection timeout" msgstr "BaÄŸlantı zaman aşımına uÄŸradı" msgid "Container" msgstr "Kap" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Kap denetimi \"bir kere\" kipinde tamamlandı: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Kap denetim geçiÅŸi tamamlandı: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Kap eÅŸzamanlama \"bir kere\" kipinde tamamlandı: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Kap güncelleme tek iÅŸ iplikli süpürme tamamlandı: %(elapsed).02fs, " "%(success)s baÅŸarılı, %(fail)s baÅŸarısız, %(no_change)s deÄŸiÅŸiklik yok" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Kap güncelleme süpürme tamamlandı: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s in kap güncelleme süpürmesi tamamlandı: %(elapsed).02fs, " "%(success)s baÅŸarılı, %(fail)s baÅŸarısız, %(no_change)s deÄŸiÅŸiklik yok" #, python-format msgid "Data download error: %s" msgstr "Veri indirme hatası: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Aygıtlar geçiÅŸi tamamlandı: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "HATA %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "HATA %(status)d %(body)s %(type)s Sunucudan" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "HATA %(status)d %(body)s Nesne Sunucu re'den: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "HATA %(status)d Beklenen: 100-Nesne Sunucusundan devam et" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme baÅŸarısız (sonra tekrar " "denenecek): Yanıt %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "HATA %(host)s dan kötü yanıt %(status)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "HATA İstemci okuma zaman aşımına uÄŸradı (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "HATA Kap güncelleme baÅŸarısız (daha sonraki async güncellemesi için " "kaydediliyor): %(ip)s:%(port)s/%(dev)s den %(status)d yanıtı" #, python-format msgid "ERROR Could not get account info %s" msgstr "HATA hesap bilgisi %s alınamadı" #, python-format msgid "ERROR Could not get container info %s" msgstr "HATA %s kap bilgisi alınamadı" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "HATA %(data_file)s disk dosyası kapatma baÅŸarısız: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "HATA İstisna istemci baÄŸlantısının 
kesilmesine neden oluyor" msgid "ERROR Failed to get my own IPs?" msgstr "Kendi IP'lerimi alırken HATA?" msgid "ERROR Insufficient Storage" msgstr "HATA Yetersiz Depolama" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "HATA Nesne %(obj)s denetimde başarısız oldu ve karantinaya alındı: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "HATA Pickle problemi, %s karantinaya alınıyor" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "HATA Uzak sürücü bağlı değil %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "HATA %(db_file)s %(row)s eşzamanlamada" #, python-format msgid "ERROR Syncing %s" msgstr "HATA %s Eşzamanlama" #, python-format msgid "ERROR Trying to audit %s" msgstr "HATA %s denetimi denemesinde" msgid "ERROR Unhandled exception in request" msgstr "HATA İstekte ele alınmayan istisna var" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ hatası %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme başarısız (sonra " "yeniden denenecek)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "HATA hesap güncelleme başarısız %(ip)s:%(port)s/%(device)s (sonra tekrar " "denenecek):" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "HATA beklenmeyen isimli async bekleyen dosya %s" msgid "ERROR auditing" msgstr "denetlemede HATA" #, python-format msgid "ERROR auditing: %s" msgstr "HATA denetim: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "HATA kap güncelleme %(ip)s:%(port)s/%(dev)s ile başarısız oldu (sonraki " "async güncellemesi için kaydediliyor)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "%s'den HTTP yanıtı okumada HATA" #, python-format msgid "ERROR reading db %s" msgstr "%s veri tabanı okumada HATA" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "HATA rsync %(code)s ile başarısız oldu: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "%(node)s düğümlü %(file)s eş zamanlamada HATA" msgid "ERROR trying to replicate" msgstr "Çoğaltmaya çalışmada HATA" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s temizlenmeye çalışırken HATA" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "HATA %(type)s sunucusu %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "HATA %s den baskılamaların yüklenmesinde: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "HATA uzak sunucuda %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "HATA: Sürücü bölümlerine olan yollar alınamadı: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "HATA: %(path)s e erişilemiyor: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "HATA: Denetim çalıştırılamıyor: %s" msgid "Error hashing suffix" msgstr "Sonek özetini çıkarmada hata" msgid "Error listing devices" msgstr "Aygıtları listelemede hata" #, python-format msgid "Error on render profiling results: %s"
msgstr "Profilleme sonuçlarının gerçeklenmesinde hata: %s" msgid "Error parsing recon cache file" msgstr "Recon zula dosyasını ayrıştırmada hata" msgid "Error reading recon cache file" msgstr "Recon zula dosyası okumada hata" msgid "Error reading ringfile" msgstr "Halka dosyası okunurken hata" msgid "Error reading swift.conf" msgstr "swift.conf okunurken hata" msgid "Error retrieving recon data" msgstr "Recon verisini almada hata" msgid "Error syncing handoff partition" msgstr "Devir bölümünü eÅŸ zamanlamada hata" msgid "Error syncing partition" msgstr "Bölüm eÅŸzamanlamada hata" #, python-format msgid "Error syncing with node: %s" msgstr "Düğüm ile eÅŸ zamanlamada hata: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Yeniden inÅŸa denenirken hata %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Hata: Bir hata oluÅŸtu" msgid "Error: missing config path argument" msgstr "Hata: yapılandırma yolu deÄŸiÅŸkeni eksik" #, python-format msgid "Error: unable to locate %s" msgstr "Hata: %s bulunamıyor" msgid "Exception dumping recon cache" msgstr "Yeniden baÄŸlanma zulasının dökümünde istisna" msgid "Exception in top-level account reaper loop" msgstr "Üst seviye hesap biçme döngüsünde istisna" msgid "Exception in top-level replication loop" msgstr "Üst seviye çoÄŸaltma döngüsünde istisna" msgid "Exception in top-levelreconstruction loop" msgstr "Üst seviye yeniden oluÅŸturma döngüsünde istisna" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ile istisna" #, python-format msgid "Exception with account %s" msgstr "%s hesabında istisna" #, python-format msgid "Exception with containers for account %s" msgstr "%s hesabı için kaplarla ilgili istisna" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "%(account)s hesabı için %(container)s kabı için nesneler için istisna" #, python-format msgid "Expect: 100-continue on %s" msgstr "Beklenen: 100-%s üzerinden devam et" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s den %(found_domain)s e CNAME zinciri takip ediliyor" msgid "Found configs:" msgstr "Yapılandırmalar bulundu:" msgid "Host unreachable" msgstr "İstemci eriÅŸilebilir deÄŸil" #, python-format msgid "Incomplete pass on account %s" msgstr "%s hesabından tamamlanmamış geçiÅŸ" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Geçersix X-Container-Sync-To biçimi %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To'da geçersiz istemci %r" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Geçersiz bekleyen girdi %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "%(full_path)s den geçersiz yanıt %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s den geçersiz yanıt %(resp)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To'da geçersiz ÅŸema %r, \"//\", \"http\", veya \"https\" " "olmalı." #, python-format msgid "Killing long-running rsync: %s" msgstr "Uzun süre çalışan rsync öldürülüyor: %s" msgid "Lockup detected.. killing live coros." msgstr "Kilitleme algılandı.. canlı co-rutinler öldürülüyor." 
#, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s %(found_domain)s eÅŸleÅŸtirildi" #, python-format msgid "No %s running" msgstr "Çalışan %s yok" #, python-format msgid "No permission to signal PID %d" msgstr "%d PID'ine sinyalleme izni yok" #, python-format msgid "No policy with index %s" msgstr "%s indisine sahip ilke yok" #, python-format msgid "No realm key for %r" msgstr "%r için realm anahtarı yok" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Düğüm hatası sınırlandı %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Yeterince nesne sunucu ack'lenmedi (%d alındı)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Bulunamadı %(sync_from)r => %(sync_to)r - nesne %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%s saniye boyunca hiçbir ÅŸey yeniden oluÅŸturulmadı." #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s saniyedir hiçbir ÅŸey çoÄŸaltılmadı." msgid "Object" msgstr "Nesne" msgid "Object PUT" msgstr "Nesne PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "Nesne PUT 409 için 202 döndürüyor: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Nesne PUT 412 döndürüyor, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Nesne denetimi (%(type)s) \"%(mode)s\" kipinde tamamlandı: %(elapsed).02fs. " "Toplam karantina: %(quars)d, Toplam hata: %(errors)d, Toplam dosya/sn: " "%(frate).2f, Toplam bayt/sn: %(brate).2f, Denetleme zamanı: %(audit).2f, " "Oran: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Nesne denetim istatistikleri: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Nesne yeniden oluÅŸturma tamamlandı (bir kere). (%.02f dakika)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Nesne yeniden oluÅŸturma tamamlandı. (%.02f dakika)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Nesne çoÄŸaltma tamamlandı (bir kere). (%.02f dakika)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Nesne çoÄŸaltma tamamlandı. 
(%.02f dakika)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Nesne sunucuları %s eÅŸleÅŸmeyen etag döndürdü" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Nesne güncelleme süpürmesi tamamlandı: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-To'da parametre, sorgular, ve parçalara izin verilmez" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Bölüm zamanları: azami %(max).4fs, asgari %(min).4fs, ortalama %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To'de yol gerekli" #, python-format msgid "Problem cleaning up %s" msgstr "%s temizliÄŸinde problem" #, python-format msgid "Profiling Error: %s" msgstr "Profilleme Hatası: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "%(hsh_path)s %(quar_path)s karantinasına alındı çünkü bir dizin deÄŸil" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Bir dizin olmadığından %(object_path)s %(quar_path)s e karantinaya alındı" #, python-format msgid "Quarantining DB %s" msgstr "DB %s karantinaya alınıyor" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Oran sınırı uyku kaydı: %(account)s/%(container)s/%(object)s için %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d db silindi" #, python-format msgid "Removing %s objects" msgstr "%s nesne kaldırılıyor" #, python-format msgid "Removing partition: %s" msgstr "Bölüm kaldırılıyor: %s" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Geçersiz pid'e sahip pid dosyası %s siliniyor" #, python-format msgid "Removing stale pid file %s" msgstr "Askıdaki pid dosyası siliniyor %s" msgid "Replication run OVER" msgstr "ÇoÄŸaltma çalışması BİTTİ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Kara listeleme yüzünden 497 döndürülüyor: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s ye %(meth)s için 498 döndürülüyor. Oran sınırı " "(Azami uyku) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Zincir deÄŸiÅŸikliÄŸi algılandı. Mevcut yeniden oluÅŸturma geçiÅŸi iptal ediliyor." msgid "Ring change detected. Aborting current replication pass." msgstr "Zincir deÄŸiÅŸimi algılandı. Mevcut çoÄŸaltma geçiÅŸi iptal ediliyor." #, python-format msgid "Running %s once" msgstr "%s bir kere çalıştırılıyor" msgid "Running object reconstructor in script mode." msgstr "Nesne yeniden oluÅŸturma betik kipinde çalıştırılıyor." msgid "Running object replicator in script mode." msgstr "Nesne çoÄŸaltıcı betik kipinde çalıştırılıyor." 
#, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s den beri: %(sync)s eÅŸzamanlandı [%(delete)s silme, %(put)s koyma], " "%(skip)s atlama, %(fail)s baÅŸarısız" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "%(time)s den beri: Hesap denetimleri: %(passed)s denetimi geçti, %(failed)s " "denetimi geçemedi" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "%(time)s den beri: Kap denetimleri: %(pass)s denetimi geçti, %(fail)s " "denetimde baÅŸarısız" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "BaÄŸlı olmadığından %(device)s atlanıyor" #, python-format msgid "Skipping %s as it is not mounted" msgstr "BaÄŸlı olmadığından %s atlanıyor" #, python-format msgid "Starting %s" msgstr "%s baÅŸlatılıyor" msgid "Starting object reconstruction pass." msgstr "Nesne yeniden oluÅŸturma geçiÅŸi baÅŸlatılıyor." msgid "Starting object reconstructor in daemon mode." msgstr "Nesne yeniden oluÅŸturma artalan iÅŸlemi kipinde baÅŸlatılıyor." msgid "Starting object replication pass." msgstr "Nesne çoÄŸaltma geçiÅŸi baÅŸlatılıyor." msgid "Starting object replicator in daemon mode." msgstr "Nesne çoÄŸaltıcı artalan iÅŸlemi kipinde baÅŸlatılıyor." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s (%(time).03f) de %(src)s baÅŸarılı rsync'i" msgid "The file type are forbidden to access!" msgstr "Dosya türüne eriÅŸim yasaklanmış!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "(%(total)s) kabı için %(key)s toplamı ilkeler arasındaki %(key)s toplamıyla " "eÅŸleÅŸmiyor (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ile zaman aşımı istisnası" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s deneniyor" #, python-format msgid "Trying to GET %(full_path)s" msgstr "%(full_path)s GET deneniyor" msgid "Trying to read during GET" msgstr "GET sırasında okuma deneniyor" msgid "Trying to read during GET (retrying)" msgstr "GET sırasında okuma deneniyor (yeniden deneniyor)" msgid "Trying to send to client" msgstr "İstemciye gönderilmeye çalışılıyor" #, python-format msgid "Trying to sync suffixes with %s" msgstr "%s e sahip son ekler eÅŸzamanlanmaya çalışılıyor" #, python-format msgid "Trying to write to %s" msgstr "%s'e yazmaya çalışılıyor" msgid "UNCAUGHT EXCEPTION" msgstr "YAKALANMAYAN İSTİSNA" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "%s libc'de bulunamadı. No-op olarak çıkılıyor." #, python-format msgid "Unable to locate config for %s" msgstr "%s için yapılandırma bulunamıyor" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate libc'de bulunamadı. No-op olarak çıkılıyor." 
#, python-format msgid "Unable to read config from %s" msgstr "%s'den yapılandırma okunamıyor" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r yetki al" msgid "Unhandled exception" msgstr "Yakalanmamış istisna" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "GET sırasında bilinmeyen istisna: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s için güncelleme raporu baÅŸarısız" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s için güncelleme raporu gönderildi" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "UYARI: SSL yalnızca test amaçlı etkinleÅŸtirilmelidir. Üretim için kurulumda " "harici SSL sonlandırma kullanın." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "UYARI: Dosya göstericisi sınırı deÄŸiÅŸtirilemiyor. Root deÄŸil misiniz?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "UYARI: Azami süreç limiti deÄŸiÅŸtirilemiyor. Root deÄŸil misiniz?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "UYARI: Hafıza sınırı deÄŸiÅŸtirilemiyor. Root deÄŸil misiniz?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Uyarı: Memcached istemcisi olmadan oran sınırlama yapılamaz" #, python-format msgid "method %s is not allowed." msgstr "%s metoduna izin verilmez." msgid "no log file found" msgstr "kayıt dosyası bulunamadı" msgid "odfpy not installed." msgstr "odfpy kurulu deÄŸil." #, python-format msgid "plotting results failed due to %s" msgstr "çizdirme sonuçlaru %s sebebiyle baÅŸarısız" msgid "python-matplotlib not installed." msgstr "python-matplotlib kurulu deÄŸil." swift-2.17.0/swift/common/0000775000175100017510000000000013236061751015411 5ustar zuulzuul00000000000000swift-2.17.0/swift/common/constraints.py0000666000175100017510000003675513236061617020355 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import functools import os from os.path import isdir # tighter scoped import for mocking import six from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError from six.moves import urllib from swift.common import utils, exceptions from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \ HTTPRequestEntityTooLarge, HTTPPreconditionFailed, HTTPNotImplemented, \ HTTPException MAX_FILE_SIZE = 5368709122 MAX_META_NAME_LENGTH = 128 MAX_META_VALUE_LENGTH = 256 MAX_META_COUNT = 90 MAX_META_OVERALL_SIZE = 4096 MAX_HEADER_SIZE = 8192 MAX_OBJECT_NAME_LENGTH = 1024 CONTAINER_LISTING_LIMIT = 10000 ACCOUNT_LISTING_LIMIT = 10000 MAX_ACCOUNT_NAME_LENGTH = 256 MAX_CONTAINER_NAME_LENGTH = 256 VALID_API_VERSIONS = ["v1", "v1.0"] EXTRA_HEADER_COUNT = 0 # If adding an entry to DEFAULT_CONSTRAINTS, note that # these constraints are automatically published by the # proxy server in responses to /info requests, with values # updated by reload_constraints() DEFAULT_CONSTRAINTS = { 'max_file_size': MAX_FILE_SIZE, 'max_meta_name_length': MAX_META_NAME_LENGTH, 'max_meta_value_length': MAX_META_VALUE_LENGTH, 'max_meta_count': MAX_META_COUNT, 'max_meta_overall_size': MAX_META_OVERALL_SIZE, 'max_header_size': MAX_HEADER_SIZE, 'max_object_name_length': MAX_OBJECT_NAME_LENGTH, 'container_listing_limit': CONTAINER_LISTING_LIMIT, 'account_listing_limit': ACCOUNT_LISTING_LIMIT, 'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH, 'max_container_name_length': MAX_CONTAINER_NAME_LENGTH, 'valid_api_versions': VALID_API_VERSIONS, 'extra_header_count': EXTRA_HEADER_COUNT, } SWIFT_CONSTRAINTS_LOADED = False OVERRIDE_CONSTRAINTS = {} # any constraints overridden by SWIFT_CONF_FILE EFFECTIVE_CONSTRAINTS = {} # populated by reload_constraints def reload_constraints(): """ Parse SWIFT_CONF_FILE and reset module level global constraint attrs, populating OVERRIDE_CONSTRAINTS AND EFFECTIVE_CONSTRAINTS along the way. """ global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS SWIFT_CONSTRAINTS_LOADED = False OVERRIDE_CONSTRAINTS = {} constraints_conf = ConfigParser() if constraints_conf.read(utils.SWIFT_CONF_FILE): SWIFT_CONSTRAINTS_LOADED = True for name in DEFAULT_CONSTRAINTS: try: value = constraints_conf.get('swift-constraints', name) except NoOptionError: pass except NoSectionError: # We are never going to find the section for another option break else: try: value = int(value) except ValueError: value = utils.list_from_csv(value) OVERRIDE_CONSTRAINTS[name] = value for name, default in DEFAULT_CONSTRAINTS.items(): value = OVERRIDE_CONSTRAINTS.get(name, default) EFFECTIVE_CONSTRAINTS[name] = value # "globals" in this context is module level globals, always. globals()[name.upper()] = value reload_constraints() # Maximum slo segments in buffer MAX_BUFFERED_SLO_SEGMENTS = 10000 # By default the maximum number of allowed headers depends on the number of max # allowed metadata settings plus a default value of 36 for swift internally # generated headers and regular http headers. If for some reason this is not # enough (custom middleware for example) it can be increased with the # extra_header_count constraint. MAX_HEADER_COUNT = MAX_META_COUNT + 36 + max(EXTRA_HEADER_COUNT, 0) def check_metadata(req, target_type): """ Check metadata sent in the request headers. This should only check that the metadata in the request given is valid. Checks against account/container overall metadata should be forwarded on to its respective server to be checked. 
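    For illustration, a minimal sketch (assumes a swob Request; the header
    names and values here are arbitrary):

        >>> from swift.common.swob import Request
        >>> req = Request.blank(
        ...     '/v1/a/c/o', headers={'X-Object-Meta-Color': 'blue'})
        >>> check_metadata(req, 'object') is None
        True
        >>> req = Request.blank(
        ...     '/v1/a/c/o', headers={'X-Object-Meta-' + 'k' * 200: 'v'})
        >>> check_metadata(req, 'object').status_int
        400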
:param req: request object :param target_type: str: one of: object, container, or account: indicates which type the target storage for the metadata is :returns: HTTPBadRequest with bad metadata otherwise None """ target_type = target_type.lower() prefix = 'x-%s-meta-' % target_type meta_count = 0 meta_size = 0 for key, value in req.headers.items(): if (isinstance(value, six.string_types) and len(value) > MAX_HEADER_SIZE): return HTTPBadRequest(body='Header value too long: %s' % key[:MAX_META_NAME_LENGTH], request=req, content_type='text/plain') if not key.lower().startswith(prefix): continue key = key[len(prefix):] if not key: return HTTPBadRequest(body='Metadata name cannot be empty', request=req, content_type='text/plain') bad_key = not check_utf8(key) bad_value = value and not check_utf8(value) if target_type in ('account', 'container') and (bad_key or bad_value): return HTTPBadRequest(body='Metadata must be valid UTF-8', request=req, content_type='text/plain') meta_count += 1 meta_size += len(key) + len(value) if len(key) > MAX_META_NAME_LENGTH: return HTTPBadRequest( body='Metadata name too long: %s%s' % (prefix, key), request=req, content_type='text/plain') if len(value) > MAX_META_VALUE_LENGTH: return HTTPBadRequest( body='Metadata value longer than %d: %s%s' % ( MAX_META_VALUE_LENGTH, prefix, key), request=req, content_type='text/plain') if meta_count > MAX_META_COUNT: return HTTPBadRequest( body='Too many metadata items; max %d' % MAX_META_COUNT, request=req, content_type='text/plain') if meta_size > MAX_META_OVERALL_SIZE: return HTTPBadRequest( body='Total metadata too large; max %d' % MAX_META_OVERALL_SIZE, request=req, content_type='text/plain') return None def check_object_creation(req, object_name): """ Check to ensure that everything is alright about an object to be created. 
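    For illustration, a minimal sketch (header values are arbitrary; a real
    PUT would normally carry a body):

        >>> from swift.common.swob import Request
        >>> req = Request.blank('/v1/a/c/o', method='PUT', headers={
        ...     'X-Timestamp': '1517443200.00000',
        ...     'Content-Length': '0',
        ...     'Content-Type': 'application/octet-stream'})
        >>> check_object_creation(req, 'o') is None
        True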
:param req: HTTP request object :param object_name: name of object to be created :returns: HTTPRequestEntityTooLarge -- the object is too large :returns: HTTPLengthRequired -- missing content-length header and not a chunked request :returns: HTTPBadRequest -- missing or bad content-type header, or bad metadata :returns: HTTPNotImplemented -- unsupported transfer-encoding header value """ try: ml = req.message_length() except ValueError as e: return HTTPBadRequest(request=req, content_type='text/plain', body=str(e)) except AttributeError as e: return HTTPNotImplemented(request=req, content_type='text/plain', body=str(e)) if ml is not None and ml > MAX_FILE_SIZE: return HTTPRequestEntityTooLarge(body='Your request is too large.', request=req, content_type='text/plain') if req.content_length is None and \ req.headers.get('transfer-encoding') != 'chunked': return HTTPLengthRequired(body='Missing Content-Length header.', request=req, content_type='text/plain') if len(object_name) > MAX_OBJECT_NAME_LENGTH: return HTTPBadRequest(body='Object name length of %d longer than %d' % (len(object_name), MAX_OBJECT_NAME_LENGTH), request=req, content_type='text/plain') if 'Content-Type' not in req.headers: return HTTPBadRequest(request=req, content_type='text/plain', body='No content type') try: req = check_delete_headers(req) except HTTPException as e: return HTTPBadRequest(request=req, body=e.body, content_type='text/plain') if not check_utf8(req.headers['Content-Type']): return HTTPBadRequest(request=req, body='Invalid Content-Type', content_type='text/plain') return check_metadata(req, 'object') def check_dir(root, drive): """ Verify that the path to the device is a directory and is a lesser constraint that is enforced when a full mount_check isn't possible with, for instance, a VM using loopback or partitions. :param root: base path where the dir is :param drive: drive name to be checked :returns: full path to the device, or None if drive fails to validate """ return check_drive(root, drive, False) def check_mount(root, drive): """ Verify that the path to the device is a mount point and mounted. This allows us to fast fail on drives that have been unmounted because of issues, and also prevents us for accidentally filling up the root partition. :param root: base path where the devices are mounted :param drive: drive name to be checked :returns: full path to the device, or None if drive fails to validate """ return check_drive(root, drive, True) def check_drive(root, drive, mount_check): """ Validate the path given by root and drive is a valid existing directory. :param root: base path where the devices are mounted :param drive: drive name to be checked :param mount_check: additionally require path is mounted :returns: full path to the device, or None if drive fails to validate """ if not (urllib.parse.quote_plus(drive) == drive): return None path = os.path.join(root, drive) if mount_check: if utils.ismount(path): return path else: if isdir(path): return path return None def check_float(string): """ Helper function for checking if a string can be converted to a float. :param string: string to be verified as a float :returns: True if the string can be converted to a float, False otherwise """ try: float(string) return True except ValueError: return False def valid_timestamp(request): """ Helper function to extract a timestamp from requests that require one. 
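    For illustration, a minimal sketch (the timestamp value is arbitrary):

        >>> from swift.common.swob import Request
        >>> req = Request.blank(
        ...     '/', headers={'X-Timestamp': '1517443200.00000'})
        >>> valid_timestamp(req).normal
        '1517443200.00000'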
:param request: the swob request object :returns: a valid Timestamp instance :raises HTTPBadRequest: on missing or invalid X-Timestamp """ try: return request.timestamp except exceptions.InvalidTimestamp as e: raise HTTPBadRequest(body=str(e), request=request, content_type='text/plain') def check_delete_headers(request): """ Check that 'x-delete-after' and 'x-delete-at' headers have valid values. Values should be positive integers and correspond to a time greater than the request timestamp. If the 'x-delete-after' header is found then its value is used to compute an 'x-delete-at' value which takes precedence over any existing 'x-delete-at' header. :param request: the swob request object :raises: HTTPBadRequest in case of invalid values :returns: the swob request object """ now = float(valid_timestamp(request)) if 'x-delete-after' in request.headers: try: x_delete_after = int(request.headers['x-delete-after']) except ValueError: raise HTTPBadRequest(request=request, content_type='text/plain', body='Non-integer X-Delete-After') actual_del_time = utils.normalize_delete_at_timestamp( now + x_delete_after) if int(actual_del_time) <= now: raise HTTPBadRequest(request=request, content_type='text/plain', body='X-Delete-After in past') request.headers['x-delete-at'] = actual_del_time del request.headers['x-delete-after'] if 'x-delete-at' in request.headers: try: x_delete_at = int(utils.normalize_delete_at_timestamp( int(request.headers['x-delete-at']))) except ValueError: raise HTTPBadRequest(request=request, content_type='text/plain', body='Non-integer X-Delete-At') if x_delete_at <= now and not utils.config_true_value( request.headers.get('x-backend-replication', 'f')): raise HTTPBadRequest(request=request, content_type='text/plain', body='X-Delete-At in past') return request def check_utf8(string): """ Validate if a string is valid UTF-8 str or unicode and that it does not contain any null character. :param string: string to be validated :returns: True if the string is valid utf-8 str or unicode and contains no null characters, False otherwise """ if not string: return False try: if isinstance(string, six.text_type): string.encode('utf-8') else: decoded = string.decode('UTF-8') if decoded.encode('UTF-8') != string: return False # A UTF-8 string with surrogates in it is invalid. if any(0xD800 <= ord(codepoint) <= 0xDFFF for codepoint in decoded): return False return '\x00' not in string # If string is unicode, decode() will raise UnicodeEncodeError # So, we should catch both UnicodeDecodeError & UnicodeEncodeError except UnicodeError: return False def check_name_format(req, name, target_type): """ Validate that the header contains valid account or container name. :param req: HTTP request object :param name: header value to validate :param target_type: which header is being validated (Account or Container) :returns: A properly encoded account name or container name :raise HTTPPreconditionFailed: if account header is not well formatted. """ if not name: raise HTTPPreconditionFailed( request=req, body='%s name cannot be empty' % target_type) if isinstance(name, six.text_type): name = name.encode('utf-8') if '/' in name: raise HTTPPreconditionFailed( request=req, body='%s name cannot contain slashes' % target_type) return name check_account_format = functools.partial(check_name_format, target_type='Account') check_container_format = functools.partial(check_name_format, target_type='Container') def valid_api_version(version): """ Checks if the requested version is valid. 
Currently Swift only supports "v1" and "v1.0". """ global VALID_API_VERSIONS if not isinstance(VALID_API_VERSIONS, list): VALID_API_VERSIONS = [str(VALID_API_VERSIONS)] return version in VALID_API_VERSIONS swift-2.17.0/swift/common/splice.py0000666000175100017510000001257413236061617017256 0ustar zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Bindings to the `tee` and `splice` system calls ''' import os import operator import six import ctypes import ctypes.util __all__ = ['tee', 'splice'] c_loff_t = ctypes.c_long # python 2.6 doesn't have c_ssize_t c_ssize_t = getattr(ctypes, 'c_ssize_t', ctypes.c_long) class Tee(object): '''Binding to `tee`''' __slots__ = '_c_tee', def __init__(self): libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) try: c_tee = libc.tee except AttributeError: self._c_tee = None return c_tee.argtypes = [ ctypes.c_int, ctypes.c_int, ctypes.c_size_t, ctypes.c_uint ] c_tee.restype = c_ssize_t def errcheck(result, func, arguments): if result == -1: errno = ctypes.set_errno(0) raise IOError(errno, 'tee: %s' % os.strerror(errno)) else: return result c_tee.errcheck = errcheck self._c_tee = c_tee def __call__(self, fd_in, fd_out, len_, flags): '''See `man 2 tee` File-descriptors can be file-like objects with a `fileno` method, or integers. Flags can be an integer value, or a list of flags (exposed on `splice`). This function returns the number of bytes transferred (i.e. the actual result of the call to `tee`). Upon other errors, an `IOError` is raised with the proper `errno` set. 
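        Illustrative sketch (assumes a Linux kernel exposing tee(2); the
        pipe plumbing below is only an example):

            >>> import os
            >>> read1, write1 = os.pipe()
            >>> read2, write2 = os.pipe()
            >>> _ = os.write(write1, b'data')
            >>> tee(read1, write2, 4, 0)
            4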
''' if not self.available: raise EnvironmentError('tee not available') if not isinstance(flags, six.integer_types): c_flags = six.moves.reduce(operator.or_, flags, 0) else: c_flags = flags c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)() c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)() return self._c_tee(c_fd_in, c_fd_out, len_, c_flags) @property def available(self): '''Availability of `tee`''' return self._c_tee is not None tee = Tee() del Tee class Splice(object): '''Binding to `splice`''' # From `bits/fcntl-linux.h` SPLICE_F_MOVE = 1 SPLICE_F_NONBLOCK = 2 SPLICE_F_MORE = 4 SPLICE_F_GIFT = 8 __slots__ = '_c_splice', def __init__(self): libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) try: c_splice = libc.splice except AttributeError: self._c_splice = None return c_loff_t_p = ctypes.POINTER(c_loff_t) c_splice.argtypes = [ ctypes.c_int, c_loff_t_p, ctypes.c_int, c_loff_t_p, ctypes.c_size_t, ctypes.c_uint ] c_splice.restype = c_ssize_t def errcheck(result, func, arguments): if result == -1: errno = ctypes.set_errno(0) raise IOError(errno, 'splice: %s' % os.strerror(errno)) else: off_in = arguments[1] off_out = arguments[3] return ( result, off_in.contents.value if off_in is not None else None, off_out.contents.value if off_out is not None else None) c_splice.errcheck = errcheck self._c_splice = c_splice def __call__(self, fd_in, off_in, fd_out, off_out, len_, flags): '''See `man 2 splice` File-descriptors can be file-like objects with a `fileno` method, or integers. Flags can be an integer value, or a list of flags (exposed on this object). Returns a tuple of the result of the `splice` call, the output value of `off_in` and the output value of `off_out` (or `None` for any of these output values, if applicable). Upon other errors, an `IOError` is raised with the proper `errno` set. Note: if you want to pass `NULL` as value for `off_in` or `off_out` to the system call, you must pass `None`, *not* 0! ''' if not self.available: raise EnvironmentError('splice not available') if not isinstance(flags, six.integer_types): c_flags = six.moves.reduce(operator.or_, flags, 0) else: c_flags = flags c_fd_in = getattr(fd_in, 'fileno', lambda: fd_in)() c_fd_out = getattr(fd_out, 'fileno', lambda: fd_out)() c_off_in = \ ctypes.pointer(c_loff_t(off_in)) if off_in is not None else None c_off_out = \ ctypes.pointer(c_loff_t(off_out)) if off_out is not None else None return self._c_splice( c_fd_in, c_off_in, c_fd_out, c_off_out, len_, c_flags) @property def available(self): '''Availability of `splice`''' return self._c_splice is not None splice = Splice() del Splice swift-2.17.0/swift/common/storage_policy.py0000666000175100017510000010453213236061617021016 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
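# Illustrative note (a sketch, not upstream commentary): given a swift.conf
# along the lines of
#
#   [storage-policy:0]
#   name = gold
#   default = yes
#
#   [storage-policy:1]
#   name = silver
#
# reload_storage_policies(), defined later in this module, rebuilds the
# global POLICIES collection so that POLICIES.default is the 'gold' policy
# and POLICIES.get_by_name('silver') returns the policy with index 1.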
import logging import os import string import sys import textwrap import six from six.moves.configparser import ConfigParser from swift.common.utils import ( config_true_value, quorum_size, whataremyips, list_from_csv, config_positive_int_value, get_zero_indexed_base_string) from swift.common.ring import Ring, RingData from swift.common import utils from swift.common.exceptions import RingLoadError from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES LEGACY_POLICY_NAME = 'Policy-0' VALID_CHARS = '-' + string.ascii_letters + string.digits DEFAULT_POLICY_TYPE = REPL_POLICY = 'replication' EC_POLICY = 'erasure_coding' DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576 class BindPortsCache(object): def __init__(self, swift_dir, bind_ip): self.swift_dir = swift_dir self.mtimes_by_ring_path = {} self.portsets_by_ring_path = {} self.my_ips = set(whataremyips(bind_ip)) def all_bind_ports_for_node(self): """ Given an iterable of IP addresses identifying a storage backend server, return a set of all bind ports defined in all rings for this storage backend server. The caller is responsible for not calling this method (which performs at least a stat on all ring files) too frequently. """ # NOTE: we don't worry about disappearing rings here because you can't # ever delete a storage policy. for policy in POLICIES: # NOTE: we must NOT use policy.load_ring to load the ring. Users # of this utility function will not need the actual ring data, just # the bind ports. # # This is duplicated with Ring.__init__ just a bit... serialized_path = os.path.join(self.swift_dir, policy.ring_name + '.ring.gz') try: new_mtime = os.path.getmtime(serialized_path) except OSError: continue old_mtime = self.mtimes_by_ring_path.get(serialized_path) if not old_mtime or old_mtime != new_mtime: self.portsets_by_ring_path[serialized_path] = set( dev['port'] for dev in RingData.load(serialized_path, metadata_only=True).devs if dev and dev['ip'] in self.my_ips) self.mtimes_by_ring_path[serialized_path] = new_mtime # No "break" here so that the above line will update the # mtimes_by_ring_path entry for any ring that changes, not just # the first one we notice. # Return the requested set of ports from our (now-freshened) cache return six.moves.reduce(set.union, self.portsets_by_ring_path.values(), set()) class PolicyError(ValueError): def __init__(self, msg, index=None): if index is not None: msg += ', for index %r' % index super(PolicyError, self).__init__(msg) def _get_policy_string(base, policy_index): return get_zero_indexed_base_string(base, policy_index) def get_policy_string(base, policy_or_index): """ Helper function to construct a string from a base and the policy. Used to encode the policy index into either a file name or a directory name by various modules. :param base: the base string :param policy_or_index: StoragePolicy instance, or an index (string or int), if None the legacy storage Policy-0 is assumed. :returns: base name with policy index added :raises PolicyError: if no policy exists with the given policy_index """ if isinstance(policy_or_index, BaseStoragePolicy): policy = policy_or_index else: policy = POLICIES.get_by_index(policy_or_index) if policy is None: raise PolicyError("Unknown policy", index=policy_or_index) return _get_policy_string(base, int(policy)) def split_policy_string(policy_string): """ Helper function to convert a string representing a base and a policy. Used to decode the policy from either a file name or a directory name by various modules. 
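    For illustration (a sketch assuming a policy with index 1 is configured):

        >>> base, policy = split_policy_string('objects-1')
        >>> base
        'objects'
        >>> int(policy)
        1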
:param policy_string: base name with policy index added :raises PolicyError: if given index does not map to a valid policy :returns: a tuple, in the form (base, policy) where base is the base string and policy is the StoragePolicy instance for the index encoded in the policy_string. """ if '-' in policy_string: base, policy_index = policy_string.rsplit('-', 1) else: base, policy_index = policy_string, None policy = POLICIES.get_by_index(policy_index) if get_policy_string(base, policy) != policy_string: raise PolicyError("Unknown policy", index=policy_index) return base, policy class BaseStoragePolicy(object): """ Represents a storage policy. Not meant to be instantiated directly; implement a derived subclasses (e.g. StoragePolicy, ECStoragePolicy, etc) or use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. The object_ring property is lazy loaded once the service's ``swift_dir`` is known via :meth:`~StoragePolicyCollection.get_object_ring`, but it may be over-ridden via object_ring kwarg at create time for testing or actively loaded with :meth:`~StoragePolicy.load_ring`. """ policy_type_to_policy_cls = {} def __init__(self, idx, name='', is_default=False, is_deprecated=False, object_ring=None, aliases=''): # do not allow BaseStoragePolicy class to be instantiated directly if type(self) == BaseStoragePolicy: raise TypeError("Can't instantiate BaseStoragePolicy directly") # policy parameter validation try: self.idx = int(idx) except ValueError: raise PolicyError('Invalid index', idx) if self.idx < 0: raise PolicyError('Invalid index', idx) self.alias_list = [] self.add_name(name) if aliases: names_list = list_from_csv(aliases) for alias in names_list: if alias == name: continue self.add_name(alias) self.is_deprecated = config_true_value(is_deprecated) self.is_default = config_true_value(is_default) if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls: raise PolicyError('Invalid type', self.policy_type) if self.is_deprecated and self.is_default: raise PolicyError('Deprecated policy can not be default. ' 'Invalid config', self.idx) self.ring_name = _get_policy_string('object', self.idx) self.object_ring = object_ring @property def name(self): return self.alias_list[0] @name.setter def name_setter(self, name): self._validate_policy_name(name) self.alias_list[0] = name @property def aliases(self): return ", ".join(self.alias_list) def __int__(self): return self.idx def __cmp__(self, other): return cmp(self.idx, int(other)) def __repr__(self): return ("%s(%d, %r, is_default=%s, " "is_deprecated=%s, policy_type=%r)") % \ (self.__class__.__name__, self.idx, self.alias_list, self.is_default, self.is_deprecated, self.policy_type) @classmethod def register(cls, policy_type): """ Decorator for Storage Policy implementations to register their StoragePolicy class. This will also set the policy_type attribute on the registered implementation. """ def register_wrapper(policy_cls): if policy_type in cls.policy_type_to_policy_cls: raise PolicyError( '%r is already registered for the policy_type %r' % ( cls.policy_type_to_policy_cls[policy_type], policy_type)) cls.policy_type_to_policy_cls[policy_type] = policy_cls policy_cls.policy_type = policy_type return policy_cls return register_wrapper @classmethod def _config_options_map(cls): """ Map config option name to StoragePolicy parameter name. 
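        For example, the ``default`` option in a swift.conf storage-policy
        section is passed to the policy constructor as ``is_default``.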
""" return { 'name': 'name', 'aliases': 'aliases', 'policy_type': 'policy_type', 'default': 'is_default', 'deprecated': 'is_deprecated', } @classmethod def from_config(cls, policy_index, options): config_to_policy_option_map = cls._config_options_map() policy_options = {} for config_option, value in options.items(): try: policy_option = config_to_policy_option_map[config_option] except KeyError: raise PolicyError('Invalid option %r in ' 'storage-policy section' % config_option, index=policy_index) policy_options[policy_option] = value return cls(policy_index, **policy_options) def get_info(self, config=False): """ Return the info dict and conf file options for this policy. :param config: boolean, if True all config options are returned """ info = {} for config_option, policy_attribute in \ self._config_options_map().items(): info[config_option] = getattr(self, policy_attribute) if not config: # remove some options for public consumption if not self.is_default: info.pop('default') if not self.is_deprecated: info.pop('deprecated') info.pop('policy_type') return info def _validate_policy_name(self, name): """ Helper function to determine the validity of a policy name. Used to check policy names before setting them. :param name: a name string for a single policy name. :raises PolicyError: if the policy name is invalid. """ if not name: raise PolicyError('Invalid name %r' % name, self.idx) # this is defensively restrictive, but could be expanded in the future if not all(c in VALID_CHARS for c in name): msg = 'Names are used as HTTP headers, and can not ' \ 'reliably contain any characters not in %r. ' \ 'Invalid name %r' % (VALID_CHARS, name) raise PolicyError(msg, self.idx) if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0: msg = 'The name %s is reserved for policy index 0. ' \ 'Invalid name %r' % (LEGACY_POLICY_NAME, name) raise PolicyError(msg, self.idx) if name.upper() in (existing_name.upper() for existing_name in self.alias_list): msg = 'The name %s is already assigned to this policy.' % name raise PolicyError(msg, self.idx) def add_name(self, name): """ Adds an alias name to the storage policy. Shouldn't be called directly from the storage policy but instead through the storage policy collection class, so lookups by name resolve correctly. :param name: a new alias for the storage policy """ self._validate_policy_name(name) self.alias_list.append(name) def remove_name(self, name): """ Removes an alias name from the storage policy. Shouldn't be called directly from the storage policy but instead through the storage policy collection class, so lookups by name resolve correctly. If the name removed is the primary name then the next available alias will be adopted as the new primary name. :param name: a name assigned to the storage policy """ if name not in self.alias_list: raise PolicyError("%s is not a name assigned to policy %s" % (name, self.idx)) if len(self.alias_list) == 1: raise PolicyError("Cannot remove only name %s from policy %s. " "Policies must have at least one name." % (name, self.idx)) else: self.alias_list.remove(name) def change_primary_name(self, name): """ Changes the primary/default name of the policy to a specified name. :param name: a string name to replace the current primary name. """ if name == self.name: return elif name in self.alias_list: self.remove_name(name) else: self._validate_policy_name(name) self.alias_list.insert(0, name) def load_ring(self, swift_dir): """ Load the ring for this policy immediately. 
:param swift_dir: path to rings """ if self.object_ring: return self.object_ring = Ring(swift_dir, ring_name=self.ring_name) @property def quorum(self): """ Number of successful backend requests needed for the proxy to consider the client request successful. """ raise NotImplementedError() @BaseStoragePolicy.register(REPL_POLICY) class StoragePolicy(BaseStoragePolicy): """ Represents a storage policy of type 'replication'. Default storage policy class unless otherwise overridden from swift.conf. Not meant to be instantiated directly; use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. """ @property def quorum(self): """ Quorum concept in the replication case: floor(number of replica / 2) + 1 """ if not self.object_ring: raise PolicyError('Ring is not loaded') return quorum_size(self.object_ring.replica_count) @BaseStoragePolicy.register(EC_POLICY) class ECStoragePolicy(BaseStoragePolicy): """ Represents a storage policy of type 'erasure_coding'. Not meant to be instantiated directly; use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. """ def __init__(self, idx, name='', aliases='', is_default=False, is_deprecated=False, object_ring=None, ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, ec_type=None, ec_ndata=None, ec_nparity=None, ec_duplication_factor=1): super(ECStoragePolicy, self).__init__( idx=idx, name=name, aliases=aliases, is_default=is_default, is_deprecated=is_deprecated, object_ring=object_ring) # Validate erasure_coding policy specific members # ec_type is one of the EC implementations supported by PyEClib if ec_type is None: raise PolicyError('Missing ec_type') if ec_type not in VALID_EC_TYPES: raise PolicyError('Wrong ec_type %s for policy %s, should be one' ' of "%s"' % (ec_type, self.name, ', '.join(VALID_EC_TYPES))) self._ec_type = ec_type # Define _ec_ndata as the number of EC data fragments # Accessible as the property "ec_ndata" try: value = int(ec_ndata) if value <= 0: raise ValueError self._ec_ndata = value except (TypeError, ValueError): raise PolicyError('Invalid ec_num_data_fragments %r' % ec_ndata, index=self.idx) # Define _ec_nparity as the number of EC parity fragments # Accessible as the property "ec_nparity" try: value = int(ec_nparity) if value <= 0: raise ValueError self._ec_nparity = value except (TypeError, ValueError): raise PolicyError('Invalid ec_num_parity_fragments %r' % ec_nparity, index=self.idx) # Define _ec_segment_size as the encode segment unit size # Accessible as the property "ec_segment_size" try: value = int(ec_segment_size) if value <= 0: raise ValueError self._ec_segment_size = value except (TypeError, ValueError): raise PolicyError('Invalid ec_object_segment_size %r' % ec_segment_size, index=self.idx) if self._ec_type == 'isa_l_rs_vand' and self._ec_nparity >= 5: logger = logging.getLogger("swift.common.storage_policy") if not logger.handlers: # If nothing else, log to stderr logger.addHandler(logging.StreamHandler(sys.__stderr__)) logger.warning( 'Storage policy %s uses an EC configuration known to harm ' 'data durability. Any data in this policy should be migrated. ' 'See https://bugs.launchpad.net/swift/+bug/1639691 for ' 'more information.' % self.name) if not is_deprecated: raise PolicyError( 'Storage policy %s uses an EC configuration known to harm ' 'data durability. This policy MUST be deprecated.' 
                    % self.name)

        # Initialize PyECLib EC backend
        try:
            self.pyeclib_driver = \
                ECDriver(k=self._ec_ndata, m=self._ec_nparity,
                         ec_type=self._ec_type)
        except ECDriverError as e:
            raise PolicyError("Error creating EC policy (%s)" % e,
                              index=self.idx)

        # quorum size in the EC case depends on the choice of EC scheme.
        self._ec_quorum_size = \
            self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed()
        self._fragment_size = None

        self._ec_duplication_factor = \
            config_positive_int_value(ec_duplication_factor)

    @property
    def ec_type(self):
        return self._ec_type

    @property
    def ec_ndata(self):
        return self._ec_ndata

    @property
    def ec_nparity(self):
        return self._ec_nparity

    @property
    def ec_n_unique_fragments(self):
        return self._ec_ndata + self._ec_nparity

    @property
    def ec_segment_size(self):
        return self._ec_segment_size

    @property
    def fragment_size(self):
        """
        Maximum length of a fragment, including header.

        NB: a fragment archive is a sequence of 0 or more max-length
        fragments followed by one possibly-shorter fragment.
        """
        # Technically pyeclib's get_segment_info signature calls for
        # (data_len, segment_size) but on a ranged GET we don't know the
        # ec-content-length header before we need to compute where in the
        # object we should request to align with the fragment size. So we
        # tell pyeclib a lie - from its perspective, as long as data_len >=
        # segment_size it'll give us the answer we want. From our
        # perspective, because we only use this answer to calculate the
        # *minimum* size we should read from an object body even if
        # data_len < segment_size we'll still only read *the whole one and
        # only last fragment* and pass that into pyeclib, which will know
        # what to do with it just as it always does when the last fragment
        # is < fragment_size.
        if self._fragment_size is None:
            self._fragment_size = self.pyeclib_driver.get_segment_info(
                self.ec_segment_size, self.ec_segment_size)['fragment_size']
        return self._fragment_size

    @property
    def ec_scheme_description(self):
        """
        This shorthand form of the important parts of the EC scheme is
        stored in Object System Metadata on the EC Fragment Archives for
        debugging.
        """
        return "%s %d+%d" % (self._ec_type, self._ec_ndata, self._ec_nparity)

    @property
    def ec_duplication_factor(self):
        return self._ec_duplication_factor

    def __repr__(self):
        extra_info = ''
        if self.ec_duplication_factor != 1:
            extra_info = ', ec_duplication_factor=%d' % \
                self.ec_duplication_factor
        return ("%s, EC config(ec_type=%s, ec_segment_size=%d, "
                "ec_ndata=%d, ec_nparity=%d%s)") % \
            (super(ECStoragePolicy, self).__repr__(), self.ec_type,
             self.ec_segment_size, self.ec_ndata, self.ec_nparity,
             extra_info)

    @classmethod
    def _config_options_map(cls):
        options = super(ECStoragePolicy, cls)._config_options_map()
        options.update({
            'ec_type': 'ec_type',
            'ec_object_segment_size': 'ec_segment_size',
            'ec_num_data_fragments': 'ec_ndata',
            'ec_num_parity_fragments': 'ec_nparity',
            'ec_duplication_factor': 'ec_duplication_factor',
        })
        return options

    def get_info(self, config=False):
        info = super(ECStoragePolicy, self).get_info(config=config)
        if not config:
            info.pop('ec_object_segment_size')
            info.pop('ec_num_data_fragments')
            info.pop('ec_num_parity_fragments')
            info.pop('ec_type')
            info.pop('ec_duplication_factor')
        return info

    @property
    def quorum(self):
        """
        Number of successful backend requests needed for the proxy to
        consider the client PUT request successful.
The quorum size for EC policies defines the minimum number of data + parity elements required to be able to guarantee the desired fault tolerance, which is the number of data elements supplemented by the minimum number of parity elements required by the chosen erasure coding scheme. For example, for Reed-Solomon, the minimum number parity elements required is 1, and thus the quorum_size requirement is ec_ndata + 1. Given the number of parity elements required is not the same for every erasure coding scheme, consult PyECLib for min_parity_fragments_needed() """ return self._ec_quorum_size * self.ec_duplication_factor def load_ring(self, swift_dir): """ Load the ring for this policy immediately. :param swift_dir: path to rings """ if self.object_ring: return def validate_ring_data(ring_data): """ EC specific validation Replica count check - we need _at_least_ (#data + #parity) replicas configured. Also if the replica count is larger than exactly that number there's a non-zero risk of error for code that is considering the number of nodes in the primary list from the ring. """ configured_fragment_count = ring_data.replica_count required_fragment_count = \ (self.ec_n_unique_fragments) * self.ec_duplication_factor if configured_fragment_count != required_fragment_count: raise RingLoadError( 'EC ring for policy %s needs to be configured with ' 'exactly %d replicas. Got %s.' % ( self.name, required_fragment_count, configured_fragment_count)) self.object_ring = Ring( swift_dir, ring_name=self.ring_name, validation_hook=validate_ring_data) def get_backend_index(self, node_index): """ Backend index for PyECLib :param node_index: integer of node index :return: integer of actual fragment index. if param is not an integer, return None instead """ try: node_index = int(node_index) except ValueError: return None return node_index % self.ec_n_unique_fragments class StoragePolicyCollection(object): """ This class represents the collection of valid storage policies for the cluster and is instantiated as :class:`StoragePolicy` objects are added to the collection when ``swift.conf`` is parsed by :func:`parse_storage_policies`. When a StoragePolicyCollection is created, the following validation is enforced: * If a policy with index 0 is not declared and no other policies defined, Swift will create one * The policy index must be a non-negative integer * If no policy is declared as the default and no other policies are defined, the policy with index 0 is set as the default * Policy indexes must be unique * Policy names are required * Policy names are case insensitive * Policy names must contain only letters, digits or a dash * Policy names must be unique * The policy name 'Policy-0' can only be used for the policy with index 0 * If any policies are defined, exactly one policy must be declared default * Deprecated policies can not be declared the default """ def __init__(self, pols): self.default = [] self.by_name = {} self.by_index = {} self._validate_policies(pols) def _add_policy(self, policy): """ Add pre-validated policies to internal indexes. 
""" for name in policy.alias_list: self.by_name[name.upper()] = policy self.by_index[int(policy)] = policy def __repr__(self): return (textwrap.dedent(""" StoragePolicyCollection([ %s ]) """) % ',\n '.join(repr(p) for p in self)).strip() def __len__(self): return len(self.by_index) def __getitem__(self, key): return self.by_index[key] def __iter__(self): return iter(self.by_index.values()) def _validate_policies(self, policies): """ :param policies: list of policies """ for policy in policies: if int(policy) in self.by_index: raise PolicyError('Duplicate index %s conflicts with %s' % ( policy, self.get_by_index(int(policy)))) for name in policy.alias_list: if name.upper() in self.by_name: raise PolicyError('Duplicate name %s conflicts with %s' % ( policy, self.get_by_name(name))) if policy.is_default: if not self.default: self.default = policy else: raise PolicyError( 'Duplicate default %s conflicts with %s' % ( policy, self.default)) self._add_policy(policy) # If a 0 policy wasn't explicitly given, or nothing was # provided, create the 0 policy now if 0 not in self.by_index: if len(self) != 0: raise PolicyError('You must specify a storage policy ' 'section for policy index 0 in order ' 'to define multiple policies') self._add_policy(StoragePolicy(0, name=LEGACY_POLICY_NAME)) # at least one policy must be enabled enabled_policies = [p for p in self if not p.is_deprecated] if not enabled_policies: raise PolicyError("Unable to find policy that's not deprecated!") # if needed, specify default if not self.default: if len(self) > 1: raise PolicyError("Unable to find default policy") self.default = self[0] self.default.is_default = True def get_by_name(self, name): """ Find a storage policy by its name. :param name: name of the policy :returns: storage policy, or None """ return self.by_name.get(name.upper()) def get_by_index(self, index): """ Find a storage policy by its index. An index of None will be treated as 0. :param index: numeric index of the storage policy :returns: storage policy, or None if no such policy """ # makes it easier for callers to just pass in a header value if index in ('', None): index = 0 else: try: index = int(index) except ValueError: return None return self.by_index.get(index) @property def legacy(self): return self.get_by_index(None) def get_object_ring(self, policy_idx, swift_dir): """ Get the ring object to use to handle a request based on its policy. An index of None will be treated as 0. :param policy_idx: policy index as defined in swift.conf :param swift_dir: swift_dir used by the caller :returns: appropriate ring object """ policy = self.get_by_index(policy_idx) if not policy: raise PolicyError("No policy with index %s" % policy_idx) if not policy.object_ring: policy.load_ring(swift_dir) return policy.object_ring def get_policy_info(self): """ Build info about policies for the /info endpoint :returns: list of dicts containing relevant policy information """ policy_info = [] for pol in self: # delete from /info if deprecated if pol.is_deprecated: continue policy_entry = pol.get_info() policy_info.append(policy_entry) return policy_info def add_policy_alias(self, policy_index, *aliases): """ Adds a new name or names to a policy :param policy_index: index of a policy in this policy collection. :param aliases: arbitrary number of string policy names to add. 
""" policy = self.get_by_index(policy_index) for alias in aliases: if alias.upper() in self.by_name: raise PolicyError('Duplicate name %s in use ' 'by policy %s' % (alias, self.get_by_name(alias))) else: policy.add_name(alias) self.by_name[alias.upper()] = policy def remove_policy_alias(self, *aliases): """ Removes a name or names from a policy. If the name removed is the primary name then the next available alias will be adopted as the new primary name. :param aliases: arbitrary number of existing policy names to remove. """ for alias in aliases: policy = self.get_by_name(alias) if not policy: raise PolicyError('No policy with name %s exists.' % alias) if len(policy.alias_list) == 1: raise PolicyError('Policy %s with name %s has only one name. ' 'Policies must have at least one name.' % ( policy, alias)) else: policy.remove_name(alias) del self.by_name[alias.upper()] def change_policy_primary_name(self, policy_index, new_name): """ Changes the primary or default name of a policy. The new primary name can be an alias that already belongs to the policy or a completely new name. :param policy_index: index of a policy in this policy collection. :param new_name: a string name to set as the new default name. """ policy = self.get_by_index(policy_index) name_taken = self.get_by_name(new_name) # if the name belongs to some other policy in the collection if name_taken and name_taken != policy: raise PolicyError('Other policy %s with name %s exists.' % (self.get_by_name(new_name).idx, new_name)) else: policy.change_primary_name(new_name) self.by_name[new_name.upper()] = policy def parse_storage_policies(conf): """ Parse storage policies in ``swift.conf`` - note that validation is done when the :class:`StoragePolicyCollection` is instantiated. :param conf: ConfigParser parser object for swift.conf """ policies = [] for section in conf.sections(): if not section.startswith('storage-policy:'): continue policy_index = section.split(':', 1)[1] config_options = dict(conf.items(section)) policy_type = config_options.pop('policy_type', DEFAULT_POLICY_TYPE) policy_cls = BaseStoragePolicy.policy_type_to_policy_cls[policy_type] policy = policy_cls.from_config(policy_index, config_options) policies.append(policy) return StoragePolicyCollection(policies) class StoragePolicySingleton(object): """ An instance of this class is the primary interface to storage policies exposed as a module level global named ``POLICIES``. This global reference wraps ``_POLICIES`` which is normally instantiated by parsing ``swift.conf`` and will result in an instance of :class:`StoragePolicyCollection`. You should never patch this instance directly, instead patch the module level ``_POLICIES`` instance so that swift code which imported ``POLICIES`` directly will reference the patched :class:`StoragePolicyCollection`. """ def __iter__(self): return iter(_POLICIES) def __len__(self): return len(_POLICIES) def __getitem__(self, key): return _POLICIES[key] def __getattribute__(self, name): return getattr(_POLICIES, name) def __repr__(self): return repr(_POLICIES) def reload_storage_policies(): """ Reload POLICIES from ``swift.conf``. 
""" global _POLICIES policy_conf = ConfigParser() policy_conf.read(utils.SWIFT_CONF_FILE) try: _POLICIES = parse_storage_policies(policy_conf) except PolicyError as e: raise SystemExit('ERROR: Invalid Storage Policy Configuration ' 'in %s (%s)' % (utils.SWIFT_CONF_FILE, e)) # parse configuration and setup singleton _POLICIES = None reload_storage_policies() POLICIES = StoragePolicySingleton() swift-2.17.0/swift/common/request_helpers.py0000666000175100017510000006755413236061617021221 0ustar zuulzuul00000000000000# Copyright (c) 2010-2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Miscellaneous utility functions for use in generating responses. Why not swift.common.utils, you ask? Because this way we can import things from swob in here without creating circular imports. """ import hashlib import itertools import sys import time import six from six.moves.urllib.parse import unquote from swift.common.header_key_dict import HeaderKeyDict from swift import gettext_ as _ from swift.common.storage_policy import POLICIES from swift.common.exceptions import ListingIterError, SegmentError from swift.common.http import is_success from swift.common.swob import HTTPBadRequest, \ HTTPServiceUnavailable, Range, is_chunked, multi_range_iterator, \ HTTPPreconditionFailed from swift.common.utils import split_path, validate_device_partition, \ close_if_possible, maybe_multipart_byteranges_to_document_iters, \ multipart_byteranges_to_document_iters, parse_content_type, \ parse_content_range, csv_append, list_from_csv, Spliterator from swift.common.wsgi import make_subrequest OBJECT_TRANSIENT_SYSMETA_PREFIX = 'x-object-transient-sysmeta-' def get_param(req, name, default=None): """ Get parameters from an HTTP request ensuring proper handling UTF-8 encoding. :param req: request object :param name: parameter name :param default: result to return if the parameter is not found :returns: HTTP request parameter value (as UTF-8 encoded str, not unicode object) :raises HTTPBadRequest: if param not valid UTF-8 byte sequence """ value = req.params.get(name, default) if value and not isinstance(value, six.text_type): try: value.decode('utf8') # Ensure UTF8ness except UnicodeDecodeError: raise HTTPBadRequest( request=req, content_type='text/plain', body='"%s" parameter not valid UTF-8' % name) return value def get_name_and_placement(request, minsegs=1, maxsegs=None, rest_with_last=False): """ Utility function to split and validate the request path and storage policy. The storage policy index is extracted from the headers of the request and converted to a StoragePolicy instance. The remaining args are passed through to :meth:`split_and_validate_path`. :returns: a list, result of :meth:`split_and_validate_path` with the BaseStoragePolicy instance appended on the end :raises HTTPServiceUnavailable: if the path is invalid or no policy exists with the extracted policy_index. 
""" policy_index = request.headers.get('X-Backend-Storage-Policy-Index') policy = POLICIES.get_by_index(policy_index) if not policy: raise HTTPServiceUnavailable( body=_("No policy with index %s") % policy_index, request=request, content_type='text/plain') results = split_and_validate_path(request, minsegs=minsegs, maxsegs=maxsegs, rest_with_last=rest_with_last) results.append(policy) return results def split_and_validate_path(request, minsegs=1, maxsegs=None, rest_with_last=False): """ Utility function to split and validate the request path. :returns: result of :meth:`~swift.common.utils.split_path` if everything's okay :raises HTTPBadRequest: if something's not okay """ try: segs = split_path(unquote(request.path), minsegs, maxsegs, rest_with_last) validate_device_partition(segs[0], segs[1]) return segs except ValueError as err: raise HTTPBadRequest(body=str(err), request=request, content_type='text/plain') def is_user_meta(server_type, key): """ Tests if a header key starts with and is longer than the user metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= 8 + len(server_type): return False return key.lower().startswith(get_user_meta_prefix(server_type)) def is_sys_meta(server_type, key): """ Tests if a header key starts with and is longer than the system metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= 11 + len(server_type): return False return key.lower().startswith(get_sys_meta_prefix(server_type)) def is_sys_or_user_meta(server_type, key): """ Tests if a header key starts with and is longer than the user or system metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ return is_user_meta(server_type, key) or is_sys_meta(server_type, key) def is_object_transient_sysmeta(key): """ Tests if a header key starts with and is longer than the prefix for object transient system metadata. :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= len(OBJECT_TRANSIENT_SYSMETA_PREFIX): return False return key.lower().startswith(OBJECT_TRANSIENT_SYSMETA_PREFIX) def strip_user_meta_prefix(server_type, key): """ Removes the user metadata prefix for a given server type from the start of a header key. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: stripped header key """ if not is_user_meta(server_type, key): raise ValueError('Key is not user meta') return key[len(get_user_meta_prefix(server_type)):] def strip_sys_meta_prefix(server_type, key): """ Removes the system metadata prefix for a given server type from the start of a header key. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: stripped header key """ if not is_sys_meta(server_type, key): raise ValueError('Key is not sysmeta') return key[len(get_sys_meta_prefix(server_type)):] def strip_object_transient_sysmeta_prefix(key): """ Removes the object transient system metadata prefix from the start of a header key. 
:param key: header key :returns: stripped header key """ if not is_object_transient_sysmeta(key): raise ValueError('Key is not object transient sysmeta') return key[len(OBJECT_TRANSIENT_SYSMETA_PREFIX):] def get_user_meta_prefix(server_type): """ Returns the prefix for user metadata headers for given server type. This prefix defines the namespace for headers that will be persisted by backend servers. :param server_type: type of backend server i.e. [account|container|object] :returns: prefix string for server type's user metadata headers """ return 'x-%s-%s-' % (server_type.lower(), 'meta') def get_sys_meta_prefix(server_type): """ Returns the prefix for system metadata headers for given server type. This prefix defines the namespace for headers that will be persisted by backend servers. :param server_type: type of backend server i.e. [account|container|object] :returns: prefix string for server type's system metadata headers """ return 'x-%s-%s-' % (server_type.lower(), 'sysmeta') def get_object_transient_sysmeta(key): """ Returns the Object Transient System Metadata header for key. The Object Transient System Metadata namespace will be persisted by backend object servers. These headers are treated in the same way as object user metadata i.e. all headers in this namespace will be replaced on every POST request. :param key: metadata key :returns: the entire object transient system metadata header for key """ return '%s%s' % (OBJECT_TRANSIENT_SYSMETA_PREFIX, key) def remove_items(headers, condition): """ Removes items from a dict whose keys satisfy the given condition. :param headers: a dict of headers :param condition: a function that will be passed the header key as a single argument and should return True if the header is to be removed. :returns: a dict, possibly empty, of headers that have been removed """ removed = {} keys = filter(condition, headers) removed.update((key, headers.pop(key)) for key in keys) return removed def copy_header_subset(from_r, to_r, condition): """ Will copy desired subset of headers from from_r to to_r. :param from_r: a swob Request or Response :param to_r: a swob Request or Response :param condition: a function that will be passed the header key as a single argument and should return True if the header is to be copied. """ for k, v in from_r.headers.items(): if condition(k): to_r.headers[k] = v def check_path_header(req, name, length, error_msg): """ Validate that the value of path-like header is well formatted. We assume the caller ensures that specific header is present in req.headers. :param req: HTTP request object :param name: header name :param length: length of path segment check :param error_msg: error message for client :returns: A tuple with path parts according to length :raise: HTTPPreconditionFailed if header value is not well formatted. """ hdr = unquote(req.headers.get(name)) if not hdr.startswith('/'): hdr = '/' + hdr try: return split_path(hdr, length, length, True) except ValueError: raise HTTPPreconditionFailed( request=req, body=error_msg) class SegmentedIterable(object): """ Iterable that returns the object contents for a large object. :param req: original request object :param app: WSGI application from which segments will come :param listing_iter: iterable yielding the object segments to fetch, along with the byte subranges to fetch, in the form of a 5-tuple (object-path, object-etag, object-size, first-byte, last-byte). If object-etag is None, no MD5 verification will be done. If object-size is None, no length verification will be done. 
If first-byte and last-byte are None, then the entire object will be fetched. :param max_get_time: maximum permitted duration of a GET request (seconds) :param logger: logger object :param swift_source: value of swift.source in subrequest environ (just for logging) :param ua_suffix: string to append to user-agent. :param name: name of manifest (used in logging only) :param response_body_length: optional response body length for the response being sent to the client. """ def __init__(self, req, app, listing_iter, max_get_time, logger, ua_suffix, swift_source, name='', response_body_length=None): self.req = req self.app = app self.listing_iter = listing_iter self.max_get_time = max_get_time self.logger = logger self.ua_suffix = " " + ua_suffix self.swift_source = swift_source self.name = name self.response_body_length = response_body_length self.peeked_chunk = None self.app_iter = self._internal_iter() self.validated_first_segment = False self.current_resp = None def _coalesce_requests(self): start_time = time.time() pending_req = pending_etag = pending_size = None try: for seg_dict in self.listing_iter: if 'raw_data' in seg_dict: if pending_req: yield pending_req, pending_etag, pending_size to_yield = seg_dict['raw_data'][ seg_dict['first_byte']:seg_dict['last_byte'] + 1] yield to_yield, None, len(seg_dict['raw_data']) pending_req = pending_etag = pending_size = None continue seg_path, seg_etag, seg_size, first_byte, last_byte = ( seg_dict['path'], seg_dict.get('hash'), seg_dict.get('bytes'), seg_dict['first_byte'], seg_dict['last_byte']) if seg_size is not None: seg_size = int(seg_size) first_byte = first_byte or 0 go_to_end = last_byte is None or ( seg_size is not None and last_byte == seg_size - 1) if time.time() - start_time > self.max_get_time: raise SegmentError( 'While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) # The "multipart-manifest=get" query param ensures that the # segment is a plain old object, not some flavor of large # object; therefore, its etag is its MD5sum and hence we can # check it. path = seg_path + '?multipart-manifest=get' seg_req = make_subrequest( self.req.environ, path=path, method='GET', headers={'x-auth-token': self.req.headers.get( 'x-auth-token')}, agent=('%(orig)s ' + self.ua_suffix), swift_source=self.swift_source) seg_req_rangeval = None if first_byte != 0 or not go_to_end: seg_req_rangeval = "%s-%s" % ( first_byte, '' if go_to_end else last_byte) seg_req.headers['Range'] = "bytes=" + seg_req_rangeval # We can only coalesce if paths match and we know the segment # size (so we can check that the ranges will be allowed) if pending_req and pending_req.path == seg_req.path and \ seg_size is not None: # Make a new Range object so that we don't goof up the # existing one in case of invalid ranges. Note that a # range set with too many individual byteranges is # invalid, so we can combine N valid byteranges and 1 # valid byterange and get an invalid range set. if pending_req.range: new_range_str = str(pending_req.range) else: new_range_str = "bytes=0-%d" % (seg_size - 1) if seg_req.range: new_range_str += "," + seg_req_rangeval else: new_range_str += ",0-%d" % (seg_size - 1) if Range(new_range_str).ranges_for_length(seg_size): # Good news! We can coalesce the requests pending_req.headers['Range'] = new_range_str continue # else, Too many ranges, or too much backtracking, or ... 
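                # Coalescing failed or wasn't possible: flush any pending
                # request downstream, then make this segment's request the
                # new pending request.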
if pending_req: yield pending_req, pending_etag, pending_size pending_req = seg_req pending_etag = seg_etag pending_size = seg_size except ListingIterError: e_type, e_value, e_traceback = sys.exc_info() if time.time() - start_time > self.max_get_time: raise SegmentError( 'While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) if pending_req: yield pending_req, pending_etag, pending_size six.reraise(e_type, e_value, e_traceback) if time.time() - start_time > self.max_get_time: raise SegmentError( 'While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) if pending_req: yield pending_req, pending_etag, pending_size def _internal_iter(self): bytes_left = self.response_body_length try: for data_or_req, seg_etag, seg_size in self._coalesce_requests(): if isinstance(data_or_req, bytes): chunk = data_or_req # ugly, awful overloading if bytes_left is None: yield chunk elif bytes_left >= len(chunk): yield chunk bytes_left -= len(chunk) else: yield chunk[:bytes_left] continue seg_req = data_or_req seg_resp = seg_req.get_response(self.app) if not is_success(seg_resp.status_int): close_if_possible(seg_resp.app_iter) raise SegmentError( 'While processing manifest %s, ' 'got %d while retrieving %s' % (self.name, seg_resp.status_int, seg_req.path)) elif ((seg_etag and (seg_resp.etag != seg_etag)) or (seg_size and (seg_resp.content_length != seg_size) and not seg_req.range)): # The content-length check is for security reasons. Seems # possible that an attacker could upload a >1mb object and # then replace it with a much smaller object with same # etag. Then create a big nested SLO that calls that # object many times which would hammer our obj servers. If # this is a range request, don't check content-length # because it won't match. close_if_possible(seg_resp.app_iter) raise SegmentError( 'Object segment no longer valid: ' '%(path)s etag: %(r_etag)s != %(s_etag)s or ' '%(r_size)s != %(s_size)s.' 
                        % {'path': seg_req.path, 'r_etag': seg_resp.etag,
                           'r_size': seg_resp.content_length,
                           's_etag': seg_etag,
                           's_size': seg_size})
                else:
                    self.current_resp = seg_resp

                seg_hash = None
                if seg_resp.etag and not seg_req.headers.get('Range'):
                    # Only calculate the MD5 if we can use it to validate
                    seg_hash = hashlib.md5()

                document_iters = maybe_multipart_byteranges_to_document_iters(
                    seg_resp.app_iter,
                    seg_resp.headers['Content-Type'])

                for chunk in itertools.chain.from_iterable(document_iters):
                    if seg_hash:
                        seg_hash.update(chunk)

                    if bytes_left is None:
                        yield chunk
                    elif bytes_left >= len(chunk):
                        yield chunk
                        bytes_left -= len(chunk)
                    else:
                        yield chunk[:bytes_left]
                        bytes_left -= len(chunk)
                        close_if_possible(seg_resp.app_iter)
                        raise SegmentError(
                            'Too many bytes for %(name)s; truncating in '
                            '%(seg)s with %(left)d bytes left' %
                            {'name': self.name, 'seg': seg_req.path,
                             'left': bytes_left})
                close_if_possible(seg_resp.app_iter)

                if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
                    raise SegmentError(
                        "Bad MD5 checksum in %(name)s for %(seg)s: headers had"
                        " %(etag)s, but object MD5 was actually %(actual)s" %
                        {'seg': seg_req.path, 'etag': seg_resp.etag,
                         'name': self.name, 'actual': seg_hash.hexdigest()})

            if bytes_left:
                raise SegmentError(
                    'Not enough bytes for %s; closing connection' % self.name)
        except (ListingIterError, SegmentError) as err:
            self.logger.error(err)
            if not self.validated_first_segment:
                raise
        finally:
            if self.current_resp:
                close_if_possible(self.current_resp.app_iter)

    def app_iter_range(self, *a, **kw):
        """
        swob.Response will only respond with a 206 status in certain cases;
        one of those is if the body iterator responds to .app_iter_range().

        However, this object (or really, its listing iter) is smart enough
        to handle the range stuff internally, so we just no-op this out for
        swob.
        """
        return self

    def app_iter_ranges(self, ranges, content_type, boundary, content_size):
        """
        This method assumes that iter(self) yields all the data bytes that
        go into the response, but none of the MIME stuff. For example, if
        the response will contain three MIME docs with data "abcd", "efgh",
        and "ijkl", then iter(self) will give out the bytes "abcdefghijkl".

        This method inserts the MIME stuff around the data bytes.
        """
        si = Spliterator(self)
        mri = multi_range_iterator(
            ranges, content_type, boundary, content_size,
            lambda start, end_plus_one: si.take(end_plus_one - start))
        try:
            for x in mri:
                yield x
        finally:
            self.close()

    def validate_first_segment(self):
        """
        Start fetching object data to ensure that the first segment (if any)
        is valid. This is to catch cases like "first segment is missing" or
        "first segment's etag doesn't match manifest".

        Note: this does not validate that you have any segments. A
        zero-segment large object is not erroneous; it is just empty.
        """
        if self.validated_first_segment:
            return

        try:
            self.peeked_chunk = next(self.app_iter)
        except StopIteration:
            pass
        finally:
            self.validated_first_segment = True

    def __iter__(self):
        if self.peeked_chunk is not None:
            pc = self.peeked_chunk
            self.peeked_chunk = None
            return itertools.chain([pc], self.app_iter)
        else:
            return self.app_iter

    def close(self):
        """
        Called when the client disconnects. Ensure that the connection to
        the backend server is closed.
        """
        close_if_possible(self.app_iter)


def http_response_to_document_iters(response, read_chunk_size=4096):
    """
    Takes a successful object-GET HTTP response and turns it into an
    iterator of (first-byte, last-byte, length, headers, body-file)
    5-tuples.
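    For example, a plain 200 response with ``Content-Length: 1234`` becomes
    a single-element iterator (sketch)::

        [(0, 1233, 1234, <response headers>, <the response itself>)]

    while a 206 multipart/byteranges response yields one such tuple per
    MIME part.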
The response must either be a 200 or a 206; if you feed in a 204 or something similar, this probably won't work. :param response: HTTP response, like from bufferedhttp.http_connect(), not a swob.Response. """ chunked = is_chunked(dict(response.getheaders())) if response.status == 200: if chunked: # Single "range" that's the whole object with an unknown length return iter([(0, None, None, response.getheaders(), response)]) # Single "range" that's the whole object content_length = int(response.getheader('Content-Length')) return iter([(0, content_length - 1, content_length, response.getheaders(), response)]) content_type, params_list = parse_content_type( response.getheader('Content-Type')) if content_type != 'multipart/byteranges': # Single range; no MIME framing, just the bytes. The start and end # byte indices are in the Content-Range header. start, end, length = parse_content_range( response.getheader('Content-Range')) return iter([(start, end, length, response.getheaders(), response)]) else: # Multiple ranges; the response body is a multipart/byteranges MIME # document, and we have to parse it using the MIME boundary # extracted from the Content-Type header. params = dict(params_list) return multipart_byteranges_to_document_iters( response, params['boundary'], read_chunk_size) def update_etag_is_at_header(req, name): """ Helper function to update an X-Backend-Etag-Is-At header whose value is a list of alternative header names at which the actual object etag may be found. This informs the object server where to look for the actual object etag when processing conditional requests. Since the proxy server and/or middleware may set alternative etag header names, the value of X-Backend-Etag-Is-At is a comma separated list which the object server inspects in order until it finds an etag value. :param req: a swob Request :param name: name of a sysmeta where alternative etag may be found """ if ',' in name: # HTTP header names should not have commas but we'll check anyway raise ValueError('Header name must not contain commas') existing = req.headers.get("X-Backend-Etag-Is-At") req.headers["X-Backend-Etag-Is-At"] = csv_append( existing, name) def resolve_etag_is_at_header(req, metadata): """ Helper function to resolve an alternative etag value that may be stored in metadata under an alternate name. The value of the request's X-Backend-Etag-Is-At header (if it exists) is a comma separated list of alternate names in the metadata at which an alternate etag value may be found. This list is processed in order until an alternate etag is found. The left most value in X-Backend-Etag-Is-At will have been set by the left most middleware, or if no middleware, by ECObjectController, if an EC policy is in use. The left most middleware is assumed to be the authority on what the etag value of the object content is. The resolver will work from left to right in the list until it finds a value that is a name in the given metadata. So the left most wins, IF it exists in the metadata. By way of example, assume the encrypter middleware is installed. If an object is *not* encrypted then the resolver will not find the encrypter middleware's alternate etag sysmeta (X-Object-Sysmeta-Crypto-Etag) but will then find the EC alternate etag (if EC policy). But if the object *is* encrypted then X-Object-Sysmeta-Crypto-Etag is found and used, which is correct because it should be preferred over X-Object-Sysmeta-Ec-Etag. 
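    A rough sketch of both halves, using the header names from the example
    above::

        # proxy side: encrypter appends first, so its name is left-most
        update_etag_is_at_header(req, 'X-Object-Sysmeta-Crypto-Etag')
        update_etag_is_at_header(req, 'X-Object-Sysmeta-Ec-Etag')

        # object server side, with the object's metadata dict in hand
        etag = resolve_etag_is_at_header(req, metadata)
        # etag is metadata['X-Object-Sysmeta-Crypto-Etag'] if present,
        # else metadata['X-Object-Sysmeta-Ec-Etag'] if present, else None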
:param req: a swob Request :param metadata: a dict containing object metadata :return: an alternate etag value if any is found, otherwise None """ alternate_etag = None metadata = HeaderKeyDict(metadata) if "X-Backend-Etag-Is-At" in req.headers: names = list_from_csv(req.headers["X-Backend-Etag-Is-At"]) for name in names: if name in metadata: alternate_etag = metadata[name] break return alternate_etag swift-2.17.0/swift/common/db.py0000666000175100017510000010377013236061617016363 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Database code for Swift """ from contextlib import contextmanager, closing import hashlib import json import logging import os from uuid import uuid4 import sys import time import errno import six import six.moves.cPickle as pickle from swift import gettext_ as _ from tempfile import mkstemp from eventlet import sleep, Timeout import sqlite3 from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE, \ check_utf8 from swift.common.utils import Timestamp, renamer, \ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout from swift.common.swob import HTTPBadRequest #: Whether calls will be made to preallocate disk space for database files. DB_PREALLOCATION = False #: Timeout for trying to connect to a DB BROKER_TIMEOUT = 25 #: Pickle protocol to use PICKLE_PROTOCOL = 2 #: Max size of .pending file in bytes. When this is exceeded, the pending # records will be merged. 
PENDING_CAP = 131072 def utf8encode(*args): return [(s.encode('utf8') if isinstance(s, six.text_type) else s) for s in args] def utf8encodekeys(metadata): uni_keys = [k for k in metadata if isinstance(k, six.text_type)] for k in uni_keys: sv = metadata[k] del metadata[k] metadata[k.encode('utf-8')] = sv def _db_timeout(timeout, db_file, call): with LockTimeout(timeout, db_file): retry_wait = 0.001 while True: try: return call() except sqlite3.OperationalError as e: if 'locked' not in str(e): raise sleep(retry_wait) retry_wait = min(retry_wait * 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): """More friendly error messages for DB Errors.""" def __init__(self, path, msg, timeout=0): self.path = path self.timeout = timeout self.msg = msg def __str__(self): return 'DB connection error (%s, %s):\n%s' % ( self.path, self.timeout, self.msg) class DatabaseAlreadyExists(sqlite3.DatabaseError): """More friendly error messages for DB Errors.""" def __init__(self, path): self.path = path def __str__(self): return 'DB %s already exists' % self.path class GreenDBConnection(sqlite3.Connection): """SQLite DB Connection handler that plays well with eventlet.""" def __init__(self, database, timeout=None, *args, **kwargs): if timeout is None: timeout = BROKER_TIMEOUT self.timeout = timeout self.db_file = database super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs) def cursor(self, cls=None): if cls is None: cls = GreenDBCursor return sqlite3.Connection.cursor(self, cls) def commit(self): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): """SQLite Cursor handler that plays well with eventlet.""" def __init__(self, *args, **kwargs): self.timeout = args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs) def execute(self, *args, **kwargs): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Cursor.execute( self, *args, **kwargs)) def dict_factory(crs, row): """ This should only be used when you need a real dict, i.e. when you're going to serialize the results. """ return dict( ((col[0], row[idx]) for idx, col in enumerate(crs.description))) def chexor(old, name, timestamp): """ Each entry in the account and container databases is XORed by the 128-bit hash on insert or delete. This serves as a rolling, order-independent hash of the contents. (check + XOR) :param old: hex representation of the current DB hash :param name: name of the object or container being inserted :param timestamp: internalized timestamp of the new record :returns: a hex representation of the new hash value """ if name is None: raise Exception('name is None!') new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return '%032x' % (int(old, 16) ^ int(new, 16)) def get_db_connection(path, timeout=30, okay_to_create=False): """ Returns a properly configured SQLite database connection. 
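    For example (path is hypothetical)::

        conn = get_db_connection('/srv/1/node/sdb1/containers/db.db',
                                 timeout=25)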
:param path: path to DB :param timeout: timeout for connection :param okay_to_create: if True, create the DB if it doesn't exist :returns: DB connection object """ try: connect_time = time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path != ':memory:' and not okay_to_create: # attempt to detect and fail when connect creates the db file stat = os.stat(path) if stat.st_size == 0 and stat.st_ctime >= connect_time: os.unlink(path) raise DatabaseConnectionError(path, 'DB file created by connect?') conn.row_factory = sqlite3.Row conn.text_factory = str with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = DELETE') conn.create_function('chexor', 3, chexor) except sqlite3.DatabaseError: import traceback raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn class DatabaseBroker(object): """Encapsulates working with a database.""" def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False): """Encapsulates working with a database.""" self.conn = None self.db_file = db_file self.pending_file = self.db_file + '.pending' self.pending_timeout = pending_timeout or 10 self.stale_reads_ok = stale_reads_ok self.db_dir = os.path.dirname(db_file) self.timeout = timeout self.logger = logger or logging.getLogger() self.account = account self.container = container self._db_version = -1 def __str__(self): """ Returns a string identifying the entity under broker to a human. The baseline implementation returns a full pathname to a database. This is vital for useful diagnostics. """ return self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None): """ Create the DB The storage_policy_index is passed through to the subclass's ``_initialize`` method. It is ignored by ``AccountBroker``. :param put_timestamp: internalized timestamp of initial PUT request :param storage_policy_index: only required for containers """ if self.db_file == ':memory:': tmp_db_file = None conn = get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir) os.close(fd) conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating dbs implicitly does a lot of transactions, so we # pick fast, unsafe options here and do a big fsync at the end. 
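            # This is safe because the database is built in a temp file and
            # only renamed into place after the fsync below; a crash
            # mid-build leaves at most a stray .tmp file, never a torn db.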
with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = MEMORY') conn.create_function('chexor', 3, chexor) conn.row_factory = sqlite3.Row conn.text_factory = str conn.executescript(""" CREATE TABLE outgoing_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TABLE incoming_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; """) if not put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close() with open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): # It's as if there was a "condition" where different parts # of the system were "racing" each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn = get_db_connection(self.db_file, self.timeout) else: self.conn = conn def delete_db(self, timestamp): """ Mark the DB as deleted :param timestamp: internalized delete timestamp """ # first, clear the metadata cleared_meta = {} for k in self.metadata: cleared_meta[k] = ('', timestamp) self.update_metadata(cleared_meta) # then mark the db as deleted with self.get() as conn: self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self, exc_type, exc_value, exc_traceback): """ Checks the exception info to see if it indicates a quarantine situation (malformed or corrupted database). If not, the original exception will be reraised. If so, the database will be quarantined and a new sqlite3.DatabaseError will be raised indicating the action taken. 
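        Typical use, as in :meth:`get`::

            try:
                conn = get_db_connection(self.db_file, self.timeout)
            except (sqlite3.DatabaseError, DatabaseConnectionError):
                self.possibly_quarantine(*sys.exc_info())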
""" if 'database disk image is malformed' in str(exc_value): exc_hint = 'malformed' elif 'malformed database schema' in str(exc_value): exc_hint = 'malformed' elif ' is not a database' in str(exc_value): # older versions said 'file is not a database' # now 'file is encrypted or is not a database' exc_hint = 'corrupted' elif 'disk I/O error' in str(exc_value): exc_hint = 'disk error while accessing' else: six.reraise(exc_type, exc_value, exc_traceback) prefix_path = os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path) dbs_path = os.path.dirname(partition_path) device_path = os.path.dirname(dbs_path) quar_path = os.path.join(device_path, 'quarantined', self.db_type + 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False) except OSError as e: if e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path = "%s-%s" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) detail = _('Quarantined %(db_dir)s to %(quar_path)s due to ' '%(exc_hint)s database') % {'db_dir': self.db_dir, 'quar_path': quar_path, 'exc_hint': exc_hint} self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def get(self): """Use with the "with" statement; returns a database connection.""" if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else: raise DatabaseConnectionError(self.db_file, "DB doesn't exist") conn = self.conn self.conn = None try: yield conn conn.rollback() self.conn = conn except sqlite3.DatabaseError: try: conn.close() except Exception: pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close() raise @contextmanager def lock(self): """Use with the "with" statement; locks a database.""" if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): self.conn = get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file, "DB doesn't exist") conn = self.conn self.conn = None orig_isolation_level = conn.isolation_level conn.isolation_level = None conn.execute('BEGIN IMMEDIATE') try: yield True except (Exception, Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn = conn except (Exception, Timeout): logging.exception( _('Broker error trying to rollback locked connection')) conn.close() def newid(self, remote_id): """ Re-id the database. This should be called after an rsync. :param remote_id: the ID of the remote database being rsynced in """ with self.get() as conn: row = conn.execute(''' UPDATE %s_stat SET id=? ''' % self.db_type, (str(uuid4()),)) row = conn.execute(''' SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1 ''' % self.db_contains_type).fetchone() sync_point = row['ROWID'] if row else -1 conn.execute(''' INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id) VALUES (?, ?) ''', (sync_point, remote_id)) self._newid(conn) conn.commit() def _newid(self, conn): # Override for additional work when receiving an rsynced db. pass def _is_deleted(self, conn): """ Check if the database is considered deleted :param conn: database conn :returns: True if the DB is considered to be deleted, False otherwise """ raise NotImplementedError() def is_deleted(self): """ Check if the DB is considered to be deleted. 
:returns: True if the DB is considered to be deleted, False otherwise """ if self.db_file != ':memory:' and not os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with self.get() as conn: return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): """ Used in replication to handle updating timestamps. :param created_at: create timestamp :param put_timestamp: put timestamp :param delete_timestamp: delete timestamp """ with self.get() as conn: old_status = self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type, (created_at, put_timestamp, delete_timestamp)) if old_status != self._is_deleted(conn): timestamp = Timestamp.now() self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start, count): """ Get a list of objects in the database between start and end. :param start: start ROWID :param count: number to get :returns: list of objects between start and end """ self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute(''' SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ? ''' % self.db_contains_type, (start, count)) curs.row_factory = dict_factory return [r for r in curs] def get_sync(self, id, incoming=True): """ Gets the most recent sync point for a server from the sync table. :param id: remote ID to get the sync_point for :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync :returns: the sync point, or -1 if the id doesn't exist. """ with self.get() as conn: row = conn.execute( "SELECT sync_point FROM %s_sync WHERE remote_id=?" % ('incoming' if incoming else 'outgoing'), (id,)).fetchone() if not row: return -1 return row['sync_point'] def get_syncs(self, incoming=True): """ Get a serialized copy of the sync table. :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync :returns: list of {'remote_id', 'sync_point'} """ with self.get() as conn: curs = conn.execute(''' SELECT remote_id, sync_point FROM %s_sync ''' % ('incoming' if incoming else 'outgoing')) result = [] for row in curs: result.append({'remote_id': row[0], 'sync_point': row[1]}) return result def get_max_row(self): query = ''' SELECT SQLITE_SEQUENCE.seq FROM SQLITE_SEQUENCE WHERE SQLITE_SEQUENCE.name == '%s' LIMIT 1 ''' % (self.db_contains_type) with self.get() as conn: row = conn.execute(query).fetchone() return row[0] if row else -1 def get_replication_info(self): """ Get information about the DB required for replication. :returns: dict containing keys from get_info plus max_row and metadata Note:: get_info's _count is translated to just "count" and metadata is the raw string. 
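        As a rough sketch, a container broker might return something like
        (values illustrative, keys abbreviated)::

            {'account': 'AUTH_test', 'container': 'c',
             'count': 12, 'max_row': 12, 'metadata': '{...}',
             'hash': '...', 'id': '...', ...}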
""" info = self.get_info() info['count'] = info.pop('%s_count' % self.db_contains_type) info['metadata'] = self.get_raw_metadata() info['max_row'] = self.get_max_row() return info def get_info(self): self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute('SELECT * from %s_stat' % self.db_type) curs.row_factory = dict_factory return curs.fetchone() def put_record(self, record): if self.db_file == ':memory:': self.merge_items([record]) return if not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file, "DB doesn't exist") with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try: pending_size = os.path.getsize(self.pending_file) except OSError as err: if err.errno != errno.ENOENT: raise if pending_size > PENDING_CAP: self._commit_puts([record]) else: with open(self.pending_file, 'a+b') as fp: # Colons aren't used in base64 encoding; so they are our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): """ Scan for .pending files and commit the found records by feeding them to merge_items(). Assume that lock_parent_directory has already been called. :param item_list: A list of items to commit in addition to .pending """ if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return if item_list is None: item_list = [] self._preallocate() if not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b') as fp: for entry in fp.read().split(':'): if entry: try: self._commit_puts_load(item_list, entry) except Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry': entry}) if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(), 0) except OSError as err: if err.errno != errno.ENOENT: raise def _commit_puts_stale_ok(self): """ Catch failures of _commit_puts() if broker is intended for reading of stats, and thus does not care for pending updates. """ if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except (LockTimeout, sqlite3.OperationalError): if not self.stale_reads_ok: raise def _commit_puts_load(self, item_list, entry): """ Unmarshall the :param:entry and append it to :param:item_list. This is implemented by a particular broker to be compatible with its :func:`merge_items`. """ raise NotImplementedError def make_tuple_for_pickle(self, record): """ Turn this db record dict into the format this service uses for pending pickles. """ raise NotImplementedError def merge_syncs(self, sync_points, incoming=True): """ Merge a list of sync points with the incoming sync table. :param sync_points: list of sync points where a sync point is a dict of {'sync_point', 'remote_id'} :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync """ with self.get() as conn: for rec in sync_points: try: conn.execute(''' INSERT INTO %s_sync (sync_point, remote_id) VALUES (?, ?) ''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except sqlite3.IntegrityError: conn.execute(''' UPDATE %s_sync SET sync_point=max(?, sync_point) WHERE remote_id=? 
''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): """ The idea is to allocate space in front of an expanding db. If it gets within 512k of a boundary, it allocates to the next boundary. Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after. """ if not DB_PREALLOCATION or self.db_file == ':memory:': return MB = (1024 * 1024) def prealloc_points(): for pm in (1, 2, 5, 10, 25, 50): yield pm * MB while True: pm += 50 yield pm * MB stat = os.stat(self.db_file) file_size = stat.st_size allocated_size = stat.st_blocks * 512 for point in prealloc_points(): if file_size <= point - MB / 2: prealloc_size = point break if allocated_size < prealloc_size: with open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get() as conn: try: metadata = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise metadata = '' return metadata @property def metadata(self): """ Returns the metadata dict for the database. The metadata dict values are tuples of (value, timestamp) where the timestamp indicates when that key was set to that value. """ metadata = self.get_raw_metadata() if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata = {} return metadata @staticmethod def validate_metadata(metadata): """ Validates that metadata falls within acceptable limits. :param metadata: to be validated :raises HTTPBadRequest: if MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded, or if metadata contains non-UTF-8 data """ meta_count = 0 meta_size = 0 for key, (value, timestamp) in metadata.items(): key = key.lower() if value != '' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): prefix = 'x-account-meta-' if key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key = key[len(prefix):] meta_count = meta_count + 1 meta_size = meta_size + len(key) + len(value) bad_key = key and not check_utf8(key) bad_value = value and not check_utf8(value) if bad_key or bad_value: raise HTTPBadRequest('Metadata must be valid UTF-8') if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max %d' % MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large; max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): """ Updates the metadata dict for the database. The metadata dict values are tuples of (value, timestamp) where the timestamp indicates when that key was set to that value. Key/values will only be overwritten if the timestamp is newer. To delete a key, set its value to ('', timestamp). 
These empty keys will eventually be removed by :func:`reclaim` """ old_metadata = self.metadata if set(metadata_updates).issubset(set(old_metadata)): for key, (value, timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]: break else: return with self.get() as conn: try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] md = json.loads(md) if md else {} utf8encodekeys(md) except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise conn.execute(""" ALTER TABLE %s_stat ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type) md = {} for key, value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp if key not in md or timestamp > md[key][1]: md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp): """ Delete rows from the db_contains_type table that are marked deleted and whose created_at timestamp is < age_timestamp. Also deletes rows from incoming_sync and outgoing_sync where the updated_at timestamp is < sync_timestamp. In addition, this calls the DatabaseBroker's :func:`_reclaim` method. :param age_timestamp: max created_at timestamp of object rows to delete :param sync_timestamp: max update_at timestamp of sync rows to delete """ if self.db_file != ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get() as conn: conn.execute(''' DELETE FROM %s WHERE deleted = 1 AND %s < ? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE FROM outgoing_sync WHERE updated_at < ? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE updated_at < ? ''', (sync_timestamp,)) except sqlite3.OperationalError as err: # Old dbs didn't have updated_at in the _sync tables. if 'no such column: updated_at' not in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def _reclaim(self, conn, timestamp): """ Removes any empty metadata values older than the timestamp using the given database connection. This function will not call commit on the conn, but will instead return True if the database needs committing. This function was created as a worker to limit transactions and commits from other related functions. :param conn: Database connection to reclaim metadata within. :param timestamp: Empty metadata items last updated before this timestamp will be removed. :returns: True if conn.commit() should be called """ try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] if md: md = json.loads(md) keys_to_delete = [] for key, (value, value_timestamp) in md.items(): if value == '' and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: for key in keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise return False def update_put_timestamp(self, timestamp): """ Update the put_timestamp. Only modifies it if it is greater than the current timestamp. :param timestamp: internalized put timestamp """ with self.get() as conn: conn.execute( 'UPDATE %s_stat SET put_timestamp = ?' ' WHERE put_timestamp < ?' 
% self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp): """ Update the status_changed_at field in the stat table. Only modifies status_changed_at if the timestamp is greater than the current status_changed_at timestamp. :param timestamp: internalized timestamp """ with self.get() as conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at = ?' ' WHERE status_changed_at < ?' % self.db_type, (timestamp, timestamp)) swift-2.17.0/swift/common/exceptions.py0000666000175100017510000001270513236061617020154 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import Timeout import swift.common.utils class MessageTimeout(Timeout): def __init__(self, seconds=None, msg=None): Timeout.__init__(self, seconds=seconds) self.msg = msg def __str__(self): return '%s: %s' % (Timeout.__str__(self), self.msg) class SwiftException(Exception): pass class PutterConnectError(Exception): def __init__(self, status=None): self.status = status class InvalidTimestamp(SwiftException): pass class InsufficientStorage(SwiftException): pass class FooterNotSupported(SwiftException): pass class MultiphasePUTNotSupported(SwiftException): pass class SuffixSyncError(SwiftException): pass class RangeAlreadyComplete(SwiftException): pass class DiskFileError(SwiftException): pass class DiskFileNotOpen(DiskFileError): pass class DiskFileQuarantined(DiskFileError): pass class DiskFileCollision(DiskFileError): pass class DiskFileNotExist(DiskFileError): pass class DiskFileDeleted(DiskFileNotExist): def __init__(self, metadata=None): self.metadata = metadata or {} self.timestamp = swift.common.utils.Timestamp( self.metadata.get('X-Timestamp', 0)) class DiskFileExpired(DiskFileDeleted): pass class DiskFileNoSpace(DiskFileError): pass class DiskFileDeviceUnavailable(DiskFileError): pass class DiskFileXattrNotSupported(DiskFileError): pass class DiskFileBadMetadataChecksum(DiskFileError): pass class DeviceUnavailable(SwiftException): pass class InvalidAccountInfo(SwiftException): pass class PathNotDir(OSError): pass class ChunkReadError(SwiftException): pass class ChunkReadTimeout(Timeout): pass class ChunkWriteTimeout(Timeout): pass class ConnectionTimeout(Timeout): pass class ResponseTimeout(Timeout): pass class DriveNotMounted(SwiftException): pass class LockTimeout(MessageTimeout): pass class RingLoadError(SwiftException): pass class RingBuilderError(SwiftException): pass class RingValidationError(RingBuilderError): pass class EmptyRingError(RingBuilderError): pass class DuplicateDeviceError(RingBuilderError): pass class UnPicklingError(SwiftException): pass class FileNotFoundError(SwiftException): pass class PermissionError(SwiftException): pass class ListingIterError(SwiftException): pass class ListingIterNotFound(ListingIterError): pass class ListingIterNotAuthorized(ListingIterError): def 
__init__(self, aresp): self.aresp = aresp class SegmentError(SwiftException): pass class LinkIterError(SwiftException): pass class ReplicationException(Exception): pass class ReplicationLockTimeout(LockTimeout): pass class MimeInvalid(SwiftException): pass class APIVersionError(SwiftException): pass class EncryptionException(SwiftException): pass class ClientException(Exception): def __init__(self, msg, http_scheme='', http_host='', http_port='', http_path='', http_query='', http_status=None, http_reason='', http_device='', http_response_content='', http_headers=None): super(ClientException, self).__init__(msg) self.msg = msg self.http_scheme = http_scheme self.http_host = http_host self.http_port = http_port self.http_path = http_path self.http_query = http_query self.http_status = http_status self.http_reason = http_reason self.http_device = http_device self.http_response_content = http_response_content self.http_headers = http_headers or {} def __str__(self): a = self.msg b = '' if self.http_scheme: b += '%s://' % self.http_scheme if self.http_host: b += self.http_host if self.http_port: b += ':%s' % self.http_port if self.http_path: b += self.http_path if self.http_query: b += '?%s' % self.http_query if self.http_status: if b: b = '%s %s' % (b, self.http_status) else: b = str(self.http_status) if self.http_reason: if b: b = '%s %s' % (b, self.http_reason) else: b = '- %s' % self.http_reason if self.http_device: if b: b = '%s: device %s' % (b, self.http_device) else: b = 'device %s' % self.http_device if self.http_response_content: if len(self.http_response_content) <= 60: b += ' %s' % self.http_response_content else: b += ' [first 60 chars of response] %s' \ % self.http_response_content[:60] return b and '%s: %s' % (a, b) or a class InvalidPidFileException(Exception): pass swift-2.17.0/swift/common/container_sync_realms.py0000666000175100017510000001413513236061617022353 0ustar zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import hashlib import hmac import os import time from six.moves import configparser from swift import gettext_ as _ from swift.common.utils import get_valid_utf8_str class ContainerSyncRealms(object): """ Loads and parses the container-sync-realms.conf, occasionally checking the file's mtime to see if it needs to be reloaded. 
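Illustrative use, assuming a conf file that defines a [myrealm] section with key, key2 and a cluster_mycluster option: realms = ContainerSyncRealms('/etc/swift/container-sync-realms.conf', logger); realms.key('myrealm') then returns that realm's key, and realms.endpoint('myrealm', 'mycluster') returns the configured endpoint URL, or None when unset; realm and cluster lookups are case-insensitive.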
""" def __init__(self, conf_path, logger): self.conf_path = conf_path self.logger = logger self.next_mtime_check = 0 self.mtime_check_interval = 300 self.conf_path_mtime = 0 self.data = {} self.reload() def reload(self): """Forces a reload of the conf file.""" self.next_mtime_check = 0 self.conf_path_mtime = 0 self._reload() def _reload(self): now = time.time() if now >= self.next_mtime_check: self.next_mtime_check = now + self.mtime_check_interval try: mtime = os.path.getmtime(self.conf_path) except OSError as err: if err.errno == errno.ENOENT: log_func = self.logger.debug else: log_func = self.logger.error log_func(_('Could not load %(conf)r: %(error)s') % { 'conf': self.conf_path, 'error': err}) else: if mtime != self.conf_path_mtime: self.conf_path_mtime = mtime try: conf = configparser.ConfigParser() conf.read(self.conf_path) except configparser.ParsingError as err: self.logger.error( _('Could not load %(conf)r: %(error)s') % {'conf': self.conf_path, 'error': err}) else: try: self.mtime_check_interval = conf.getint( 'DEFAULT', 'mtime_check_interval') self.next_mtime_check = \ now + self.mtime_check_interval except configparser.NoOptionError: self.mtime_check_interval = 300 self.next_mtime_check = \ now + self.mtime_check_interval except (configparser.ParsingError, ValueError) as err: self.logger.error( _('Error in %(conf)r with ' 'mtime_check_interval: %(error)s') % {'conf': self.conf_path, 'error': err}) realms = {} for section in conf.sections(): realm = {} clusters = {} for option, value in conf.items(section): if option in ('key', 'key2'): realm[option] = value elif option.startswith('cluster_'): clusters[option[8:].upper()] = value realm['clusters'] = clusters realms[section.upper()] = realm self.data = realms def realms(self): """Returns a list of realms.""" self._reload() return self.data.keys() def key(self, realm): """Returns the key for the realm.""" self._reload() result = self.data.get(realm.upper()) if result: result = result.get('key') return result def key2(self, realm): """Returns the key2 for the realm.""" self._reload() result = self.data.get(realm.upper()) if result: result = result.get('key2') return result def clusters(self, realm): """Returns a list of clusters for the realm.""" self._reload() result = self.data.get(realm.upper()) if result: result = result.get('clusters') if result: result = result.keys() return result or [] def endpoint(self, realm, cluster): """Returns the endpoint for the cluster in the realm.""" self._reload() result = None realm_data = self.data.get(realm.upper()) if realm_data: cluster_data = realm_data.get('clusters') if cluster_data: result = cluster_data.get(cluster.upper()) return result def get_sig(self, request_method, path, x_timestamp, nonce, realm_key, user_key): """ Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for the information given. :param request_method: HTTP method of the request. :param path: The path to the resource. :param x_timestamp: The X-Timestamp header value for the request. :param nonce: A unique value for the request. :param realm_key: Shared secret at the cluster operator level. :param user_key: Shared secret at the user's container level. :returns: hexdigest str of the HMAC-SHA1 for the request. 
""" nonce = get_valid_utf8_str(nonce) realm_key = get_valid_utf8_str(realm_key) user_key = get_valid_utf8_str(user_key) return hmac.new( realm_key, '%s\n%s\n%s\n%s\n%s' % ( request_method, path, x_timestamp, nonce, user_key), hashlib.sha1).hexdigest() swift-2.17.0/swift/common/linkat.py0000666000175100017510000000421713236061617017254 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import ctypes from ctypes.util import find_library __all__ = ['linkat'] class Linkat(object): # From include/uapi/linux/fcntl.h AT_FDCWD = -100 AT_SYMLINK_FOLLOW = 0x400 __slots__ = '_c_linkat' def __init__(self): libc = ctypes.CDLL(find_library('c'), use_errno=True) try: c_linkat = libc.linkat except AttributeError: self._c_linkat = None return c_linkat.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int] c_linkat.restype = ctypes.c_int def errcheck(result, func, arguments): if result == -1: errno = ctypes.set_errno(0) raise IOError(errno, 'linkat: %s' % os.strerror(errno)) else: return result c_linkat.errcheck = errcheck self._c_linkat = c_linkat @property def available(self): return self._c_linkat is not None def __call__(self, olddirfd, oldpath, newdirfd, newpath, flags): """ linkat() creates a new link (also known as a hard link) to an existing file. See `man 2 linkat` for more info. """ if not self.available: raise EnvironmentError('linkat not available') if not isinstance(olddirfd, int) or not isinstance(newdirfd, int): raise TypeError("fd must be an integer.") return self._c_linkat(olddirfd, oldpath, newdirfd, newpath, flags) linkat = Linkat() del Linkat swift-2.17.0/swift/common/__init__.py0000666000175100017510000000004313236061617017522 0ustar zuulzuul00000000000000"""Code common to all of Swift.""" swift-2.17.0/swift/common/base_storage_server.py0000666000175100017510000000550713236061617022021 0ustar zuulzuul00000000000000# Copyright (c) 2010-2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from swift import __version__ as swift_version from swift.common.utils import public, timing_stats, config_true_value from swift.common.swob import Response class BaseStorageServer(object): """ Implements common OPTIONS method for object, account, container servers. 
""" def __init__(self, conf, **kwargs): self._allowed_methods = None replication_server = conf.get('replication_server', None) if replication_server is not None: replication_server = config_true_value(replication_server) self.replication_server = replication_server @property def server_type(self): raise NotImplementedError( 'Storage nodes have not implemented the Server type.') @property def allowed_methods(self): if self._allowed_methods is None: self._allowed_methods = [] all_methods = inspect.getmembers(self, predicate=callable) if self.replication_server is True: for name, m in all_methods: if (getattr(m, 'publicly_accessible', False) and getattr(m, 'replication', False)): self._allowed_methods.append(name) elif self.replication_server is False: for name, m in all_methods: if (getattr(m, 'publicly_accessible', False) and not getattr(m, 'replication', False)): self._allowed_methods.append(name) elif self.replication_server is None: for name, m in all_methods: if getattr(m, 'publicly_accessible', False): self._allowed_methods.append(name) self._allowed_methods.sort() return self._allowed_methods @public @timing_stats() def OPTIONS(self, req): """ Base handler for OPTIONS requests :param req: swob.Request object :returns: swob.Response object """ # Prepare the default response headers = {'Allow': ', '.join(self.allowed_methods), 'Server': '%s/%s' % (self.server_type, swift_version)} resp = Response(status=200, request=req, headers=headers) return resp swift-2.17.0/swift/common/header_key_dict.py0000666000175100017510000000413713236061617021076 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import six class HeaderKeyDict(dict): """ A dict that title-cases all keys on the way in, so as to be case-insensitive. 
""" def __init__(self, base_headers=None, **kwargs): if base_headers: self.update(base_headers) self.update(kwargs) def update(self, other): if hasattr(other, 'keys'): for key in other.keys(): self[key.title()] = other[key] else: for key, value in other: self[key.title()] = value def __getitem__(self, key): return dict.get(self, key.title()) def __setitem__(self, key, value): if value is None: self.pop(key.title(), None) elif six.PY2 and isinstance(value, six.text_type): return dict.__setitem__(self, key.title(), value.encode('utf-8')) elif six.PY3 and isinstance(value, six.binary_type): return dict.__setitem__(self, key.title(), value.decode('latin-1')) else: return dict.__setitem__(self, key.title(), str(value)) def __contains__(self, key): return dict.__contains__(self, key.title()) def __delitem__(self, key): return dict.__delitem__(self, key.title()) def get(self, key, default=None): return dict.get(self, key.title(), default) def setdefault(self, key, value=None): if key not in self: self[key] = value return self[key] def pop(self, key, default=None): return dict.pop(self, key.title(), default) swift-2.17.0/swift/common/utils.py0000666000175100017510000046366413236061617017151 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
"""Miscellaneous utility functions for use with Swift.""" from __future__ import print_function import base64 import binascii import errno import fcntl import grp import hmac import json import math import operator import os import pwd import re import string import struct import sys import time import uuid import functools import platform import email.parser from distutils.version import LooseVersion from hashlib import md5, sha1 from random import random, shuffle from contextlib import contextmanager, closing import ctypes import ctypes.util from optparse import OptionParser from tempfile import mkstemp, NamedTemporaryFile import glob import itertools import stat import datetime import eventlet import eventlet.debug import eventlet.greenthread import eventlet.patcher import eventlet.semaphore from eventlet import GreenPool, sleep, Timeout, tpool from eventlet.green import socket, threading from eventlet.hubs import trampoline import eventlet.queue import netifaces import codecs utf8_decoder = codecs.getdecoder('utf-8') utf8_encoder = codecs.getencoder('utf-8') import six from six.moves import cPickle as pickle from six.moves.configparser import (ConfigParser, NoSectionError, NoOptionError, RawConfigParser) from six.moves import range from six.moves.urllib.parse import ParseResult from six.moves.urllib.parse import quote as _quote from six.moves.urllib.parse import urlparse as stdlib_urlparse from swift import gettext_ as _ import swift.common.exceptions from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \ HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE from swift.common.header_key_dict import HeaderKeyDict from swift.common.linkat import linkat # logging doesn't import patched as cleanly as one would like from logging.handlers import SysLogHandler import logging logging.thread = eventlet.green.thread logging.threading = eventlet.green.threading logging._lock = logging.threading.RLock() # setup notice level logging NOTICE = 25 logging.addLevelName(NOTICE, 'NOTICE') SysLogHandler.priority_map['NOTICE'] = 'notice' # These are lazily pulled from libc elsewhere _sys_fallocate = None _posix_fadvise = None _libc_socket = None _libc_bind = None _libc_accept = None # see man -s 2 setpriority _libc_setpriority = None # see man -s 2 syscall _posix_syscall = None # If set to non-zero, fallocate routines will fail based on free space # available being at or below this amount, in bytes. FALLOCATE_RESERVE = 0 # Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or # the number of bytes (False). 
FALLOCATE_IS_PERCENT = False # from /usr/src/linux-headers-*/include/uapi/linux/resource.h PRIO_PROCESS = 0 # /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls there # are many like it, but this one is mine, see man -s 2 ioprio_set def NR_ioprio_set(): """Give __NR_ioprio_set value for your system.""" architecture = os.uname()[4] arch_bits = platform.architecture()[0] # check if supported system, now support x86_64 and AArch64 if architecture == 'x86_64' and arch_bits == '64bit': return 251 elif architecture == 'aarch64' and arch_bits == '64bit': return 30 raise OSError("Swift doesn't support ionice priority for %s %s" % (architecture, arch_bits)) # this syscall integer probably only works on x86_64 linux systems, you # can check if it's correct on yours with something like this: """ #include <stdio.h> #include <sys/syscall.h> int main(int argc, const char* argv[]) { printf("%d\n", __NR_ioprio_set); return 0; } """ # this is the value for "which" that says our who value will be a pid # pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h IOPRIO_WHO_PROCESS = 1 IO_CLASS_ENUM = { 'IOPRIO_CLASS_RT': 1, 'IOPRIO_CLASS_BE': 2, 'IOPRIO_CLASS_IDLE': 3, } # the IOPRIO_PRIO_VALUE "macro" is also pulled from # /usr/src/linux-headers-*/include/linux/ioprio.h IOPRIO_CLASS_SHIFT = 13 def IOPRIO_PRIO_VALUE(class_, data): return (((class_) << IOPRIO_CLASS_SHIFT) | data) # Used by hash_path to offer a bit more security when generating hashes for # paths. It simply appends this value to all paths; guessing the hash a path # will end up with would also require knowing this suffix. HASH_PATH_SUFFIX = '' HASH_PATH_PREFIX = '' SWIFT_CONF_FILE = '/etc/swift/swift.conf' # These constants are Linux-specific, and Python doesn't seem to know # about them. We ask anyway just in case that ever gets fixed. # # The values were copied from the Linux 3.x kernel headers. AF_ALG = getattr(socket, 'AF_ALG', 38) F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031) O_TMPFILE = getattr(os, 'O_TMPFILE', 0o20000000 | os.O_DIRECTORY) # Used by the parse_socket_string() function to validate IPv6 addresses IPV6_RE = re.compile("^\[(?P<address>
.*)\](:(?P<port>[0-9]+))?$") MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e' class InvalidHashPathConfigError(ValueError): def __str__(self): return "[swift-hash]: both swift_hash_path_suffix and " \ "swift_hash_path_prefix are missing from %s" % SWIFT_CONF_FILE def set_swift_dir(swift_dir): """ Sets the directory from which swift config files will be read. If the given directory differs from that already set then the swift.conf file in the new directory will be validated and storage policies will be reloaded from the new swift.conf file. :param swift_dir: non-default directory to read swift.conf from """ global HASH_PATH_SUFFIX global HASH_PATH_PREFIX global SWIFT_CONF_FILE if (swift_dir is not None and swift_dir != os.path.dirname(SWIFT_CONF_FILE)): SWIFT_CONF_FILE = os.path.join( swift_dir, os.path.basename(SWIFT_CONF_FILE)) HASH_PATH_PREFIX = '' HASH_PATH_SUFFIX = '' validate_configuration() return True return False def validate_hash_conf(): global HASH_PATH_SUFFIX global HASH_PATH_PREFIX if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX: hash_conf = ConfigParser() if hash_conf.read(SWIFT_CONF_FILE): try: HASH_PATH_SUFFIX = hash_conf.get('swift-hash', 'swift_hash_path_suffix') except (NoSectionError, NoOptionError): pass try: HASH_PATH_PREFIX = hash_conf.get('swift-hash', 'swift_hash_path_prefix') except (NoSectionError, NoOptionError): pass if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX: raise InvalidHashPathConfigError() try: validate_hash_conf() except InvalidHashPathConfigError: # could get monkey patched or lazy loaded pass def get_hmac(request_method, path, expires, key, digest=sha1): """ Returns the hexdigest string of the HMAC (see RFC 2104) for the request. :param request_method: Request method to allow. :param path: The path to the resource to allow access to. :param expires: Unix timestamp as an int for when the URL expires. :param key: HMAC shared secret. :param digest: constructor for the digest to use in calculating the HMAC Defaults to SHA1 :returns: hexdigest str of the HMAC for the request using the specified digest algorithm. """ return hmac.new( key, '%s\n%s\n%s' % (request_method, expires, path), digest).hexdigest() # Used by get_swift_info and register_swift_info to store information about # the swift cluster. _swift_info = {} _swift_admin_info = {} def get_swift_info(admin=False, disallowed_sections=None): """ Returns information about the swift cluster that has been previously registered with the register_swift_info call. :param admin: boolean value, if True will additionally return an 'admin' section with information previously registered as admin info. :param disallowed_sections: list of section names to be withheld from the information returned. :returns: dictionary of information about the swift cluster. """ disallowed_sections = disallowed_sections or [] info = dict(_swift_info) for section in disallowed_sections: key_to_pop = None sub_section_dict = info for sub_section in section.split('.'): if key_to_pop: sub_section_dict = sub_section_dict.get(key_to_pop, {}) if not isinstance(sub_section_dict, dict): sub_section_dict = {} break key_to_pop = sub_section sub_section_dict.pop(key_to_pop, None) if admin: info['admin'] = dict(_swift_admin_info) info['admin']['disallowed_sections'] = list(disallowed_sections) return info def register_swift_info(name='swift', admin=False, **kwargs): """ Registers information about the swift cluster to be retrieved with calls to get_swift_info. NOTE: Do not use "." in the param: name or any keys in kwargs. "."
is used in the disallowed_sections to remove unwanted keys from /info. :param name: string, the section name to place the information under. :param admin: boolean, if True, information will be registered to an admin section which can optionally be withheld when requesting the information. :param kwargs: key value arguments representing the information to be added. :raises ValueError: if name or any of the keys in kwargs has "." in it """ if name == 'admin' or name == 'disallowed_sections': raise ValueError('\'{0}\' is reserved name.'.format(name)) if admin: dict_to_use = _swift_admin_info else: dict_to_use = _swift_info if name not in dict_to_use: if "." in name: raise ValueError('Cannot use "." in a swift_info key: %s' % name) dict_to_use[name] = {} for key, val in kwargs.items(): if "." in key: raise ValueError('Cannot use "." in a swift_info key: %s' % key) dict_to_use[name][key] = val def backward(f, blocksize=4096): """ A generator returning lines from a file starting with the last line, then the second last line, etc. i.e., it reads lines backwards. Stops when the first line (if any) is read. This is useful when searching for recent activity in very large files. :param f: file object to read :param blocksize: no of characters to go backwards at each block """ f.seek(0, os.SEEK_END) if f.tell() == 0: return last_row = b'' while f.tell() != 0: try: f.seek(-blocksize, os.SEEK_CUR) except IOError: blocksize = f.tell() f.seek(-blocksize, os.SEEK_CUR) block = f.read(blocksize) f.seek(-blocksize, os.SEEK_CUR) rows = block.split(b'\n') rows[-1] = rows[-1] + last_row while rows: last_row = rows.pop(-1) if rows and last_row: yield last_row yield last_row # Used when reading config values TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y')) def config_true_value(value): """ Returns True if the value is either True or a string in TRUE_VALUES. Returns False otherwise. """ return value is True or \ (isinstance(value, six.string_types) and value.lower() in TRUE_VALUES) def config_positive_int_value(value): """ Returns positive int value if it can be cast by int() and it's an integer > 0. (not including zero) Raises ValueError otherwise. """ try: value = int(value) if value < 1: raise ValueError() except (TypeError, ValueError): raise ValueError( 'Config option must be an positive int number, not "%s".' % value) return value def config_auto_int_value(value, default): """ Returns default if value is None or 'auto'. Returns value as an int or raises ValueError otherwise. """ if value is None or \ (isinstance(value, six.string_types) and value.lower() == 'auto'): return default try: value = int(value) except (TypeError, ValueError): raise ValueError('Config option must be an integer or the ' 'string "auto", not "%s".' % value) return value def append_underscore(prefix): if prefix and not prefix.endswith('_'): prefix += '_' return prefix def config_read_reseller_options(conf, defaults): """ Read reseller_prefix option and associated options from configuration Reads the reseller_prefix option, then reads options that may be associated with a specific reseller prefix. Reads options such that an option without a prefix applies to all reseller prefixes unless an option has an explicit prefix. :param conf: the configuration :param defaults: a dict of default values. The key is the option name. 
The value is either an array of strings or a string :return: tuple of an array of reseller prefixes and a dict of option values """ reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',') reseller_prefixes = [] for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]: if prefix == "''": prefix = '' prefix = append_underscore(prefix) if prefix not in reseller_prefixes: reseller_prefixes.append(prefix) if len(reseller_prefixes) == 0: reseller_prefixes.append('') # Get prefix-using config options associated_options = {} for prefix in reseller_prefixes: associated_options[prefix] = dict(defaults) associated_options[prefix].update( config_read_prefixed_options(conf, '', defaults)) prefix_name = prefix if prefix != '' else "''" associated_options[prefix].update( config_read_prefixed_options(conf, prefix_name, defaults)) return reseller_prefixes, associated_options def config_read_prefixed_options(conf, prefix_name, defaults): """ Read prefixed options from configuration :param conf: the configuration :param prefix_name: the prefix (including, if needed, an underscore) :param defaults: a dict of default values. The dict supplies the option name and type (string or comma separated string) :return: a dict containing the options """ params = {} for option_name in defaults.keys(): value = conf.get('%s%s' % (prefix_name, option_name)) if value: if isinstance(defaults.get(option_name), list): params[option_name] = [] for role in value.lower().split(','): params[option_name].append(role.strip()) else: params[option_name] = value.strip() return params def eventlet_monkey_patch(): """ Install the appropriate Eventlet monkey patches. """ # NOTE(sileht): # monkey-patching thread is required by python-keystoneclient; # monkey-patching select is required by oslo.messaging pika driver # if thread is monkey-patched. eventlet.patcher.monkey_patch(all=False, socket=True, select=True, thread=True) def noop_libc_function(*args): return 0 def validate_configuration(): try: validate_hash_conf() except InvalidHashPathConfigError as e: sys.exit("Error: %s" % e) def load_libc_function(func_name, log_error=True, fail_if_missing=False, errcheck=False): """ Attempt to find the function in libc, otherwise return a no-op func. :param func_name: name of the function to pull from libc. :param log_error: log an error when a function can't be found :param fail_if_missing: raise an exception when a function can't be found. Default behavior is to return a no-op function. :param errcheck: boolean, if true install a wrapper on the function to check for a return values of -1 and call ctype.get_errno and raise an OSError """ try: libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) func = getattr(libc, func_name) except AttributeError: if fail_if_missing: raise if log_error: logging.warning(_("Unable to locate %s in libc. Leaving as a " "no-op."), func_name) return noop_libc_function if errcheck: def _errcheck(result, f, args): if result == -1: errcode = ctypes.get_errno() raise OSError(errcode, os.strerror(errcode)) return result func.errcheck = _errcheck return func def generate_trans_id(trans_id_suffix): return 'tx%s-%010x%s' % ( uuid.uuid4().hex[:21], time.time(), quote(trans_id_suffix)) def get_policy_index(req_headers, res_headers): """ Returns the appropriate index of the storage policy for the request from a proxy server :param req_headers: dict of the request headers. :param res_headers: dict of the response headers. 
:returns: string index of storage policy, or None """ header = 'X-Backend-Storage-Policy-Index' policy_index = res_headers.get(header, req_headers.get(header)) return str(policy_index) if policy_index is not None else None def get_log_line(req, res, trans_time, additional_info): """ Make a line for logging that matches the documented log line format for backend servers. :param req: the request. :param res: the response. :param trans_time: the time the request took to complete, a float. :param additional_info: a string to log at the end of the line :returns: a properly formatted line for logging. """ policy_index = get_policy_index(req.headers, res.headers) return '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f "%s" %d %s' % ( req.remote_addr, time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()), req.method, req.path, res.status.split()[0], res.content_length or '-', req.referer or '-', req.headers.get('x-trans-id', '-'), req.user_agent or '-', trans_time, additional_info or '-', os.getpid(), policy_index or '-') def get_trans_id_time(trans_id): if len(trans_id) >= 34 and \ trans_id.startswith('tx') and trans_id[23] == '-': try: return int(trans_id[24:34], 16) except ValueError: pass return None def config_fallocate_value(reserve_value): """ Returns fallocate reserve_value as an int or float. Returns is_percent as a boolean. Returns a ValueError on invalid fallocate value. """ try: if str(reserve_value[-1:]) == '%': reserve_value = float(reserve_value[:-1]) is_percent = True else: reserve_value = int(reserve_value) is_percent = False except ValueError: raise ValueError('Error: %s is an invalid value for fallocate' '_reserve.' % reserve_value) return reserve_value, is_percent class FileLikeIter(object): def __init__(self, iterable): """ Wraps an iterable to behave as a file-like object. The iterable must yield bytes strings. """ self.iterator = iter(iterable) self.buf = None self.closed = False def __iter__(self): return self def next(self): """ next(x) -> the next value, or raise StopIteration """ if self.closed: raise ValueError('I/O operation on closed file') if self.buf: rv = self.buf self.buf = None return rv else: return next(self.iterator) __next__ = next def read(self, size=-1): """ read([size]) -> read at most size bytes, returned as a bytes string. If the size argument is negative or omitted, read until EOF is reached. Notice that when in non-blocking mode, less data than what was requested may be returned, even if no size parameter was given. """ if self.closed: raise ValueError('I/O operation on closed file') if size < 0: return b''.join(self) elif not size: chunk = b'' elif self.buf: chunk = self.buf self.buf = None else: try: chunk = next(self.iterator) except StopIteration: return b'' if len(chunk) > size: self.buf = chunk[size:] chunk = chunk[:size] return chunk def readline(self, size=-1): """ readline([size]) -> next line from the file, as a bytes string. Retain newline. A non-negative size argument limits the maximum number of bytes to return (an incomplete line may be returned then). Return an empty string at EOF. 
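For example (illustrative): given f = FileLikeIter([b'abc\ndef']), f.readline() returns b'abc\n' and the next f.readline() returns b'def'.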
""" if self.closed: raise ValueError('I/O operation on closed file') data = b'' while b'\n' not in data and (size < 0 or len(data) < size): if size < 0: chunk = self.read(1024) else: chunk = self.read(size - len(data)) if not chunk: break data += chunk if b'\n' in data: data, sep, rest = data.partition(b'\n') data += sep if self.buf: self.buf = rest + self.buf else: self.buf = rest return data def readlines(self, sizehint=-1): """ readlines([size]) -> list of bytes strings, each a line from the file. Call readline() repeatedly and return a list of the lines so read. The optional size argument, if given, is an approximate bound on the total number of bytes in the lines returned. """ if self.closed: raise ValueError('I/O operation on closed file') lines = [] while True: line = self.readline(sizehint) if not line: break lines.append(line) if sizehint >= 0: sizehint -= len(line) if sizehint <= 0: break return lines def close(self): """ close() -> None or (perhaps) an integer. Close the file. Sets data attribute .closed to True. A closed file cannot be used for further I/O operations. close() may be called more than once without error. Some kinds of file objects (for example, opened by popen()) may return an exit status upon closing. """ self.iterator = None self.closed = True class FallocateWrapper(object): def __init__(self, noop=False): self.noop = noop if self.noop: self.func_name = 'posix_fallocate' self.fallocate = noop_libc_function return # fallocate is preferred because we need the on-disk size to match # the allocated size. Older versions of sqlite require that the # two sizes match. However, fallocate is Linux only. for func in ('fallocate', 'posix_fallocate'): self.func_name = func self.fallocate = load_libc_function(func, log_error=False) if self.fallocate is not noop_libc_function: break if self.fallocate is noop_libc_function: logging.warning(_("Unable to locate fallocate, posix_fallocate in " "libc. Leaving as a no-op.")) def __call__(self, fd, mode, offset, length): """The length parameter must be a ctypes.c_uint64.""" if not self.noop: if FALLOCATE_RESERVE > 0: st = os.fstatvfs(fd) free = st.f_frsize * st.f_bavail - length.value if FALLOCATE_IS_PERCENT: free = \ (float(free) / float(st.f_frsize * st.f_blocks)) * 100 if float(free) <= float(FALLOCATE_RESERVE): raise OSError( errno.ENOSPC, 'FALLOCATE_RESERVE fail %s <= %s' % (free, FALLOCATE_RESERVE)) args = { 'fallocate': (fd, mode, offset, length), 'posix_fallocate': (fd, offset, length) } return self.fallocate(*args[self.func_name]) def disable_fallocate(): global _sys_fallocate _sys_fallocate = FallocateWrapper(noop=True) def fallocate(fd, size): """ Pre-allocate disk space for a file. :param fd: file descriptor :param size: size to allocate (in bytes) """ global _sys_fallocate if _sys_fallocate is None: _sys_fallocate = FallocateWrapper() if size < 0: size = 0 # 1 means "FALLOC_FL_KEEP_SIZE", which means it pre-allocates invisibly ret = _sys_fallocate(fd, 1, 0, ctypes.c_uint64(size)) err = ctypes.get_errno() if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL): raise OSError(err, 'Unable to fallocate(%s)' % size) def fsync(fd): """ Sync modified file data and metadata to disk. :param fd: file descriptor """ if hasattr(fcntl, 'F_FULLSYNC'): try: fcntl.fcntl(fd, fcntl.F_FULLSYNC) except IOError as e: raise OSError(e.errno, 'Unable to F_FULLSYNC(%s)' % fd) else: os.fsync(fd) def fdatasync(fd): """ Sync modified file data to disk. 
:param fd: file descriptor """ try: os.fdatasync(fd) except AttributeError: fsync(fd) def fsync_dir(dirpath): """ Sync directory entries to disk. :param dirpath: Path to the directory to be synced. """ dirfd = None try: dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY) fsync(dirfd) except OSError as err: if err.errno == errno.ENOTDIR: # Raise error if someone calls fsync_dir on a non-directory raise logging.warning(_('Unable to perform fsync() on directory %(dir)s:' ' %(err)s'), {'dir': dirpath, 'err': os.strerror(err.errno)}) finally: if dirfd: os.close(dirfd) def drop_buffer_cache(fd, offset, length): """ Drop 'buffer' cache for the given range of the given file. :param fd: file descriptor :param offset: start offset :param length: length """ global _posix_fadvise if _posix_fadvise is None: _posix_fadvise = load_libc_function('posix_fadvise64') # 4 means "POSIX_FADV_DONTNEED" ret = _posix_fadvise(fd, ctypes.c_uint64(offset), ctypes.c_uint64(length), 4) if ret != 0: logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) " "-> %(ret)s", {'fd': fd, 'offset': offset, 'length': length, 'ret': ret}) NORMAL_FORMAT = "%016.05f" INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x' SHORT_FORMAT = NORMAL_FORMAT + '_%x' MAX_OFFSET = (16 ** 16) - 1 PRECISION = 1e-5 # Setting this to True will cause the internal format to always display # extended digits - even when the value is equivalent to the normalized form. # This isn't ideal during an upgrade when some servers might not understand # the new time format - but flipping it to True works great for testing. FORCE_INTERNAL = False # or True @functools.total_ordering class Timestamp(object): """ Internal Representation of Swift Time. The normalized form of the X-Timestamp header looks like a float with a fixed width to ensure stable string sorting - normalized timestamps look like "1402464677.04188" To support overwrites of existing data without modifying the original timestamp but still maintain consistency a second internal offset vector is append to the normalized timestamp form which compares and sorts greater than the fixed width float format but less than a newer timestamp. The internalized format of timestamps looks like "1402464677.04188_0000000000000000" - the portion after the underscore is the offset and is a formatted hexadecimal integer. The internalized form is not exposed to clients in responses from Swift. Normal client operations will not create a timestamp with an offset. The Timestamp class in common.utils supports internalized and normalized formatting of timestamps and also comparison of timestamp values. When the offset value of a Timestamp is 0 - it's considered insignificant and need not be represented in the string format; to support backwards compatibility during a Swift upgrade the internalized and normalized form of a Timestamp with an insignificant offset are identical. When a timestamp includes an offset it will always be represented in the internalized form, but is still excluded from the normalized form. Timestamps with an equivalent timestamp portion (the float part) will compare and order by their offset. Timestamps with a greater timestamp portion will always compare and order greater than a Timestamp with a lesser timestamp regardless of it's offset. String comparison and ordering is guaranteed for the internalized string format, and is backwards compatible for normalized timestamps which do not include an offset. """ def __init__(self, timestamp, offset=0, delta=0): """ Create a new Timestamp. 
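For example (illustrative): Timestamp(1402464677.04188).normal is '1402464677.04188', while Timestamp(1402464677.04188, offset=1).internal is '1402464677.04188_0000000000000001'.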
:param timestamp: time in seconds since the Epoch, may be any of: * a float or integer * normalized/internalized string * another instance of this class (offset is preserved) :param offset: the second internal offset vector, an int :param delta: deca-microsecond difference from the base timestamp param, an int """ if isinstance(timestamp, six.string_types): parts = timestamp.split('_', 1) self.timestamp = float(parts.pop(0)) if parts: self.offset = int(parts[0], 16) else: self.offset = 0 else: self.timestamp = float(timestamp) self.offset = getattr(timestamp, 'offset', 0) # increment offset if offset >= 0: self.offset += offset else: raise ValueError('offset must be non-negative') if self.offset > MAX_OFFSET: raise ValueError('offset must be smaller than %d' % MAX_OFFSET) self.raw = int(round(self.timestamp / PRECISION)) # add delta if delta: self.raw = self.raw + delta if self.raw <= 0: raise ValueError( 'delta must be greater than %d' % (-1 * self.raw)) self.timestamp = float(self.raw * PRECISION) if self.timestamp < 0: raise ValueError('timestamp cannot be negative') if self.timestamp >= 10000000000: raise ValueError('timestamp too large') @classmethod def now(cls, offset=0, delta=0): return cls(time.time(), offset=offset, delta=delta) def __repr__(self): return INTERNAL_FORMAT % (self.timestamp, self.offset) def __str__(self): raise TypeError('You must specify which string format is required') def __float__(self): return self.timestamp def __int__(self): return int(self.timestamp) def __nonzero__(self): return bool(self.timestamp or self.offset) def __bool__(self): return self.__nonzero__() @property def normal(self): return NORMAL_FORMAT % self.timestamp @property def internal(self): if self.offset or FORCE_INTERNAL: return INTERNAL_FORMAT % (self.timestamp, self.offset) else: return self.normal @property def short(self): if self.offset or FORCE_INTERNAL: return SHORT_FORMAT % (self.timestamp, self.offset) else: return self.normal @property def isoformat(self): t = float(self.normal) if six.PY3: # On Python 3, round manually using ROUND_HALF_EVEN rounding # method, to use the same rounding method than Python 2. Python 3 # used a different rounding method, but Python 3.4.4 and 3.5.1 use # again ROUND_HALF_EVEN as Python 2. # See https://bugs.python.org/issue23517 frac, t = math.modf(t) us = round(frac * 1e6) if us >= 1000000: t += 1 us -= 1000000 elif us < 0: t -= 1 us += 1000000 dt = datetime.datetime.utcfromtimestamp(t) dt = dt.replace(microsecond=us) else: dt = datetime.datetime.utcfromtimestamp(t) isoformat = dt.isoformat() # python isoformat() doesn't include msecs when zero if len(isoformat) < len("1970-01-01T00:00:00.000000"): isoformat += ".000000" return isoformat def __eq__(self, other): if other is None: return False if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal == other.internal def __ne__(self, other): if other is None: return True if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal != other.internal def __lt__(self, other): if other is None: return False if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal < other.internal def __hash__(self): return hash(self.internal) def encode_timestamps(t1, t2=None, t3=None, explicit=False): """ Encode up to three timestamps into a string. 
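For example (illustrative): with t1 = Timestamp(1402464677.04188), a t2 two deca-microseconds later and t3 equal to t2, encode_timestamps(t1, t2, t3) returns '1402464677.04188+2+0'; encode_timestamps(t1, t1, t1) returns just '1402464677.04188'.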
Unlike a Timestamp object, the encoded string does NOT use fixed width fields and consequently no relative chronology of the timestamps can be inferred from lexicographic sorting of encoded timestamp strings. The format of the encoded string is: <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]] i.e. if t1 = t2 = t3 then just the string representation of t1 is returned, otherwise the time offsets for t2 and t3 are appended. If explicit is True then the offsets for t2 and t3 are always appended even if zero. Note: any offset value in t1 will be preserved, but offsets on t2 and t3 are not preserved. In the anticipated use cases for this method (and the inverse decode_timestamps method) the timestamps passed as t2 and t3 are not expected to have offsets as they will be timestamps associated with a POST request. In the case where the encoding is used in a container objects table row, t1 could be the PUT or DELETE time but t2 and t3 represent the content type and metadata times (if different from the data file) i.e. correspond to POST timestamps. In the case where the encoded form is used in a .meta file name, t1 and t2 both correspond to POST timestamps. """ form = '{0}' values = [t1.short] if t2 is not None: t2_t1_delta = t2.raw - t1.raw explicit = explicit or (t2_t1_delta != 0) values.append(t2_t1_delta) if t3 is not None: t3_t2_delta = t3.raw - t2.raw explicit = explicit or (t3_t2_delta != 0) values.append(t3_t2_delta) if explicit: form += '{1:+x}' if t3 is not None: form += '{2:+x}' return form.format(*values) def decode_timestamps(encoded, explicit=False): """ Parses a string of the form generated by encode_timestamps and returns a tuple of the three component timestamps. If explicit is False, component timestamps that are not explicitly encoded will be assumed to have zero delta from the previous component and therefore take the value of the previous component. If explicit is True, component timestamps that are not explicitly encoded will be returned with value None. """ # TODO: some tests, e.g. in test_replicator, put float timestamps values # into container db's, hence this defensive check, but in real world # this may never happen. if not isinstance(encoded, six.string_types): ts = Timestamp(encoded) return ts, ts, ts parts = [] signs = [] pos_parts = encoded.split('+') for part in pos_parts: # parse time components and their signs # e.g. x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1] neg_parts = part.split('-') parts = parts + neg_parts signs = signs + [1] + [-1] * (len(neg_parts) - 1) t1 = Timestamp(parts[0]) t2 = t3 = None if len(parts) > 1: t2 = t1 delta = signs[1] * int(parts[1], 16) # if delta = 0 we want t2 = t3 = t1 in order to # preserve any offset in t1 - only construct a distinct # timestamp if there is a non-zero delta. if delta: t2 = Timestamp((t1.raw + delta) * PRECISION) elif not explicit: t2 = t1 if len(parts) > 2: t3 = t2 delta = signs[2] * int(parts[2], 16) if delta: t3 = Timestamp((t2.raw + delta) * PRECISION) elif not explicit: t3 = t2 return t1, t2, t3 def normalize_timestamp(timestamp): """ Format a timestamp (string or numeric) into a standardized xxxxxxxxxx.xxxxx (10.5) format. Note that timestamps using values greater than or equal to November 20th, 2286 at 17:46 UTC will use 11 digits to represent the number of seconds.
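For example (illustrative): normalize_timestamp('1234') returns '0000001234.00000' and normalize_timestamp(1402464677.041880001) returns '1402464677.04188'.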
:param timestamp: unix timestamp :returns: normalized timestamp as a string """ return Timestamp(timestamp).normal EPOCH = datetime.datetime(1970, 1, 1) def last_modified_date_to_timestamp(last_modified_date_str): """ Convert a last modified date (like you'd get from a container listing, e.g. 2014-02-28T23:22:36.698390) to a float. """ start = datetime.datetime.strptime(last_modified_date_str, '%Y-%m-%dT%H:%M:%S.%f') delta = start - EPOCH # This calculation is based on Python 2.7's Modules/datetimemodule.c, # function delta_to_microseconds(), but written in Python. return Timestamp(delta.total_seconds()) def normalize_delete_at_timestamp(timestamp): """ Format a timestamp (string or numeric) into a standardized xxxxxxxxxx (10) format. Note that timestamps less than 0000000000 are raised to 0000000000 and values greater than November 20th, 2286 at 17:46:39 UTC will be capped at that date and time, resulting in no return value exceeding 9999999999. This cap is because the expirer is already working through a sorted list of strings that were all a length of 10. Adding another digit would mess up the sort and cause the expirer to break from processing early. By 2286, this problem will need to be fixed, probably by creating an additional .expiring_objects account to work from with 11 (or more) digit container names. :param timestamp: unix timestamp :returns: normalized timestamp as a string """ return '%010d' % min(max(0, float(timestamp)), 9999999999) def mkdirs(path): """ Ensures the path is a directory or makes it if not. Errors if the path exists but is a file or on permissions failure. :param path: path to create """ if not os.path.isdir(path): try: os.makedirs(path) except OSError as err: if err.errno != errno.EEXIST or not os.path.isdir(path): raise def makedirs_count(path, count=0): """ Same as os.makedirs() except that this method returns the number of new directories that had to be created. Also, this does not raise an error if target directory already exists. This behaviour is similar to Python 3.x's os.makedirs() called with exist_ok=True. Also similar to swift.common.utils.mkdirs() https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212 """ head, tail = os.path.split(path) if not tail: head, tail = os.path.split(head) if head and tail and not os.path.exists(head): count = makedirs_count(head, count) if tail == os.path.curdir: return try: os.mkdir(path) except OSError as e: # EEXIST may also be raised if path exists as a file # Do not let that pass. if e.errno != errno.EEXIST or not os.path.isdir(path): raise else: count += 1 return count def renamer(old, new, fsync=True): """ Attempt to fix / hide race conditions like empty object directories being removed by backend processes during uploads, by retrying. The containing directory of 'new' and of all newly created directories are fsync'd by default. This _will_ come at a performance penalty. In cases where these additional fsyncs are not necessary, it is expected that the caller of renamer() turn it off explicitly. :param old: old path to be renamed :param new: new path to be renamed to :param fsync: fsync on containing directory of new and also all the newly created directories. """ dirpath = os.path.dirname(new) try: count = makedirs_count(dirpath) os.rename(old, new) except OSError: count = makedirs_count(dirpath) os.rename(old, new) if fsync: # If count=0, no new directories were created. But we still need to # fsync leaf dir after os.rename(). 
# If count>0, starting from leaf dir, fsync parent dirs of all # directories created by makedirs_count() for i in range(0, count + 1): fsync_dir(dirpath) dirpath = os.path.dirname(dirpath) def link_fd_to_path(fd, target_path, dirs_created=0, retries=2, fsync=True): """ Creates a link to file descriptor at target_path specified. This method does not close the fd for you. Unlike rename, as linkat() cannot overwrite target_path if it exists, we unlink and try again. Attempts to fix / hide race conditions like empty object directories being removed by backend processes during uploads, by retrying. :param fd: File descriptor to be linked :param target_path: Path in filesystem where fd is to be linked :param dirs_created: Number of newly created directories that needs to be fsync'd. :param retries: number of retries to make :param fsync: fsync on containing directory of target_path and also all the newly created directories. """ dirpath = os.path.dirname(target_path) for _junk in range(0, retries): try: linkat(linkat.AT_FDCWD, "/proc/self/fd/%d" % (fd), linkat.AT_FDCWD, target_path, linkat.AT_SYMLINK_FOLLOW) break except IOError as err: if err.errno == errno.ENOENT: dirs_created = makedirs_count(dirpath) elif err.errno == errno.EEXIST: try: os.unlink(target_path) except OSError as e: if e.errno != errno.ENOENT: raise else: raise if fsync: for i in range(0, dirs_created + 1): fsync_dir(dirpath) dirpath = os.path.dirname(dirpath) def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False): """ Validate and split the given HTTP request path. **Examples**:: ['a'] = split_path('/a') ['a', None] = split_path('/a', 1, 2) ['a', 'c'] = split_path('/a/c', 1, 2) ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True) :param path: HTTP Request path to be split :param minsegs: Minimum number of segments to be extracted :param maxsegs: Maximum number of segments to be extracted :param rest_with_last: If True, trailing data will be returned as part of last segment. If False, and there is trailing data, raises ValueError. :returns: list of segments with a length of maxsegs (non-existent segments will return as None) :raises ValueError: if given an invalid path """ if not maxsegs: maxsegs = minsegs if minsegs > maxsegs: raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs)) if rest_with_last: segs = path.split('/', maxsegs) minsegs += 1 maxsegs += 1 count = len(segs) if (segs[0] or count < minsegs or count > maxsegs or '' in segs[1:minsegs]): raise ValueError('Invalid path: %s' % quote(path)) else: minsegs += 1 maxsegs += 1 segs = path.split('/', maxsegs) count = len(segs) if (segs[0] or count < minsegs or count > maxsegs + 1 or '' in segs[1:minsegs] or (count == maxsegs + 1 and segs[maxsegs])): raise ValueError('Invalid path: %s' % quote(path)) segs = segs[1:maxsegs] segs.extend([None] * (maxsegs - 1 - len(segs))) return segs def validate_device_partition(device, partition): """ Validate that a device and a partition are valid and won't lead to directory traversal when used. :param device: device to validate :param partition: partition to validate :raises ValueError: if given an invalid device or partition """ if not device or '/' in device or device in ['.', '..']: raise ValueError('Invalid device: %s' % quote(device or '')) if not partition or '/' in partition or partition in ['.', '..']: raise ValueError('Invalid partition: %s' % quote(partition or '')) class RateLimitedIterator(object): """ Wrap an iterator to only yield elements at a rate of N per second. 
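    Example (an illustrative sketch; ``my_items`` and ``process`` are
    hypothetical)::

        for item in RateLimitedIterator(my_items, 10):
            process(item)  # yields at most ~10 items per second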
:param iterable: iterable to wrap :param elements_per_second: the rate at which to yield elements :param limit_after: rate limiting kicks in only after yielding this many elements; default is 0 (rate limit immediately) """ def __init__(self, iterable, elements_per_second, limit_after=0, ratelimit_if=lambda _junk: True): self.iterator = iter(iterable) self.elements_per_second = elements_per_second self.limit_after = limit_after self.running_time = 0 self.ratelimit_if = ratelimit_if def __iter__(self): return self def next(self): next_value = next(self.iterator) if self.ratelimit_if(next_value): if self.limit_after > 0: self.limit_after -= 1 else: self.running_time = ratelimit_sleep(self.running_time, self.elements_per_second) return next_value __next__ = next class GreenthreadSafeIterator(object): """ Wrap an iterator to ensure that only one greenthread is inside its next() method at a time. This is useful if an iterator's next() method may perform network IO, as that may trigger a greenthread context switch (aka trampoline), which can give another greenthread a chance to call next(). At that point, you get an error like "ValueError: generator already executing". By wrapping calls to next() with a mutex, we avoid that error. """ def __init__(self, unsafe_iterable): self.unsafe_iter = iter(unsafe_iterable) self.semaphore = eventlet.semaphore.Semaphore(value=1) def __iter__(self): return self def next(self): with self.semaphore: return next(self.unsafe_iter) __next__ = next class NullLogger(object): """A no-op logger for eventlet wsgi.""" def write(self, *args): # "Logs" the args to nowhere pass def exception(self, *args): pass def critical(self, *args): pass def error(self, *args): pass def warning(self, *args): pass def info(self, *args): pass def debug(self, *args): pass def log(self, *args): pass class LoggerFileObject(object): # Note: this is greenthread-local storage _cls_thread_local = threading.local() def __init__(self, logger, log_type='STDOUT'): self.logger = logger self.log_type = log_type def write(self, value): # We can get into a nasty situation when logs are going to syslog # and syslog dies. # # It's something like this: # # (A) someone logs something # # (B) there's an exception in sending to /dev/log since syslog is # not working # # (C) logging takes that exception and writes it to stderr (see # logging.Handler.handleError) # # (D) stderr was replaced with a LoggerFileObject at process start, # so the LoggerFileObject takes the provided string and tells # its logger to log it (to syslog, naturally). # # Then, steps B through D repeat until we run out of stack. 
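        # To break that cycle, we record in greenthread-local storage that
        # we are already inside write(), so any re-entrant call becomes a
        # no-op (see the guard below).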
if getattr(self._cls_thread_local, 'already_called_write', False): return self._cls_thread_local.already_called_write = True try: value = value.strip() if value: if 'Connection reset by peer' in value: self.logger.error( _('%s: Connection reset by peer'), self.log_type) else: self.logger.error(_('%(type)s: %(value)s'), {'type': self.log_type, 'value': value}) finally: self._cls_thread_local.already_called_write = False def writelines(self, values): if getattr(self._cls_thread_local, 'already_called_writelines', False): return self._cls_thread_local.already_called_writelines = True try: self.logger.error(_('%(type)s: %(value)s'), {'type': self.log_type, 'value': '#012'.join(values)}) finally: self._cls_thread_local.already_called_writelines = False def close(self): pass def flush(self): pass def __iter__(self): return self def next(self): raise IOError(errno.EBADF, 'Bad file descriptor') __next__ = next def read(self, size=-1): raise IOError(errno.EBADF, 'Bad file descriptor') def readline(self, size=-1): raise IOError(errno.EBADF, 'Bad file descriptor') def tell(self): return 0 def xreadlines(self): return self class StatsdClient(object): def __init__(self, host, port, base_prefix='', tail_prefix='', default_sample_rate=1, sample_rate_factor=1, logger=None): self._host = host self._port = port self._base_prefix = base_prefix self.set_prefix(tail_prefix) self._default_sample_rate = default_sample_rate self._sample_rate_factor = sample_rate_factor self.random = random self.logger = logger # Determine if host is IPv4 or IPv6 addr_info = None try: addr_info = socket.getaddrinfo(host, port, socket.AF_INET) self._sock_family = socket.AF_INET except socket.gaierror: try: addr_info = socket.getaddrinfo(host, port, socket.AF_INET6) self._sock_family = socket.AF_INET6 except socket.gaierror: # Don't keep the server from starting from what could be a # transient DNS failure. Any hostname will get re-resolved as # necessary in the .sendto() calls. # However, we don't know if we're IPv4 or IPv6 in this case, so # we assume legacy IPv4. self._sock_family = socket.AF_INET # NOTE: we use the original host value, not the DNS-resolved one # because if host is a hostname, we don't want to cache the DNS # resolution for the entire lifetime of this process. Let standard # name resolution caching take effect. This should help operators use # DNS trickery if they want. if addr_info is not None: # addr_info is a list of 5-tuples with the following structure: # (family, socktype, proto, canonname, sockaddr) # where sockaddr is the only thing of interest to us, and we only # use the first result. We want to use the originally supplied # host (see note above) and the remainder of the variable-length # sockaddr: IPv4 has (address, port) while IPv6 has (address, # port, flow info, scope id). sockaddr = addr_info[0][-1] self._target = (host,) + (sockaddr[1:]) else: self._target = (host, port) def set_prefix(self, new_prefix): if new_prefix and self._base_prefix: self._prefix = '.'.join([self._base_prefix, new_prefix, '']) elif new_prefix: self._prefix = new_prefix + '.' elif self._base_prefix: self._prefix = self._base_prefix + '.' 
else: self._prefix = '' def _send(self, m_name, m_value, m_type, sample_rate): if sample_rate is None: sample_rate = self._default_sample_rate sample_rate = sample_rate * self._sample_rate_factor parts = ['%s%s:%s' % (self._prefix, m_name, m_value), m_type] if sample_rate < 1: if self.random() < sample_rate: parts.append('@%s' % (sample_rate,)) else: return if six.PY3: parts = [part.encode('utf-8') for part in parts] # Ideally, we'd cache a sending socket in self, but that # results in a socket getting shared by multiple green threads. with closing(self._open_socket()) as sock: try: return sock.sendto(b'|'.join(parts), self._target) except IOError as err: if self.logger: self.logger.warning( _('Error sending UDP message to %(target)r: %(err)s'), {'target': self._target, 'err': err}) def _open_socket(self): return socket.socket(self._sock_family, socket.SOCK_DGRAM) def update_stats(self, m_name, m_value, sample_rate=None): return self._send(m_name, m_value, 'c', sample_rate) def increment(self, metric, sample_rate=None): return self.update_stats(metric, 1, sample_rate) def decrement(self, metric, sample_rate=None): return self.update_stats(metric, -1, sample_rate) def timing(self, metric, timing_ms, sample_rate=None): return self._send(metric, timing_ms, 'ms', sample_rate) def timing_since(self, metric, orig_time, sample_rate=None): return self.timing(metric, (time.time() - orig_time) * 1000, sample_rate) def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None): if byte_xfer: return self.timing(metric, elapsed_time * 1000 / byte_xfer * 1000, sample_rate) def server_handled_successfully(status_int): """ True for successful responses *or* error codes that are not Swift's fault, False otherwise. For example, 500 is definitely the server's fault, but 412 is an error code (4xx are all errors) that is due to a header the client sent. If one is tracking error rates to monitor server health, one would be advised to use a function like this one, lest a client cause a flurry of 404s or 416s and make a spurious spike in your errors graph. """ return (is_success(status_int) or is_redirection(status_int) or status_int == HTTP_NOT_FOUND or status_int == HTTP_PRECONDITION_FAILED or status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE) def timing_stats(**dec_kwargs): """ Returns a decorator that logs timing events or errors for public methods in swift's wsgi server controllers, based on response code. """ def decorating_func(func): method = func.__name__ @functools.wraps(func) def _timing_stats(ctrl, *args, **kwargs): start_time = time.time() resp = func(ctrl, *args, **kwargs) if server_handled_successfully(resp.status_int): ctrl.logger.timing_since(method + '.timing', start_time, **dec_kwargs) else: ctrl.logger.timing_since(method + '.errors.timing', start_time, **dec_kwargs) return resp return _timing_stats return decorating_func # double inheritance to support property with setter class LogAdapter(logging.LoggerAdapter, object): """ A Logger like object which performs some reformatting on calls to :meth:`exception`. Can be used to store a threadlocal transaction id and client ip. 
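    Example (an illustrative sketch; ``conf`` is a hypothetical config
    dict)::

        logger = get_logger(conf, log_route='my-daemon')  # a LogAdapter
        logger.txn_id = 'tx123'
        logger.error('it broke')  # formatter appends " (txn: tx123)"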
""" _cls_thread_local = threading.local() def __init__(self, logger, server): logging.LoggerAdapter.__init__(self, logger, {}) self.server = server self.warn = self.warning @property def txn_id(self): if hasattr(self._cls_thread_local, 'txn_id'): return self._cls_thread_local.txn_id @txn_id.setter def txn_id(self, value): self._cls_thread_local.txn_id = value @property def client_ip(self): if hasattr(self._cls_thread_local, 'client_ip'): return self._cls_thread_local.client_ip @client_ip.setter def client_ip(self, value): self._cls_thread_local.client_ip = value @property def thread_locals(self): return (self.txn_id, self.client_ip) @thread_locals.setter def thread_locals(self, value): self.txn_id, self.client_ip = value def getEffectiveLevel(self): return self.logger.getEffectiveLevel() def process(self, msg, kwargs): """ Add extra info to message """ kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id, 'client_ip': self.client_ip} return msg, kwargs def notice(self, msg, *args, **kwargs): """ Convenience function for syslog priority LOG_NOTICE. The python logging lvl is set to 25, just above info. SysLogHandler is monkey patched to map this log lvl to the LOG_NOTICE syslog priority. """ self.log(NOTICE, msg, *args, **kwargs) def _exception(self, msg, *args, **kwargs): logging.LoggerAdapter.exception(self, msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): _junk, exc, _junk = sys.exc_info() call = self.error emsg = '' if isinstance(exc, (OSError, socket.error)): if exc.errno in (errno.EIO, errno.ENOSPC): emsg = str(exc) elif exc.errno == errno.ECONNREFUSED: emsg = _('Connection refused') elif exc.errno == errno.EHOSTUNREACH: emsg = _('Host unreachable') elif exc.errno == errno.ETIMEDOUT: emsg = _('Connection timeout') else: call = self._exception elif isinstance(exc, eventlet.Timeout): emsg = exc.__class__.__name__ if hasattr(exc, 'seconds'): emsg += ' (%ss)' % exc.seconds if isinstance(exc, swift.common.exceptions.MessageTimeout): if exc.msg: emsg += ' %s' % exc.msg else: call = self._exception call('%s: %s' % (msg, emsg), *args, **kwargs) def set_statsd_prefix(self, prefix): """ The StatsD client prefix defaults to the "name" of the logger. This method may override that default with a specific value. Currently used in the proxy-server to differentiate the Account, Container, and Object controllers. """ if self.logger.statsd_client: self.logger.statsd_client.set_prefix(prefix) def statsd_delegate(statsd_func_name): """ Factory to create methods which delegate to methods on self.logger.statsd_client (an instance of StatsdClient). The created methods conditionally delegate to a method whose name is given in 'statsd_func_name'. The created delegate methods are a no-op when StatsD logging is not configured. :param statsd_func_name: the name of a method on StatsdClient. """ func = getattr(StatsdClient, statsd_func_name) @functools.wraps(func) def wrapped(self, *a, **kw): if getattr(self.logger, 'statsd_client'): return func(self.logger.statsd_client, *a, **kw) return wrapped update_stats = statsd_delegate('update_stats') increment = statsd_delegate('increment') decrement = statsd_delegate('decrement') timing = statsd_delegate('timing') timing_since = statsd_delegate('timing_since') transfer_rate = statsd_delegate('transfer_rate') class SwiftLogFormatter(logging.Formatter): """ Custom logging.Formatter will append txn_id to a log message if the record has one and the message does not. Optionally it can shorten overly long log lines. 
""" def __init__(self, fmt=None, datefmt=None, max_line_length=0): logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt) self.max_line_length = max_line_length def format(self, record): if not hasattr(record, 'server'): # Catch log messages that were not initiated by swift # (for example, the keystone auth middleware) record.server = record.name # Included from Python's logging.Formatter and then altered slightly to # replace \n with #012 record.message = record.getMessage() if self._fmt.find('%(asctime)') >= 0: record.asctime = self.formatTime(record, self.datefmt) msg = (self._fmt % record.__dict__).replace('\n', '#012') if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException( record.exc_info).replace('\n', '#012') if record.exc_text: if not msg.endswith('#012'): msg = msg + '#012' msg = msg + record.exc_text if (hasattr(record, 'txn_id') and record.txn_id and record.txn_id not in msg): msg = "%s (txn: %s)" % (msg, record.txn_id) if (hasattr(record, 'client_ip') and record.client_ip and record.levelno != logging.INFO and record.client_ip not in msg): msg = "%s (client_ip: %s)" % (msg, record.client_ip) if self.max_line_length > 0 and len(msg) > self.max_line_length: if self.max_line_length < 7: msg = msg[:self.max_line_length] else: approxhalf = (self.max_line_length - 5) // 2 msg = msg[:approxhalf] + " ... " + msg[-approxhalf:] return msg def get_logger(conf, name=None, log_to_console=False, log_route=None, fmt="%(server)s: %(message)s"): """ Get the current system logger using config settings. **Log config and defaults**:: log_facility = LOG_LOCAL0 log_level = INFO log_name = swift log_max_line_length = 0 log_udp_host = (disabled) log_udp_port = logging.handlers.SYSLOG_UDP_PORT log_address = /dev/log log_statsd_host = (disabled) log_statsd_port = 8125 log_statsd_default_sample_rate = 1.0 log_statsd_sample_rate_factor = 1.0 log_statsd_metric_prefix = (empty-string) :param conf: Configuration dict to read settings from :param name: Name of the logger :param log_to_console: Add handler which writes to console on stderr :param log_route: Route for the logging, not emitted to the log, just used to separate logging configurations :param fmt: Override log format """ if not conf: conf = {} if name is None: name = conf.get('log_name', 'swift') if not log_route: log_route = name logger = logging.getLogger(log_route) logger.propagate = False # all new handlers will get the same formatter formatter = SwiftLogFormatter( fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0))) # get_logger will only ever add one SysLog Handler to a logger if not hasattr(get_logger, 'handler4logger'): get_logger.handler4logger = {} if logger in get_logger.handler4logger: logger.removeHandler(get_logger.handler4logger[logger]) # facility for this logger will be set by last call wins facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'), SysLogHandler.LOG_LOCAL0) udp_host = conf.get('log_udp_host') if udp_host: udp_port = int(conf.get('log_udp_port', logging.handlers.SYSLOG_UDP_PORT)) handler = ThreadSafeSysLogHandler(address=(udp_host, udp_port), facility=facility) else: log_address = conf.get('log_address', '/dev/log') try: handler = ThreadSafeSysLogHandler(address=log_address, facility=facility) except socket.error as e: # Either /dev/log isn't a UNIX socket or it does not exist at all if e.errno not in [errno.ENOTSOCK, errno.ENOENT]: raise handler = 
ThreadSafeSysLogHandler(facility=facility) handler.setFormatter(formatter) logger.addHandler(handler) get_logger.handler4logger[logger] = handler # setup console logging if log_to_console or hasattr(get_logger, 'console_handler4logger'): # remove pre-existing console handler for this logger if not hasattr(get_logger, 'console_handler4logger'): get_logger.console_handler4logger = {} if logger in get_logger.console_handler4logger: logger.removeHandler(get_logger.console_handler4logger[logger]) console_handler = logging.StreamHandler(sys.__stderr__) console_handler.setFormatter(formatter) logger.addHandler(console_handler) get_logger.console_handler4logger[logger] = console_handler # set the level for the logger logger.setLevel( getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO)) # Setup logger with a StatsD client if so configured statsd_host = conf.get('log_statsd_host') if statsd_host: statsd_port = int(conf.get('log_statsd_port', 8125)) base_prefix = conf.get('log_statsd_metric_prefix', '') default_sample_rate = float(conf.get( 'log_statsd_default_sample_rate', 1)) sample_rate_factor = float(conf.get( 'log_statsd_sample_rate_factor', 1)) statsd_client = StatsdClient(statsd_host, statsd_port, base_prefix, name, default_sample_rate, sample_rate_factor, logger=logger) logger.statsd_client = statsd_client else: logger.statsd_client = None adapted_logger = LogAdapter(logger, name) other_handlers = conf.get('log_custom_handlers', None) if other_handlers: log_custom_handlers = [s.strip() for s in other_handlers.split(',') if s.strip()] for hook in log_custom_handlers: try: mod, fnc = hook.rsplit('.', 1) logger_hook = getattr(__import__(mod, fromlist=[fnc]), fnc) logger_hook(conf, name, log_to_console, log_route, fmt, logger, adapted_logger) except (AttributeError, ImportError): print('Error calling custom handler [%s]' % hook, file=sys.stderr) except ValueError: print('Invalid custom handler format [%s]' % hook, file=sys.stderr) return adapted_logger def get_hub(): """ Checks whether poll is available and falls back on select if it isn't. Note about epoll: Review: https://review.openstack.org/#/c/18806/ There was a problem where once out of every 30 quadrillion connections, a coroutine wouldn't wake up when the client closed its end. Epoll was not reporting the event or it was getting swallowed somewhere. Then when that file descriptor was re-used, eventlet would freak right out because it still thought it was waiting for activity from it in some other coro. Another note about epoll: it's hard to use when forking. epoll works like so: * create an epoll instance: efd = epoll_create(...) * register file descriptors of interest with epoll_ctl(efd, EPOLL_CTL_ADD, fd, ...) * wait for events with epoll_wait(efd, ...) If you fork, you and all your child processes end up using the same epoll instance, and everyone becomes confused. It is possible to use epoll and fork and still have a correct program as long as you do the right things, but eventlet doesn't do those things. Really, it can't even try to do those things since it doesn't get notified of forks. In contrast, both poll() and select() specify the set of interesting file descriptors with each call, so there's no problem with forking. """ try: import select if hasattr(select, "poll"): return "poll" return "selects" except ImportError: return None def drop_privileges(user, call_setsid=True): """ Sets the userid/groupid of the current process, get session leader, etc. 
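    Example (an illustrative sketch; the process must start as root for the
    setuid/setgid calls to succeed)::

        drop_privileges('swift')  # continue running as the 'swift' user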
:param user: User name to change privileges to """ if os.geteuid() == 0: groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem] os.setgroups(groups) user = pwd.getpwnam(user) os.setgid(user[3]) os.setuid(user[2]) os.environ['HOME'] = user[5] if call_setsid: try: os.setsid() except OSError: pass os.chdir('/') # in case you need to rmdir on where you started the daemon os.umask(0o22) # ensure files are created with the correct privileges def capture_stdio(logger, **kwargs): """ Log unhandled exceptions, close stdio, capture stdout and stderr. param logger: Logger object to use """ # log uncaught exceptions sys.excepthook = lambda * exc_info: \ logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info) # collect stdio file desc not in use for logging stdio_files = [sys.stdin, sys.stdout, sys.stderr] console_fds = [h.stream.fileno() for _junk, h in getattr( get_logger, 'console_handler4logger', {}).items()] stdio_files = [f for f in stdio_files if f.fileno() not in console_fds] with open(os.devnull, 'r+b') as nullfile: # close stdio (excludes fds open for logging) for f in stdio_files: # some platforms throw an error when attempting an stdin flush try: f.flush() except IOError: pass try: os.dup2(nullfile.fileno(), f.fileno()) except OSError: pass # redirect stdio if kwargs.pop('capture_stdout', True): sys.stdout = LoggerFileObject(logger) if kwargs.pop('capture_stderr', True): sys.stderr = LoggerFileObject(logger, 'STDERR') def parse_options(parser=None, once=False, test_args=None): """Parse standard swift server/daemon options with optparse.OptionParser. :param parser: OptionParser to use. If not sent one will be created. :param once: Boolean indicating the "once" option is available :param test_args: Override sys.argv; used in testing :returns: Tuple of (config, options); config is an absolute path to the config file, options is the parser options as a dictionary. :raises SystemExit: First arg (CONFIG) is required, file must exist """ if not parser: parser = OptionParser(usage="%prog CONFIG [options]") parser.add_option("-v", "--verbose", default=False, action="store_true", help="log to console") if once: parser.add_option("-o", "--once", default=False, action="store_true", help="only run one pass of daemon") # if test_args is None, optparse will use sys.argv[:1] options, args = parser.parse_args(args=test_args) if not args: parser.print_usage() print(_("Error: missing config path argument")) sys.exit(1) config = os.path.abspath(args.pop(0)) if not os.path.exists(config): parser.print_usage() print(_("Error: unable to locate %s") % config) sys.exit(1) extra_args = [] # if any named options appear in remaining args, set the option to True for arg in args: if arg in options.__dict__: setattr(options, arg, True) else: extra_args.append(arg) options = vars(options) if extra_args: options['extra_args'] = extra_args return config, options def is_valid_ip(ip): """ Return True if the provided ip is a valid IP-address """ return is_valid_ipv4(ip) or is_valid_ipv6(ip) def is_valid_ipv4(ip): """ Return True if the provided ip is a valid IPv4-address """ try: socket.inet_pton(socket.AF_INET, ip) except socket.error: # not a valid IPv4 address return False return True def is_valid_ipv6(ip): """ Returns True if the provided ip is a valid IPv6-address """ try: socket.inet_pton(socket.AF_INET6, ip) except socket.error: # not a valid IPv6 address return False return True def expand_ipv6(address): """ Expand ipv6 address. 
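    Example (illustrative; the result is the platform's canonical form, as
    produced by inet_ntop)::

        expand_ipv6('fe80:0:0:0:0:0:0:1')  # => 'fe80::1' on most platforms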
:param address: a string indicating valid ipv6 address :returns: a string indicating fully expanded ipv6 address """ packed_ip = socket.inet_pton(socket.AF_INET6, address) return socket.inet_ntop(socket.AF_INET6, packed_ip) def whataremyips(bind_ip=None): """ Get "our" IP addresses ("us" being the set of services configured by one `*.conf` file). If our REST listens on a specific address, return it. Otherwise, if listen on '0.0.0.0' or '::' return all addresses, including the loopback. :param str bind_ip: Optional bind_ip from a config file; may be IP address or hostname. :returns: list of Strings of ip addresses """ if bind_ip: # See if bind_ip is '0.0.0.0'/'::' try: _, _, _, _, sockaddr = socket.getaddrinfo( bind_ip, None, 0, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST)[0] if sockaddr[0] not in ('0.0.0.0', '::'): return [bind_ip] except socket.gaierror: pass addresses = [] for interface in netifaces.interfaces(): try: iface_data = netifaces.ifaddresses(interface) for family in iface_data: if family not in (netifaces.AF_INET, netifaces.AF_INET6): continue for address in iface_data[family]: addr = address['addr'] # If we have an ipv6 address remove the # %ether_interface at the end if family == netifaces.AF_INET6: addr = expand_ipv6(addr.split('%')[0]) addresses.append(addr) except ValueError: pass return addresses def parse_socket_string(socket_string, default_port): """ Given a string representing a socket, returns a tuple of (host, port). Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an optional port. If an IPv6 address is specified it **must** be enclosed in [], like *[::1]* or *[::1]:11211*. This follows the accepted prescription for `IPv6 host literals`_. Examples:: server.org server.org:1337 127.0.0.1:1337 [::1]:1337 [::1] .. _IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2 """ port = default_port # IPv6 addresses must be between '[]' if socket_string.startswith('['): match = IPV6_RE.match(socket_string) if not match: raise ValueError("Invalid IPv6 address: %s" % socket_string) host = match.group('address') port = match.group('port') or port else: if ':' in socket_string: tokens = socket_string.split(':') if len(tokens) > 2: raise ValueError("IPv6 addresses must be between '[]'") host, port = tokens else: host = socket_string return (host, port) def storage_directory(datadir, partition, name_hash): """ Get the storage directory :param datadir: Base data directory :param partition: Partition :param name_hash: Account, container or object name hash :returns: Storage directory """ return os.path.join(datadir, str(partition), name_hash[-3:], name_hash) def hash_path(account, container=None, object=None, raw_digest=False): """ Get the canonical hash for an account/container/object :param account: Account :param container: Container :param object: Object :param raw_digest: If True, return the raw version rather than a hex digest :returns: hash string """ if object and not container: raise ValueError('container is required if object is provided') paths = [account] if container: paths.append(container) if object: paths.append(object) if raw_digest: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).digest() else: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).hexdigest() def get_zero_indexed_base_string(base, index): """ This allows the caller to make a list of things with indexes, where the first item (zero indexed) is just the bare base string, and subsequent indexes are appended '-1', '-2', etc. 
e.g.:: 'lock', None => 'lock' 'lock', 0 => 'lock' 'lock', 1 => 'lock-1' 'object', 2 => 'object-2' :param base: a string, the base string; when ``index`` is 0 (or None) this is the identity function. :param index: a digit, typically an integer (or None); for values other than 0 or None this digit is appended to the base string separated by a hyphen. """ if index == 0 or index is None: return_string = base else: return_string = base + "-%d" % int(index) return return_string def _get_any_lock(fds): for fd in fds: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) return True except IOError as err: if err.errno != errno.EAGAIN: raise return False @contextmanager def lock_path(directory, timeout=10, timeout_class=None, limit=1): """ Context manager that acquires a lock on a directory. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). For locking exclusively, file or directory has to be opened in Write mode. Python doesn't allow directories to be opened in Write Mode. So we workaround by locking a hidden file in the directory. :param directory: directory to be locked :param timeout: timeout (in seconds) :param timeout_class: The class of the exception to raise if the lock cannot be granted within the timeout. Will be constructed as timeout_class(timeout, lockpath). Default: LockTimeout :param limit: The maximum number of locks that may be held concurrently on the same directory at the time this method is called. Note that this limit is only applied during the current call to this method and does not prevent subsequent calls giving a larger limit. Defaults to 1. :raises TypeError: if limit is not an int. :raises ValueError: if limit is less than 1. """ if limit < 1: raise ValueError('limit must be greater than or equal to 1') if timeout_class is None: timeout_class = swift.common.exceptions.LockTimeout mkdirs(directory) lockpath = '%s/.lock' % directory fds = [os.open(get_zero_indexed_base_string(lockpath, i), os.O_WRONLY | os.O_CREAT) for i in range(limit)] sleep_time = 0.01 slower_sleep_time = max(timeout * 0.01, sleep_time) slowdown_at = timeout * 0.01 time_slept = 0 try: with timeout_class(timeout, lockpath): while True: if _get_any_lock(fds): break if time_slept > slowdown_at: sleep_time = slower_sleep_time sleep(sleep_time) time_slept += sleep_time yield True finally: for fd in fds: os.close(fd) @contextmanager def lock_file(filename, timeout=10, append=False, unlink=True): """ Context manager that acquires a lock on a file. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). :param filename: file to be locked :param timeout: timeout (in seconds) :param append: True if file should be opened in append mode :param unlink: True if the file should be unlinked at the end """ flags = os.O_CREAT | os.O_RDWR if append: flags |= os.O_APPEND mode = 'a+' else: mode = 'r+' while True: fd = os.open(filename, flags) file_obj = os.fdopen(fd, mode) try: with swift.common.exceptions.LockTimeout(timeout, filename): while True: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) break except IOError as err: if err.errno != errno.EAGAIN: raise sleep(0.01) try: if os.stat(filename).st_ino != os.fstat(fd).st_ino: continue except OSError as err: if err.errno == errno.ENOENT: continue raise yield file_obj if unlink: os.unlink(filename) break finally: file_obj.close() def lock_parent_directory(filename, timeout=10): """ Context manager that acquires a lock on the parent directory of the given file path. 
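    For example (an illustrative sketch; the path is hypothetical)::

        with lock_parent_directory('/srv/1/node/sda1/containers/x.db'):
            pass  # safe to create or rename files next to x.db here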
This will block until the lock can be acquired, or the timeout time has
    expired (whichever occurs first).

    :param filename: file path of the parent directory to be locked
    :param timeout: timeout (in seconds)
    """
    return lock_path(os.path.dirname(filename), timeout=timeout)


def get_time_units(time_amount):
    """
    Get a normalized length of time in the largest unit of time (hours,
    minutes, or seconds).

    :param time_amount: length of time in seconds
    :returns: A tuple of (length of time, unit of time) where unit of time is
              one of ('h', 'm', 's')
    """
    time_unit = 's'
    if time_amount > 60:
        time_amount /= 60
        time_unit = 'm'
        if time_amount > 60:
            time_amount /= 60
            time_unit = 'h'
    return time_amount, time_unit


def compute_eta(start_time, current_value, final_value):
    """
    Compute an ETA.  Now only if we could also have a progress bar...

    :param start_time: Unix timestamp when the operation began
    :param current_value: Current value
    :param final_value: Final value
    :returns: ETA as a tuple of (length of time, unit of time) where unit of
              time is one of ('h', 'm', 's')
    """
    elapsed = time.time() - start_time
    completion = (float(current_value) / final_value) or 0.00001
    return get_time_units(1.0 / completion * elapsed - elapsed)


def unlink_older_than(path, mtime):
    """
    Remove any file in a given path that was last modified before mtime.

    :param path: path to remove file from
    :param mtime: timestamp of oldest file to keep
    """
    filepaths = map(functools.partial(os.path.join, path), listdir(path))
    return unlink_paths_older_than(filepaths, mtime)


def unlink_paths_older_than(filepaths, mtime):
    """
    Remove any files from the given list that were last modified before
    mtime.

    :param filepaths: a list of strings, the full paths of files to check
    :param mtime: timestamp of oldest file to keep
    """
    for fpath in filepaths:
        try:
            if os.path.getmtime(fpath) < mtime:
                os.unlink(fpath)
        except OSError:
            pass


def item_from_env(env, item_name, allow_none=False):
    """
    Get a value from the wsgi environment

    :param env: wsgi environment dict
    :param item_name: name of item to get
    :returns: the value from the environment
    """
    item = env.get(item_name, None)
    if item is None and not allow_none:
        logging.error("ERROR: %s could not be found in env!", item_name)
    return item


def cache_from_env(env, allow_none=False):
    """
    Get memcache connection pool from the environment (which had been
    previously set by the memcache middleware)

    :param env: wsgi environment dict
    :returns: swift.common.memcached.MemcacheRing from environment
    """
    return item_from_env(env, 'swift.cache', allow_none)


def read_conf_dir(parser, conf_dir):
    conf_files = []
    for f in os.listdir(conf_dir):
        if f.endswith('.conf') and not f.startswith('.'):
            conf_files.append(os.path.join(conf_dir, f))
    return parser.read(sorted(conf_files))


def readconf(conf_path, section_name=None, log_name=None, defaults=None,
             raw=False):
    """
    Read config file(s) and return config items as a dict

    :param conf_path: path to config file/directory, or a file-like object
                      (hasattr readline)
    :param section_name: config section to read (will return all sections if
                         not defined)
    :param log_name: name to be used with logging (will use section_name if
                     not defined)
    :param defaults: dict of default values to pre-populate the config with
    :returns: dict of config items
    :raises ValueError: if section_name does not exist
    :raises IOError: if reading the file failed
    """
    if defaults is None:
        defaults = {}
    if raw:
        c = RawConfigParser(defaults)
    else:
        c = ConfigParser(defaults)
    if hasattr(conf_path, 'readline'):
        if hasattr(conf_path, 'seek'):
conf_path.seek(0) c.readfp(conf_path) else: if os.path.isdir(conf_path): # read all configs in directory success = read_conf_dir(c, conf_path) else: success = c.read(conf_path) if not success: raise IOError(_("Unable to read config from %s") % conf_path) if section_name: if c.has_section(section_name): conf = dict(c.items(section_name)) else: raise ValueError( _("Unable to find %(section)s config section in %(conf)s") % {'section': section_name, 'conf': conf_path}) if "log_name" not in conf: if log_name is not None: conf['log_name'] = log_name else: conf['log_name'] = section_name else: conf = {} for s in c.sections(): conf.update({s: dict(c.items(s))}) if 'log_name' not in conf: conf['log_name'] = log_name conf['__file__'] = conf_path return conf def write_pickle(obj, dest, tmp=None, pickle_protocol=0): """ Ensure that a pickle file gets written to disk. The file is first written to a tmp location, ensure it is synced to disk, then perform a move to its final location :param obj: python object to be pickled :param dest: path of final destination file :param tmp: path to tmp to use, defaults to None :param pickle_protocol: protocol to pickle the obj with, defaults to 0 """ if tmp is None: tmp = os.path.dirname(dest) mkdirs(tmp) fd, tmppath = mkstemp(dir=tmp, suffix='.tmp') with os.fdopen(fd, 'wb') as fo: pickle.dump(obj, fo, pickle_protocol) fo.flush() os.fsync(fd) renamer(tmppath, dest) def search_tree(root, glob_match, ext='', exts=None, dir_ext=None): """Look in root, for any files/dirs matching glob, recursively traversing any found directories looking for files ending with ext :param root: start of search path :param glob_match: glob to match in root, matching dirs are traversed with os.walk :param ext: only files that end in ext will be returned :param exts: a list of file extensions; only files that end in one of these extensions will be returned; if set this list overrides any extension specified using the 'ext' param. :param dir_ext: if present directories that end with dir_ext will not be traversed and instead will be returned as a matched path :returns: list of full paths to matching files, sorted """ exts = exts or [ext] found_files = [] for path in glob.glob(os.path.join(root, glob_match)): if os.path.isdir(path): for root, dirs, files in os.walk(path): if dir_ext and root.endswith(dir_ext): found_files.append(root) # the root is a config dir, descend no further break for file_ in files: if any(exts) and not any(file_.endswith(e) for e in exts): continue found_files.append(os.path.join(root, file_)) found_dir = False for dir_ in dirs: if dir_ext and dir_.endswith(dir_ext): found_dir = True found_files.append(os.path.join(root, dir_)) if found_dir: # do not descend further into matching directories break else: if ext and not path.endswith(ext): continue found_files.append(path) return sorted(found_files) def write_file(path, contents): """Write contents to file at path :param path: any path, subdirs will be created as needed :param contents: data to write to file, will be converted to string """ dirname, name = os.path.split(path) if not os.path.exists(dirname): try: os.makedirs(dirname) except OSError as err: if err.errno == errno.EACCES: sys.exit('Unable to create %s. Running as ' 'non-root?' 
% dirname) with open(path, 'w') as f: f.write('%s' % contents) def remove_file(path): """Quiet wrapper for os.unlink, OSErrors are suppressed :param path: first and only argument passed to os.unlink """ try: os.unlink(path) except OSError: pass def audit_location_generator(devices, datadir, suffix='', mount_check=True, logger=None): """ Given a devices path and a data directory, yield (path, device, partition) for all files in that directory :param devices: parent directory of the devices to be audited :param datadir: a directory located under self.devices. This should be one of the DATADIR constants defined in the account, container, and object servers. :param suffix: path name suffix required for all names returned :param mount_check: Flag to check if a mount check should be performed on devices :param logger: a logger object """ device_dir = listdir(devices) # randomize devices in case of process restart before sweep completed shuffle(device_dir) for device in device_dir: if mount_check and not ismount(os.path.join(devices, device)): if logger: logger.warning( _('Skipping %s as it is not mounted'), device) continue datadir_path = os.path.join(devices, device, datadir) try: partitions = listdir(datadir_path) except OSError as e: if logger: logger.warning(_('Skipping %(datadir)s because %(err)s'), {'datadir': datadir_path, 'err': e}) continue for partition in partitions: part_path = os.path.join(datadir_path, partition) try: suffixes = listdir(part_path) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for asuffix in suffixes: suff_path = os.path.join(part_path, asuffix) try: hashes = listdir(suff_path) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for hsh in hashes: hash_path = os.path.join(suff_path, hsh) try: files = sorted(listdir(hash_path), reverse=True) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for fname in files: if suffix and not fname.endswith(suffix): continue path = os.path.join(hash_path, fname) yield path, device, partition def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5): """ Will eventlet.sleep() for the appropriate time so that the max_rate is never exceeded. If max_rate is 0, will not ratelimit. The maximum recommended rate should not exceed (1000 * incr_by) a second as eventlet.sleep() does involve some overhead. Returns running_time that should be used for subsequent calls. :param running_time: the running time in milliseconds of the next allowable request. Best to start at zero. :param max_rate: The maximum rate per second allowed for the process. :param incr_by: How much to increment the counter. Useful if you want to ratelimit 1024 bytes/sec and have differing sizes of requests. Must be > 0 to engage rate-limiting behavior. :param rate_buffer: Number of seconds the rate counter can drop and be allowed to catch up (at a faster than listed rate). A larger number will result in larger spikes in rate but better average accuracy. Must be > 0 to engage rate-limiting behavior. 
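    For example (an illustrative sketch; ``tasks`` and ``handle`` are
    hypothetical)::

        running_time = 0
        for task in tasks:
            running_time = ratelimit_sleep(running_time, 100)
            handle(task)  # at most ~100 calls per second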
""" if max_rate <= 0 or incr_by <= 0: return running_time # 1,000 milliseconds = 1 second clock_accuracy = 1000.0 # Convert seconds to milliseconds now = time.time() * clock_accuracy # Calculate time per request in milliseconds time_per_request = clock_accuracy * (float(incr_by) / max_rate) # Convert rate_buffer to milliseconds and compare if now - running_time > rate_buffer * clock_accuracy: running_time = now elif running_time - now > time_per_request: # Convert diff back to a floating point number of seconds and sleep eventlet.sleep((running_time - now) / clock_accuracy) # Return the absolute time for the next interval in milliseconds; note # that time could have passed well beyond that point, but the next call # will catch that and skip the sleep. return running_time + time_per_request class ContextPool(GreenPool): """GreenPool subclassed to kill its coros when it gets gc'ed""" def __enter__(self): return self def __exit__(self, type, value, traceback): for coro in list(self.coroutines_running): coro.kill() class GreenAsyncPileWaitallTimeout(Timeout): pass class GreenAsyncPile(object): """ Runs jobs in a pool of green threads, and the results can be retrieved by using this object as an iterator. This is very similar in principle to eventlet.GreenPile, except it returns results as they become available rather than in the order they were launched. Correlating results with jobs (if necessary) is left to the caller. """ def __init__(self, size_or_pool): """ :param size_or_pool: thread pool size or a pool to use """ if isinstance(size_or_pool, GreenPool): self._pool = size_or_pool size = self._pool.size else: self._pool = GreenPool(size_or_pool) size = size_or_pool self._responses = eventlet.queue.LightQueue(size) self._inflight = 0 self._pending = 0 def _run_func(self, func, args, kwargs): try: self._responses.put(func(*args, **kwargs)) finally: self._inflight -= 1 @property def inflight(self): return self._inflight def spawn(self, func, *args, **kwargs): """ Spawn a job in a green thread on the pile. """ self._pending += 1 self._inflight += 1 self._pool.spawn(self._run_func, func, args, kwargs) def waitfirst(self, timeout): """ Wait up to timeout seconds for first result to come in. :param timeout: seconds to wait for results :returns: first item to come back, or None """ for result in self._wait(timeout, first_n=1): return result def waitall(self, timeout): """ Wait timeout seconds for any results to come in. :param timeout: seconds to wait for results :returns: list of results accrued in that time """ return self._wait(timeout) def _wait(self, timeout, first_n=None): results = [] try: with GreenAsyncPileWaitallTimeout(timeout): while True: results.append(next(self)) if first_n and len(results) >= first_n: break except (GreenAsyncPileWaitallTimeout, StopIteration): pass return results def __iter__(self): return self def next(self): try: rv = self._responses.get_nowait() except eventlet.queue.Empty: if self._inflight == 0: raise StopIteration() rv = self._responses.get() self._pending -= 1 return rv __next__ = next class StreamingPile(GreenAsyncPile): """ Runs jobs in a pool of green threads, spawning more jobs as results are retrieved and worker threads become available. When used as a context manager, has the same worker-killing properties as :class:`ContextPool`. 
""" def __init__(self, size): """:param size: number of worker threads to use""" self.pool = ContextPool(size) super(StreamingPile, self).__init__(self.pool) def asyncstarmap(self, func, args_iter): """ This is the same as :func:`itertools.starmap`, except that *func* is executed in a separate green thread for each item, and results won't necessarily have the same order as inputs. """ args_iter = iter(args_iter) # Initialize the pile for args in itertools.islice(args_iter, self.pool.size): self.spawn(func, *args) # Keep populating the pile as greenthreads become available for args in args_iter: yield next(self) self.spawn(func, *args) # Drain the pile for result in self: yield result def __enter__(self): self.pool.__enter__() return self def __exit__(self, type, value, traceback): self.pool.__exit__(type, value, traceback) class ModifiedParseResult(ParseResult): """Parse results class for urlparse.""" @property def hostname(self): netloc = self.netloc.split('@', 1)[-1] if netloc.startswith('['): return netloc[1:].split(']')[0] elif ':' in netloc: return netloc.rsplit(':')[0] return netloc @property def port(self): netloc = self.netloc.split('@', 1)[-1] if netloc.startswith('['): netloc = netloc.rsplit(']')[1] if ':' in netloc: return int(netloc.rsplit(':')[1]) return None def urlparse(url): """ urlparse augmentation. This is necessary because urlparse can't handle RFC 2732 URLs. :param url: URL to parse. """ return ModifiedParseResult(*stdlib_urlparse(url)) def validate_sync_to(value, allowed_sync_hosts, realms_conf): """ Validates an X-Container-Sync-To header value, returning the validated endpoint, realm, and realm_key, or an error string. :param value: The X-Container-Sync-To header value to validate. :param allowed_sync_hosts: A list of allowed hosts in endpoints, if realms_conf does not apply. :param realms_conf: A instance of swift.common.container_sync_realms.ContainerSyncRealms to validate against. :returns: A tuple of (error_string, validated_endpoint, realm, realm_key). The error_string will None if the rest of the values have been validated. The validated_endpoint will be the validated endpoint to sync to. The realm and realm_key will be set if validation was done through realms_conf. 
""" orig_value = value value = value.rstrip('/') if not value: return (None, None, None, None) if value.startswith('//'): if not realms_conf: return (None, None, None, None) data = value[2:].split('/') if len(data) != 4: return ( _('Invalid X-Container-Sync-To format %r') % orig_value, None, None, None) realm, cluster, account, container = data realm_key = realms_conf.key(realm) if not realm_key: return (_('No realm key for %r') % realm, None, None, None) endpoint = realms_conf.endpoint(realm, cluster) if not endpoint: return ( _('No cluster endpoint for %(realm)r %(cluster)r') % {'realm': realm, 'cluster': cluster}, None, None, None) return ( None, '%s/%s/%s' % (endpoint.rstrip('/'), account, container), realm.upper(), realm_key) p = urlparse(value) if p.scheme not in ('http', 'https'): return ( _('Invalid scheme %r in X-Container-Sync-To, must be "//", ' '"http", or "https".') % p.scheme, None, None, None) if not p.path: return (_('Path required in X-Container-Sync-To'), None, None, None) if p.params or p.query or p.fragment: return ( _('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To'), None, None, None) if p.hostname not in allowed_sync_hosts: return ( _('Invalid host %r in X-Container-Sync-To') % p.hostname, None, None, None) return (None, value, None, None) def affinity_key_function(affinity_str): """Turns an affinity config value into a function suitable for passing to sort(). After doing so, the array will be sorted with respect to the given ordering. For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array will be sorted with all nodes from region 1 (r1=1) first, then all the nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything else. Note that the order of the pieces of affinity_str is irrelevant; the priority values are what comes after the equals sign. If affinity_str is empty or all whitespace, then the resulting function will not alter the ordering of the nodes. :param affinity_str: affinity config value, e.g. "r1z2=3" or "r1=1, r2z1=2, r2z2=2" :returns: single-argument function :raises ValueError: if argument invalid """ affinity_str = affinity_str.strip() if not affinity_str: return lambda x: 0 priority_matchers = [] pieces = [s.strip() for s in affinity_str.split(',')] for piece in pieces: # matches r= or rz= match = re.match("r(\d+)(?:z(\d+))?=(\d+)$", piece) if match: region, zone, priority = match.groups() region = int(region) priority = int(priority) zone = int(zone) if zone else None matcher = {'region': region, 'priority': priority} if zone is not None: matcher['zone'] = zone priority_matchers.append(matcher) else: raise ValueError("Invalid affinity value: %r" % affinity_str) priority_matchers.sort(key=operator.itemgetter('priority')) def keyfn(ring_node): for matcher in priority_matchers: if (matcher['region'] == ring_node['region'] and ('zone' not in matcher or matcher['zone'] == ring_node['zone'])): return matcher['priority'] return 4294967296 # 2^32, i.e. "a big number" return keyfn def affinity_locality_predicate(write_affinity_str): """ Turns a write-affinity config value into a predicate function for nodes. The returned value will be a 1-arg function that takes a node dictionary and returns a true value if it is "local" and a false value otherwise. The definition of "local" comes from the affinity_str argument passed in here. For example, if affinity_str is "r1, r2z2", then only nodes where region=1 or where (region=2 and zone=2) are considered local. 
If affinity_str is empty or all whitespace, then the resulting function
    will consider everything local.

    :param write_affinity_str: affinity config value, e.g. "r1z2"
                               or "r1, r2z1, r2z2"
    :returns: single-argument function, or None if affinity_str is empty
    :raises ValueError: if argument invalid
    """
    affinity_str = write_affinity_str.strip()

    if not affinity_str:
        return None

    matchers = []
    pieces = [s.strip() for s in affinity_str.split(',')]
    for piece in pieces:
        # matches r<number> or r<number>z<number>
        match = re.match("r(\d+)(?:z(\d+))?$", piece)
        if match:
            region, zone = match.groups()
            region = int(region)
            zone = int(zone) if zone else None

            matcher = {'region': region}
            if zone is not None:
                matcher['zone'] = zone
            matchers.append(matcher)
        else:
            raise ValueError("Invalid write-affinity value: %r" %
                             affinity_str)

    def is_local(ring_node):
        for matcher in matchers:
            if (matcher['region'] == ring_node['region']
                    and ('zone' not in matcher
                         or matcher['zone'] == ring_node['zone'])):
                return True
        return False
    return is_local


def get_remote_client(req):
    # remote host for zeus
    client = req.headers.get('x-cluster-client-ip')
    if not client and 'x-forwarded-for' in req.headers:
        # remote host for other lbs
        client = req.headers['x-forwarded-for'].split(',')[0].strip()
    if not client:
        client = req.remote_addr
    return client


def human_readable(value):
    """
    Returns the number in a human readable format; for example 1048576 =
    "1Mi".
    """
    value = float(value)
    index = -1
    suffixes = 'KMGTPEZY'
    while value >= 1024 and index + 1 < len(suffixes):
        index += 1
        value = round(value / 1024)
    if index == -1:
        return '%d' % value
    return '%d%si' % (round(value), suffixes[index])


def put_recon_cache_entry(cache_entry, key, item):
    """
    Update a recon cache entry item.

    If ``item`` is an empty dict then any existing ``key`` in ``cache_entry``
    will be deleted. Similarly if ``item`` is a dict and any of its values
    are empty dicts then the corresponding key will be deleted from the
    nested dict in ``cache_entry``.

    We use nested recon cache entries when the object auditor runs in
    parallel or else in 'once' mode with a specified subset of devices.
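    Example (illustrative)::

        entry = {}
        put_recon_cache_entry(entry, 'stats', {'disk1': {'errors': 2}})
        # entry == {'stats': {'disk1': {'errors': 2}}}
        put_recon_cache_entry(entry, 'stats', {'disk1': {}})
        # entry == {'stats': {}} - the empty dict deletes the sub-key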

    :param cache_entry: a dict of existing cache entries
    :param key: key for item to update
    :param item: value for item to update
    """
    if isinstance(item, dict):
        if not item:
            cache_entry.pop(key, None)
            return
        if key not in cache_entry or key in cache_entry and not \
                isinstance(cache_entry[key], dict):
            cache_entry[key] = {}
        for k, v in item.items():
            if v == {}:
                cache_entry[key].pop(k, None)
            else:
                cache_entry[key][k] = v
    else:
        cache_entry[key] = item


def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2,
                     set_owner=None):
    """Update recon cache values

    :param cache_dict: Dictionary of cache key/value pairs to write out
    :param cache_file: cache file to update
    :param logger: the logger to use to log an encountered error
    :param lock_timeout: timeout (in seconds)
    :param set_owner: Set owner of recon cache file
    """
    try:
        with lock_file(cache_file, lock_timeout, unlink=False) as cf:
            cache_entry = {}
            try:
                existing_entry = cf.readline()
                if existing_entry:
                    cache_entry = json.loads(existing_entry)
            except ValueError:
                # file doesn't have a valid entry, we'll recreate it
                pass
            for cache_key, cache_value in cache_dict.items():
                put_recon_cache_entry(cache_entry, cache_key, cache_value)
            tf = None
            try:
                with NamedTemporaryFile(dir=os.path.dirname(cache_file),
                                        delete=False) as tf:
                    tf.write(json.dumps(cache_entry, sort_keys=True) + '\n')
                if set_owner:
                    os.chown(tf.name, pwd.getpwnam(set_owner).pw_uid, -1)
                renamer(tf.name, cache_file, fsync=False)
            finally:
                if tf is not None:
                    try:
                        os.unlink(tf.name)
                    except OSError as err:
                        if err.errno != errno.ENOENT:
                            raise
    except (Exception, Timeout):
        logger.exception(_('Exception dumping recon cache'))


def listdir(path):
    try:
        return os.listdir(path)
    except OSError as err:
        if err.errno != errno.ENOENT:
            raise
    return []


def streq_const_time(s1, s2):
    """Constant-time string comparison.

    :param s1: the first string
    :param s2: the second string

    :return: True if the strings are equal.

    This function takes two strings and compares them.  It is intended to be
    used when doing a comparison for authentication purposes to help guard
    against timing attacks.
    """
    if len(s1) != len(s2):
        return False
    result = 0
    for (a, b) in zip(s1, s2):
        result |= ord(a) ^ ord(b)
    return result == 0


def pairs(item_list):
    """
    Returns an iterator of all pairs of elements from item_list.

    :param item_list: items (no duplicates allowed)
    """
    for i, item1 in enumerate(item_list):
        for item2 in item_list[(i + 1):]:
            yield (item1, item2)


def replication(func):
    """
    Decorator to declare which methods are accessible for different
    types of servers:

    * If option replication_server is None then this decorator
      doesn't matter.
    * If option replication_server is True then ONLY methods decorated
      with this decorator will be started.
    * If option replication_server is False then methods decorated with
      this decorator will NOT be started.

    :param func: function to mark accessible for replication
    """
    func.replication = True

    return func


def public(func):
    """
    Decorator to declare which methods are publicly accessible as HTTP
    requests

    :param func: function to make public
    """
    func.publicly_accessible = True
    return func


def majority_size(n):
    return (n // 2) + 1


def quorum_size(n):
    """
    Quorum size as it applies to services that use 'replication' for data
    integrity (Account/Container services).  Object quorum_size is defined
    on a storage policy basis.

    Number of successful backend requests needed for the proxy to consider
    the client request successful.
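
    For example::

        >>> quorum_size(3)   # any 2 of 3 replicas
        2
        >>> quorum_size(4)   # a 2-2 split still counts as success
        2
        >>> majority_size(4)  # contrast: a strict majority needs 3
        3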
""" return (n + 1) // 2 def rsync_ip(ip): """ Transform ip string to an rsync-compatible form Will return ipv4 addresses unchanged, but will nest ipv6 addresses inside square brackets. :param ip: an ip string (ipv4 or ipv6) :returns: a string ip address """ return '[%s]' % ip if is_valid_ipv6(ip) else ip def rsync_module_interpolation(template, device): """ Interpolate devices variables inside a rsync module template :param template: rsync module template as a string :param device: a device from a ring :returns: a string with all variables replaced by device attributes """ replacements = { 'ip': rsync_ip(device.get('ip', '')), 'port': device.get('port', ''), 'replication_ip': rsync_ip(device.get('replication_ip', '')), 'replication_port': device.get('replication_port', ''), 'region': device.get('region', ''), 'zone': device.get('zone', ''), 'device': device.get('device', ''), 'meta': device.get('meta', ''), } try: module = template.format(**replacements) except KeyError as e: raise ValueError('Cannot interpolate rsync_module, invalid variable: ' '%s' % e) return module def get_valid_utf8_str(str_or_unicode): """ Get valid parts of utf-8 str from str, unicode and even invalid utf-8 str :param str_or_unicode: a string or an unicode which can be invalid utf-8 """ if isinstance(str_or_unicode, six.text_type): (str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace') (valid_utf8_str, _len) = utf8_decoder(str_or_unicode, 'replace') return valid_utf8_str.encode('utf-8') def list_from_csv(comma_separated_str): """ Splits the str given and returns a properly stripped list of the comma separated values. """ if comma_separated_str: return [v.strip() for v in comma_separated_str.split(',') if v.strip()] return [] def csv_append(csv_string, item): """ Appends an item to a comma-separated string. If the comma-separated string is empty/None, just returns item. """ if csv_string: return ",".join((csv_string, item)) else: return item class CloseableChain(object): """ Like itertools.chain, but with a close method that will attempt to invoke its sub-iterators' close methods, if any. """ def __init__(self, *iterables): self.iterables = iterables def __iter__(self): return iter(itertools.chain(*(self.iterables))) def close(self): for it in self.iterables: close_method = getattr(it, 'close', None) if close_method: close_method() def reiterate(iterable): """ Consume the first item from an iterator, then re-chain it to the rest of the iterator. This is useful when you want to make sure the prologue to downstream generators have been executed before continuing. :param iterable: an iterable object """ if isinstance(iterable, (list, tuple)): return iterable else: iterator = iter(iterable) try: chunk = '' while not chunk: chunk = next(iterator) return CloseableChain([chunk], iterator) except StopIteration: return [] class InputProxy(object): """ File-like object that counts bytes read. To be swapped in for wsgi.input for accounting purposes. """ def __init__(self, wsgi_input): """ :param wsgi_input: file-like object to wrap the functionality of """ self.wsgi_input = wsgi_input self.bytes_received = 0 self.client_disconnect = False def read(self, *args, **kwargs): """ Pass read request to the underlying file-like object and add bytes read to total. 
""" try: chunk = self.wsgi_input.read(*args, **kwargs) except Exception: self.client_disconnect = True raise self.bytes_received += len(chunk) return chunk def readline(self, *args, **kwargs): """ Pass readline request to the underlying file-like object and add bytes read to total. """ try: line = self.wsgi_input.readline(*args, **kwargs) except Exception: self.client_disconnect = True raise self.bytes_received += len(line) return line class LRUCache(object): """ Decorator for size/time bound memoization that evicts the least recently used members. """ PREV, NEXT, KEY, CACHED_AT, VALUE = 0, 1, 2, 3, 4 # link fields def __init__(self, maxsize=1000, maxtime=3600): self.maxsize = maxsize self.maxtime = maxtime self.reset() def reset(self): self.mapping = {} self.head = [None, None, None, None, None] # oldest self.tail = [self.head, None, None, None, None] # newest self.head[self.NEXT] = self.tail def set_cache(self, value, *key): while len(self.mapping) >= self.maxsize: old_next, old_key = self.head[self.NEXT][self.NEXT:self.NEXT + 2] self.head[self.NEXT], old_next[self.PREV] = old_next, self.head del self.mapping[old_key] last = self.tail[self.PREV] link = [last, self.tail, key, time.time(), value] self.mapping[key] = last[self.NEXT] = self.tail[self.PREV] = link return value def get_cached(self, link, *key): link_prev, link_next, key, cached_at, value = link if cached_at + self.maxtime < time.time(): raise KeyError('%r has timed out' % (key,)) link_prev[self.NEXT] = link_next link_next[self.PREV] = link_prev last = self.tail[self.PREV] last[self.NEXT] = self.tail[self.PREV] = link link[self.PREV] = last link[self.NEXT] = self.tail return value def __call__(self, f): class LRUCacheWrapped(object): @functools.wraps(f) def __call__(im_self, *key): link = self.mapping.get(key, self.head) if link is not self.head: try: return self.get_cached(link, *key) except KeyError: pass value = f(*key) self.set_cache(value, *key) return value def size(im_self): """ Return the size of the cache """ return len(self.mapping) def reset(im_self): return self.reset() def get_maxsize(im_self): return self.maxsize def set_maxsize(im_self, i): self.maxsize = i def get_maxtime(im_self): return self.maxtime def set_maxtime(im_self, i): self.maxtime = i maxsize = property(get_maxsize, set_maxsize) maxtime = property(get_maxtime, set_maxtime) def __repr__(im_self): return '<%s %r>' % (im_self.__class__.__name__, f) return LRUCacheWrapped() class Spliterator(object): """ Takes an iterator yielding sliceable things (e.g. strings or lists) and yields subiterators, each yielding up to the requested number of items from the source. >>> si = Spliterator(["abcde", "fg", "hijkl"]) >>> ''.join(si.take(4)) "abcd" >>> ''.join(si.take(3)) "efg" >>> ''.join(si.take(1)) "h" >>> ''.join(si.take(3)) "ijk" >>> ''.join(si.take(3)) "l" # shorter than requested; this can happen with the last iterator """ def __init__(self, source_iterable): self.input_iterator = iter(source_iterable) self.leftovers = None self.leftovers_index = 0 self._iterator_in_progress = False def take(self, n): if self._iterator_in_progress: raise ValueError( "cannot call take() again until the first iterator is" " exhausted (has raised StopIteration)") self._iterator_in_progress = True try: if self.leftovers: # All this string slicing is a little awkward, but it's for # a good reason. Consider a length N string that someone is # taking k bytes at a time. # # With this implementation, we create one new string of # length k (copying the bytes) on each call to take(). 
Once # the whole input has been consumed, each byte has been # copied exactly once, giving O(N) bytes copied. # # If, instead of this, we were to set leftovers = # leftovers[k:] and omit leftovers_index, then each call to # take() would copy k bytes to create the desired substring, # then copy all the remaining bytes to reset leftovers, # resulting in an overall O(N^2) bytes copied. llen = len(self.leftovers) - self.leftovers_index if llen <= n: n -= llen to_yield = self.leftovers[self.leftovers_index:] self.leftovers = None self.leftovers_index = 0 yield to_yield else: to_yield = self.leftovers[ self.leftovers_index:(self.leftovers_index + n)] self.leftovers_index += n n = 0 yield to_yield while n > 0: chunk = next(self.input_iterator) cl = len(chunk) if cl <= n: n -= cl yield chunk else: self.leftovers = chunk self.leftovers_index = n yield chunk[:n] n = 0 finally: self._iterator_in_progress = False def tpool_reraise(func, *args, **kwargs): """ Hack to work around Eventlet's tpool not catching and reraising Timeouts. """ def inner(): try: return func(*args, **kwargs) except BaseException as err: return err resp = tpool.execute(inner) if isinstance(resp, BaseException): raise resp return resp def ismount(path): """ Test whether a path is a mount point. This will catch any exceptions and translate them into a False return value Use ismount_raw to have the exceptions raised instead. """ try: return ismount_raw(path) except OSError: return False def ismount_raw(path): """ Test whether a path is a mount point. Whereas ismount will catch any exceptions and just return False, this raw version will not catch exceptions. This is code hijacked from C Python 2.6.8, adapted to remove the extra lstat() system call. """ try: s1 = os.lstat(path) except os.error as err: if err.errno == errno.ENOENT: # It doesn't exist -- so not a mount point :-) return False raise if stat.S_ISLNK(s1.st_mode): # A symlink can never be a mount point return False s2 = os.lstat(os.path.join(path, '..')) dev1 = s1.st_dev dev2 = s2.st_dev if dev1 != dev2: # path/.. on a different device as path return True ino1 = s1.st_ino ino2 = s2.st_ino if ino1 == ino2: # path/.. is the same i-node as path return True # Device and inode checks are not properly working inside containerized # environments, therefore using a workaround to check if there is a # stubfile placed by an operator if os.path.isfile(os.path.join(path, ".ismount")): return True return False def close_if_possible(maybe_closable): close_method = getattr(maybe_closable, 'close', None) if callable(close_method): return close_method() @contextmanager def closing_if_possible(maybe_closable): """ Like contextlib.closing(), but doesn't crash if the object lacks a close() method. PEP 333 (WSGI) says: "If the iterable returned by the application has a close() method, the server or gateway must call that method upon completion of the current request[.]" This function makes that easier. """ try: yield maybe_closable finally: close_if_possible(maybe_closable) _rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' _rfc_extension_pattern = re.compile( r'(?:\s*;\s*(' + _rfc_token + r")\s*(?:=\s*(" + _rfc_token + r'|"(?:[^"\\]|\\.)*"))?)') _content_range_pattern = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$') def parse_content_range(content_range): """ Parse a content-range header into (first_byte, last_byte, total_size). See RFC 7233 section 4.2 for details on the header format, but it's basically "Content-Range: bytes ${start}-${end}/${total}". 
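
    For example::

        >>> parse_content_range("bytes 100-1249/49004")
        (100, 1249, 49004)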

    :param content_range: Content-Range header value to parse,
        e.g. "bytes 100-1249/49004"
    :returns: 3-tuple (start, end, total)
    :raises ValueError: if malformed
    """
    found = re.search(_content_range_pattern, content_range)
    if not found:
        raise ValueError("malformed Content-Range %r" % (content_range,))
    return tuple(int(x) for x in found.groups())


def parse_content_type(content_type):
    """
    Parse a content-type and its parameters into values.
    RFC 2616 sec 14.17 and 3.7 are pertinent.

    **Examples**::

        'text/plain; charset=UTF-8' -> ('text/plain', [('charset', 'UTF-8')])
        'text/plain; charset=UTF-8; level=1' ->
            ('text/plain', [('charset', 'UTF-8'), ('level', '1')])

    :param content_type: content_type to parse
    :returns: a tuple containing (content type, list of k, v parameter tuples)
    """
    parm_list = []
    if ';' in content_type:
        content_type, parms = content_type.split(';', 1)
        parms = ';' + parms
        for m in _rfc_extension_pattern.findall(parms):
            key = m[0].strip()
            value = m[1].strip()
            parm_list.append((key, value))
    return content_type, parm_list


def extract_swift_bytes(content_type):
    """
    Parse a content-type and return a tuple containing:
        - the content_type string minus any swift_bytes param,
        - the swift_bytes value or None if the param was not found

    :param content_type: a content-type string
    :return: a tuple of (content-type, swift_bytes or None)
    """
    content_type, params = parse_content_type(content_type)
    swift_bytes = None
    for k, v in params:
        if k == 'swift_bytes':
            swift_bytes = v
        else:
            content_type += ';%s=%s' % (k, v)
    return content_type, swift_bytes


def override_bytes_from_content_type(listing_dict, logger=None):
    """
    Takes a dict from a container listing and overrides the content_type,
    bytes fields if swift_bytes is set.
    """
    listing_dict['content_type'], swift_bytes = extract_swift_bytes(
        listing_dict['content_type'])
    if swift_bytes is not None:
        try:
            listing_dict['bytes'] = int(swift_bytes)
        except ValueError:
            if logger:
                logger.exception(_("Invalid swift_bytes"))


def clean_content_type(value):
    if ';' in value:
        left, right = value.rsplit(';', 1)
        if right.lstrip().startswith('swift_bytes='):
            return left
    return value


def quote(value, safe='/'):
    """
    Patched version of urllib.quote that encodes utf-8 strings before quoting
    """
    return _quote(get_valid_utf8_str(value), safe)


def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj):
    """
    Returns an expiring object container name for given X-Delete-At and
    a/c/o.
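
    For example (illustrative numbers; the real offset comes from
    hash_path, so it varies per object)::

        get_expirer_container('1518000123', 86400, 'a', 'c', 'o')
        # 1518000123 rounds down to 1517961600 on the 86400-second
        # divisor, then a per-object 0-99 shard offset is subtracted,
        # giving a name such as '1517961503'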
""" shard_int = int(hash_path(acc, cont, obj), 16) % 100 return normalize_delete_at_timestamp( int(x_delete_at) / expirer_divisor * expirer_divisor - shard_int) class _MultipartMimeFileLikeObject(object): def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size): self.no_more_data_for_this_file = False self.no_more_files = False self.wsgi_input = wsgi_input self.boundary = boundary self.input_buffer = input_buffer self.read_chunk_size = read_chunk_size def read(self, length=None): if not length: length = self.read_chunk_size if self.no_more_data_for_this_file: return b'' # read enough data to know whether we're going to run # into a boundary in next [length] bytes if len(self.input_buffer) < length + len(self.boundary) + 2: to_read = length + len(self.boundary) + 2 while to_read > 0: try: chunk = self.wsgi_input.read(to_read) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) to_read -= len(chunk) self.input_buffer += chunk if not chunk: self.no_more_files = True break boundary_pos = self.input_buffer.find(self.boundary) # boundary does not exist in the next (length) bytes if boundary_pos == -1 or boundary_pos > length: ret = self.input_buffer[:length] self.input_buffer = self.input_buffer[length:] # if it does, just return data up to the boundary else: ret, self.input_buffer = self.input_buffer.split(self.boundary, 1) self.no_more_files = self.input_buffer.startswith(b'--') self.no_more_data_for_this_file = True self.input_buffer = self.input_buffer[2:] return ret def readline(self): if self.no_more_data_for_this_file: return b'' boundary_pos = newline_pos = -1 while newline_pos < 0 and boundary_pos < 0: try: chunk = self.wsgi_input.read(self.read_chunk_size) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) self.input_buffer += chunk newline_pos = self.input_buffer.find(b'\r\n') boundary_pos = self.input_buffer.find(self.boundary) if not chunk: self.no_more_files = True break # found a newline if newline_pos >= 0 and \ (boundary_pos < 0 or newline_pos < boundary_pos): # Use self.read to ensure any logic there happens... ret = b'' to_read = newline_pos + 2 while to_read > 0: chunk = self.read(to_read) # Should never happen since we're reading from input_buffer, # but just for completeness... if not chunk: break to_read -= len(chunk) ret += chunk return ret else: # no newlines, just return up to next boundary return self.read(len(self.input_buffer)) def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096): """ Given a multi-part-mime-encoded input file object and boundary, yield file-like objects for each part. Note that this does not split each part into headers and body; the caller is responsible for doing that if necessary. :param wsgi_input: The file-like object to read from. :param boundary: The mime boundary to separate new file-like objects on. :returns: A generator of file-like objects for each part. 

    :raises MimeInvalid: if the document is malformed
    """
    boundary = '--' + boundary
    blen = len(boundary) + 2  # \r\n
    try:
        got = wsgi_input.readline(blen)
        while got == '\r\n':
            got = wsgi_input.readline(blen)
    except (IOError, ValueError) as e:
        raise swift.common.exceptions.ChunkReadError(str(e))

    if got.strip() != boundary:
        raise swift.common.exceptions.MimeInvalid(
            'invalid starting boundary: wanted %r, got %r'
            % (boundary, got))
    boundary = '\r\n' + boundary
    input_buffer = ''
    done = False
    while not done:
        it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer,
                                          read_chunk_size)
        yield it
        done = it.no_more_files
        input_buffer = it.input_buffer


def parse_mime_headers(doc_file):
    """
    Takes a file-like object containing a MIME document and returns a
    HeaderKeyDict containing the headers. The body of the message is not
    consumed: the position in doc_file is left at the beginning of the body.

    This function was inspired by the Python standard library's
    http.client.parse_headers.

    :param doc_file: binary file-like object containing a MIME document
    :returns: a swift.common.swob.HeaderKeyDict containing the headers
    """
    headers = []
    while True:
        line = doc_file.readline()
        done = line in (b'\r\n', b'\n', b'')
        if six.PY3:
            try:
                line = line.decode('utf-8')
            except UnicodeDecodeError:
                line = line.decode('latin1')
        headers.append(line)
        if done:
            break
    if six.PY3:
        header_string = ''.join(headers)
    else:
        header_string = b''.join(headers)
    headers = email.parser.Parser().parsestr(header_string)
    return HeaderKeyDict(headers)


def mime_to_document_iters(input_file, boundary, read_chunk_size=4096):
    """
    Takes a file-like object containing a multipart MIME document and
    returns an iterator of (headers, body-file) tuples.

    :param input_file: file-like object with the MIME doc in it
    :param boundary: MIME boundary, sans dashes
        (e.g. "divider", not "--divider")
    :param read_chunk_size: size of strings read via input_file.read()
    """
    doc_files = iter_multipart_mime_documents(input_file, boundary,
                                              read_chunk_size)
    for i, doc_file in enumerate(doc_files):
        # this consumes the headers and leaves just the body in doc_file
        headers = parse_mime_headers(doc_file)
        yield (headers, doc_file)


def maybe_multipart_byteranges_to_document_iters(app_iter, content_type):
    """
    Takes an iterator that may or may not contain a multipart MIME document
    as well as content type and returns an iterator of body iterators.

    :param app_iter: iterator that may contain a multipart MIME document
    :param content_type: content type of the app_iter, used to determine
                         whether it contains a multipart document and, if
                         so, what the boundary is between documents
    """
    content_type, params_list = parse_content_type(content_type)
    if content_type != 'multipart/byteranges':
        yield app_iter
        return

    body_file = FileLikeIter(app_iter)
    boundary = dict(params_list)['boundary']
    for _headers, body in mime_to_document_iters(body_file, boundary):
        yield (chunk for chunk in iter(lambda: body.read(65536), ''))


def document_iters_to_multipart_byteranges(ranges_iter, boundary):
    """
    Takes an iterator of range iters and yields a multipart/byteranges MIME
    document suitable for sending as the body of a multi-range 206 response.

    See document_iters_to_http_response_body for parameter descriptions.
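
    Each ``range_spec`` yielded by ``ranges_iter`` is a dict like this
    hypothetical example::

        {"start_byte": 0, "end_byte": 3, "entity_length": 100,
         "content_type": "text/plain", "part_iter": iter(["body"])}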
""" divider = "--" + boundary + "\r\n" terminator = "--" + boundary + "--" for range_spec in ranges_iter: start_byte = range_spec["start_byte"] end_byte = range_spec["end_byte"] entity_length = range_spec.get("entity_length", "*") content_type = range_spec["content_type"] part_iter = range_spec["part_iter"] part_header = ''.join(( divider, "Content-Type: ", str(content_type), "\r\n", "Content-Range: ", "bytes %d-%d/%s\r\n" % ( start_byte, end_byte, entity_length), "\r\n" )) yield part_header for chunk in part_iter: yield chunk yield "\r\n" yield terminator def document_iters_to_http_response_body(ranges_iter, boundary, multipart, logger): """ Takes an iterator of range iters and turns it into an appropriate HTTP response body, whether that's multipart/byteranges or not. This is almost, but not quite, the inverse of request_helpers.http_response_to_document_iters(). This function only yields chunks of the body, not any headers. :param ranges_iter: an iterator of dictionaries, one per range. Each dictionary must contain at least the following key: "part_iter": iterator yielding the bytes in the range Additionally, if multipart is True, then the following other keys are required: "start_byte": index of the first byte in the range "end_byte": index of the last byte in the range "content_type": value for the range's Content-Type header Finally, there is one optional key that is used in the multipart/byteranges case: "entity_length": length of the requested entity (not necessarily equal to the response length). If omitted, "*" will be used. Each part_iter will be exhausted prior to calling next(ranges_iter). :param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not "--boundary"). :param multipart: True if the response should be multipart/byteranges, False otherwise. This should be True if and only if you have 2 or more ranges. :param logger: a logger """ if multipart: return document_iters_to_multipart_byteranges(ranges_iter, boundary) else: try: response_body_iter = next(ranges_iter)['part_iter'] except StopIteration: return '' # We need to make sure ranges_iter does not get garbage-collected # before response_body_iter is exhausted. The reason is that # ranges_iter has a finally block that calls close_swift_conn, and # so if that finally block fires before we read response_body_iter, # there's nothing there. def string_along(useful_iter, useless_iter_iter, logger): with closing_if_possible(useful_iter): for x in useful_iter: yield x try: next(useless_iter_iter) except StopIteration: pass else: logger.warning( _("More than one part in a single-part response?")) return string_along(response_body_iter, ranges_iter, logger) def multipart_byteranges_to_document_iters(input_file, boundary, read_chunk_size=4096): """ Takes a file-like object containing a multipart/byteranges MIME document (see RFC 7233, Appendix A) and returns an iterator of (first-byte, last-byte, length, document-headers, body-file) 5-tuples. :param input_file: file-like object with the MIME doc in it :param boundary: MIME boundary, sans dashes (e.g. "divider", not "--divider") :param read_chunk_size: size of strings read via input_file.read() """ for headers, body in mime_to_document_iters(input_file, boundary, read_chunk_size): first_byte, last_byte, length = parse_content_range( headers.get('content-range')) yield (first_byte, last_byte, length, headers.items(), body) #: Regular expression to match form attributes. 
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)') def parse_content_disposition(header): """ Given the value of a header like: Content-Disposition: form-data; name="somefile"; filename="test.html" Return data like ("form-data", {"name": "somefile", "filename": "test.html"}) :param header: Value of a header (the part after the ': '). :returns: (value name, dict) of the attribute data parsed (see above). """ attributes = {} attrs = '' if ';' in header: header, attrs = [x.strip() for x in header.split(';', 1)] m = True while m: m = ATTRIBUTES_RE.match(attrs) if m: attrs = attrs[len(m.group(0)):] attributes[m.group(1)] = m.group(2).strip('"') return header, attributes class sockaddr_alg(ctypes.Structure): _fields_ = [("salg_family", ctypes.c_ushort), ("salg_type", ctypes.c_ubyte * 14), ("salg_feat", ctypes.c_uint), ("salg_mask", ctypes.c_uint), ("salg_name", ctypes.c_ubyte * 64)] _bound_md5_sockfd = None def get_md5_socket(): """ Get an MD5 socket file descriptor. One can MD5 data with it by writing it to the socket with os.write, then os.read the 16 bytes of the checksum out later. NOTE: It is the caller's responsibility to ensure that os.close() is called on the returned file descriptor. This is a bare file descriptor, not a Python object. It doesn't close itself. """ # Linux's AF_ALG sockets work like this: # # First, initialize a socket with socket() and bind(). This tells the # socket what algorithm to use, as well as setting up any necessary bits # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the # algorithm name. # # Second, to hash some data, get a second socket by calling accept() on # the first socket. Write data to the socket, then when finished, read the # checksum from the socket and close it. This lets you checksum multiple # things without repeating all the setup code each time. # # Since we only need to bind() one socket, we do that here and save it for # future re-use. That way, we only use one file descriptor to get an MD5 # socket instead of two, and we also get to save some syscalls. global _bound_md5_sockfd global _libc_socket global _libc_bind global _libc_accept if _libc_accept is None: _libc_accept = load_libc_function('accept', fail_if_missing=True) if _libc_socket is None: _libc_socket = load_libc_function('socket', fail_if_missing=True) if _libc_bind is None: _libc_bind = load_libc_function('bind', fail_if_missing=True) # Do this at first call rather than at import time so that we don't use a # file descriptor on systems that aren't using any MD5 sockets. if _bound_md5_sockfd is None: sockaddr_setup = sockaddr_alg( AF_ALG, (ord('h'), ord('a'), ord('s'), ord('h'), 0), 0, 0, (ord('m'), ord('d'), ord('5'), 0)) hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG), ctypes.c_int(socket.SOCK_SEQPACKET), ctypes.c_int(0)) if hash_sockfd < 0: raise IOError(ctypes.get_errno(), "Failed to initialize MD5 socket") bind_result = _libc_bind(ctypes.c_int(hash_sockfd), ctypes.pointer(sockaddr_setup), ctypes.c_int(ctypes.sizeof(sockaddr_alg))) if bind_result < 0: os.close(hash_sockfd) raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket") _bound_md5_sockfd = hash_sockfd md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0) if md5_sockfd < 0: raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket") return md5_sockfd def modify_priority(conf, logger): """ Modify priority by nice and ionice. 
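
    A daemon config might contain (illustrative values)::

        [DEFAULT]
        nice_priority = 10
        ionice_class = IOPRIO_CLASS_BE
        ionice_priority = 4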
""" global _libc_setpriority if _libc_setpriority is None: _libc_setpriority = load_libc_function('setpriority', errcheck=True) def _setpriority(nice_priority): """ setpriority for this pid :param nice_priority: valid values are -19 to 20 """ try: _libc_setpriority(PRIO_PROCESS, os.getpid(), int(nice_priority)) except (ValueError, OSError): print(_("WARNING: Unable to modify scheduling priority of process." " Keeping unchanged! Check logs for more info. ")) logger.exception('Unable to modify nice priority') else: logger.debug('set nice priority to %s' % nice_priority) nice_priority = conf.get('nice_priority') if nice_priority is not None: _setpriority(nice_priority) global _posix_syscall if _posix_syscall is None: _posix_syscall = load_libc_function('syscall', errcheck=True) def _ioprio_set(io_class, io_priority): """ ioprio_set for this process :param io_class: the I/O class component, can be IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, or IOPRIO_CLASS_IDLE :param io_priority: priority value in the I/O class """ try: io_class = IO_CLASS_ENUM[io_class] io_priority = int(io_priority) _posix_syscall(NR_ioprio_set(), IOPRIO_WHO_PROCESS, os.getpid(), IOPRIO_PRIO_VALUE(io_class, io_priority)) except (KeyError, ValueError, OSError): print(_("WARNING: Unable to modify I/O scheduling class " "and priority of process. Keeping unchanged! " "Check logs for more info.")) logger.exception("Unable to modify ionice priority") else: logger.debug('set ionice class %s priority %s', io_class, io_priority) io_class = conf.get("ionice_class") if io_class is None: return io_priority = conf.get("ionice_priority", 0) _ioprio_set(io_class, io_priority) def o_tmpfile_supported(): """ Returns True if O_TMPFILE flag is supported. O_TMPFILE was introduced in Linux 3.11 but it also requires support from underlying filesystem being used. Some common filesystems and linux versions in which those filesystems added support for O_TMPFILE: xfs (3.15) ext4 (3.11) btrfs (3.16) """ return all([linkat.available, platform.system() == 'Linux', LooseVersion(platform.release()) >= LooseVersion('3.16')]) def safe_json_loads(value): if value: try: return json.loads(value) except (TypeError, ValueError): pass return None def strict_b64decode(value, allow_line_breaks=False): ''' Validate and decode Base64-encoded data. The stdlib base64 module silently discards bad characters, but we often want to treat them as an error. :param value: some base64-encoded data :param allow_line_breaks: if True, ignore carriage returns and newlines :returns: the decoded data :raises ValueError: if ``value`` is not a string, contains invalid characters, or has insufficient padding ''' if not isinstance(value, six.string_types): raise ValueError # b64decode will silently discard bad characters, but we want to # treat them as an error valid_chars = string.digits + string.ascii_letters + '/+' strip_chars = '=' if allow_line_breaks: valid_chars += '\r\n' strip_chars += '\r\n' if any(c not in valid_chars for c in value.strip(strip_chars)): raise ValueError try: return base64.b64decode(value) except (TypeError, binascii.Error): # (py2 error, py3 error) raise ValueError MD5_BLOCK_READ_BYTES = 4096 def md5_hash_for_file(fname): """ Get the MD5 checksum of a file. 
:param fname: path to file :returns: MD5 checksum, hex encoded """ with open(fname, 'rb') as f: md5sum = md5() for block in iter(lambda: f.read(MD5_BLOCK_READ_BYTES), ''): md5sum.update(block) return md5sum.hexdigest() def replace_partition_in_path(path, part_power): """ Takes a full path to a file and a partition power and returns the same path, but with the correct partition number. Most useful when increasing the partition power. :param path: full path to a file, for example object .data file :param part_power: partition power to compute correct partition number :returns: Path with re-computed partition power """ path_components = path.split(os.sep) digest = binascii.unhexlify(path_components[-2]) part_shift = 32 - int(part_power) part = struct.unpack_from('>I', digest)[0] >> part_shift path_components[-4] = "%d" % part return os.sep.join(path_components) class PipeMutex(object): """ Mutex using a pipe. Works across both greenlets and real threads, even at the same time. """ def __init__(self): self.rfd, self.wfd = os.pipe() # You can't create a pipe in non-blocking mode; you must set it # later. rflags = fcntl.fcntl(self.rfd, fcntl.F_GETFL) fcntl.fcntl(self.rfd, fcntl.F_SETFL, rflags | os.O_NONBLOCK) os.write(self.wfd, b'-') # start unlocked self.owner = None self.recursion_depth = 0 # Usually, it's an error to have multiple greenthreads all waiting # to read the same file descriptor. It's often a sign of inadequate # concurrency control; for example, if you have two greenthreads # trying to use the same memcache connection, they'll end up writing # interleaved garbage to the socket or stealing part of each others' # responses. # # In this case, we have multiple greenthreads waiting on the same # file descriptor by design. This lets greenthreads in real thread A # wait with greenthreads in real thread B for the same mutex. # Therefore, we must turn off eventlet's multiple-reader detection. # # It would be better to turn off multiple-reader detection for only # our calls to trampoline(), but eventlet does not support that. eventlet.debug.hub_prevent_multiple_readers(False) def acquire(self, blocking=True): """ Acquire the mutex. If called with blocking=False, returns True if the mutex was acquired and False if it wasn't. Otherwise, blocks until the mutex is acquired and returns True. This lock is recursive; the same greenthread may acquire it as many times as it wants to, though it must then release it that many times too. """ current_greenthread_id = id(eventlet.greenthread.getcurrent()) if self.owner == current_greenthread_id: self.recursion_depth += 1 return True while True: try: # If there is a byte available, this will read it and remove # it from the pipe. If not, this will raise OSError with # errno=EAGAIN. os.read(self.rfd, 1) self.owner = current_greenthread_id return True except OSError as err: if err.errno != errno.EAGAIN: raise if not blocking: return False # Tell eventlet to suspend the current greenthread until # self.rfd becomes readable. This will happen when someone # else writes to self.wfd. trampoline(self.rfd, read=True) def release(self): """ Release the mutex. """ current_greenthread_id = id(eventlet.greenthread.getcurrent()) if self.owner != current_greenthread_id: raise RuntimeError("cannot release un-acquired lock") if self.recursion_depth > 0: self.recursion_depth -= 1 return self.owner = None os.write(self.wfd, b'X') def close(self): """ Close the mutex. This releases its file descriptors. You can't use a mutex after it's been closed. 
""" if self.wfd is not None: os.close(self.rfd) self.rfd = None os.close(self.wfd) self.wfd = None self.owner = None self.recursion_depth = 0 def __del__(self): # We need this so we don't leak file descriptors. Otherwise, if you # call get_logger() and don't explicitly dispose of it by calling # logger.logger.handlers[0].lock.close() [1], the pipe file # descriptors are leaked. # # This only really comes up in tests. Swift processes tend to call # get_logger() once and then hang on to it until they exit, but the # test suite calls get_logger() a lot. # # [1] and that's a completely ridiculous thing to expect callers to # do, so nobody does it and that's okay. self.close() class ThreadSafeSysLogHandler(SysLogHandler): def createLock(self): self.lock = PipeMutex() swift-2.17.0/swift/common/wsgi.py0000666000175100017510000012642513236061617016751 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """WSGI tools for use with swift.""" from __future__ import print_function import errno import inspect import os import signal import time from swift import gettext_ as _ from textwrap import dedent import eventlet import eventlet.debug from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout from paste.deploy import loadwsgi from eventlet.green import socket, ssl, os as green_os import six from six import BytesIO from six import StringIO from six.moves.urllib.parse import unquote if six.PY2: import mimetools from swift.common import utils, constraints from swift.common.storage_policy import BindPortsCache from swift.common.swob import Request from swift.common.utils import capture_stdio, disable_fallocate, \ drop_privileges, get_logger, NullLogger, config_true_value, \ validate_configuration, get_hub, config_auto_int_value, \ reiterate # Set maximum line size of message headers to be accepted. wsgi.MAX_HEADER_LINE = constraints.MAX_HEADER_SIZE try: import multiprocessing CPU_COUNT = multiprocessing.cpu_count() or 1 except (ImportError, NotImplementedError): CPU_COUNT = 1 class NamedConfigLoader(loadwsgi.ConfigLoader): """ Patch paste.deploy's ConfigLoader so each context object will know what config section it came from. """ def get_context(self, object_type, name=None, global_conf=None): context = super(NamedConfigLoader, self).get_context( object_type, name=name, global_conf=global_conf) context.name = name context.local_conf['__name__'] = name return context loadwsgi.ConfigLoader = NamedConfigLoader class ConfigDirLoader(NamedConfigLoader): """ Read configuration from multiple files under the given path. 
""" def __init__(self, conf_dir): # parent class uses filename attribute when building error messages self.filename = conf_dir = conf_dir.strip() defaults = { 'here': os.path.normpath(os.path.abspath(conf_dir)), '__file__': os.path.abspath(conf_dir) } self.parser = loadwsgi.NicerConfigParser(conf_dir, defaults=defaults) self.parser.optionxform = str # Don't lower-case keys utils.read_conf_dir(self.parser, conf_dir) def _loadconfigdir(object_type, uri, path, name, relative_to, global_conf): if relative_to: path = os.path.normpath(os.path.join(relative_to, path)) loader = ConfigDirLoader(path) if global_conf: loader.update_defaults(global_conf, overwrite=False) return loader.get_context(object_type, name, global_conf) # add config_dir parsing to paste.deploy loadwsgi._loaders['config_dir'] = _loadconfigdir class ConfigString(NamedConfigLoader): """ Wrap a raw config string up for paste.deploy. If you give one of these to our loadcontext (e.g. give it to our appconfig) we'll intercept it and get it routed to the right loader. """ def __init__(self, config_string): self.contents = StringIO(dedent(config_string)) self.filename = "string" defaults = { 'here': "string", '__file__': self.contents, } self.parser = loadwsgi.NicerConfigParser("string", defaults=defaults) self.parser.optionxform = str # Don't lower-case keys self.parser.readfp(self.contents) def wrap_conf_type(f): """ Wrap a function whos first argument is a paste.deploy style config uri, such that you can pass it an un-adorned raw filesystem path (or config string) and the config directive (either config:, config_dir:, or config_str:) will be added automatically based on the type of entity (either a file or directory, or if no such entity on the file system - just a string) before passing it through to the paste.deploy function. """ def wrapper(conf_path, *args, **kwargs): if os.path.isdir(conf_path): conf_type = 'config_dir' else: conf_type = 'config' conf_uri = '%s:%s' % (conf_type, conf_path) return f(conf_uri, *args, **kwargs) return wrapper appconfig = wrap_conf_type(loadwsgi.appconfig) def monkey_patch_mimetools(): """ mimetools.Message defaults content-type to "text/plain" This changes it to default to None, so we can detect missing headers. 
""" if six.PY3: # The mimetools has been removed from Python 3 return orig_parsetype = mimetools.Message.parsetype def parsetype(self): if not self.typeheader: self.type = None self.maintype = None self.subtype = None self.plisttext = '' else: orig_parsetype(self) parsetype.patched = True if not getattr(mimetools.Message.parsetype, 'patched', None): mimetools.Message.parsetype = parsetype def get_socket(conf): """Bind socket to bind ip:port in conf :param conf: Configuration dict to read settings from :returns: a socket object as returned from socket.listen or ssl.wrap_socket if conf specifies cert_file """ try: bind_port = int(conf['bind_port']) except (ValueError, KeyError, TypeError): raise ConfigFilePortError() bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port) address_family = [addr[0] for addr in socket.getaddrinfo( bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) if addr[0] in (socket.AF_INET, socket.AF_INET6)][0] sock = None bind_timeout = int(conf.get('bind_timeout', 30)) retry_until = time.time() + bind_timeout warn_ssl = False while not sock and time.time() < retry_until: try: sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)), family=address_family) if 'cert_file' in conf: warn_ssl = True sock = ssl.wrap_socket(sock, certfile=conf['cert_file'], keyfile=conf['key_file']) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise sleep(0.1) if not sock: raise Exception(_('Could not bind to %(addr)s:%(port)s ' 'after trying for %(timeout)s seconds') % { 'addr': bind_addr[0], 'port': bind_addr[1], 'timeout': bind_timeout}) # in my experience, sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600) if warn_ssl: ssl_warning_message = _('WARNING: SSL should only be enabled for ' 'testing purposes. Use external SSL ' 'termination for a production deployment.') get_logger(conf).warning(ssl_warning_message) print(ssl_warning_message) return sock class RestrictedGreenPool(GreenPool): """ Works the same as GreenPool, but if the size is specified as one, then the spawn_n() method will invoke waitall() before returning to prevent the caller from doing any other work (like calling accept()). """ def __init__(self, size=1024): super(RestrictedGreenPool, self).__init__(size=size) self._rgp_do_wait = (size == 1) def spawn_n(self, *args, **kwargs): super(RestrictedGreenPool, self).spawn_n(*args, **kwargs) if self._rgp_do_wait: self.waitall() def pipeline_property(name, **kwargs): """ Create a property accessor for the given name. The property will dig through the bound instance on which it was accessed for an attribute "app" and check that object for an attribute of the given name. If the "app" object does not have such an attribute, it will look for an attribute "app" on THAT object and continue it's search from there. If the named attribute cannot be found accessing the property will raise AttributeError. If a default kwarg is provided you get that instead of the AttributeError. When found the attribute will be cached on instance with the property accessor using the same name as the attribute prefixed with a leading underscore. 
""" cache_attr_name = '_%s' % name def getter(self): cached_value = getattr(self, cache_attr_name, None) if cached_value: return cached_value app = self # first app is on self while True: app = getattr(app, 'app', None) if not app: break try: value = getattr(app, name) except AttributeError: continue setattr(self, cache_attr_name, value) return value if 'default' in kwargs: return kwargs['default'] raise AttributeError('No apps in pipeline have a ' '%s attribute' % name) return property(getter) class PipelineWrapper(object): """ This class provides a number of utility methods for modifying the composition of a wsgi pipeline. """ def __init__(self, context): self.context = context def __contains__(self, entry_point_name): try: self.index(entry_point_name) return True except ValueError: return False def startswith(self, entry_point_name): """ Tests if the pipeline starts with the given entry point name. :param entry_point_name: entry point of middleware or app (Swift only) :returns: True if entry_point_name is first in pipeline, False otherwise """ try: first_ctx = self.context.filter_contexts[0] except IndexError: first_ctx = self.context.app_context return first_ctx.entry_point_name == entry_point_name def _format_for_display(self, ctx): # Contexts specified by pipeline= have .name set in NamedConfigLoader. if hasattr(ctx, 'name'): return ctx.name # This should not happen: a foreign context. Let's not crash. return "" def __str__(self): parts = [self._format_for_display(ctx) for ctx in self.context.filter_contexts] parts.append(self._format_for_display(self.context.app_context)) return " ".join(parts) def create_filter(self, entry_point_name): """ Creates a context for a filter that can subsequently be added to a pipeline context. :param entry_point_name: entry point of the middleware (Swift only) :returns: a filter context """ spec = 'egg:swift#' + entry_point_name ctx = loadwsgi.loadcontext(loadwsgi.FILTER, spec, global_conf=self.context.global_conf) ctx.protocol = 'paste.filter_factory' ctx.name = entry_point_name return ctx def index(self, entry_point_name): """ Returns the first index of the given entry point name in the pipeline. Raises ValueError if the given module is not in the pipeline. """ for i, ctx in enumerate(self.context.filter_contexts): if ctx.entry_point_name == entry_point_name: return i raise ValueError("%s is not in pipeline" % (entry_point_name,)) def insert_filter(self, ctx, index=0): """ Inserts a filter module into the pipeline context. :param ctx: the context to be inserted :param index: (optional) index at which filter should be inserted in the list of pipeline filters. Default is 0, which means the start of the pipeline. 
""" self.context.filter_contexts.insert(index, ctx) def loadcontext(object_type, uri, name=None, relative_to=None, global_conf=None): if isinstance(uri, loadwsgi.ConfigLoader): # bypass loadcontext's uri parsing and loader routing and # just directly return the context if global_conf: uri.update_defaults(global_conf, overwrite=False) return uri.get_context(object_type, name, global_conf) add_conf_type = wrap_conf_type(lambda x: x) return loadwsgi.loadcontext(object_type, add_conf_type(uri), name=name, relative_to=relative_to, global_conf=global_conf) def _add_pipeline_properties(app, *names): for property_name in names: if not hasattr(app, property_name): setattr(app.__class__, property_name, pipeline_property(property_name)) def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True): """ Loads a context from a config file, and if the context is a pipeline then presents the app with the opportunity to modify the pipeline. """ global_conf = global_conf or {} ctx = loadcontext(loadwsgi.APP, conf_file, global_conf=global_conf) if ctx.object_type.name == 'pipeline': # give app the opportunity to modify the pipeline context app = ctx.app_context.create() func = getattr(app, 'modify_wsgi_pipeline', None) if func and allow_modify_pipeline: func(PipelineWrapper(ctx)) return ctx.create() def load_app_config(conf_file): """ Read the app config section from a config file. :param conf_file: path to a config file :return: a dict """ app_conf = {} try: ctx = loadcontext(loadwsgi.APP, conf_file) except LookupError: pass else: app_conf.update(ctx.app_context.global_conf) app_conf.update(ctx.app_context.local_conf) return app_conf def run_server(conf, logger, sock, global_conf=None): # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on # some platforms. This locks in reported times to UTC. os.environ['TZ'] = 'UTC+0' time.tzset() wsgi.HttpProtocol.default_request_version = "HTTP/1.0" # Turn off logging requests by the underlying WSGI software. wsgi.HttpProtocol.log_request = lambda *a: None # Redirect logging other messages by the underlying WSGI software. wsgi.HttpProtocol.log_message = \ lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a) wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60) eventlet.hubs.use_hub(get_hub()) utils.eventlet_monkey_patch() eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) wsgi_logger = NullLogger() if eventlet_debug: # let eventlet.wsgi.server log to stderr wsgi_logger = None # utils.LogAdapter stashes name in server; fallback on unadapted loggers if not global_conf: if hasattr(logger, 'server'): log_name = logger.server else: log_name = logger.name global_conf = {'log_name': log_name} app = loadapp(conf['__file__'], global_conf=global_conf) max_clients = int(conf.get('max_clients', '1024')) pool = RestrictedGreenPool(size=max_clients) try: # Disable capitalizing headers in Eventlet if possible. This is # necessary for the AWS SDK to work with swift3 middleware. argspec = inspect.getargspec(wsgi.server) if 'capitalize_response_headers' in argspec.args: wsgi.server(sock, app, wsgi_logger, custom_pool=pool, capitalize_response_headers=False) else: wsgi.server(sock, app, wsgi_logger, custom_pool=pool) except socket.error as err: if err[0] != errno.EINVAL: raise pool.waitall() class WorkersStrategy(object): """ WSGI server management strategy object for a single bind port and listen socket shared by a configured number of forked-off workers. Used in :py:func:`run_wsgi`. 

    :param dict conf: Server configuration dictionary.
    :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor`
                   object.
    """

    def __init__(self, conf, logger):
        self.conf = conf
        self.logger = logger
        self.sock = None
        self.children = []
        self.worker_count = config_auto_int_value(conf.get('workers'),
                                                  CPU_COUNT)

    def loop_timeout(self):
        """
        We want to keep from busy-waiting, but we also need a non-None value
        so the main loop gets a chance to tell whether it should keep running
        or not (e.g. SIGHUP received).

        So we return 0.5.
        """

        return 0.5

    def do_bind_ports(self):
        """
        Bind the one listen socket for this strategy and drop privileges
        (since the parent process will never need to bind again).
        """

        try:
            self.sock = get_socket(self.conf)
        except ConfigFilePortError:
            msg = 'bind_port wasn\'t properly set in the config file. ' \
                'It must be explicitly set to a valid port number.'
            return msg
        drop_privileges(self.conf.get('user', 'swift'))

    def no_fork_sock(self):
        """
        Return a server listen socket if the server should run in the
        foreground (no fork).
        """

        # Useful for profiling [no forks].
        if self.worker_count == 0:
            return self.sock

    def new_worker_socks(self):
        """
        Yield a sequence of (socket, opaque_data) tuples for each server which
        should be forked-off and started.

        The opaque_data item for each socket will be passed into the
        :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods
        where it will be ignored.
        """

        while len(self.children) < self.worker_count:
            yield self.sock, None

    def post_fork_hook(self):
        """
        Perform any initialization in a forked-off child process prior to
        starting the wsgi server.
        """

        pass

    def log_sock_exit(self, sock, _unused):
        """
        Log a server's exit.

        :param socket sock: The listen socket for the worker just started.
        :param _unused: The socket's opaque_data yielded by
                        :py:meth:`new_worker_socks`.
        """

        self.logger.notice('Child %d exiting normally' % os.getpid())

    def register_worker_start(self, sock, _unused, pid):
        """
        Called when a new worker is started.

        :param socket sock: The listen socket for the worker just started.
        :param _unused: The socket's opaque_data yielded by
                        new_worker_socks().
        :param int pid: The new worker process' PID
        """

        self.logger.notice('Started child %s' % pid)
        self.children.append(pid)

    def register_worker_exit(self, pid):
        """
        Called when a worker has exited.

        :param int pid: The PID of the worker that exited.
        """

        self.logger.error('Removing dead child %s' % pid)
        self.children.remove(pid)

    def shutdown_sockets(self):
        """
        Shutdown any listen sockets.
        """

        greenio.shutdown_safe(self.sock)
        self.sock.close()


class PortPidState(object):
    """
    A helper class for :py:class:`ServersPerPortStrategy` to track listen
    sockets and PIDs for each port.

    :param int servers_per_port: The configured number of servers per port.
    :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor`
    """

    def __init__(self, servers_per_port, logger):
        self.servers_per_port = servers_per_port
        self.logger = logger
        self.sock_data_by_port = {}

    def sock_for_port(self, port):
        """
        :param int port: The port whose socket is desired.
        :returns: The bound listen socket for the given port.
        """

        return self.sock_data_by_port[port]['sock']

    def port_for_sock(self, sock):
        """
        :param socket sock: A tracked bound listen socket
        :returns: The port the socket is bound to.
""" for port, sock_data in self.sock_data_by_port.items(): if sock_data['sock'] == sock: return port def _pid_to_port_and_index(self, pid): for port, sock_data in self.sock_data_by_port.items(): for server_idx, a_pid in enumerate(sock_data['pids']): if pid == a_pid: return port, server_idx def port_index_pairs(self): """ Returns current (port, server index) pairs. :returns: A set of (port, server_idx) tuples for currently-tracked ports, sockets, and PIDs. """ current_port_index_pairs = set() for port, pid_state in self.sock_data_by_port.items(): current_port_index_pairs |= set( (port, i) for i, pid in enumerate(pid_state['pids']) if pid is not None) return current_port_index_pairs def track_port(self, port, sock): """ Start tracking servers for the given port and listen socket. :param int port: The port to start tracking :param socket sock: The bound listen socket for the port. """ self.sock_data_by_port[port] = { 'sock': sock, 'pids': [None] * self.servers_per_port, } def not_tracking(self, port): """ Return True if the specified port is not being tracked. :param int port: A port to check. """ return port not in self.sock_data_by_port def all_socks(self): """ Yield all current listen sockets. """ for orphan_data in self.sock_data_by_port.values(): yield orphan_data['sock'] def forget_port(self, port): """ Idempotently forget a port, closing the listen socket at most once. """ orphan_data = self.sock_data_by_port.pop(port, None) if orphan_data: greenio.shutdown_safe(orphan_data['sock']) orphan_data['sock'].close() self.logger.notice('Closing unnecessary sock for port %d', port) def add_pid(self, port, index, pid): self.sock_data_by_port[port]['pids'][index] = pid def forget_pid(self, pid): """ Idempotently forget a PID. It's okay if the PID is no longer in our data structure (it could have been removed by the "orphan port" removal in :py:meth:`new_worker_socks`). :param int pid: The PID which exited. """ port_server_idx = self._pid_to_port_and_index(pid) if port_server_idx is None: # This method can lose a race with the "orphan port" removal, when # a ring reload no longer contains a port. So it's okay if we were # unable to find a (port, server_idx) pair. return dead_port, server_idx = port_server_idx self.logger.error('Removing dead child %d (PID: %s) for port %s', server_idx, pid, dead_port) self.sock_data_by_port[dead_port]['pids'][server_idx] = None class ServersPerPortStrategy(object): """ WSGI server management strategy object for an object-server with one listen port per unique local port in the storage policy rings. The `servers_per_port` integer config setting determines how many workers are run per port. Used in :py:func:`run_wsgi`. :param dict conf: Server configuration dictionary. :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor` object. :param int servers_per_port: The number of workers to run per port. 
""" def __init__(self, conf, logger, servers_per_port): self.conf = conf self.logger = logger self.servers_per_port = servers_per_port self.swift_dir = conf.get('swift_dir', '/etc/swift') self.ring_check_interval = int(conf.get('ring_check_interval', 15)) self.port_pid_state = PortPidState(servers_per_port, logger) bind_ip = conf.get('bind_ip', '0.0.0.0') self.cache = BindPortsCache(self.swift_dir, bind_ip) def _reload_bind_ports(self): self.bind_ports = self.cache.all_bind_ports_for_node() def _bind_port(self, port): new_conf = self.conf.copy() new_conf['bind_port'] = port sock = get_socket(new_conf) self.port_pid_state.track_port(port, sock) def loop_timeout(self): """ Return timeout before checking for reloaded rings. :returns: The time to wait for a child to exit before checking for reloaded rings (new ports). """ return self.ring_check_interval def do_bind_ports(self): """ Bind one listen socket per unique local storage policy ring port. Then do all the work of drop_privileges except the actual dropping of privileges (each forked-off worker will do that post-fork in :py:meth:`post_fork_hook`). """ self._reload_bind_ports() for port in self.bind_ports: self._bind_port(port) # The workers strategy drops privileges here, which we obviously cannot # do if we want to support binding to low ports. But we do want some # of the actions that drop_privileges did. try: os.setsid() except OSError: pass # In case you need to rmdir where you started the daemon: os.chdir('/') # Ensure files are created with the correct privileges: os.umask(0o22) def no_fork_sock(self): """ This strategy does not support running in the foreground. """ pass def new_worker_socks(self): """ Yield a sequence of (socket, server_idx) tuples for each server which should be forked-off and started. Any sockets for "orphaned" ports no longer in any ring will be closed (causing their associated workers to gracefully exit) after all new sockets have been yielded. The server_idx item for each socket will passed into the :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods. """ self._reload_bind_ports() desired_port_index_pairs = set( (p, i) for p in self.bind_ports for i in range(self.servers_per_port)) current_port_index_pairs = self.port_pid_state.port_index_pairs() if desired_port_index_pairs != current_port_index_pairs: # Orphan ports are ports which had object-server processes running, # but which no longer appear in the ring. We'll kill them after we # start missing workers. orphan_port_index_pairs = current_port_index_pairs - \ desired_port_index_pairs # Fork off worker(s) for every port who's supposed to have # worker(s) but doesn't missing_port_index_pairs = desired_port_index_pairs - \ current_port_index_pairs for port, server_idx in sorted(missing_port_index_pairs): if self.port_pid_state.not_tracking(port): try: self._bind_port(port) except Exception as e: self.logger.critical('Unable to bind to port %d: %s', port, e) continue yield self.port_pid_state.sock_for_port(port), server_idx for orphan_pair in orphan_port_index_pairs: # For any port in orphan_port_index_pairs, it is guaranteed # that there should be no listen socket for that port, so we # can close and forget them. self.port_pid_state.forget_port(orphan_pair[0]) def post_fork_hook(self): """ Called in each child process, prior to starting the actual wsgi server, to drop privileges. """ drop_privileges(self.conf.get('user', 'swift'), call_setsid=False) def log_sock_exit(self, sock, server_idx): """ Log a server's exit. 
""" port = self.port_pid_state.port_for_sock(sock) self.logger.notice('Child %d (PID %d, port %d) exiting normally', server_idx, os.getpid(), port) def register_worker_start(self, sock, server_idx, pid): """ Called when a new worker is started. :param socket sock: The listen socket for the worker just started. :param server_idx: The socket's server_idx as yielded by :py:meth:`new_worker_socks`. :param int pid: The new worker process' PID """ port = self.port_pid_state.port_for_sock(sock) self.logger.notice('Started child %d (PID %d) for port %d', server_idx, pid, port) self.port_pid_state.add_pid(port, server_idx, pid) def register_worker_exit(self, pid): """ Called when a worker has exited. :param int pid: The PID of the worker that exited. """ self.port_pid_state.forget_pid(pid) def shutdown_sockets(self): """ Shutdown any listen sockets. """ for sock in self.port_pid_state.all_socks(): greenio.shutdown_safe(sock) sock.close() def run_wsgi(conf_path, app_section, *args, **kwargs): """ Runs the server according to some strategy. The default strategy runs a specified number of workers in pre-fork model. The object-server (only) may use a servers-per-port strategy if its config has a servers_per_port setting with a value greater than zero. :param conf_path: Path to paste.deploy style configuration file/directory :param app_section: App name from conf file to load config from :returns: 0 if successful, nonzero otherwise """ # Load configuration, Set logger and Load request processor try: (conf, logger, log_name) = \ _initrp(conf_path, app_section, *args, **kwargs) except ConfigFileError as e: print(e) return 1 # optional nice/ionice priority scheduling utils.modify_priority(conf, logger) servers_per_port = int(conf.get('servers_per_port', '0') or 0) # NOTE: for now servers_per_port is object-server-only; future work could # be done to test and allow it to be used for account and container # servers, but that has not been done yet. if servers_per_port and app_section == 'object-server': strategy = ServersPerPortStrategy( conf, logger, servers_per_port=servers_per_port) else: strategy = WorkersStrategy(conf, logger) # Ensure the configuration and application can be loaded before proceeding. global_conf = {'log_name': log_name} if 'global_conf_callback' in kwargs: kwargs['global_conf_callback'](conf, global_conf) loadapp(conf_path, global_conf=global_conf) # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # Start listening on bind_addr/port error_msg = strategy.do_bind_ports() if error_msg: logger.error(error_msg) print(error_msg) return 1 # Redirect errors to logger and close stdio. Do this *after* binding ports; # we use this to signal that the service is ready to accept connections. 
capture_stdio(logger) no_fork_sock = strategy.no_fork_sock() if no_fork_sock: run_server(conf, logger, no_fork_sock, global_conf=global_conf) return 0 def kill_children(*args): """Kills the entire process group.""" logger.error('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) running[0] = False os.killpg(0, signal.SIGTERM) def hup(*args): """Shuts down the server, but allows running requests to complete""" logger.error('SIGHUP received') signal.signal(signal.SIGHUP, signal.SIG_IGN) running[0] = False running = [True] signal.signal(signal.SIGTERM, kill_children) signal.signal(signal.SIGHUP, hup) while running[0]: for sock, sock_info in strategy.new_worker_socks(): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) strategy.post_fork_hook() run_server(conf, logger, sock) strategy.log_sock_exit(sock, sock_info) return 0 else: strategy.register_worker_start(sock, sock_info, pid) # The strategy may need to pay attention to something in addition to # child process exits (like new ports showing up in a ring). # # NOTE: a timeout value of None will just instantiate the Timeout # object and not actually schedule it, which is equivalent to no # timeout for the green_os.wait(). loop_timeout = strategy.loop_timeout() with Timeout(loop_timeout, exception=False): try: try: pid, status = green_os.wait() if os.WIFEXITED(status) or os.WIFSIGNALED(status): strategy.register_worker_exit(pid) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise if err.errno == errno.ECHILD: # If there are no children at all (ECHILD), then # there's nothing to actually wait on. We sleep # for a little bit to avoid a tight CPU spin # and still are able to catch any KeyboardInterrupt # events that happen. The value of 0.01 matches the # value in eventlet's waitpid(). sleep(0.01) except KeyboardInterrupt: logger.notice('User quit') running[0] = False break strategy.shutdown_sockets() logger.notice('Exited') return 0 class ConfigFileError(Exception): pass class ConfigFilePortError(ConfigFileError): pass def _initrp(conf_path, app_section, *args, **kwargs): try: conf = appconfig(conf_path, name=app_section) except Exception as e: raise ConfigFileError("Error trying to load config from %s: %s" % (conf_path, e)) validate_configuration() # pre-configure logger log_name = conf.get('log_name', app_section) if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger = get_logger(conf, log_name, log_to_console=kwargs.pop('verbose', False), log_route='wsgi') # disable fallocate if desired if config_true_value(conf.get('disable_fallocate', 'no')): disable_fallocate() monkey_patch_mimetools() return (conf, logger, log_name) def init_request_processor(conf_path, app_section, *args, **kwargs): """ Loads common settings from conf Sets the logger Loads the request processor :param conf_path: Path to paste.deploy style configuration file/directory :param app_section: App name from conf file to load config from :returns: the loaded application entry point :raises ConfigFileError: Exception is raised for config file error """ (conf, logger, log_name) = _initrp(conf_path, app_section, *args, **kwargs) app = loadapp(conf_path, global_conf={'log_name': log_name}) return (app, conf, logger, log_name) class WSGIContext(object): """ This class provides a means to provide context (scope) for a middleware filter to have access to the wsgi start_response results like the request status and headers. 
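
    A typical use is a middleware subclass that calls the wrapped app and
    then inspects the captured status before forwarding the response.  A
    minimal sketch (the middleware name and 404 handling are illustrative
    only)::

        class ExampleMiddleware(WSGIContext):
            def __call__(self, env, start_response):
                app_iter = self._app_call(env)
                if self._get_status_int() == 404:
                    pass  # e.g. substitute a custom error response here
                start_response(self._response_status, self._response_headers,
                               self._response_exc_info)
                return app_iter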
""" def __init__(self, wsgi_app): self.app = wsgi_app def _start_response(self, status, headers, exc_info=None): """ Saves response info without sending it to the remote client. Uses the same semantics as the usual WSGI start_response. """ self._response_status = status self._response_headers = headers self._response_exc_info = exc_info def _app_call(self, env): """ Ensures start_response has been called before returning. """ self._response_status = None self._response_headers = None self._response_exc_info = None resp = self.app(env, self._start_response) # if start_response has not been called, iterate until we've got a # non-empty chunk, by which time the app *should* have called it if self._response_status is None: resp = reiterate(resp) return resp def _get_status_int(self): """ Returns the HTTP status int from the last called self._start_response result. """ return int(self._response_status.split(' ', 1)[0]) def _response_header_value(self, key): "Returns str of value for given header key or None" for h_key, val in self._response_headers: if h_key.lower() == key.lower(): return val return None def update_content_length(self, new_total_len): self._response_headers = [ (h, v) for h, v in self._response_headers if h.lower() != 'content-length'] self._response_headers.append(('Content-Length', str(new_total_len))) def make_env(env, method=None, path=None, agent='Swift', query_string=None, swift_source=None): """ Returns a new fresh WSGI environment. :param env: The WSGI environment to base the new environment on. :param method: The new REQUEST_METHOD or None to use the original. :param path: The new path_info or none to use the original. path should NOT be quoted. When building a url, a Webob Request (in accordance with wsgi spec) will quote env['PATH_INFO']. url += quote(environ['PATH_INFO']) :param query_string: The new query_string or none to use the original. When building a url, a Webob Request will append the query string directly to the url. url += '?' + env['QUERY_STRING'] :param agent: The HTTP user agent to use; default 'Swift'. You can put %(orig)s in the agent to have it replaced with the original env's HTTP_USER_AGENT, such as '%(orig)s StaticWeb'. You also set agent to None to use the original env's HTTP_USER_AGENT or '' to have no HTTP_USER_AGENT. :param swift_source: Used to mark the request as originating out of middleware. Will be logged in proxy logs. :returns: Fresh WSGI environment. 
""" newenv = {} for name in ('HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD', 'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD', 'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'swift.trans_id', 'swift.authorize_override', 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID', 'HTTP_REFERER', 'swift.infocache'): if name in env: newenv[name] = env[name] if method: newenv['REQUEST_METHOD'] = method if path: newenv['PATH_INFO'] = path newenv['SCRIPT_NAME'] = '' if query_string is not None: newenv['QUERY_STRING'] = query_string if agent: newenv['HTTP_USER_AGENT'] = ( agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip() elif agent == '' and 'HTTP_USER_AGENT' in newenv: del newenv['HTTP_USER_AGENT'] if swift_source: newenv['swift.source'] = swift_source newenv['wsgi.input'] = BytesIO() if 'SCRIPT_NAME' not in newenv: newenv['SCRIPT_NAME'] = '' return newenv def make_subrequest(env, method=None, path=None, body=None, headers=None, agent='Swift', swift_source=None, make_env=make_env): """ Makes a new swob.Request based on the current env but with the parameters specified. :param env: The WSGI environment to base the new request on. :param method: HTTP method of new request; default is from the original env. :param path: HTTP path of new request; default is from the original env. path should be compatible with what you would send to Request.blank. path should be quoted and it can include a query string. for example: '/a%20space?unicode_str%E8%AA%9E=y%20es' :param body: HTTP body of new request; empty by default. :param headers: Extra HTTP headers of new request; None by default. :param agent: The HTTP user agent to use; default 'Swift'. You can put %(orig)s in the agent to have it replaced with the original env's HTTP_USER_AGENT, such as '%(orig)s StaticWeb'. You also set agent to None to use the original env's HTTP_USER_AGENT or '' to have no HTTP_USER_AGENT. :param swift_source: Used to mark the request as originating out of middleware. Will be logged in proxy logs. :param make_env: make_subrequest calls this make_env to help build the swob.Request. :returns: Fresh swob.Request object. """ query_string = None path = path or '' if path and '?' 
in path: path, query_string = path.split('?', 1) newenv = make_env(env, method, path=unquote(path), agent=agent, query_string=query_string, swift_source=swift_source) if not headers: headers = {} if body: return Request.blank(path, environ=newenv, body=body, headers=headers) else: return Request.blank(path, environ=newenv, headers=headers) def make_pre_authed_env(env, method=None, path=None, agent='Swift', query_string=None, swift_source=None): """Same as :py:func:`make_env` but with preauthorization.""" newenv = make_env( env, method=method, path=path, agent=agent, query_string=query_string, swift_source=swift_source) newenv['swift.authorize'] = lambda req: None newenv['swift.authorize_override'] = True newenv['REMOTE_USER'] = '.wsgi.pre_authed' return newenv def make_pre_authed_request(env, method=None, path=None, body=None, headers=None, agent='Swift', swift_source=None): """Same as :py:func:`make_subrequest` but with preauthorization.""" return make_subrequest( env, method=method, path=path, body=body, headers=headers, agent=agent, swift_source=swift_source, make_env=make_pre_authed_env) swift-2.17.0/swift/common/bufferedhttp.py0000666000175100017510000002167413236061617020462 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Monkey Patch httplib.HTTPResponse to buffer reads of headers. This can improve performance when making large numbers of small HTTP requests. This module also provides helper functions to make HTTP connections using BufferedHTTPResponse. .. warning:: If you use this, be sure that the libraries you are using do not access the socket directly (xmlrpclib, I'm looking at you :/), and instead make all calls through httplib. """ from swift.common import constraints import logging import time import socket import eventlet from eventlet.green.httplib import CONTINUE, HTTPConnection, HTTPMessage, \ HTTPResponse, HTTPSConnection, _UNKNOWN from six.moves.urllib.parse import quote import six if six.PY2: httplib = eventlet.import_patched('httplib') else: httplib = eventlet.import_patched('http.client') httplib._MAXHEADERS = constraints.MAX_HEADER_COUNT class BufferedHTTPResponse(HTTPResponse): """HTTPResponse class that buffers reading of headers""" def __init__(self, sock, debuglevel=0, strict=0, method=None): # pragma: no cover self.sock = sock # sock is an eventlet.greenio.GreenSocket # sock.fd is a socket._socketobject # sock.fd._sock is a socket._socket object, which is what we want. self._real_socket = sock.fd._sock self.fp = sock.makefile('rb') self.debuglevel = debuglevel self.strict = strict self._method = method self.msg = None # from the Status-Line of the response self.version = _UNKNOWN # HTTP-Version self.status = _UNKNOWN # Status-Code self.reason = _UNKNOWN # Reason-Phrase self.chunked = _UNKNOWN # is "chunked" being used? 
self.chunk_left = _UNKNOWN # bytes left to read in current chunk self.length = _UNKNOWN # number of bytes left in response self.will_close = _UNKNOWN # conn will close at end of response self._readline_buffer = '' def expect_response(self): if self.fp: self.fp.close() self.fp = None self.fp = self.sock.makefile('rb', 0) version, status, reason = self._read_status() if status != CONTINUE: self._read_status = lambda: (version, status, reason) self.begin() else: self.status = status self.reason = reason.strip() self.version = 11 self.msg = HTTPMessage(self.fp, 0) self.msg.fp = None def read(self, amt=None): if not self._readline_buffer: return HTTPResponse.read(self, amt) if amt is None: # Unbounded read: send anything we have buffered plus whatever # is left. buffered = self._readline_buffer self._readline_buffer = '' return buffered + HTTPResponse.read(self, amt) elif amt <= len(self._readline_buffer): # Bounded read that we can satisfy entirely from our buffer res = self._readline_buffer[:amt] self._readline_buffer = self._readline_buffer[amt:] return res else: # Bounded read that wants more bytes than we have smaller_amt = amt - len(self._readline_buffer) buf = self._readline_buffer self._readline_buffer = '' return buf + HTTPResponse.read(self, smaller_amt) def readline(self, size=1024): # You'd think Python's httplib would provide this, but it doesn't. # It does, however, provide a comment in the HTTPResponse class: # # # XXX It would be nice to have readline and __iter__ for this, # # too. # # Yes, it certainly would. while ('\n' not in self._readline_buffer and len(self._readline_buffer) < size): read_size = size - len(self._readline_buffer) chunk = HTTPResponse.read(self, read_size) if not chunk: break self._readline_buffer += chunk line, newline, rest = self._readline_buffer.partition('\n') self._readline_buffer = rest return line + newline def nuke_from_orbit(self): """ Terminate the socket with extreme prejudice. Closes the underlying socket regardless of whether or not anyone else has references to it. Use this when you are certain that nobody else you care about has a reference to this socket. """ if self._real_socket: # this is idempotent; see sock_close in Modules/socketmodule.c in # the Python source for details. self._real_socket.close() self._real_socket = None self.close() def close(self): HTTPResponse.close(self) self.sock = None self._real_socket = None class BufferedHTTPConnection(HTTPConnection): """HTTPConnection class that uses BufferedHTTPResponse""" response_class = BufferedHTTPResponse def connect(self): self._connected_time = time.time() ret = HTTPConnection.connect(self) self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) return ret def putrequest(self, method, url, skip_host=0, skip_accept_encoding=0): self._method = method self._path = url return HTTPConnection.putrequest(self, method, url, skip_host, skip_accept_encoding) def getexpect(self): response = BufferedHTTPResponse(self.sock, strict=self.strict, method=self._method) response.expect_response() return response def getresponse(self): response = HTTPConnection.getresponse(self) logging.debug("HTTP PERF: %(time).5f seconds to %(method)s " "%(host)s:%(port)s %(path)s)", {'time': time.time() - self._connected_time, 'method': self._method, 'host': self.host, 'port': self.port, 'path': self._path}) return response def http_connect(ipaddr, port, device, partition, method, path, headers=None, query_string=None, ssl=False): """ Helper function to create an HTTPConnection object. 
If ssl is set True, HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection will be used, which is buffered for backend Swift services. :param ipaddr: IPv4 address to connect to :param port: port to connect to :param device: device of the node to query :param partition: partition on the device :param method: HTTP method to request ('GET', 'PUT', 'POST', etc.) :param path: request path :param headers: dictionary of headers :param query_string: request query string :param ssl: set True if SSL should be used (default: False) :returns: HTTPConnection object """ if isinstance(path, six.text_type): path = path.encode("utf-8") if isinstance(device, six.text_type): device = device.encode("utf-8") path = quote('/' + device + '/' + str(partition) + path) return http_connect_raw( ipaddr, port, method, path, headers, query_string, ssl) def http_connect_raw(ipaddr, port, method, path, headers=None, query_string=None, ssl=False): """ Helper function to create an HTTPConnection object. If ssl is set True, HTTPSConnection will be used. However, if ssl=False, BufferedHTTPConnection will be used, which is buffered for backend Swift services. :param ipaddr: IPv4 address to connect to :param port: port to connect to :param method: HTTP method to request ('GET', 'PUT', 'POST', etc.) :param path: request path :param headers: dictionary of headers :param query_string: request query string :param ssl: set True if SSL should be used (default: False) :returns: HTTPConnection object """ if not port: port = 443 if ssl else 80 if ssl: conn = HTTPSConnection('%s:%s' % (ipaddr, port)) else: conn = BufferedHTTPConnection('%s:%s' % (ipaddr, port)) if query_string: path += '?' + query_string conn.path = path conn.putrequest(method, path, skip_host=(headers and 'Host' in headers)) if headers: for header, value in headers.items(): conn.putheader(header, str(value)) conn.endheaders() return conn swift-2.17.0/swift/common/memcached.py0000666000175100017510000004603213236061617017701 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Why our own memcache client? By Michael Barton python-memcached doesn't use consistent hashing, so adding or removing a memcache server from the pool invalidates a huge percentage of cached items. If you keep a pool of python-memcached client objects, each client object has its own connection to every memcached server, only one of which is ever in use. So you wind up with n * m open sockets and almost all of them idle. This client effectively has a pool for each server, so the number of backend connections is hopefully greatly reduced. python-memcache uses pickle to store things, and there was already a huge stink about Swift using pickles in memcache (http://osvdb.org/show/osvdb/86581). That seemed sort of unfair, since nova and keystone and everyone else use pickles for memcache too, but it's hidden behind a "standard" library. But changing would be a security regression at this point. 
Also, pylibmc wouldn't work for us because it needs to use python sockets in order to play nice with eventlet. Lucid comes with memcached: v1.4.2. Protocol documentation for that version is at: http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt """ import six.moves.cPickle as pickle import json import logging import time from bisect import bisect from hashlib import md5 from eventlet.green import socket from eventlet.pools import Pool from eventlet import Timeout from six.moves import range from swift.common import utils DEFAULT_MEMCACHED_PORT = 11211 CONN_TIMEOUT = 0.3 POOL_TIMEOUT = 1.0 # WAG IO_TIMEOUT = 2.0 PICKLE_FLAG = 1 JSON_FLAG = 2 NODE_WEIGHT = 50 PICKLE_PROTOCOL = 2 TRY_COUNT = 3 # if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server # will be considered failed for ERROR_LIMIT_DURATION seconds. ERROR_LIMIT_COUNT = 10 ERROR_LIMIT_TIME = 60 ERROR_LIMIT_DURATION = 60 def md5hash(key): return md5(key).hexdigest() def sanitize_timeout(timeout): """ Sanitize a timeout value to use an absolute expiration time if the delta is greater than 30 days (in seconds). Note that the memcached server translates negative values to mean a delta of 30 days in seconds (and 1 additional second), client beware. """ if timeout > (30 * 24 * 60 * 60): timeout += time.time() return timeout class MemcacheConnectionError(Exception): pass class MemcachePoolTimeout(Timeout): pass class MemcacheConnPool(Pool): """ Connection pool for Memcache Connections The *server* parameter can be a hostname, an IPv4 address, or an IPv6 address with an optional port. See :func:`swift.common.utils.parse_socket_string` for details. """ def __init__(self, server, size, connect_timeout): Pool.__init__(self, max_size=size) self.host, self.port = utils.parse_socket_string( server, DEFAULT_MEMCACHED_PORT) self._connect_timeout = connect_timeout def create(self): addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) family, socktype, proto, canonname, sockaddr = addrs[0] sock = socket.socket(family, socket.SOCK_STREAM) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) with Timeout(self._connect_timeout): sock.connect(sockaddr) return (sock.makefile(), sock) def get(self): fp, sock = super(MemcacheConnPool, self).get() if fp is None: # An error happened previously, so we need a new connection fp, sock = self.create() return fp, sock class MemcacheRing(object): """ Simple, consistent-hashed memcache client. 
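
    Example usage (illustrative; assumes a memcached server listening on the
    default port)::

        cache = MemcacheRing(['127.0.0.1:11211'])
        cache.set('my_key', {'some': 'value'}, time=300)
        value = cache.get('my_key')  # round-trips through JSON by default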
""" def __init__(self, servers, connect_timeout=CONN_TIMEOUT, io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT, tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False, max_conns=2): self._ring = {} self._errors = dict(((serv, []) for serv in servers)) self._error_limited = dict(((serv, 0) for serv in servers)) for server in sorted(servers): for i in range(NODE_WEIGHT): self._ring[md5hash('%s-%s' % (server, i))] = server self._tries = tries if tries <= len(servers) else len(servers) self._sorted = sorted(self._ring) self._client_cache = dict(((server, MemcacheConnPool(server, max_conns, connect_timeout)) for server in servers)) self._connect_timeout = connect_timeout self._io_timeout = io_timeout self._pool_timeout = pool_timeout self._allow_pickle = allow_pickle self._allow_unpickle = allow_unpickle or allow_pickle def _exception_occurred(self, server, e, action='talking', sock=None, fp=None, got_connection=True): if isinstance(e, Timeout): logging.error("Timeout %(action)s to memcached: %(server)s", {'action': action, 'server': server}) elif isinstance(e, (socket.error, MemcacheConnectionError)): logging.error("Error %(action)s to memcached: %(server)s: %(err)s", {'action': action, 'server': server, 'err': e}) else: logging.exception("Error %(action)s to memcached: %(server)s", {'action': action, 'server': server}) try: if fp: fp.close() del fp except Exception: pass try: if sock: sock.close() del sock except Exception: pass if got_connection: # We need to return something to the pool # A new connection will be created the next time it is retrieved self._return_conn(server, None, None) now = time.time() self._errors[server].append(time.time()) if len(self._errors[server]) > ERROR_LIMIT_COUNT: self._errors[server] = [err for err in self._errors[server] if err > now - ERROR_LIMIT_TIME] if len(self._errors[server]) > ERROR_LIMIT_COUNT: self._error_limited[server] = now + ERROR_LIMIT_DURATION logging.error('Error limiting server %s', server) def _get_conns(self, key): """ Retrieves a server conn from the pool, or connects a new one. Chooses the server based on a consistent hash of "key". """ pos = bisect(self._sorted, key) served = [] while len(served) < self._tries: pos = (pos + 1) % len(self._sorted) server = self._ring[self._sorted[pos]] if server in served: continue served.append(server) if self._error_limited[server] > time.time(): continue sock = None try: with MemcachePoolTimeout(self._pool_timeout): fp, sock = self._client_cache[server].get() yield server, fp, sock except MemcachePoolTimeout as e: self._exception_occurred( server, e, action='getting a connection', got_connection=False) except (Exception, Timeout) as e: # Typically a Timeout exception caught here is the one raised # by the create() method of this server's MemcacheConnPool # object. self._exception_occurred( server, e, action='connecting', sock=sock) def _return_conn(self, server, fp, sock): """Returns a server connection to the pool.""" self._client_cache[server].put((fp, sock)) def set(self, key, value, serialize=True, time=0, min_compress_len=0): """ Set a key/value pair in memcache :param key: key :param value: value :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) :param time: the time to live :param min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. This implementation ignores it. 
""" key = md5hash(key) timeout = sanitize_timeout(time) flags = 0 if serialize and self._allow_pickle: value = pickle.dumps(value, PICKLE_PROTOCOL) flags |= PICKLE_FLAG elif serialize: value = json.dumps(value) flags |= JSON_FLAG for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('set %s %d %d %s\r\n%s\r\n' % (key, flags, timeout, len(value), value)) # Wait for the set to complete fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def get(self, key): """ Gets the object specified by key. It will also unserialize the object before returning if it is serialized in memcache with JSON, or if it is pickled and unpickling is allowed. :param key: key :returns: value of the key in memcache """ key = md5hash(key) value = None for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('get %s\r\n' % key) line = fp.readline().strip().split() while True: if not line: raise MemcacheConnectionError('incomplete read') if line[0].upper() == 'END': break if line[0].upper() == 'VALUE' and line[1] == key: size = int(line[3]) value = fp.read(size) if int(line[2]) & PICKLE_FLAG: if self._allow_unpickle: value = pickle.loads(value) else: value = None elif int(line[2]) & JSON_FLAG: value = json.loads(value) fp.readline() line = fp.readline().strip().split() self._return_conn(server, fp, sock) return value except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def incr(self, key, delta=1, time=0): """ Increments a key which has a numeric value by delta. If the key can't be found, it's added as delta or 0 if delta < 0. If passed a negative number, will use memcached's decr. Returns the int stored in memcached Note: The data memcached stores as the result of incr/decr is an unsigned int. decr's that result in a number below 0 are stored as 0. :param key: key :param delta: amount to add to the value of key (or set as the value if the key is not found) will be cast to an int :param time: the time to live :returns: result of incrementing :raises MemcacheConnectionError: """ key = md5hash(key) command = 'incr' if delta < 0: command = 'decr' delta = str(abs(int(delta))) timeout = sanitize_timeout(time) for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('%s %s %s\r\n' % (command, key, delta)) line = fp.readline().strip().split() if not line: raise MemcacheConnectionError('incomplete read') if line[0].upper() == 'NOT_FOUND': add_val = delta if command == 'decr': add_val = '0' sock.sendall('add %s %d %d %s\r\n%s\r\n' % (key, 0, timeout, len(add_val), add_val)) line = fp.readline().strip().split() if line[0].upper() == 'NOT_STORED': sock.sendall('%s %s %s\r\n' % (command, key, delta)) line = fp.readline().strip().split() ret = int(line[0].strip()) else: ret = int(add_val) else: ret = int(line[0].strip()) self._return_conn(server, fp, sock) return ret except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) raise MemcacheConnectionError("No Memcached connections succeeded.") def decr(self, key, delta=1, time=0): """ Decrements a key which has a numeric value by delta. Calls incr with -delta. 
:param key: key :param delta: amount to subtract to the value of key (or set the value to 0 if the key is not found) will be cast to an int :param time: the time to live :returns: result of decrementing :raises MemcacheConnectionError: """ return self.incr(key, delta=-delta, time=time) def delete(self, key): """ Deletes a key/value pair from memcache. :param key: key to be deleted """ key = md5hash(key) for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('delete %s\r\n' % key) # Wait for the delete to complete fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def set_multi(self, mapping, server_key, serialize=True, time=0, min_compress_len=0): """ Sets multiple key/value pairs in memcache. :param mapping: dictionary of keys and values to be set in memcache :param server_key: key to use in determining which server in the ring is used :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) :param time: the time to live :min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. This implementation ignores it """ server_key = md5hash(server_key) timeout = sanitize_timeout(time) msg = '' for key, value in mapping.items(): key = md5hash(key) flags = 0 if serialize and self._allow_pickle: value = pickle.dumps(value, PICKLE_PROTOCOL) flags |= PICKLE_FLAG elif serialize: value = json.dumps(value) flags |= JSON_FLAG msg += ('set %s %d %d %s\r\n%s\r\n' % (key, flags, timeout, len(value), value)) for (server, fp, sock) in self._get_conns(server_key): try: with Timeout(self._io_timeout): sock.sendall(msg) # Wait for the set to complete for line in range(len(mapping)): fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def get_multi(self, keys, server_key): """ Gets multiple values from memcache for the given keys. :param keys: keys for values to be retrieved from memcache :param server_key: key to use in determining which server in the ring is used :returns: list of values """ server_key = md5hash(server_key) keys = [md5hash(key) for key in keys] for (server, fp, sock) in self._get_conns(server_key): try: with Timeout(self._io_timeout): sock.sendall('get %s\r\n' % ' '.join(keys)) line = fp.readline().strip().split() responses = {} while True: if not line: raise MemcacheConnectionError('incomplete read') if line[0].upper() == 'END': break if line[0].upper() == 'VALUE': size = int(line[3]) value = fp.read(size) if int(line[2]) & PICKLE_FLAG: if self._allow_unpickle: value = pickle.loads(value) else: value = None elif int(line[2]) & JSON_FLAG: value = json.loads(value) responses[line[1]] = value fp.readline() line = fp.readline().strip().split() values = [] for key in keys: if key in responses: values.append(responses[key]) else: values.append(None) self._return_conn(server, fp, sock) return values except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) swift-2.17.0/swift/common/direct_client.py0000666000175100017510000005265213236061617020610 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Internal client library for making calls directly to the servers rather than
through the proxy.
"""

import json
import os
import socket

from eventlet import sleep, Timeout
import six
import six.moves.cPickle as pickle
from six.moves.http_client import HTTPException

from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ClientException
from swift.common.utils import Timestamp, FileLikeIter
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
    is_success, is_server_error
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import quote


class DirectClientException(ClientException):

    def __init__(self, stype, method, node, part, path, resp, host=None):
        # host can be used to override the node ip and port reported in
        # the exception
        host = host if host is not None else node
        if not isinstance(path, six.text_type):
            path = path.decode("utf-8")
        full_path = quote('/%s/%s%s' % (node['device'], part, path))
        msg = '%s server %s:%s direct %s %r gave status %s' % (
            stype, host['ip'], host['port'], method, full_path, resp.status)
        headers = HeaderKeyDict(resp.getheaders())
        super(DirectClientException, self).__init__(
            msg, http_host=host['ip'], http_port=host['port'],
            http_device=node['device'], http_status=resp.status,
            http_reason=resp.reason, http_headers=headers)


def _make_req(node, part, method, path, _headers, stype,
              conn_timeout=5, response_timeout=15):
    """
    Make request to backend storage node. (i.e. 'Account', 'Container',
    'Object')

    :param node: a node dict from a ring
    :param part: an integer, the partition number
    :param method: a string, the HTTP method (e.g. 'PUT', 'DELETE', etc)
    :param path: a string, the request path
    :param _headers: a dict, header name => value
    :param stype: a string, describing the type of service
    :returns: an HTTPResponse object
    """
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            method, path, headers=_headers)
    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if not is_success(resp.status):
        raise DirectClientException(stype, method, node, part, path, resp)
    return resp


def _get_direct_account_container(path, stype, node, part,
                                  marker=None, limit=None,
                                  prefix=None, delimiter=None,
                                  conn_timeout=5, response_timeout=15,
                                  end_marker=None, reverse=None):
    """Base function for getting listings directly from the account and
    container servers. Do not use directly; use direct_get_account or
    direct_get_container instead.
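
    For example, the public wrappers are called like this (``node`` and
    ``part`` would come from a ring lookup; the names here are illustrative
    only)::

        headers, containers = direct_get_account(node, part, 'AUTH_test',
                                                 limit=100)
        headers, objects = direct_get_container(node, part, 'AUTH_test',
                                                'a_container',
                                                prefix='photos/')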
""" params = ['format=json'] if marker: params.append('marker=%s' % quote(marker)) if limit: params.append('limit=%d' % limit) if prefix: params.append('prefix=%s' % quote(prefix)) if delimiter: params.append('delimiter=%s' % quote(delimiter)) if end_marker: params.append('end_marker=%s' % quote(end_marker)) if reverse: params.append('reverse=%s' % quote(reverse)) qs = '&'.join(params) with Timeout(conn_timeout): conn = http_connect(node['ip'], node['port'], node['device'], part, 'GET', path, query_string=qs, headers=gen_headers()) with Timeout(response_timeout): resp = conn.getresponse() if not is_success(resp.status): resp.read() raise DirectClientException(stype, 'GET', node, part, path, resp) resp_headers = HeaderKeyDict() for header, value in resp.getheaders(): resp_headers[header] = value if resp.status == HTTP_NO_CONTENT: resp.read() return resp_headers, [] return resp_headers, json.loads(resp.read()) def gen_headers(hdrs_in=None, add_ts=False): hdrs_out = HeaderKeyDict(hdrs_in) if hdrs_in else HeaderKeyDict() if add_ts: hdrs_out['X-Timestamp'] = Timestamp.now().internal hdrs_out['User-Agent'] = 'direct-client %s' % os.getpid() return hdrs_out def direct_get_account(node, part, account, marker=None, limit=None, prefix=None, delimiter=None, conn_timeout=5, response_timeout=15, end_marker=None, reverse=None): """ Get listings directly from the account server. :param node: node dictionary from the ring :param part: partition the account is on :param account: account name :param marker: marker query :param limit: query limit :param prefix: prefix query :param delimiter: delimiter for the query :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :param end_marker: end_marker query :param reverse: reverse the returned listing :returns: a tuple of (response headers, a list of containers) The response headers will HeaderKeyDict. """ path = '/' + account return _get_direct_account_container(path, "Account", node, part, marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, end_marker=end_marker, reverse=reverse, conn_timeout=conn_timeout, response_timeout=response_timeout) def direct_delete_account(node, part, account, conn_timeout=5, response_timeout=15, headers=None): if headers is None: headers = {} path = '/%s' % account _make_req(node, part, 'DELETE', path, gen_headers(headers, True), 'Account', conn_timeout, response_timeout) def direct_head_container(node, part, account, container, conn_timeout=5, response_timeout=15): """ Request container information directly from the container server. :param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :returns: a dict containing the response's headers in a HeaderKeyDict :raises ClientException: HTTP HEAD request failed """ path = '/%s/%s' % (account, container) resp = _make_req(node, part, 'HEAD', path, gen_headers(), 'Container', conn_timeout, response_timeout) resp_headers = HeaderKeyDict() for header, value in resp.getheaders(): resp_headers[header] = value return resp_headers def direct_get_container(node, part, account, container, marker=None, limit=None, prefix=None, delimiter=None, conn_timeout=5, response_timeout=15, end_marker=None, reverse=None): """ Get container listings directly from the container server. 
:param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param marker: marker query :param limit: query limit :param prefix: prefix query :param delimiter: delimiter for the query :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :param end_marker: end_marker query :param reverse: reverse the returned listing :returns: a tuple of (response headers, a list of objects) The response headers will be a HeaderKeyDict. """ path = '/%s/%s' % (account, container) return _get_direct_account_container(path, "Container", node, part, marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, end_marker=end_marker, reverse=reverse, conn_timeout=conn_timeout, response_timeout=response_timeout) def direct_delete_container(node, part, account, container, conn_timeout=5, response_timeout=15, headers=None): """ Delete container directly from the container server. :param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :param headers: dict to be passed into HTTPConnection headers :raises ClientException: HTTP DELETE request failed """ if headers is None: headers = {} path = '/%s/%s' % (account, container) add_timestamp = 'x-timestamp' not in (k.lower() for k in headers) _make_req(node, part, 'DELETE', path, gen_headers(headers, add_timestamp), 'Container', conn_timeout, response_timeout) def direct_put_container_object(node, part, account, container, obj, conn_timeout=5, response_timeout=15, headers=None): if headers is None: headers = {} have_x_timestamp = 'x-timestamp' in (k.lower() for k in headers) path = '/%s/%s/%s' % (account, container, obj) _make_req(node, part, 'PUT', path, gen_headers(headers, add_ts=(not have_x_timestamp)), 'Container', conn_timeout, response_timeout) def direct_delete_container_object(node, part, account, container, obj, conn_timeout=5, response_timeout=15, headers=None): if headers is None: headers = {} headers = gen_headers(headers, add_ts='x-timestamp' not in ( k.lower() for k in headers)) path = '/%s/%s/%s' % (account, container, obj) _make_req(node, part, 'DELETE', path, headers, 'Container', conn_timeout, response_timeout) def direct_head_object(node, part, account, container, obj, conn_timeout=5, response_timeout=15, headers=None): """ Request object information directly from the object server. 
    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: dict to be passed into HTTPConnection headers
    :returns: a dict containing the response's headers in a HeaderKeyDict
    :raises ClientException: HTTP HEAD request failed
    """
    if headers is None:
        headers = {}

    headers = gen_headers(headers)

    path = '/%s/%s/%s' % (account, container, obj)
    resp = _make_req(node, part, 'HEAD', path, headers,
                     'Object', conn_timeout, response_timeout)

    resp_headers = HeaderKeyDict()
    for header, value in resp.getheaders():
        resp_headers[header] = value
    return resp_headers


def direct_get_object(node, part, account, container, obj, conn_timeout=5,
                      response_timeout=15, resp_chunk_size=None,
                      headers=None):
    """
    Get object directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param resp_chunk_size: if defined, chunk size of data to read.
    :param headers: dict to be passed into HTTPConnection headers
    :returns: a tuple of (response headers, the object's contents) The
              response headers will be a HeaderKeyDict.
    :raises ClientException: HTTP GET request failed
    """
    if headers is None:
        headers = {}

    path = '/%s/%s/%s' % (account, container, obj)
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'GET', path, headers=gen_headers(headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        resp.read()
        raise DirectClientException('Object', 'GET', node, part, path, resp)

    if resp_chunk_size:

        def _object_body():
            buf = resp.read(resp_chunk_size)
            while buf:
                yield buf
                buf = resp.read(resp_chunk_size)
        object_body = _object_body()
    else:
        object_body = resp.read()
    resp_headers = HeaderKeyDict()
    for header, value in resp.getheaders():
        resp_headers[header] = value
    return resp_headers, object_body


def direct_put_object(node, part, account, container, name, contents,
                      content_length=None, etag=None, content_type=None,
                      headers=None, conn_timeout=5, response_timeout=15,
                      chunk_size=65535):
    """
    Put object directly to the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param contents: an iterable or string to read object data from
    :param content_length: value to send as content-length header
    :param etag: etag of contents
    :param content_type: value to send as content-type header
    :param headers: additional headers to include in the request
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param chunk_size: if defined, chunk size of data to send.
    :returns: etag from the server response
    :raises ClientException: HTTP PUT request failed
    """
    path = '/%s/%s/%s' % (account, container, name)
    if headers is None:
        headers = {}
    if etag:
        headers['ETag'] = etag.strip('"')
    if content_length is not None:
        headers['Content-Length'] = str(content_length)
    else:
        for n, v in headers.items():
            if n.lower() == 'content-length':
                content_length = int(v)
    if content_type is not None:
        headers['Content-Type'] = content_type
    else:
        headers['Content-Type'] = 'application/octet-stream'
    if not contents:
        headers['Content-Length'] = '0'
    if isinstance(contents, six.string_types):
        contents = [contents]
    # In case the caller wants to insert an object with a specific age
    add_ts = 'X-Timestamp' not in headers

    if content_length is None:
        headers['Transfer-Encoding'] = 'chunked'

    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'PUT', path,
                            headers=gen_headers(headers, add_ts))

    contents_f = FileLikeIter(contents)

    if content_length is None:
        chunk = contents_f.read(chunk_size)
        while chunk:
            conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
            chunk = contents_f.read(chunk_size)
        conn.send('0\r\n\r\n')
    else:
        left = content_length
        while left > 0:
            size = chunk_size
            if size > left:
                size = left
            chunk = contents_f.read(size)
            if not chunk:
                break
            conn.send(chunk)
            left -= len(chunk)

    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if not is_success(resp.status):
        raise DirectClientException('Object', 'PUT', node, part, path, resp)
    return resp.getheader('etag').strip('"')


def direct_post_object(node, part, account, container, name, headers,
                       conn_timeout=5, response_timeout=15):
    """
    Direct update to object metadata on object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param headers: headers to store as metadata
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP POST request failed
    """
    path = '/%s/%s/%s' % (account, container, name)
    _make_req(node, part, 'POST', path, gen_headers(headers, True),
              'Object', conn_timeout, response_timeout)


def direct_delete_object(node, part, account, container, obj,
                         conn_timeout=5, response_timeout=15, headers=None):
    """
    Delete object directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP DELETE request failed
    """
    if headers is None:
        headers = {}

    headers = gen_headers(headers, add_ts='x-timestamp' not in (
        k.lower() for k in headers))

    path = '/%s/%s/%s' % (account, container, obj)
    _make_req(node, part, 'DELETE', path, headers,
              'Object', conn_timeout, response_timeout)


def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5,
                             response_timeout=15, headers=None):
    """
    Get suffix hashes directly from the object server.
    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: dict to be passed into HTTPConnection headers
    :returns: dict of suffix hashes
    :raises ClientException: HTTP REPLICATE request failed
    """
    if headers is None:
        headers = {}

    path = '/%s' % '-'.join(suffixes)
    with Timeout(conn_timeout):
        conn = http_connect(node['replication_ip'], node['replication_port'],
                            node['device'], part, 'REPLICATE', path,
                            headers=gen_headers(headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        raise DirectClientException('Object', 'REPLICATE',
                                    node, part, path, resp,
                                    host={'ip': node['replication_ip'],
                                          'port': node['replication_port']}
                                    )
    return pickle.loads(resp.read())


def retry(func, *args, **kwargs):
    """
    Helper function to retry a given function a number of times.

    :param func: callable to be called
    :param retries: number of retries
    :param error_log: logger for errors
    :param args: arguments to send to func
    :param kwargs: keyword arguments to send to func (if retries or
                   error_log are sent, they will be deleted from kwargs
                   before sending on to func)
    :returns: result of func
    :raises ClientException: all retries failed
    """
    retries = kwargs.pop('retries', 5)
    error_log = kwargs.pop('error_log', None)
    attempts = 0
    backoff = 1
    while attempts <= retries:
        attempts += 1
        try:
            return attempts, func(*args, **kwargs)
        except (socket.error, HTTPException, Timeout) as err:
            if error_log:
                error_log(err)
            if attempts > retries:
                raise
        except ClientException as err:
            if error_log:
                error_log(err)
            if attempts > retries or not is_server_error(err.http_status) or \
                    err.http_status == HTTP_INSUFFICIENT_STORAGE:
                raise
        sleep(backoff)
        backoff *= 2
    # Shouldn't actually get down here, but just in case.
    if args and 'ip' in args[0]:
        raise ClientException('Raise too many retries',
                              http_host=args[0]['ip'],
                              http_port=args[0]['port'],
                              http_device=args[0]['device'])
    else:
        raise ClientException('Raise too many retries')
swift-2.17.0/swift/common/daemon.py0000666000175100017510000002604113236061617017234 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import errno
import os
import sys
import time
import signal
from re import sub

import eventlet.debug
from eventlet.hubs import use_hub

from swift.common import utils


class Daemon(object):
    """
    Daemon base class

    A daemon has a run method that accepts a ``once`` kwarg and will dispatch
    to :meth:`run_once` or :meth:`run_forever`.

    A subclass of Daemon must implement :meth:`run_once` and
    :meth:`run_forever`.

    A subclass of Daemon may override :meth:`get_worker_args` to dispatch
    arguments to individual child process workers and :meth:`is_healthy` to
    perform context specific periodic wellness checks which can reset worker
    arguments.
Implementations of Daemon do not know *how* to daemonize, or execute multiple daemonized workers; they simply provide the behavior of the daemon and context-specific knowledge about how workers should be started. """ def __init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): """Override this to run the script once""" raise NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs): """Override this to run forever""" raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def get_worker_args(self, once=False, **kwargs): """ For each worker yield a (possibly empty) dict of kwargs to pass along to the daemon's :meth:`run` method after fork. The number of elements returned from this method determines the number of processes created. If the returned iterable is empty, the strategy will fall back to the run-inline strategy. :param once: False if the worker(s) will be daemonized, True if the worker(s) will be run once :param kwargs: plumbed through via the command line argparser :returns: an iterable of dicts, where each element represents the kwargs to be passed to a single worker's :meth:`run` method after fork. """ return [] def is_healthy(self): """ This method is called very frequently on the instance of the daemon held by the parent process. If it returns False, all child workers are terminated, and new workers will be created. :returns: a boolean, True only if all workers should continue to run """ return True class DaemonStrategy(object): """ This is the execution strategy for using subclasses of Daemon. The default behavior is to invoke the daemon's :meth:`Daemon.run` method from within the parent process. When the :meth:`Daemon.run` method returns, the parent process will exit. However, if the Daemon returns a non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will be invoked in child processes, with the arguments provided from the parent process's instance of the daemon. If a child process exits, it will be restarted with the same options, unless it was executed in once mode.
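For instance, a daemon that shards its work by device might yield one dict per disk (an illustrative sketch; real daemons define their own worker kwargs)::

    class ByDeviceDaemon(Daemon):
        def get_worker_args(self, once=False, **kwargs):
            # two elements => two forked workers, each of which is
            # passed its own dict as kwargs to run() after fork
            return [{'device': 'sda'}, {'device': 'sdb'}]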
:param daemon: an instance of a :class:`Daemon` (has a `run` method) :param logger: a logger instance """ def __init__(self, daemon, logger): self.daemon = daemon self.logger = logger self.running = False # only used by multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) os._exit(0) signal.signal(signal.SIGTERM, kill_children) self.running = True def _run_inline(self, once=False, **kwargs): """Run the daemon""" self.daemon.run(once=once, **kwargs) def run(self, once=False, **kwargs): """Daemonize and execute our strategy""" self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running = False def _fork(self, once, **kwargs): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished', os.getpid()) # do not return from this stack, nor execute any finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self): while True: try: per_worker_options = self.unspawned_worker_options.pop() except IndexError: return yield per_worker_options def spawned_pids(self): return self.options_by_pid.keys() def register_worker_start(self, pid, per_worker_options): self.logger.debug('Spawned worker %s with %r', pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once, **kwargs): self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug( 'Daemon needs to change options, aborting workers') self.cleanup() return True return False def check_on_all_running_workers(self): for p in self.spawned_pids(): try: pid, status = os.waitpid(p, os.WNOHANG) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker %s died', p) else: if pid == 0: # child still running continue self.logger.debug('Worker %s exited', p) self.register_worker_exit(p) def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once, **kwargs) self.check_on_all_running_workers() if not once: for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 else: if not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break time.sleep(0.1) return 0 def cleanup(self): for p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except OSError as err: if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p) def run_daemon(klass, conf_file, 
section_name='', once=False, **kwargs): """ Loads settings from conf, then instantiates daemon ``klass`` and runs the daemon with the specified ``once`` kwarg. The section_name will be derived from the daemon ``klass`` if not provided (e.g. ObjectReplicator => object-replicator). :param klass: Class to instantiate, subclass of :class:`Daemon` :param conf_file: Path to configuration file :param section_name: Section name from conf file to load config from :param once: Passed to daemon :meth:`Daemon.run` method """ # very often the config section_name is based on the class name # the None singleton will be passed through to readconf as is if section_name == '': section_name = sub(r'([a-z])([A-Z])', r'\1-\2', klass.__name__).lower() try: conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError, IOError) as e: # The message will be printed to stderr # and results in an exit code of 1. sys.exit(e) use_hub(utils.get_hub()) # once on the command line (i.e. daemonize=false) overrides the config once = once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional nice/ionice priority scheduling utils.modify_priority(conf, logger) # disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable eventlet printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on # some platforms. This locks in reported times to UTC. os.environ['TZ'] = 'UTC+0' time.tzset() logger.notice('Starting %s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit') logger.notice('Exited %s', os.getpid()) swift-2.17.0/swift/common/swob.py0000666000175100017510000015261013236061617016745 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Implementation of WSGI Request and Response objects. This library has a very similar API to Webob. It wraps WSGI request environments and response values into objects that are more friendly to interact with. Why Swob and not just use WebOb? By Michael Barton We used webob for years. The main problem was that the interface wasn't stable. For a while, each of our several test suites required a slightly different version of webob to run, and none of them worked with the then-current version. It was a huge headache, so we just scrapped it.
This is kind of a ton of code, but it's also been a huge relief to not have to scramble to add a bunch of code branches all over the place to keep Swift working every time webob decides some interface needs to change. """ from collections import defaultdict, MutableMapping import time from functools import partial from datetime import datetime, timedelta, tzinfo from email.utils import parsedate import re import random import functools import inspect import six from six import BytesIO from six import StringIO from six.moves import urllib from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import reiterate, split_path, Timestamp, pairs, \ close_if_possible, closing_if_possible from swift.common.exceptions import InvalidTimestamp RESPONSE_REASONS = { 100: ('Continue', ''), 200: ('OK', ''), 201: ('Created', ''), 202: ('Accepted', 'The request is accepted for processing.'), 204: ('No Content', ''), 206: ('Partial Content', ''), 301: ('Moved Permanently', 'The resource has moved permanently.'), 302: ('Found', 'The resource has moved temporarily.'), 303: ('See Other', 'The response to the request can be found under a ' 'different URI.'), 304: ('Not Modified', ''), 307: ('Temporary Redirect', 'The resource has moved temporarily.'), 400: ('Bad Request', 'The server could not comply with the request since ' 'it is either malformed or otherwise incorrect.'), 401: ('Unauthorized', 'This server could not verify that you are ' 'authorized to access the document you requested.'), 402: ('Payment Required', 'Access was denied for financial reasons.'), 403: ('Forbidden', 'Access was denied to this resource.'), 404: ('Not Found', 'The resource could not be found.'), 405: ('Method Not Allowed', 'The method is not allowed for this ' 'resource.'), 406: ('Not Acceptable', 'The resource is not available in a format ' 'acceptable to your browser.'), 408: ('Request Timeout', 'The server has waited too long for the request ' 'to be sent by the client.'), 409: ('Conflict', 'There was a conflict when trying to complete ' 'your request.'), 410: ('Gone', 'This resource is no longer available.'), 411: ('Length Required', 'Content-Length header required.'), 412: ('Precondition Failed', 'A precondition for this request was not ' 'met.'), 413: ('Request Entity Too Large', 'The body of your request was too ' 'large for this server.'), 414: ('Request URI Too Long', 'The request URI was too long for this ' 'server.'), 415: ('Unsupported Media Type', 'The request media type is not ' 'supported by this server.'), 416: ('Requested Range Not Satisfiable', 'The Range requested is not ' 'available.'), 417: ('Expectation Failed', 'Expectation failed.'), 422: ('Unprocessable Entity', 'Unable to process the contained ' 'instructions'), 499: ('Client Disconnect', 'The client was disconnected during request.'), 500: ('Internal Error', 'The server has either erred or is incapable of ' 'performing the requested operation.'), 501: ('Not Implemented', 'The requested method is not implemented by ' 'this server.'), 502: ('Bad Gateway', 'Bad gateway.'), 503: ('Service Unavailable', 'The server is currently unavailable. ' 'Please try again at a later time.'), 504: ('Gateway Timeout', 'A timeout has occurred speaking to a ' 'backend server.'), 507: ('Insufficient Storage', 'There was not enough space to save the ' 'resource. 
Drive: %(drive)s'), } MAX_RANGE_OVERLAPS = 2 MAX_NONASCENDING_RANGES = 8 MAX_RANGES = 50 class _UTC(tzinfo): """ A tzinfo class for datetime objects that returns a 0 timedelta (UTC time) """ def dst(self, dt): return timedelta(0) utcoffset = dst def tzname(self, dt): return 'UTC' UTC = _UTC() class WsgiBytesIO(BytesIO): """ This class adds support for the additional wsgi.input methods defined on eventlet.wsgi.Input to the BytesIO class which would otherwise be a fine stand-in for the file-like object in the WSGI environment. """ def set_hundred_continue_response_headers(self, headers): pass def send_hundred_continue_response(self): pass def _datetime_property(header): """ Set and retrieve the datetime value of self.headers[header] (Used by both request and response) The header is parsed on retrieval and a datetime object is returned. The header can be set using a datetime, numeric value, or str. If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Content-Length" """ def getter(self): value = self.headers.get(header, None) if value is not None: try: parts = parsedate(self.headers[header])[:7] return datetime(*(parts + (UTC,))) except Exception: return None def setter(self, value): if isinstance(value, (float,) + six.integer_types): self.headers[header] = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(value)) elif isinstance(value, datetime): self.headers[header] = value.strftime("%a, %d %b %Y %H:%M:%S GMT") else: self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s header as a datetime, " "set it with a datetime, int, or str") % header) def _header_property(header): """ Set and retrieve the value of self.headers[header] (Used by both request and response) If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Transfer-Encoding" """ def getter(self): return self.headers.get(header, None) def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header" % header) def _header_int_property(header): """ Set and retrieve the value of self.headers[header] (Used by both request and response) On retrieval, it converts values to integers. If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Content-Length" """ def getter(self): val = self.headers.get(header, None) if val is not None: val = int(val) return val def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header as an int" % header) def header_to_environ_key(header_name): header_name = 'HTTP_' + header_name.replace('-', '_').upper() if header_name == 'HTTP_CONTENT_LENGTH': return 'CONTENT_LENGTH' if header_name == 'HTTP_CONTENT_TYPE': return 'CONTENT_TYPE' return header_name class HeaderEnvironProxy(MutableMapping): """ A dict-like object that proxies requests to a wsgi environ, rewriting header keys to environ keys. 
For example, headers['Content-Range'] sets and gets the value of headers.environ['HTTP_CONTENT_RANGE'] """ def __init__(self, environ): self.environ = environ def __iter__(self): for k in self.keys(): yield k def __len__(self): return len(self.keys()) def __getitem__(self, key): return self.environ[header_to_environ_key(key)] def __setitem__(self, key, value): if value is None: self.environ.pop(header_to_environ_key(key), None) elif isinstance(value, six.text_type): self.environ[header_to_environ_key(key)] = value.encode('utf-8') else: self.environ[header_to_environ_key(key)] = str(value) def __contains__(self, key): return header_to_environ_key(key) in self.environ def __delitem__(self, key): del self.environ[header_to_environ_key(key)] def keys(self): keys = [key[5:].replace('_', '-').title() for key in self.environ if key.startswith('HTTP_')] if 'CONTENT_LENGTH' in self.environ: keys.append('Content-Length') if 'CONTENT_TYPE' in self.environ: keys.append('Content-Type') return keys def _resp_status_property(): """ Set and retrieve the value of Response.status On retrieval, it concatenates status_int and title. When set to a str, it splits status_int and title apart. When set to an integer, retrieves the correct title for that response code from the RESPONSE_REASONS dict. """ def getter(self): return '%s %s' % (self.status_int, self.title) def setter(self, value): if isinstance(value, six.integer_types): self.status_int = value self.explanation = self.title = RESPONSE_REASONS[value][0] else: if isinstance(value, six.text_type): value = value.encode('utf-8') self.status_int = int(value.split(' ', 1)[0]) self.explanation = self.title = value.split(' ', 1)[1] return property(getter, setter, doc="Retrieve and set the Response status, e.g. '200 OK'") def _resp_body_property(): """ Set and retrieve the value of Response.body If necessary, it will consume Response.app_iter to create a body. On assignment, encodes unicode values to utf-8, and sets the content-length to the length of the str. """ def getter(self): if not self._body: if not self._app_iter: return '' with closing_if_possible(self._app_iter): self._body = ''.join(self._app_iter) self._app_iter = None return self._body def setter(self, value): if isinstance(value, six.text_type): value = value.encode('utf-8') if isinstance(value, str): self.content_length = len(value) self._app_iter = None self._body = value return property(getter, setter, doc="Retrieve and set the Response body str") def _resp_etag_property(): """ Set and retrieve Response.etag This may be broken for etag use cases other than Swift's. Quotes strings when assigned and unquotes when read, for compatibility with webob. """ def getter(self): etag = self.headers.get('etag', None) if etag: etag = etag.replace('"', '') return etag def setter(self, value): if value is None: self.headers['etag'] = None else: self.headers['etag'] = '"%s"' % value return property(getter, setter, doc="Retrieve and set the response Etag header") def _resp_content_type_property(): """ Set and retrieve Response.content_type Strips off any charset when retrieved -- that is accessible via Response.charset. """ def getter(self): if 'content-type' in self.headers: return self.headers.get('content-type').split(';')[0] def setter(self, value): self.headers['content-type'] = value return property(getter, setter, doc="Retrieve and set the response Content-Type header") def _resp_charset_property(): """ Set and retrieve Response.charset On retrieval, separates the charset from the content-type. 
On assignment, removes any existing charset from the content-type and appends the new one. """ def getter(self): if '; charset=' in self.headers['content-type']: return self.headers['content-type'].split('; charset=')[1] def setter(self, value): if 'content-type' in self.headers: self.headers['content-type'] = self.headers['content-type'].split( ';')[0] if value: self.headers['content-type'] += '; charset=' + value return property(getter, setter, doc="Retrieve and set the response charset") def _resp_app_iter_property(): """ Set and retrieve Response.app_iter Mostly a pass-through to Response._app_iter; it's a property so it can zero out an existing content-length on assignment. """ def getter(self): return self._app_iter def setter(self, value): if isinstance(value, (list, tuple)): self.content_length = sum(map(len, value)) elif value is not None: self.content_length = None self._body = None self._app_iter = value return property(getter, setter, doc="Retrieve and set the response app_iter") def _req_fancy_property(cls, header, even_if_nonexistent=False): """ Set and retrieve "fancy" properties. On retrieval, these properties return a class that takes the value of the header as the only argument to their constructor. For assignment, those classes should implement a __str__ that converts them back to their header values. :param header: name of the header, e.g. "Accept" :param even_if_nonexistent: Return a value even if the header does not exist. Classes using this should be prepared to accept None as a parameter. """ def getter(self): try: if header in self.headers or even_if_nonexistent: return cls(self.headers.get(header)) except ValueError: return None def setter(self, value): self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s " "property in the WSGI environ, as a %s object") % (header, cls.__name__)) class Range(object): """ Wraps a Request's Range header as a friendly object. After initialization, "range.ranges" is populated with a list of (start, end) tuples denoting the requested ranges. If there were any syntactically-invalid byte-range-spec values, the constructor will raise a ValueError, per the relevant RFC: "The recipient of a byte-range-set that includes one or more syntactically invalid byte-range-spec values MUST ignore the header field that includes that byte-range-set." Per the RFC 2616 specification, the following cases are all considered syntactically invalid; if the range value contains even one of them, the entire header is invalid and a ValueError is raised so that the header will be ignored: 1. the value does not start with bytes= 2. the range start is greater than the end, e.g. bytes=5-3 3. the range has neither a start nor an end, e.g. bytes=- 4. the range is missing the hyphen, e.g. bytes=45 5. the range value is non-numeric 6. any combination of the above Every syntactically valid range will be added into the ranges list even when some of the ranges may not be satisfied by the underlying content. :param headerval: value of the header as a str """ def __init__(self, headerval): if not headerval: raise ValueError('Invalid Range header: %r' % headerval) headerval = headerval.replace(' ', '') if not headerval.lower().startswith('bytes='): raise ValueError('Invalid Range header: %s' % headerval) self.ranges = [] for rng in headerval[6:].split(','): # Check that the range has the required hyphen.
if rng.find('-') == -1: raise ValueError('Invalid Range header: %s' % headerval) start, end = rng.split('-', 1) if start: # when start contains non numeric value, this also causes # ValueError start = int(start) else: start = None if end: # We could just rely on int() raising the ValueError, but # this catches things like '--0' if not end.isdigit(): raise ValueError('Invalid Range header: %s' % headerval) end = int(end) if end < 0: raise ValueError('Invalid Range header: %s' % headerval) elif start is not None and end < start: raise ValueError('Invalid Range header: %s' % headerval) else: end = None if start is None: raise ValueError('Invalid Range header: %s' % headerval) self.ranges.append((start, end)) def __str__(self): string = 'bytes=' for i, (start, end) in enumerate(self.ranges): if start is not None: string += str(start) string += '-' if end is not None: string += str(end) if i < len(self.ranges) - 1: string += ',' return string def ranges_for_length(self, length): """ This method is used to return multiple ranges for a given length which should represent the length of the underlying content. The constructor method __init__ made sure that any range in ranges list is syntactically valid. So if length is None or size of the ranges is zero, then the Range header should be ignored which will eventually make the response to be 200. If an empty list is returned by this method, it indicates that there are unsatisfiable ranges found in the Range header, 416 will be returned. if a returned list has at least one element, the list indicates that there is at least one range valid and the server should serve the request with a 206 status code. The start value of each range represents the starting position in the content, the end value represents the ending position. This method purposely adds 1 to the end number because the spec defines the Range to be inclusive. The Range spec can be found at the following link: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1 :param length: length of the underlying content """ # not syntactically valid ranges, must ignore if length is None or not self.ranges or self.ranges == []: return None all_ranges = [] for single_range in self.ranges: begin, end = single_range # The possible values for begin and end are # None, 0, or a positive numeric number if begin is None: if end == 0: # this is the bytes=-0 case continue elif end > length: # This is the case where the end is greater than the # content length, as the RFC 2616 stated, the entire # content should be returned. all_ranges.append((0, length)) else: all_ranges.append((length - end, length)) continue # begin can only be 0 and numeric value from this point on if end is None: if begin < length: all_ranges.append((begin, length)) else: # the begin position is greater than or equal to the # content length; skip and move on to the next range continue # end can only be 0 or numeric value elif begin < length: # the begin position is valid, take the min of end + 1 or # the total length of the content all_ranges.append((begin, min(end + 1, length))) # RFC 7233 section 6.1 ("Denial-of-Service Attacks Using Range") says: # # Unconstrained multiple range requests are susceptible to denial-of- # service attacks because the effort required to request many # overlapping ranges of the same data is tiny compared to the time, # memory, and bandwidth consumed by attempting to serve the requested # data in many parts. 
Servers ought to ignore, coalesce, or reject # egregious range requests, such as requests for more than two # overlapping ranges or for many small ranges in a single set, # particularly when the ranges are requested out of order for no # apparent reason. Multipart range requests are not designed to # support random access. # # We're defining "egregious" here as: # # * more than 50 requested ranges OR # * more than 2 overlapping ranges OR # * more than 8 non-ascending-order ranges if len(all_ranges) > MAX_RANGES: return [] overlaps = 0 for ((start1, end1), (start2, end2)) in pairs(all_ranges): if ((start1 < start2 < end1) or (start1 < end2 < end1) or (start2 < start1 < end2) or (start2 < end1 < end2)): overlaps += 1 if overlaps > MAX_RANGE_OVERLAPS: return [] ascending = True for start1, start2 in zip(all_ranges, all_ranges[1:]): if start1 > start2: ascending = False break if not ascending and len(all_ranges) >= MAX_NONASCENDING_RANGES: return [] return all_ranges class Match(object): """ Wraps a Request's If-[None-]Match header as a friendly object. :param headerval: value of the header as a str """ def __init__(self, headerval): self.tags = set() for tag in headerval.split(', '): if tag.startswith('"') and tag.endswith('"'): self.tags.add(tag[1:-1]) else: self.tags.add(tag) def __contains__(self, val): return '*' in self.tags or val in self.tags class Accept(object): """ Wraps a Request's Accept header as a friendly object. :param headerval: value of the header as a str """ # RFC 2616 section 2.2 token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' qdtext = r'[^"]' quoted_pair = r'(?:\\.)' quoted_string = r'"(?:' + qdtext + r'|' + quoted_pair + r')*"' extension = (r'(?:\s*;\s*(?:' + token + r")\s*=\s*" + r'(?:' + token + r'|' + quoted_string + r'))') acc = (r'^\s*(' + token + r')/(' + token + r')(' + extension + r'*?\s*)$') acc_pattern = re.compile(acc) def __init__(self, headerval): self.headerval = headerval def _get_types(self): types = [] if not self.headerval: return [] for typ in self.headerval.split(','): type_parms = self.acc_pattern.findall(typ) if not type_parms: raise ValueError('Invalid accept header') typ, subtype, parms = type_parms[0] parms = [p.strip() for p in parms.split(';') if p.strip()] seen_q_already = False quality = 1.0 for parm in parms: name, value = parm.split('=') name = name.strip() value = value.strip() if name == 'q': if seen_q_already: raise ValueError('Multiple "q" params') seen_q_already = True quality = float(value) pattern = '^' + \ (self.token if typ == '*' else re.escape(typ)) + '/' + \ (self.token if subtype == '*' else re.escape(subtype)) + '$' types.append((pattern, quality, '*' not in (typ, subtype))) # sort candidates by quality, then whether or not there were globs types.sort(reverse=True, key=lambda t: (t[1], t[2])) return [t[0] for t in types] def best_match(self, options): """ Returns the item from "options" that best matches the accept header. Returns None if no available options are acceptable to the client. :param options: a list of content-types the server can respond with :raises ValueError: if the header is malformed """ types = self._get_types() if not types and options: return options[0] for pattern in types: for option in options: if re.match(pattern, option): return option return None def __repr__(self): return self.headerval def _req_environ_property(environ_field): """ Set and retrieve value of the environ_field entry in self.environ. 
(Used by both request and response) """ def getter(self): return self.environ.get(environ_field, None) def setter(self, value): if isinstance(value, six.text_type): self.environ[environ_field] = value.encode('utf-8') else: self.environ[environ_field] = value return property(getter, setter, doc=("Get and set the %s property " "in the WSGI environment") % environ_field) def _req_body_property(): """ Set and retrieve the Request.body parameter. It consumes wsgi.input and returns the results. On assignment, uses a WsgiBytesIO to create a new wsgi.input. """ def getter(self): body = self.environ['wsgi.input'].read() self.environ['wsgi.input'] = WsgiBytesIO(body) return body def setter(self, value): self.environ['wsgi.input'] = WsgiBytesIO(value) self.environ['CONTENT_LENGTH'] = str(len(value)) return property(getter, setter, doc="Get and set the request body str") def _host_url_property(): """ Retrieves the best guess that can be made for an absolute location up to the path, for example: https://host.com:1234 """ def getter(self): if 'HTTP_HOST' in self.environ: host = self.environ['HTTP_HOST'] else: host = '%s:%s' % (self.environ['SERVER_NAME'], self.environ['SERVER_PORT']) scheme = self.environ.get('wsgi.url_scheme', 'http') if scheme == 'http' and host.endswith(':80'): host, port = host.rsplit(':', 1) elif scheme == 'https' and host.endswith(':443'): host, port = host.rsplit(':', 1) return '%s://%s' % (scheme, host) return property(getter, doc="Get url for request/response up to path") def is_chunked(headers): te = None for key in headers: if key.lower() == 'transfer-encoding': te = headers.get(key) if te: encodings = te.split(',') if len(encodings) > 1: raise AttributeError('Unsupported Transfer-Coding header' ' value specified in Transfer-Encoding' ' header') # If there are more than one transfer encoding value, the last # one must be chunked, see RFC 2616 Sec. 3.6 if encodings[-1].lower() == 'chunked': return True else: raise ValueError('Invalid Transfer-Encoding header value') else: return False class Request(object): """ WSGI Request object. """ range = _req_fancy_property(Range, 'range') if_none_match = _req_fancy_property(Match, 'if-none-match') accept = _req_fancy_property(Accept, 'accept', True) method = _req_environ_property('REQUEST_METHOD') referrer = referer = _req_environ_property('HTTP_REFERER') script_name = _req_environ_property('SCRIPT_NAME') path_info = _req_environ_property('PATH_INFO') host = _req_environ_property('HTTP_HOST') host_url = _host_url_property() remote_addr = _req_environ_property('REMOTE_ADDR') remote_user = _req_environ_property('REMOTE_USER') user_agent = _req_environ_property('HTTP_USER_AGENT') query_string = _req_environ_property('QUERY_STRING') if_match = _req_fancy_property(Match, 'if-match') body_file = _req_environ_property('wsgi.input') content_length = _header_int_property('content-length') if_modified_since = _datetime_property('if-modified-since') if_unmodified_since = _datetime_property('if-unmodified-since') body = _req_body_property() charset = None _params_cache = None _timestamp = None acl = _req_environ_property('swob.ACL') def __init__(self, environ): self.environ = environ self.headers = HeaderEnvironProxy(self.environ) @classmethod def blank(cls, path, environ=None, headers=None, body=None, **kwargs): """ Create a new request object with the given parameters, and an environment otherwise filled in with non-surprising default values. 
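A hedged usage sketch (the path, header, and body are illustrative)::

    req = Request.blank('/v1/AUTH_test/c/o',
                        headers={'X-Object-Meta-Color': 'blue'},
                        body='hello')
    req.method          # => 'GET', the non-surprising default
    req.content_length  # => 5, derived from the body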
:param path: encoded, parsed, and unquoted into PATH_INFO :param environ: WSGI environ dictionary :param headers: HTTP headers :param body: stuffed in a WsgiBytesIO and hung on wsgi.input :param kwargs: any environ key with an property setter """ headers = headers or {} environ = environ or {} if isinstance(path, six.text_type): path = path.encode('utf-8') parsed_path = urllib.parse.urlparse(path) server_name = 'localhost' if parsed_path.netloc: server_name = parsed_path.netloc.split(':', 1)[0] server_port = parsed_path.port if server_port is None: server_port = {'http': 80, 'https': 443}.get(parsed_path.scheme, 80) if parsed_path.scheme and parsed_path.scheme not in ['http', 'https']: raise TypeError('Invalid scheme: %s' % parsed_path.scheme) env = { 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'QUERY_STRING': parsed_path.query, 'PATH_INFO': urllib.parse.unquote(parsed_path.path), 'SERVER_NAME': server_name, 'SERVER_PORT': str(server_port), 'HTTP_HOST': '%s:%d' % (server_name, server_port), 'SERVER_PROTOCOL': 'HTTP/1.0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': parsed_path.scheme or 'http', 'wsgi.errors': StringIO(), 'wsgi.multithread': False, 'wsgi.multiprocess': False } env.update(environ) if body is not None: env['wsgi.input'] = WsgiBytesIO(body) env['CONTENT_LENGTH'] = str(len(body)) elif 'wsgi.input' not in env: env['wsgi.input'] = WsgiBytesIO() req = Request(env) for key, val in headers.items(): req.headers[key] = val for key, val in kwargs.items(): prop = getattr(Request, key, None) if prop and isinstance(prop, property): try: setattr(req, key, val) except AttributeError: pass else: continue raise TypeError("got unexpected keyword argument %r" % key) return req @property def params(self): "Provides QUERY_STRING parameters as a dictionary" if self._params_cache is None: if 'QUERY_STRING' in self.environ: self._params_cache = dict( urllib.parse.parse_qsl(self.environ['QUERY_STRING'], True)) else: self._params_cache = {} return self._params_cache str_params = params @params.setter def params(self, param_pairs): self._params_cache = None self.query_string = urllib.parse.urlencode(param_pairs) @property def timestamp(self): """ Provides HTTP_X_TIMESTAMP as a :class:`~swift.common.utils.Timestamp` """ if self._timestamp is None: try: raw_timestamp = self.environ['HTTP_X_TIMESTAMP'] except KeyError: raise InvalidTimestamp('Missing X-Timestamp header') try: self._timestamp = Timestamp(raw_timestamp) except ValueError: raise InvalidTimestamp('Invalid X-Timestamp header') return self._timestamp @property def path_qs(self): """The path of the request, without host but with query string.""" path = self.path if self.query_string: path += '?' + self.query_string return path @property def path(self): "Provides the full path of the request, excluding the QUERY_STRING" return urllib.parse.quote(self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO']) @property def swift_entity_path(self): """ Provides the account/container/object path, sans API version. This can be useful when constructing a path to send to a backend server, as that path will need everything after the "/v1". 
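For example (illustrative path)::

    req = Request.blank('/v1/AUTH_test/c/o')
    req.swift_entity_path  # => '/AUTH_test/c/o'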
""" _ver, entity_path = self.split_path(1, 2, rest_with_last=True) if entity_path is not None: return '/' + entity_path @property def is_chunked(self): return is_chunked(self.headers) @property def url(self): "Provides the full url of the request" return self.host_url + self.path_qs def as_referer(self): return self.method + ' ' + self.url def path_info_pop(self): """ Takes one path portion (delineated by slashes) from the path_info, and appends it to the script_name. Returns the path segment. """ path_info = self.path_info if not path_info or not path_info.startswith('/'): return None try: slash_loc = path_info.index('/', 1) except ValueError: slash_loc = len(path_info) self.script_name += path_info[:slash_loc] self.path_info = path_info[slash_loc:] return path_info[1:slash_loc] def copy_get(self): """ Makes a copy of the request, converting it to a GET. """ env = self.environ.copy() env.update({ 'REQUEST_METHOD': 'GET', 'CONTENT_LENGTH': '0', 'wsgi.input': WsgiBytesIO(), }) return Request(env) def call_application(self, application): """ Calls the application with this request's environment. Returns the status, headers, and app_iter for the response as a tuple. :param application: the WSGI application to call """ output = [] captured = [] def start_response(status, headers, exc_info=None): captured[:] = [status, headers, exc_info] return output.append app_iter = application(self.environ, start_response) if not app_iter: app_iter = output if not captured: app_iter = reiterate(app_iter) if not captured: raise RuntimeError('application never called start_response') return (captured[0], captured[1], app_iter) def get_response(self, application): """ Calls the application with this request's environment. Returns a Response object that wraps up the application's result. :param application: the WSGI application to call """ status, headers, app_iter = self.call_application(application) return Response(status=status, headers=dict(headers), app_iter=app_iter, request=self) def split_path(self, minsegs=1, maxsegs=None, rest_with_last=False): """ Validate and split the Request's path. **Examples**:: ['a'] = split_path('/a') ['a', None] = split_path('/a', 1, 2) ['a', 'c'] = split_path('/a/c', 1, 2) ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True) :param minsegs: Minimum number of segments to be extracted :param maxsegs: Maximum number of segments to be extracted :param rest_with_last: If True, trailing data will be returned as part of last segment. If False, and there is trailing data, raises ValueError. :returns: list of segments with a length of maxsegs (non-existent segments will return as None) :raises ValueError: if given an invalid path """ return split_path( self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO'], minsegs, maxsegs, rest_with_last) def message_length(self): """ Properly determine the message length for this request. It will return an integer if the headers explicitly contain the message length, or None if the headers don't contain a length. The ValueError exception will be raised if the headers are invalid. :raises ValueError: if either transfer-encoding or content-length headers have bad values :raises AttributeError: if the last value of the transfer-encoding header is not "chunked" """ if not is_chunked(self.headers): # Because we are not using chunked transfer encoding we can pay # attention to the content-length header. 
fsize = self.headers.get('content-length', None) if fsize is not None: try: fsize = int(fsize) except ValueError: raise ValueError('Invalid Content-Length header value') else: fsize = None return fsize def content_range_header_value(start, stop, size): return 'bytes %s-%s/%s' % (start, (stop - 1), size) def content_range_header(start, stop, size): return "Content-Range: " + content_range_header_value(start, stop, size) def multi_range_iterator(ranges, content_type, boundary, size, sub_iter_gen): for start, stop in ranges: yield ''.join(['--', boundary, '\r\n', 'Content-Type: ', content_type, '\r\n']) yield content_range_header(start, stop, size) + '\r\n\r\n' sub_iter = sub_iter_gen(start, stop) for chunk in sub_iter: yield chunk yield '\r\n' yield '--' + boundary + '--' class Response(object): """ WSGI Response object. """ content_length = _header_int_property('content-length') content_type = _resp_content_type_property() content_range = _header_property('content-range') etag = _resp_etag_property() status = _resp_status_property() status_int = None body = _resp_body_property() host_url = _host_url_property() last_modified = _datetime_property('last-modified') location = _header_property('location') accept_ranges = _header_property('accept-ranges') charset = _resp_charset_property() app_iter = _resp_app_iter_property() def __init__(self, body=None, status=200, headers=None, app_iter=None, request=None, conditional_response=False, conditional_etag=None, **kw): self.headers = HeaderKeyDict( [('Content-Type', 'text/html; charset=UTF-8')]) self.conditional_response = conditional_response self._conditional_etag = conditional_etag self.request = request self.body = body self.app_iter = app_iter self.response_iter = None self.status = status self.boundary = "%.32x" % random.randint(0, 256 ** 16) if request: self.environ = request.environ else: self.environ = {} if headers: if self._body and 'Content-Length' in headers: # If body is not empty, prioritize actual body length over # content_length in headers del headers['Content-Length'] self.headers.update(headers) if self.status_int == 401 and 'www-authenticate' not in self.headers: self.headers.update({'www-authenticate': self.www_authenticate()}) for key, value in kw.items(): setattr(self, key, value) # When specifying both 'content_type' and 'charset' in the kwargs, # charset needs to be applied *after* content_type, otherwise charset # can get wiped out when content_type sorts later in dict order. if 'charset' in kw and 'content_type' in kw: self.charset = kw['charset'] @property def conditional_etag(self): """ The conditional_etag keyword argument for Response will allow the conditional match value of a If-Match request to be compared to a non-standard value. This is available for Storage Policies that do not store the client object data verbatim on the storage nodes, but still need support conditional requests. It's most effectively used with X-Backend-Etag-Is-At which would define the additional Metadata key(s) where the original ETag of the clear-form client request data may be found. """ if self._conditional_etag is not None: return self._conditional_etag else: return self.etag def _prepare_for_ranges(self, ranges): """ Prepare the Response for multiple ranges. """ content_size = self.content_length content_type = self.headers.get('content-type') self.content_type = ''.join(['multipart/byteranges;', 'boundary=', self.boundary]) # This section calculates the total size of the response. 
section_header_fixed_len = ( # --boundary\r\n len(self.boundary) + 4 # Content-Type: \r\n + len('Content-Type: ') + len(content_type) + 2 # Content-Range: \r\n; accounted for later + len('Content-Range: ') + 2 # \r\n at end of headers + 2) body_size = 0 for start, end in ranges: body_size += section_header_fixed_len # length of the value of Content-Range, not including the \r\n # since that's already accounted for cr = content_range_header_value(start, end, content_size) body_size += len(cr) # the actual bytes (note: this range is half-open, i.e. begins # with byte <start> and ends with byte <end - 1>, so there's no # fencepost error here) body_size += (end - start) # \r\n prior to --boundary body_size += 2 # --boundary-- terminates the message body_size += len(self.boundary) + 4 self.content_length = body_size self.content_range = None return content_size, content_type def _get_conditional_response_status(self): """Checks for a conditional response from an If-Match or If-Modified request. If so, returns the correct status code (304 or 412). :returns: conditional response status (304 or 412) or None """ if self.conditional_etag and self.request.if_none_match and \ self.conditional_etag in self.request.if_none_match: return 304 if self.conditional_etag and self.request.if_match and \ self.conditional_etag not in self.request.if_match: return 412 if self.status_int == 404 and self.request.if_match \ and '*' in self.request.if_match: # If none of the entity tags match, or if "*" is given and no # current entity exists, the server MUST NOT perform the # requested method, and MUST return a 412 (Precondition # Failed) response. [RFC 2616 section 14.24] return 412 if self.last_modified and self.request.if_modified_since \ and self.last_modified <= self.request.if_modified_since: return 304 if self.last_modified and self.request.if_unmodified_since \ and self.last_modified > self.request.if_unmodified_since: return 412 return None def _response_iter(self, app_iter, body): if self.conditional_response and self.request: empty_resp = self._get_conditional_response_status() if empty_resp is not None: self.status = empty_resp self.content_length = 0 close_if_possible(app_iter) return [''] if self.request and self.request.method == 'HEAD': # We explicitly do NOT want to set self.content_length to 0 here return [''] if self.conditional_response and self.request and \ self.request.range and self.request.range.ranges and \ not self.content_range: ranges = self.request.range.ranges_for_length(self.content_length) if ranges == []: self.status = 416 close_if_possible(app_iter) self.headers['Content-Range'] = \ 'bytes */%d' % self.content_length # Setting body + app_iter to None makes us emit the default # body text from RESPONSE_REASONS.
body = None app_iter = None elif ranges: range_size = len(ranges) if range_size > 0: # There is at least one valid range in the request, so try # to satisfy the request if range_size == 1: start, end = ranges[0] if app_iter and hasattr(app_iter, 'app_iter_range'): self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return app_iter.app_iter_range(start, end) elif body: self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return [body[start:end]] elif range_size > 1: if app_iter and hasattr(app_iter, 'app_iter_ranges'): self.status = 206 content_size, content_type = \ self._prepare_for_ranges(ranges) return app_iter.app_iter_ranges(ranges, content_type, self.boundary, content_size) elif body: self.status = 206 content_size, content_type, = \ self._prepare_for_ranges(ranges) def _body_slicer(start, stop): yield body[start:stop] return multi_range_iterator(ranges, content_type, self.boundary, content_size, _body_slicer) if app_iter: return app_iter if body is not None: return [body] if self.status_int in RESPONSE_REASONS: title, exp = RESPONSE_REASONS[self.status_int] if exp: body = '
<html><h1>%s</h1><p>%s</p></html>
' % ( title, exp % defaultdict(lambda: 'unknown', self.__dict__)) self.content_length = len(body) return [body] return [''] def fix_conditional_response(self): """ You may call this once you have set the content_length to the whole object length and body or app_iter to reset the content_length properties on the request. It is ok to not call this method, the conditional response will be maintained for you when you __call__ the response. """ self.response_iter = self._response_iter(self.app_iter, self._body) def absolute_location(self): """ Attempt to construct an absolute location. """ if not self.location.startswith('/'): return self.location return self.host_url + self.location def www_authenticate(self): """ Construct a suitable value for WWW-Authenticate response header If we have a request and a valid-looking path, the realm is the account; otherwise we set it to 'unknown'. """ try: vrs, realm, rest = self.request.split_path(2, 3, True) if realm in ('v1.0', 'auth'): realm = 'unknown' except (AttributeError, ValueError): realm = 'unknown' return 'Swift realm="%s"' % urllib.parse.quote(realm) @property def is_success(self): return self.status_int // 100 == 2 def __call__(self, env, start_response): """ Respond to the WSGI request. .. warning:: This will translate any relative Location header value to an absolute URL using the WSGI environment's HOST_URL as a prefix, as RFC 2616 specifies. However, it is quite common to use relative redirects, especially when it is difficult to know the exact HOST_URL the browser would have used when behind several CNAMEs, CDN services, etc. All modern browsers support relative redirects. To skip over RFC enforcement of the Location header value, you may set ``env['swift.leave_relative_location'] = True`` in the WSGI environment. """ if not self.request: self.request = Request(env) self.environ = env if not self.response_iter: self.response_iter = self._response_iter(self.app_iter, self._body) if 'location' in self.headers and \ not env.get('swift.leave_relative_location'): self.location = self.absolute_location() start_response(self.status, self.headers.items()) return self.response_iter class HTTPException(Response, Exception): def __init__(self, *args, **kwargs): Response.__init__(self, *args, **kwargs) Exception.__init__(self, self.status) def wsgify(func): """ A decorator for translating functions which take a swob Request object and return a Response object into WSGI callables. Also catches any raised HTTPExceptions and treats them as a returned Response. """ argspec = inspect.getargspec(func) if argspec.args and argspec.args[0] == 'self': @functools.wraps(func) def _wsgify_self(self, env, start_response): try: return func(self, Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_self else: @functools.wraps(func) def _wsgify_bare(env, start_response): try: return func(Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_bare class StatusMap(object): """ A dict-like object that returns HTTPException subclasses/factory functions where the given key is the status code. 
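A hedged sketch of typical use (the status code and body are arbitrary)::

    resp = status_map[404](request=req, body='no such object')
    # equivalent to HTTPNotFound(request=req, body='no such object')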
""" def __getitem__(self, key): return partial(HTTPException, status=key) status_map = StatusMap() HTTPOk = status_map[200] HTTPCreated = status_map[201] HTTPAccepted = status_map[202] HTTPNoContent = status_map[204] HTTPPartialContent = status_map[206] HTTPMovedPermanently = status_map[301] HTTPFound = status_map[302] HTTPSeeOther = status_map[303] HTTPNotModified = status_map[304] HTTPTemporaryRedirect = status_map[307] HTTPBadRequest = status_map[400] HTTPUnauthorized = status_map[401] HTTPForbidden = status_map[403] HTTPMethodNotAllowed = status_map[405] HTTPNotFound = status_map[404] HTTPNotAcceptable = status_map[406] HTTPRequestTimeout = status_map[408] HTTPConflict = status_map[409] HTTPLengthRequired = status_map[411] HTTPPreconditionFailed = status_map[412] HTTPRequestEntityTooLarge = status_map[413] HTTPRequestedRangeNotSatisfiable = status_map[416] HTTPUnprocessableEntity = status_map[422] HTTPClientDisconnect = status_map[499] HTTPServerError = status_map[500] HTTPInternalServerError = status_map[500] HTTPNotImplemented = status_map[501] HTTPBadGateway = status_map[502] HTTPServiceUnavailable = status_map[503] HTTPInsufficientStorage = status_map[507] swift-2.17.0/swift/common/internal_client.py0000666000175100017510000010753613236061617021154 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import sleep, Timeout from eventlet.green import httplib, socket import json import six from six.moves import range from six.moves import urllib import struct from sys import exc_info, exit import zlib from time import gmtime, strftime, time from zlib import compressobj from swift.common.exceptions import ClientException from swift.common.http import (HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES, is_server_error) from swift.common.swob import Request from swift.common.utils import quote, closing_if_possible from swift.common.wsgi import loadapp, pipeline_property if six.PY3: from eventlet.green.urllib import request as urllib2 else: from eventlet.green import urllib2 class UnexpectedResponse(Exception): """ Exception raised on invalid responses to InternalClient.make_request(). :param message: Exception message. :param resp: The unexpected response. """ def __init__(self, message, resp): super(UnexpectedResponse, self).__init__(message) self.resp = resp class CompressingFileReader(object): """ Wrapper for file object to compress object while reading. Can be used to wrap file objects passed to InternalClient.upload_object(). Used in testing of InternalClient. :param file_obj: File object to wrap. :param compresslevel: Compression level, defaults to 9. :param chunk_size: Size of chunks read when iterating using object, defaults to 4096. """ def __init__(self, file_obj, compresslevel=9, chunk_size=4096): self._f = file_obj self.compresslevel = compresslevel self.chunk_size = chunk_size self.set_initial_state() def set_initial_state(self): """ Sets the object to the state needed for the first read. 
""" self._f.seek(0) self._compressor = compressobj( self.compresslevel, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) self.done = False self.first = True self.crc32 = 0 self.total_size = 0 def read(self, *a, **kw): """ Reads a chunk from the file object. Params are passed directly to the underlying file object's read(). :returns: Compressed chunk from file object. """ if self.done: return '' x = self._f.read(*a, **kw) if x: self.crc32 = zlib.crc32(x, self.crc32) & 0xffffffff self.total_size += len(x) compressed = self._compressor.compress(x) if not compressed: compressed = self._compressor.flush(zlib.Z_SYNC_FLUSH) else: compressed = self._compressor.flush(zlib.Z_FINISH) crc32 = struct.pack("= HTTP_MULTIPLE_CHOICES: ''.join(resp.app_iter) break data = json.loads(resp.body) if not data: break for item in data: yield item marker = data[-1]['name'].encode('utf8') def make_path(self, account, container=None, obj=None): """ Returns a swift path for a request quoting and utf-8 encoding the path parts as need be. :param account: swift account :param container: container, defaults to None :param obj: object, defaults to None :raises ValueError: Is raised if obj is specified and container is not. """ path = '/v1/%s' % quote(account) if container: path += '/%s' % quote(container) if obj: path += '/%s' % quote(obj) elif obj: raise ValueError('Object specified without container') return path def _set_metadata( self, path, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets metadata on path using metadata_prefix to set values in headers of POST request. :param path: Path to do POST on. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ headers = {} for k, v in metadata.items(): if k.lower().startswith(metadata_prefix): headers[k] = v else: headers['%s%s' % (metadata_prefix, k)] = v self.make_request('POST', path, headers, acceptable_statuses) # account methods def iter_containers( self, account, marker='', end_marker='', prefix='', acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns an iterator of containers dicts from an account. :param account: Account on which to do the container listing. :param marker: Prefix of first desired item, defaults to ''. :param end_marker: Last item returned will be 'less' than this, defaults to ''. :param prefix: Prefix of containers :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) return self._iter_items(path, marker, end_marker, prefix, acceptable_statuses) def get_account_info( self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns (container_count, object_count) for an account. :param account: Account on which to get the information. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). 
:raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) resp = self.make_request('HEAD', path, {}, acceptable_statuses) if not resp.status_int // 100 == 2: return (0, 0) return (int(resp.headers.get('x-account-container-count', 0)), int(resp.headers.get('x-account-object-count', 0))) def get_account_metadata( self, account, metadata_prefix='', acceptable_statuses=(2,)): """Gets account metadata. :param account: Account on which to get the metadata. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :returns: Returns dict of account metadata. Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) return self._get_metadata(path, metadata_prefix, acceptable_statuses) def set_account_metadata( self, account, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets account metadata. A call to this will add to the account metadata and not overwrite all of it with values in the metadata dict. To clear an account metadata value, pass an empty string as the value for the key in the metadata dict. :param account: Account on which to get the metadata. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) # container methods def container_exists(self, account, container): """Checks to see if a container exists. :param account: The container's account. :param container: Container to check. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. :returns: True if container exists, false otherwise. """ path = self.make_path(account, container) resp = self.make_request('HEAD', path, {}, (2, HTTP_NOT_FOUND)) return not resp.status_int == HTTP_NOT_FOUND def create_container( self, account, container, headers=None, acceptable_statuses=(2,)): """ Creates container. :param account: The container's account. :param container: Container to create. :param headers: Defaults to empty dict. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ headers = headers or {} path = self.make_path(account, container) self.make_request('PUT', path, headers, acceptable_statuses) def delete_container( self, account, container, acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Deletes a container. :param account: The container's account. 
:param container: Container to delete. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) self.make_request('DELETE', path, {}, acceptable_statuses) def get_container_metadata( self, account, container, metadata_prefix='', acceptable_statuses=(2,)): """Gets container metadata. :param account: The container's account. :param container: Container to get metadata on. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :returns: Returns dict of container metadata. Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) return self._get_metadata(path, metadata_prefix, acceptable_statuses) def iter_objects( self, account, container, marker='', end_marker='', prefix='', acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns an iterator of object dicts from a container. :param account: The container's account. :param container: Container to iterate objects on. :param marker: Prefix of first desired item, defaults to ''. :param end_marker: Last item returned will be 'less' than this, defaults to ''. :param prefix: Prefix of objects :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) return self._iter_items(path, marker, end_marker, prefix, acceptable_statuses) def set_container_metadata( self, account, container, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets container metadata. A call to this will add to the container metadata and not overwrite all of it with values in the metadata dict. To clear a container metadata value, pass an empty string as the value for the key in the metadata dict. :param account: The container's account. :param container: Container to set metadata on. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) # object methods def delete_object( self, account, container, obj, acceptable_statuses=(2, HTTP_NOT_FOUND), headers=None): """ Deletes an object. :param account: The object's account. :param container: The object's container. :param obj: The object. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). 
:param headers: extra headers to send with request :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) self.make_request('DELETE', path, (headers or {}), acceptable_statuses) def get_object_metadata( self, account, container, obj, metadata_prefix='', acceptable_statuses=(2,), headers=None): """Gets object metadata. :param account: The object's account. :param container: The object's container. :param obj: The object. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :param headers: extra headers to send with request :returns: Dict of object metadata. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) return self._get_metadata(path, metadata_prefix, acceptable_statuses, headers=headers) def get_object(self, account, container, obj, headers, acceptable_statuses=(2,), params=None): """ Gets an object. :param account: The object's account. :param container: The object's container. :param obj: The object name. :param headers: Headers to send with request, defaults to empty dict. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :param params: A dict of params to be set in request query string, defaults to None. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. :returns: A 3-tuple (status, headers, iterator of object body) """ headers = headers or {} path = self.make_path(account, container, obj) resp = self.make_request( 'GET', path, headers, acceptable_statuses, params=params) return (resp.status_int, resp.headers, resp.app_iter) def iter_object_lines( self, account, container, obj, headers=None, acceptable_statuses=(2,)): """ Returns an iterator of object lines from an uncompressed or compressed text object. Uncompress object as it is read if the object's name ends with '.gz'. :param account: The object's account. :param container: The object's container. :param obj: The object. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. 
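For example, a minimal usage sketch (the config path and the account, container and object names below are placeholders, not part of this module)::

    client = InternalClient('/etc/swift/internal-client.conf',
                            'example-agent', 3)
    for line in client.iter_object_lines('AUTH_test', 'log_container',
                                         'server.log.gz'):
        do_something(line)  # hypothetical per-line callback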
""" headers = headers or {} path = self.make_path(account, container, obj) resp = self.make_request('GET', path, headers, acceptable_statuses) if not resp.status_int // 100 == 2: return last_part = '' compressed = obj.endswith('.gz') # magic in the following zlib.decompressobj argument is courtesy of # Python decompressing gzip chunk-by-chunk # http://stackoverflow.com/questions/2423866 d = zlib.decompressobj(16 + zlib.MAX_WBITS) for chunk in resp.app_iter: if compressed: chunk = d.decompress(chunk) parts = chunk.split('\n') if len(parts) == 1: last_part = last_part + parts[0] else: parts[0] = last_part + parts[0] for part in parts[:-1]: yield part last_part = parts[-1] if last_part: yield last_part def set_object_metadata( self, account, container, obj, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets an object's metadata. The object's metadata will be overwritten by the values in the metadata dict. :param account: The object's account. :param container: The object's container. :param obj: The object. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) def upload_object( self, fobj, account, container, obj, headers=None): """ :param fobj: File object to read object's content from. :param account: The object's account. :param container: The object's container. :param obj: The object. :param headers: Headers to send with request, defaults to empty dict. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. 
""" headers = dict(headers or {}) if 'Content-Length' not in headers: headers['Transfer-Encoding'] = 'chunked' path = self.make_path(account, container, obj) self.make_request('PUT', path, headers, (2,), fobj) def get_auth(url, user, key, auth_version='1.0', **kwargs): if auth_version != '1.0': exit('ERROR: swiftclient missing, only auth v1.0 supported') req = urllib2.Request(url) req.add_header('X-Auth-User', user) req.add_header('X-Auth-Key', key) conn = urllib2.urlopen(req) headers = conn.info() return ( headers.getheader('X-Storage-Url'), headers.getheader('X-Auth-Token')) class SimpleClient(object): """ Simple client that is used in bin/swift-dispersion-* and container sync """ def __init__(self, url=None, token=None, starting_backoff=1, max_backoff=5, retries=5): self.url = url self.token = token self.attempts = 0 # needed in swif-dispersion-populate self.starting_backoff = starting_backoff self.max_backoff = max_backoff self.retries = retries def base_request(self, method, container=None, name=None, prefix=None, headers=None, proxy=None, contents=None, full_listing=None, logger=None, additional_info=None, timeout=None, marker=None): # Common request method trans_start = time() url = self.url if full_listing: info, body_data = self.base_request( method, container, name, prefix, headers, proxy, timeout=timeout, marker=marker) listing = body_data while listing: marker = listing[-1]['name'] info, listing = self.base_request( method, container, name, prefix, headers, proxy, timeout=timeout, marker=marker) if listing: body_data.extend(listing) return [info, body_data] if headers is None: headers = {} if self.token: headers['X-Auth-Token'] = self.token if container: url = '%s/%s' % (url.rstrip('/'), quote(container)) if name: url = '%s/%s' % (url.rstrip('/'), quote(name)) else: params = ['format=json'] if prefix: params.append('prefix=%s' % prefix) if marker: params.append('marker=%s' % quote(marker)) url += '?' 
+ '&'.join(params) req = urllib2.Request(url, headers=headers, data=contents) if proxy: proxy = urllib.parse.urlparse(proxy) req.set_proxy(proxy.netloc, proxy.scheme) req.get_method = lambda: method conn = urllib2.urlopen(req, timeout=timeout) body = conn.read() info = conn.info() try: body_data = json.loads(body) except ValueError: body_data = None trans_stop = time() if logger: sent_content_length = 0 for n, v in headers.items(): nl = n.lower() if nl == 'content-length': try: sent_content_length = int(v) break except ValueError: pass logger.debug("-> " + " ".join( quote(str(x) if x else "-", ":/") for x in ( strftime('%Y-%m-%dT%H:%M:%S', gmtime(trans_stop)), method, url, conn.getcode(), sent_content_length, info['content-length'], trans_start, trans_stop, trans_stop - trans_start, additional_info ))) return [info, body_data] def retry_request(self, method, **kwargs): retries = kwargs.pop('retries', self.retries) self.attempts = 0 backoff = self.starting_backoff while self.attempts <= retries: self.attempts += 1 try: return self.base_request(method, **kwargs) except (socket.error, httplib.HTTPException, urllib2.URLError) \ as err: if self.attempts > retries: if isinstance(err, urllib2.HTTPError): raise ClientException('Raise too many retries', http_status=err.getcode()) else: raise sleep(backoff) backoff = min(backoff * 2, self.max_backoff) def get_account(self, *args, **kwargs): # Used in swift-dispersion-populate return self.retry_request('GET', **kwargs) def put_container(self, container, **kwargs): # Used in swift-dispersion-populate return self.retry_request('PUT', container=container, **kwargs) def get_container(self, container, **kwargs): # Used in swift-dispersion-populate return self.retry_request('GET', container=container, **kwargs) def put_object(self, container, name, contents, **kwargs): # Used in swift-dispersion-populate return self.retry_request('PUT', container=container, name=name, contents=contents.read(), **kwargs) def head_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) return client.retry_request('HEAD', **kwargs) def put_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) client.retry_request('PUT', **kwargs) def delete_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) client.retry_request('DELETE', **kwargs) swift-2.17.0/swift/common/http.py0000666000175100017510000001076713236061617016760 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. def is_informational(status): """ Check if HTTP status code is informational. :param status: http status code :returns: True if status is successful, else False """ return 100 <= status <= 199 def is_success(status): """ Check if HTTP status code is successful. :param status: http status code :returns: True if status is successful, else False """ return 200 <= status <= 299 def is_redirection(status): """ Check if HTTP status code is redirection. 
:param status: http status code :returns: True if status is redirection, else False """ return 300 <= status <= 399 def is_client_error(status): """ Check if HTTP status code is client error. :param status: http status code :returns: True if status is client error, else False """ return 400 <= status <= 499 def is_server_error(status): """ Check if HTTP status code is server error. :param status: http status code :returns: True if status is server error, else False """ return 500 <= status <= 599 # List of HTTP status codes ############################################################################### # 1xx Informational ############################################################################### HTTP_CONTINUE = 100 HTTP_SWITCHING_PROTOCOLS = 101 HTTP_PROCESSING = 102 # WebDAV HTTP_CHECKPOINT = 103 HTTP_REQUEST_URI_TOO_LONG = 122 ############################################################################### # 2xx Success ############################################################################### HTTP_OK = 200 HTTP_CREATED = 201 HTTP_ACCEPTED = 202 HTTP_NON_AUTHORITATIVE_INFORMATION = 203 HTTP_NO_CONTENT = 204 HTTP_RESET_CONTENT = 205 HTTP_PARTIAL_CONTENT = 206 HTTP_MULTI_STATUS = 207 # WebDAV HTTP_IM_USED = 226 ############################################################################### # 3xx Redirection ############################################################################### HTTP_MULTIPLE_CHOICES = 300 HTTP_MOVED_PERMANENTLY = 301 HTTP_FOUND = 302 HTTP_SEE_OTHER = 303 HTTP_NOT_MODIFIED = 304 HTTP_USE_PROXY = 305 HTTP_SWITCH_PROXY = 306 HTTP_TEMPORARY_REDIRECT = 307 HTTP_RESUME_INCOMPLETE = 308 ############################################################################### # 4xx Client Error ############################################################################### HTTP_BAD_REQUEST = 400 HTTP_UNAUTHORIZED = 401 HTTP_PAYMENT_REQUIRED = 402 HTTP_FORBIDDEN = 403 HTTP_NOT_FOUND = 404 HTTP_METHOD_NOT_ALLOWED = 405 HTTP_NOT_ACCEPTABLE = 406 HTTP_PROXY_AUTHENTICATION_REQUIRED = 407 HTTP_REQUEST_TIMEOUT = 408 HTTP_CONFLICT = 409 HTTP_GONE = 410 HTTP_LENGTH_REQUIRED = 411 HTTP_PRECONDITION_FAILED = 412 HTTP_REQUEST_ENTITY_TOO_LARGE = 413 HTTP_REQUEST_URI_TOO_LONG = 414 HTTP_UNSUPPORTED_MEDIA_TYPE = 415 HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = 416 HTTP_EXPECTATION_FAILED = 417 HTTP_IM_A_TEAPOT = 418 HTTP_UNPROCESSABLE_ENTITY = 422 # WebDAV HTTP_LOCKED = 423 # WebDAV HTTP_FAILED_DEPENDENCY = 424 # WebDAV HTTP_UNORDERED_COLLECTION = 425 HTTP_UPGRADE_REQUIED = 426 HTTP_PRECONDITION_REQUIRED = 428 HTTP_TOO_MANY_REQUESTS = 429 HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = 431 HTTP_NO_RESPONSE = 444 HTTP_RETRY_WITH = 449 HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450 HTTP_CLIENT_CLOSED_REQUEST = 499 ############################################################################### # 5xx Server Error ############################################################################### HTTP_INTERNAL_SERVER_ERROR = 500 HTTP_NOT_IMPLEMENTED = 501 HTTP_BAD_GATEWAY = 502 HTTP_SERVICE_UNAVAILABLE = 503 HTTP_GATEWAY_TIMEOUT = 504 HTTP_VERSION_NOT_SUPPORTED = 505 HTTP_VARIANT_ALSO_NEGOTIATES = 506 HTTP_INSUFFICIENT_STORAGE = 507 # WebDAV HTTP_BANDWIDTH_LIMIT_EXCEEDED = 509 HTTP_NOT_EXTENDED = 510 HTTP_NETWORK_AUTHENTICATION_REQUIRED = 511 HTTP_NETWORK_READ_TIMEOUT_ERROR = 598 # not used in RFC HTTP_NETWORK_CONNECT_TIMEOUT_ERROR = 599 # not used in RFC swift-2.17.0/swift/common/middleware/0000775000175100017510000000000013236061751017526 5ustar 
zuulzuul00000000000000swift-2.17.0/swift/common/middleware/name_check.py0000666000175100017510000001232413236061617022162 0ustar zuulzuul00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Created on February 27, 2012 A filter that disallows any paths that contain defined forbidden characters or that exceed a defined length. Place early in the proxy-server pipeline after the left-most occurrence of the ``proxy-logging`` middleware (if present) and before the final ``proxy-logging`` middleware (if present) or the ``proxy-server`` app itself, e.g.:: [pipeline:main] pipeline = catch_errors healthcheck proxy-logging name_check cache \ ratelimit tempauth sos proxy-logging proxy-server [filter:name_check] use = egg:swift#name_check forbidden_chars = '"`<> maximum_length = 255 There are default settings for forbidden_chars (FORBIDDEN_CHARS) and maximum_length (MAX_LENGTH). The filter returns HTTPBadRequest if the path is invalid. @author: eamonn-otoole ''' import re from swift.common.utils import get_logger, register_swift_info from swift.common.swob import Request, HTTPBadRequest FORBIDDEN_CHARS = "\'\"`<>" MAX_LENGTH = 255 FORBIDDEN_REGEXP = "/\./|/\.\./|/\.$|/\.\.$" class NameCheckMiddleware(object): def __init__(self, app, conf): self.app = app self.conf = conf self.forbidden_chars = self.conf.get('forbidden_chars', FORBIDDEN_CHARS) self.maximum_length = int(self.conf.get('maximum_length', MAX_LENGTH)) self.forbidden_regexp = self.conf.get('forbidden_regexp', FORBIDDEN_REGEXP) if self.forbidden_regexp: self.forbidden_regexp_compiled = re.compile(self.forbidden_regexp) else: self.forbidden_regexp_compiled = None self.logger = get_logger(self.conf, log_route='name_check') self.register_info() def register_info(self): register_swift_info('name_check', forbidden_chars=self.forbidden_chars, maximum_length=self.maximum_length, forbidden_regexp=self.forbidden_regexp ) def check_character(self, req): ''' Checks req.path for any forbidden characters Returns True if there are any forbidden characters Returns False if there aren't any forbidden characters ''' self.logger.debug("name_check: path %s" % req.path) self.logger.debug("name_check: self.forbidden_chars %s" % self.forbidden_chars) return any((c in req.path_info) for c in self.forbidden_chars) def check_length(self, req): ''' Checks that req.path doesn't exceed the defined maximum length Returns True if the length exceeds the maximum Returns False if the length is <= the maximum ''' length = len(req.path_info) return length > self.maximum_length def check_regexp(self, req): ''' Checks that req.path doesn't contain a substring matching regexps. 
Returns True if there are any forbidden substrings Returns False if there aren't any forbidden substrings ''' if self.forbidden_regexp_compiled is None: return False self.logger.debug("name_check: path %s" % req.path) self.logger.debug("name_check: self.forbidden_regexp %s" % self.forbidden_regexp) match = self.forbidden_regexp_compiled.search(req.path_info) return (match is not None) def __call__(self, env, start_response): req = Request(env) if self.check_character(req): return HTTPBadRequest( request=req, body=("Object/Container/Account name contains forbidden " "chars from %s" % self.forbidden_chars))(env, start_response) elif self.check_length(req): return HTTPBadRequest( request=req, body=("Object/Container/Account name longer than the " "allowed maximum " "%s" % self.maximum_length))(env, start_response) elif self.check_regexp(req): return HTTPBadRequest( request=req, body=("Object/Container/Account name contains a forbidden " "substring from regular expression %s" % self.forbidden_regexp))(env, start_response) else: # Pass on to downstream WSGI component return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def name_check_filter(app): return NameCheckMiddleware(app, conf) return name_check_filter swift-2.17.0/swift/common/middleware/proxy_logging.py0000666000175100017510000003566213236061617023002 0ustar zuulzuul00000000000000# Copyright (c) 2010-2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging middleware for the Swift proxy. This serves as both the default logging implementation and an example of how to plug in your own logging format/method. The logging format implemented below is as follows: client_ip remote_addr datetime request_method request_path protocol status_int referer user_agent auth_token bytes_recvd bytes_sent client_etag transaction_id headers request_time source log_info request_start_time request_end_time These values are space-separated, and each is url-encoded, so that they can be separated with a simple .split() (see the example below). * remote_addr is the contents of the REMOTE_ADDR environment variable, while client_ip is swift's best guess at the end-user IP, extracted variously from the X-Forwarded-For header, X-Cluster-Ip header, or the REMOTE_ADDR environment variable. * source (swift.source in the WSGI environment) indicates the code that generated the request, such as most middleware. (See below for more detail.) * log_info (swift.log_info in the WSGI environment) is for additional information that could prove quite useful, such as any x-delete-at value or other "behind the scenes" activity that might not otherwise be detectable from the plain log information. Code that wishes to add additional log information should use code like ``env.setdefault('swift.log_info', []).append(your_info)`` so as to not disturb others' log information. * Values that are missing (e.g. due to a header not being present) or zero are generally represented by a single hyphen ('-'). 
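For example, a hypothetical (abbreviated) access log line can be decomposed like so::

    from six.moves.urllib.parse import unquote

    line = ('1.2.3.4 1.2.3.4 26/Jan/2018/10/00/00 GET '
            '/v1/AUTH_test/c/o HTTP/1.0 200 ...')
    fields = [unquote(v) for v in line.split()]
    client_ip, remote_addr, timestamp, method, path = fields[:5]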
The proxy-logging can be used twice in the proxy server's pipeline when there is middleware installed that can return custom responses that don't follow the standard pipeline to the proxy server. For example, with staticweb, the middleware might intercept a request to /v1/AUTH_acc/cont/, make a subrequest to the proxy to retrieve /v1/AUTH_acc/cont/index.html and, in effect, respond to the client's original request using the 2nd request's body. In this instance the subrequest will be logged by the rightmost middleware (with a swift.source set) and the outgoing request (with body overridden) will be logged by leftmost middleware. Requests that follow the normal pipeline (use the same wsgi environment throughout) will not be double logged because an environment variable (swift.proxy_access_log_made) is checked/set when a log is made. All middleware making subrequests should take care to set swift.source when needed. With the doubled proxy logs, any consumer/processor of swift's proxy logs should look at the swift.source field, the rightmost log value, to decide if this is a middleware subrequest or not. A log processor calculating bandwidth usage will want to only sum up logs with no swift.source. """ import sys import time import six from six.moves.urllib.parse import quote, unquote from swift.common.swob import Request from swift.common.utils import (get_logger, get_remote_client, get_valid_utf8_str, config_true_value, InputProxy, list_from_csv, get_policy_index) from swift.common.storage_policy import POLICIES QUOTE_SAFE = '/:' class ProxyLoggingMiddleware(object): """ Middleware that logs Swift proxy requests in the swift log format. """ def __init__(self, app, conf, logger=None): self.app = app self.log_hdrs = config_true_value(conf.get( 'access_log_headers', conf.get('log_headers', 'no'))) log_hdrs_only = list_from_csv(conf.get( 'access_log_headers_only', '')) self.log_hdrs_only = [x.title() for x in log_hdrs_only] # The leading access_* check is in case someone assumes that # log_statsd_valid_http_methods behaves like the other log_statsd_* # settings. self.valid_methods = conf.get( 'access_log_statsd_valid_http_methods', conf.get('log_statsd_valid_http_methods', 'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS')) self.valid_methods = [m.strip().upper() for m in self.valid_methods.split(',') if m.strip()] access_log_conf = {} for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host', 'log_udp_port', 'log_statsd_host', 'log_statsd_port', 'log_statsd_default_sample_rate', 'log_statsd_sample_rate_factor', 'log_statsd_metric_prefix'): value = conf.get('access_' + key, conf.get(key, None)) if value: access_log_conf[key] = value self.access_logger = logger or get_logger(access_log_conf, log_route='proxy-access') self.access_logger.set_statsd_prefix('proxy-server') self.reveal_sensitive_prefix = int( conf.get('reveal_sensitive_prefix', 16)) def method_from_req(self, req): return req.environ.get('swift.orig_req_method', req.method) def req_already_logged(self, env): return env.get('swift.proxy_access_log_made') def mark_req_logged(self, env): env['swift.proxy_access_log_made'] = True def obscure_sensitive(self, value): if value and len(value) > self.reveal_sensitive_prefix: return value[:self.reveal_sensitive_prefix] + '...' return value def log_request(self, req, status_int, bytes_received, bytes_sent, start_time, end_time, resp_headers=None): """ Log a request. 
:param req: swob.Request object for the request :param status_int: integer code for the response status :param bytes_received: bytes successfully read from the request body :param bytes_sent: bytes yielded to the WSGI server :param start_time: timestamp request started :param end_time: timestamp request completed :param resp_headers: dict of the response headers """ resp_headers = resp_headers or {} req_path = get_valid_utf8_str(req.path) the_request = quote(unquote(req_path), QUOTE_SAFE) if req.query_string: the_request = the_request + '?' + req.query_string logged_headers = None if self.log_hdrs: if self.log_hdrs_only: logged_headers = '\n'.join('%s: %s' % (k, v) for k, v in req.headers.items() if k in self.log_hdrs_only) else: logged_headers = '\n'.join('%s: %s' % (k, v) for k, v in req.headers.items()) method = self.method_from_req(req) end_gmtime_str = time.strftime('%d/%b/%Y/%H/%M/%S', time.gmtime(end_time)) duration_time_str = "%.4f" % (end_time - start_time) start_time_str = "%.9f" % start_time end_time_str = "%.9f" % end_time policy_index = get_policy_index(req.headers, resp_headers) self.access_logger.info(' '.join( quote(str(x) if x else '-', QUOTE_SAFE) for x in ( get_remote_client(req), req.remote_addr, end_gmtime_str, method, the_request, req.environ.get('SERVER_PROTOCOL'), status_int, req.referer, req.user_agent, self.obscure_sensitive(req.headers.get('x-auth-token')), bytes_received, bytes_sent, req.headers.get('etag', None), req.environ.get('swift.trans_id'), logged_headers, duration_time_str, req.environ.get('swift.source'), ','.join(req.environ.get('swift.log_info') or ''), start_time_str, end_time_str, policy_index ))) # Log timing and bytes-transferred data to StatsD metric_name = self.statsd_metric_name(req, status_int, method) metric_name_policy = self.statsd_metric_name_policy(req, status_int, method, policy_index) # Only log data for valid controllers (or SOS) to keep the metric count # down (egregious errors will get logged by the proxy server itself). 
if metric_name: self.access_logger.timing(metric_name + '.timing', (end_time - start_time) * 1000) self.access_logger.update_stats(metric_name + '.xfer', bytes_received + bytes_sent) if metric_name_policy: self.access_logger.timing(metric_name_policy + '.timing', (end_time - start_time) * 1000) self.access_logger.update_stats(metric_name_policy + '.xfer', bytes_received + bytes_sent) def get_metric_name_type(self, req): if req.path.startswith('/v1/'): try: stat_type = [None, 'account', 'container', 'object'][req.path.strip('/').count('/')] except IndexError: stat_type = 'object' else: stat_type = req.environ.get('swift.source') return stat_type def statsd_metric_name(self, req, status_int, method): stat_type = self.get_metric_name_type(req) if stat_type is None: return None stat_method = method if method in self.valid_methods \ else 'BAD_METHOD' return '.'.join((stat_type, stat_method, str(status_int))) def statsd_metric_name_policy(self, req, status_int, method, policy_index): if policy_index is None: return None stat_type = self.get_metric_name_type(req) if stat_type == 'object': stat_method = method if method in self.valid_methods \ else 'BAD_METHOD' # The policy may not exist policy = POLICIES.get_by_index(policy_index) if policy: return '.'.join((stat_type, 'policy', str(policy_index), stat_method, str(status_int))) else: return None else: return None def __call__(self, env, start_response): if self.req_already_logged(env): return self.app(env, start_response) self.mark_req_logged(env) start_response_args = [None] input_proxy = InputProxy(env['wsgi.input']) env['wsgi.input'] = input_proxy start_time = time.time() def my_start_response(status, headers, exc_info=None): start_response_args[0] = (status, list(headers), exc_info) def status_int_for_logging(client_disconnect=False, start_status=None): # log disconnected clients as '499' status code if client_disconnect or input_proxy.client_disconnect: ret_status_int = 499 elif start_status is None: ret_status_int = int( start_response_args[0][0].split(' ', 1)[0]) else: ret_status_int = start_status return ret_status_int def iter_response(iterable): iterator = iter(iterable) try: chunk = next(iterator) while not chunk: chunk = next(iterator) except StopIteration: chunk = '' for h, v in start_response_args[0][1]: if h.lower() in ('content-length', 'transfer-encoding'): break else: if not chunk: start_response_args[0][1].append(('Content-Length', '0')) elif isinstance(iterable, list): start_response_args[0][1].append( ('Content-Length', str(sum(len(i) for i in iterable)))) resp_headers = dict(start_response_args[0][1]) start_response(*start_response_args[0]) req = Request(env) # Log timing information for time-to-first-byte (GET requests only) method = self.method_from_req(req) if method == 'GET': status_int = status_int_for_logging() policy_index = get_policy_index(req.headers, resp_headers) metric_name = self.statsd_metric_name(req, status_int, method) metric_name_policy = self.statsd_metric_name_policy( req, status_int, method, policy_index) if metric_name: self.access_logger.timing_since( metric_name + '.first-byte.timing', start_time) if metric_name_policy: self.access_logger.timing_since( metric_name_policy + '.first-byte.timing', start_time) bytes_sent = 0 client_disconnect = False try: while chunk: bytes_sent += len(chunk) yield chunk chunk = next(iterator) except GeneratorExit: # generator was closed before we finished client_disconnect = True raise finally: status_int = status_int_for_logging(client_disconnect) self.log_request( 
req, status_int, input_proxy.bytes_received, bytes_sent, start_time, time.time(), resp_headers=resp_headers) close_method = getattr(iterable, 'close', None) if callable(close_method): close_method() try: iterable = self.app(env, my_start_response) except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() req = Request(env) status_int = status_int_for_logging(start_status=500) self.log_request( req, status_int, input_proxy.bytes_received, 0, start_time, time.time()) six.reraise(exc_type, exc_value, exc_traceback) else: return iter_response(iterable) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def proxy_logger(app): return ProxyLoggingMiddleware(app, conf) return proxy_logger swift-2.17.0/swift/common/middleware/symlink.py0000666000175100017510000005753013236061617021603 0ustar zuulzuul00000000000000# Copyright (c) 2010-2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Symlink Middleware Symlinks are objects stored in Swift that contain a reference to another object (hereinafter, this is called "target object"). They are analogous to symbolic links in Unix-like operating systems. The existence of a symlink object does not affect the target object in any way. An important use case is to use a path in one container to access an object in a different container, with a different policy. This allows policy cost/performance trade-offs to be made on individual objects. Clients create a Swift symlink by performing a zero-length PUT request with the header ``X-Symlink-Target: /``. For a cross-account symlink, the header ``X-Symlink-Target-Account: `` must be included. If omitted, it is inserted automatically with the account of the symlink object in the PUT request process. Symlinks must be zero-byte objects. Attempting to PUT a symlink with a non-empty request body will result in a 400-series error. Also, POST with X-Symlink-Target header always results in a 400-series error. The target object need not exist at symlink creation time. It is suggested to set the ``Content-Type`` of symlink objects to a distinct value such as ``application/symlink``. A GET/HEAD request to a symlink will result in a request to the target object referenced by the symlink's ``X-Symlink-Target-Account`` and ``X-Symlink-Target`` headers. The response of the GET/HEAD request will contain a ``Content-Location`` header with the path location of the target object. A GET/HEAD request to a symlink with the query parameter ``?symlink=get`` will result in the request targeting the symlink itself. A symlink can point to another symlink. Chained symlinks will be traversed until target is not a symlink. If the number of chained symlinks exceeds the limit ``symloop_max`` an error response will be produced. The value of ``symloop_max`` can be defined in the symlink config section of `proxy-server.conf`. If not specified, the default ``symloop_max`` value is 2. If a value less than 1 is specified, the default value will be used. 
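For illustration only (the endpoint, token and names below are placeholders), a symlink can be created with a zero-byte PUT such as::

    from six.moves import urllib

    token = 'AUTH_tk...'  # placeholder auth token
    req = urllib.request.Request(
        'https://swift.example.com/v1/AUTH_test/c1/link-to-obj',
        data=b'',  # symlinks must have a zero-byte body
        headers={'X-Auth-Token': token,
                 'X-Symlink-Target': 'c2/target-obj'})
    req.get_method = lambda: 'PUT'
    resp = urllib.request.urlopen(req)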
A HEAD/GET request to a symlink object behaves as a normal HEAD/GET request to the target object. Therefore issuing a HEAD request to the symlink will return the target metadata, and issuing a GET request to the symlink will return the data and metadata of the target object. To return the symlink metadata (with its empty body) a GET/HEAD request with the ``?symlink=get`` query parameter must be sent to a symlink object. A POST request to a symlink will result in a 307 TemporaryRedirect response. The response will contain a ``Location`` header with the path of the target object as the value. The request is never redirected to the target object by Swift. Nevertheless, the metadata in the POST request will be applied to the symlink because object servers cannot know for sure if the current object is a symlink or not in eventual consistency. A DELETE request to a symlink will delete the symlink itself. The target object will not be deleted. A COPY request, or a PUT request with a ``X-Copy-From`` header, to a symlink will copy the target object. The same request to a symlink with the query parameter ``?symlink=get`` will copy the symlink itself. An OPTIONS request to a symlink will respond with the options for the symlink only, the request will not be redirected to the target object. Please note that if the symlink's target object is in another container with CORS settings, the response will not reflect the settings. Tempurls can be used to GET/HEAD symlink objects, but PUT is not allowed and will result in a 400-series error. The GET/HEAD tempurls honor the scope of the tempurl key. Container tempurl will only work on symlinks where the target container is the same as the symlink. In case a symlink targets an object in a different container, a GET/HEAD request will result in a 401 Unauthorized error. The account level tempurl will allow cross container symlinks. If a symlink object is overwritten while it is in a versioned container, the symlink object itself is versioned, not the referenced object. A GET request with query parameter ``?format=json`` to a container which contains symlinks will respond with additional information ``symlink_path`` for each symlink object in the container listing. The ``symlink_path`` value is the target path of the symlink. Clients can differentiate symlinks and other objects by this function. Note that responses of any other format (e.g.``?format=xml``) won't include ``symlink_path`` info. Errors * PUT with the header ``X-Symlink-Target`` with non-zero Content-Length will produce a 400 BadRequest error. * POST with the header ``X-Symlink-Target`` will produce a 400 BadRequest error. * GET/HEAD traversing more than ``symloop_max`` chained symlinks will produce a 409 Conflict error. * POSTs will produce a 307 TemporaryRedirect error. ---------- Deployment ---------- Symlinks are enabled by adding the `symlink` middleware to the proxy server WSGI pipeline and including a corresponding filter configuration section in the `proxy-server.conf` file. The `symlink` middleware should be placed after `slo`, `dlo` and `versioned_writes` middleware, but before `encryption` middleware in the pipeline. See the `proxy-server.conf-sample` file for further details. :ref:`Additional steps ` are required if the container sync feature is being used. .. note:: Once you have deployed `symlink` middleware in your pipeline, you should neither remove the `symlink` middleware nor downgrade swift to a version earlier than symlinks being supported. 
Doing so may result in unexpected container listing results in addition to symlink objects behaving like a normal object. .. _symlink_container_sync_client_config: Container sync configuration ---------------------------- If container sync is being used then the `symlink` middleware must be added to the container sync internal client pipeline. The following configuration steps are required: #. Create a custom internal client configuration file for container sync (if one is not already in use) based on the sample file `internal-client.conf-sample`. For example, copy `internal-client.conf-sample` to `/etc/swift/container-sync-client.conf`. #. Modify this file to include the `symlink` middleware in the pipeline in the same way as described above for the proxy server. #. Modify the container-sync section of all container server config files to point to this internal client config file using the ``internal_client_conf_path`` option. For example:: internal_client_conf_path = /etc/swift/container-sync-client.conf .. note:: These container sync configuration steps will be necessary for container sync probe tests to pass if the `symlink` middleware is included in the proxy pipeline of a test cluster. """ import json import os from cgi import parse_header from six.moves.urllib.parse import unquote from swift.common.utils import get_logger, register_swift_info, split_path, \ MD5_OF_EMPTY_STRING, closing_if_possible from swift.common.constraints import check_account_format from swift.common.wsgi import WSGIContext, make_subrequest from swift.common.request_helpers import get_sys_meta_prefix, \ check_path_header from swift.common.swob import Request, HTTPBadRequest, HTTPTemporaryRedirect, \ HTTPException, HTTPConflict, HTTPPreconditionFailed from swift.common.http import is_success from swift.common.exceptions import LinkIterError from swift.common.header_key_dict import HeaderKeyDict DEFAULT_SYMLOOP_MAX = 2 # Header values for symlink target path strings will be quoted values. TGT_OBJ_SYMLINK_HDR = 'x-symlink-target' TGT_ACCT_SYMLINK_HDR = 'x-symlink-target-account' TGT_OBJ_SYSMETA_SYMLINK_HDR = get_sys_meta_prefix('object') + 'symlink-target' TGT_ACCT_SYSMETA_SYMLINK_HDR = \ get_sys_meta_prefix('object') + 'symlink-target-account' def _check_symlink_header(req): """ Validate that the value from x-symlink-target header is well formatted. We assume the caller ensures that x-symlink-target header is present in req.headers. :param req: HTTP request object :raise: HTTPPreconditionFailed if x-symlink-target value is not well formatted. :raise: HTTPBadRequest if the x-symlink-target value points to the request path. """ # N.B. check_path_header doesn't assert the leading slash and # copy middleware may accept the format. In the symlink, API # says apparently to use "container/object" format so add the # validation first, here. 
if unquote(req.headers[TGT_OBJ_SYMLINK_HDR]).startswith('/'): raise HTTPPreconditionFailed( body='X-Symlink-Target header must be of the ' 'form /', request=req, content_type='text/plain') # check container and object format container, obj = check_path_header( req, TGT_OBJ_SYMLINK_HDR, 2, 'X-Symlink-Target header must be of the ' 'form /') # Check account format if it exists account = check_account_format( req, unquote(req.headers[TGT_ACCT_SYMLINK_HDR])) \ if TGT_ACCT_SYMLINK_HDR in req.headers else None # Extract request path _junk, req_acc, req_cont, req_obj = req.split_path(4, 4, True) if not account: account = req_acc # Check if symlink targets the symlink itself or not if (account, container, obj) == (req_acc, req_cont, req_obj): raise HTTPBadRequest( body='Symlink cannot target itself', request=req, content_type='text/plain') def symlink_usermeta_to_sysmeta(headers): """ Helper function to translate from X-Symlink-Target and X-Symlink-Target-Account to X-Object-Sysmeta-Symlink-Target and X-Object-Sysmeta-Symlink-Target-Account. :param headers: request headers dict. Note that the headers dict will be updated directly. """ # To preserve the url-encoded value in the symlink header, use the raw value if TGT_OBJ_SYMLINK_HDR in headers: headers[TGT_OBJ_SYSMETA_SYMLINK_HDR] = headers.pop( TGT_OBJ_SYMLINK_HDR) if TGT_ACCT_SYMLINK_HDR in headers: headers[TGT_ACCT_SYSMETA_SYMLINK_HDR] = headers.pop( TGT_ACCT_SYMLINK_HDR) def symlink_sysmeta_to_usermeta(headers): """ Helper function to translate from X-Object-Sysmeta-Symlink-Target and X-Object-Sysmeta-Symlink-Target-Account to X-Symlink-Target and X-Symlink-Target-Account. :param headers: request headers dict. Note that the headers dict will be updated directly. """ if TGT_OBJ_SYSMETA_SYMLINK_HDR in headers: headers[TGT_OBJ_SYMLINK_HDR] = headers.pop( TGT_OBJ_SYSMETA_SYMLINK_HDR) if TGT_ACCT_SYSMETA_SYMLINK_HDR in headers: headers[TGT_ACCT_SYMLINK_HDR] = headers.pop( TGT_ACCT_SYSMETA_SYMLINK_HDR) class SymlinkContainerContext(WSGIContext): def __init__(self, wsgi_app, logger): super(SymlinkContainerContext, self).__init__(wsgi_app) self.logger = logger def handle_container(self, req, start_response): """ Handle container requests. :param req: a :class:`~swift.common.swob.Request` :param start_response: start_response function :return: Response Iterator after start_response is called. """ app_resp = self._app_call(req.environ) if req.method == 'GET' and is_success(self._get_status_int()): app_resp = self._process_json_resp(app_resp, req) start_response(self._response_status, self._response_headers, self._response_exc_info) return app_resp def _process_json_resp(self, resp_iter, req): """ Iterate through the json body looking for symlinks and modify its content. :return: modified json body """ with closing_if_possible(resp_iter): resp_body = ''.join(resp_iter) body_json = json.loads(resp_body) swift_version, account, _junk = split_path(req.path, 2, 3, True) new_body = json.dumps( [self._extract_symlink_path_json(obj_dict, swift_version, account) for obj_dict in body_json]) self.update_content_length(len(new_body)) return [new_body] def _extract_symlink_path_json(self, obj_dict, swift_version, account): """ Extract the symlink path from the hash value. :return: object dictionary with an additional key:value pair if the object is a symlink. The new key is symlink_path. 
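For example (with illustrative values; 'AUTH_test' stands in for the real account), a listing entry of::

    {'hash': 'd41d8cd98f00b204e9800998ecf8427e; symlink_target=c2/o2'}

would be rewritten to::

    {'hash': 'd41d8cd98f00b204e9800998ecf8427e',
     'symlink_path': '/v1/AUTH_test/c2/o2'}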
""" if 'hash' in obj_dict: hash_value, meta = parse_header(obj_dict['hash']) obj_dict['hash'] = hash_value target = None for key in meta: if key == 'symlink_target': target = meta[key] elif key == 'symlink_target_account': account = meta[key] else: # make sure to add all other (key, values) back in place obj_dict['hash'] += '; %s=%s' % (key, meta[key]) else: if target: obj_dict['symlink_path'] = os.path.join( '/', swift_version, account, target) return obj_dict class SymlinkObjectContext(WSGIContext): def __init__(self, wsgi_app, logger, symloop_max): super(SymlinkObjectContext, self).__init__(wsgi_app) self.symloop_max = symloop_max self.logger = logger # N.B. _loop_count and _last_target_path are used to keep # the statement in the _recursive_get. Hence they should not be touched # from other resources. self._loop_count = 0 self._last_target_path = None def handle_get_head_symlink(self, req): """ Handle get/head request when client sent parameter ?symlink=get :param req: HTTP GET or HEAD object request with param ?symlink=get :returns: Response Iterator """ resp = self._app_call(req.environ) response_header_dict = HeaderKeyDict(self._response_headers) symlink_sysmeta_to_usermeta(response_header_dict) self._response_headers = response_header_dict.items() return resp def handle_get_head(self, req): """ Handle get/head request and in case the response is a symlink, redirect request to target object. :param req: HTTP GET or HEAD object request :returns: Response Iterator """ try: return self._recursive_get_head(req) except LinkIterError: errmsg = 'Too many levels of symbolic links, ' \ 'maximum allowed is %d' % self.symloop_max raise HTTPConflict( body=errmsg, request=req, content_type='text/plain') def _recursive_get_head(self, req): resp = self._app_call(req.environ) def build_traversal_req(symlink_target): """ :returns: new request for target path if it's symlink otherwise None """ version, account, _junk = split_path(req.path, 2, 3, True) account = self._response_header_value( TGT_ACCT_SYSMETA_SYMLINK_HDR) or account target_path = os.path.join( '/', version, account, symlink_target.lstrip('/')) self._last_target_path = target_path new_req = make_subrequest( req.environ, path=target_path, method=req.method, headers=req.headers, swift_source='SYM') new_req.headers.pop('X-Backend-Storage-Policy-Index', None) return new_req symlink_target = self._response_header_value( TGT_OBJ_SYSMETA_SYMLINK_HDR) if symlink_target: if self._loop_count >= self.symloop_max: raise LinkIterError() # format: /// new_req = build_traversal_req(symlink_target) self._loop_count += 1 return self._recursive_get_head(new_req) else: if self._last_target_path: # Content-Location will be applied only when one or more # symlink recursion occurred. # In this case, Content-Location is applied to show which # object path caused the error response. # To preserve '%2F'(= quote('/')) in X-Symlink-Target # header value as it is, Content-Location value comes from # TGT_OBJ_SYMLINK_HDR, not req.path self._response_headers.extend( [('Content-Location', self._last_target_path)]) return resp def handle_put(self, req): """ Handle put request when it contains X-Symlink-Target header. Symlink headers are validated and moved to sysmeta namespace. 
:param req: HTTP PUT object request :returns: Response Iterator """ if req.content_length != 0: raise HTTPBadRequest( body='Symlink requests require a zero byte body', request=req, content_type='text/plain') _check_symlink_header(req) symlink_usermeta_to_sysmeta(req.headers) # Store info in the container update that this object is a symlink. # We have a design decision to use etag space to store symlink info for # object listing because it's immutable unless the object is # overwritten. This may impact the downgrade scenario in which the # symlink info appears as a suffix in the hash value of object listing # results for clients. # To create the override etag easily, we have a constraint that the # symlink must be zero bytes, so we can simply add the etag of the # empty string + the symlink info here. Note that this override etag # may be encrypted in the container db by the encryption middleware. etag_override = [ MD5_OF_EMPTY_STRING, 'symlink_target=%s' % req.headers[TGT_OBJ_SYSMETA_SYMLINK_HDR] ] if TGT_ACCT_SYSMETA_SYMLINK_HDR in req.headers: etag_override.append( 'symlink_target_account=%s' % req.headers[TGT_ACCT_SYSMETA_SYMLINK_HDR]) req.headers['X-Object-Sysmeta-Container-Update-Override-Etag'] = \ '; '.join(etag_override) return self._app_call(req.environ) def handle_post(self, req): """ Handle post request. If POSTing to a symlink, a HTTPTemporaryRedirect error message is returned to the client. Clients that POST to symlinks should understand that the POST is not redirected to the target object like in a HEAD/GET request. POSTs to a symlink will be handled just like those for a normal object by the object server. It cannot reject them because it may not have the symlink state when the POST lands. The object server has no knowledge of what a symlink object is. On the other hand, on POST requests, the object server returns all sysmeta of the object. This method uses that sysmeta to determine if the stored object is a symlink or not. :param req: HTTP POST object request :raises: HTTPTemporaryRedirect if POSTing to a symlink. :returns: Response Iterator """ if TGT_OBJ_SYMLINK_HDR in req.headers: raise HTTPBadRequest( body='A PUT request is required to set a symlink target', request=req, content_type='text/plain') resp = self._app_call(req.environ) if not is_success(self._get_status_int()): return resp tgt_co = self._response_header_value(TGT_OBJ_SYSMETA_SYMLINK_HDR) if tgt_co: version, account, _junk = req.split_path(2, 3, True) target_acc = self._response_header_value( TGT_ACCT_SYSMETA_SYMLINK_HDR) or account location_hdr = os.path.join( '/', version, target_acc, tgt_co) req.environ['swift.leave_relative_location'] = True errmsg = 'The requested POST was applied to a symlink. POST ' +\ 'directly to the target to apply requested metadata.' raise HTTPTemporaryRedirect( body=errmsg, headers={'location': location_hdr}) else: return resp def handle_object(self, req, start_response): """ Handle object requests. 
:param req: a :class:`~swift.common.swob.Request` :param start_response: start_response function :returns: Response Iterator after start_response has been called """ if req.method in ('GET', 'HEAD'): # if GET request came from versioned writes, then it should get # the symlink only, not the referenced target if req.params.get('symlink') == 'get' or \ req.environ.get('swift.source') == 'VW': resp = self.handle_get_head_symlink(req) else: resp = self.handle_get_head(req) elif req.method == 'PUT' and (TGT_OBJ_SYMLINK_HDR in req.headers): resp = self.handle_put(req) elif req.method == 'POST': resp = self.handle_post(req) else: # DELETE and OPTIONS reqs for a symlink and # PUT reqs without X-Symlink-Target behave like any other object resp = self._app_call(req.environ) start_response(self._response_status, self._response_headers, self._response_exc_info) return resp class SymlinkMiddleware(object): """ Middleware that implements symlinks. Symlinks are objects stored in Swift that contain a reference to another object (i.e., the target object). An important use case is to use a path in one container to access an object in a different container, with a different policy. This allows policy cost/performance trade-offs to be made on individual objects. """ def __init__(self, app, conf, symloop_max): self.app = app self.conf = conf self.logger = get_logger(self.conf, log_route='symlink') self.symloop_max = symloop_max def __call__(self, env, start_response): req = Request(env) try: version, acc, cont, obj = req.split_path(3, 4, True) except ValueError: return self.app(env, start_response) try: if obj: # object context context = SymlinkObjectContext(self.app, self.logger, self.symloop_max) return context.handle_object(req, start_response) else: # container context context = SymlinkContainerContext(self.app, self.logger) return context.handle_container(req, start_response) except HTTPException as err_resp: return err_resp(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) symloop_max = int(conf.get('symloop_max', DEFAULT_SYMLOOP_MAX)) if symloop_max < 1: symloop_max = int(DEFAULT_SYMLOOP_MAX) register_swift_info('symlink', symloop_max=symloop_max) def symlink_mw(app): return SymlinkMiddleware(app, conf, symloop_max) return symlink_mw swift-2.17.0/swift/common/middleware/tempurl.py0000666000175100017510000010037013236061617021574 0ustar zuulzuul00000000000000# Copyright (c) 2011-2014 Greg Holt # Copyright (c) 2012-2013 John Dickinson # Copyright (c) 2012 Felipe Reyes # Copyright (c) 2012 Peter Portante # Copyright (c) 2012 Victor Rodionov # Copyright (c) 2013-2014 Samuel Merritt # Copyright (c) 2013 Chuck Thier # Copyright (c) 2013 David Goetz # Copyright (c) 2013 Dirk Mueller # Copyright (c) 2013 Donagh McCabe # Copyright (c) 2013 Fabien Boucher # Copyright (c) 2013 Greg Lange # Copyright (c) 2013 Kun Huang # Copyright (c) 2013 Richard Hawkins # Copyright (c) 2013 Tong Li # Copyright (c) 2013 ZhiQiang Fan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and
# limitations under the License.

r"""
TempURL Middleware

Allows the creation of URLs to provide temporary access to objects.

For example, a website may wish to provide a link to download a large
object in Swift, but the Swift account has no public access. The website
can generate a URL that will provide GET access for a limited time to the
resource. When the web browser user clicks on the link, the browser will
download the object directly from Swift, obviating the need for the website
to act as a proxy for the request.

If the user were to share the link with all their friends, or accidentally
post it on a forum, etc. the direct access would be limited to the
expiration time set when the website created the link.

Beyond that, the middleware provides the ability to create URLs containing
signatures which are valid for all objects that share a common prefix.
These prefix-based URLs are useful for sharing a set of objects.

------------
Client Usage
------------

To create temporary URLs, first an ``X-Account-Meta-Temp-URL-Key`` header
must be set on the Swift account. Then, an HMAC (RFC 2104) signature is
generated using the HTTP method to allow (``GET``, ``PUT``, ``DELETE``,
etc.), the Unix timestamp until which the access should be allowed, the
full path to the object, and the key set on the account.

The digest algorithm to be used may be configured by the operator. By
default, HMAC-SHA1, HMAC-SHA256, and HMAC-SHA512 are supported. Check the
``tempurl.allowed_digests`` entry in the cluster's capabilities response to
see which algorithms are supported by your deployment; see
:doc:`api/discoverability` for more information. On older clusters, the
``tempurl`` key may be present while the ``allowed_digests`` subkey is not;
in this case, only HMAC-SHA1 is supported.

For example, here is code generating the signature for a ``GET`` for 60
seconds on ``/v1/AUTH_account/container/object``::

    import hmac
    from hashlib import sha1
    from time import time
    method = 'GET'
    expires = int(time() + 60)
    path = '/v1/AUTH_account/container/object'
    key = 'mykey'
    hmac_body = '%s\n%s\n%s' % (method, expires, path)
    sig = hmac.new(key, hmac_body, sha1).hexdigest()

Be certain to use the full path, from the ``/v1/`` onward.

Let's say ``sig`` ends up equaling
``da39a3ee5e6b4b0d3255bfef95601890afd80709`` and ``expires`` ends up
``1323479485``. Then, for example, the website could provide a link to::

    https://swift-cluster.example.com/v1/AUTH_account/container/object?
    temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
    temp_url_expires=1323479485

For longer hashes, a hex encoding becomes unwieldy. Base64 encoding is also
supported, and indicated by prefixing the signature with
``"<digest name>:"``. This is *required* for HMAC-SHA512 signatures. For
example, comparable code for generating a HMAC-SHA512 signature would be::

    import base64
    import hmac
    from hashlib import sha512
    from time import time
    method = 'GET'
    expires = int(time() + 60)
    path = '/v1/AUTH_account/container/object'
    key = 'mykey'
    hmac_body = '%s\n%s\n%s' % (method, expires, path)
    sig = 'sha512:' + base64.urlsafe_b64encode(hmac.new(
        key, hmac_body, sha512).digest())

Supposing that ``sig`` ends up equaling
``sha512:ZrSijn0GyDhsv1ltIj9hWUTrbAeE45NcKXyBaz7aPbSMvROQ4jtYH4nRAmm
5ErY2X11Yc1Yhy2OMCyN3yueeXg==`` and ``expires`` ends up ``1516741234``,
then the website could provide a link to::

    https://swift-cluster.example.com/v1/AUTH_account/container/object?
    temp_url_sig=sha512:ZrSijn0GyDhsv1ltIj9hWUTrbAeE45NcKXyBaz7aPbSMvRO
    Q4jtYH4nRAmm5ErY2X11Yc1Yhy2OMCyN3yueeXg==&
    temp_url_expires=1516741234
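As a convenience, the full signed URL can be assembled in the same script
(a minimal sketch; the host name is a placeholder, and ``path``, ``sig``,
and ``expires`` are the variables from the examples above;
percent-encoding the signature is a safe precaution for the base64
forms)::

    from six.moves.urllib.parse import quote
    url = ('https://swift-cluster.example.com%s?temp_url_sig=%s'
           '&temp_url_expires=%d' % (path, quote(sig), expires))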
You may also use ISO 8601 UTC timestamps with the format
``"%Y-%m-%dT%H:%M:%SZ"`` instead of UNIX timestamps in the URL (but NOT in
the code above for generating the signature!). So, the above HMAC-SHA1 URL
could also be formulated as::

    https://swift-cluster.example.com/v1/AUTH_account/container/object?
    temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
    temp_url_expires=2011-12-10T01:11:25Z

If a prefix-based signature with the prefix ``pre`` is desired, set path
to::

    path = 'prefix:/v1/AUTH_account/container/pre'

The generated signature would be valid for all objects starting with
``pre``. The middleware detects a prefix-based temporary URL by a query
parameter called ``temp_url_prefix``. So, if ``sig`` and ``expires`` ended
up like above, the following URL would be valid::

    https://swift-cluster.example.com/v1/AUTH_account/container/pre/object?
    temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
    temp_url_expires=1323479485&
    temp_url_prefix=pre

Another valid URL::

    https://swift-cluster.example.com/v1/AUTH_account/container/pre/
    subfolder/another_object?
    temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
    temp_url_expires=1323479485&
    temp_url_prefix=pre

Any alteration of the resource path or query arguments of a temporary URL
would result in ``401 Unauthorized``. Similarly, a ``PUT`` where ``GET``
was the allowed method would be rejected with ``401 Unauthorized``.
However, ``HEAD`` is allowed if ``GET``, ``PUT``, or ``POST`` is allowed.

Using this in combination with browser form post translation middleware
could also allow direct-from-browser uploads to specific locations in
Swift.

TempURL supports both account and container level keys. Each allows up to
two keys to be set, allowing key rotation without invalidating all existing
temporary URLs. Account keys are specified by
``X-Account-Meta-Temp-URL-Key`` and ``X-Account-Meta-Temp-URL-Key-2``,
while container keys are specified by ``X-Container-Meta-Temp-URL-Key`` and
``X-Container-Meta-Temp-URL-Key-2``. Signatures are checked against account
and container keys, if present.

With ``GET`` TempURLs, a ``Content-Disposition`` header will be set on the
response so that browsers will interpret this as a file attachment to be
saved. The filename chosen is based on the object name, but you can
override this with a ``filename`` query parameter. Modifying the above
example::

    https://swift-cluster.example.com/v1/AUTH_account/container/object?
    temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
    temp_url_expires=1323479485&filename=My+Test+File.pdf

If you do not want the object to be downloaded, you can cause
``Content-Disposition: inline`` to be set on the response by adding the
``inline`` parameter to the query string, like so::

    https://swift-cluster.example.com/v1/AUTH_account/container/object?
    temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709&
    temp_url_expires=1323479485&inline

In some cases, the client may not be able to display the object's content
inline, but you may still want the content to be saved locally under a
specific filename. You can cause ``Content-Disposition: inline;
filename=...`` to be set on the response by adding both the ``inline`` and
``filename=...`` parameters to the query string, like so::

    https://swift-cluster.example.com/v1/AUTH_account/container/object?
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709& temp_url_expires=1323479485&inline&filename=My+Test+File.pdf --------------------- Cluster Configuration --------------------- This middleware understands the following configuration settings: ``incoming_remove_headers`` A whitespace-delimited list of the headers to remove from incoming requests. Names may optionally end with ``*`` to indicate a prefix match. ``incoming_allow_headers`` is a list of exceptions to these removals. Default: ``x-timestamp`` ``incoming_allow_headers`` A whitespace-delimited list of the headers allowed as exceptions to ``incoming_remove_headers``. Names may optionally end with ``*`` to indicate a prefix match. Default: None ``outgoing_remove_headers`` A whitespace-delimited list of the headers to remove from outgoing responses. Names may optionally end with ``*`` to indicate a prefix match. ``outgoing_allow_headers`` is a list of exceptions to these removals. Default: ``x-object-meta-*`` ``outgoing_allow_headers`` A whitespace-delimited list of the headers allowed as exceptions to ``outgoing_remove_headers``. Names may optionally end with ``*`` to indicate a prefix match. Default: ``x-object-meta-public-*`` ``methods`` A whitespace delimited list of request methods that are allowed to be used with a temporary URL. Default: ``GET HEAD PUT POST DELETE`` ``allowed_digests`` A whitespace delimited list of digest algorithms that are allowed to be used when calculating the signature for a temporary URL. Default: ``sha1 sha256 sha512`` """ __all__ = ['TempURL', 'filter_factory', 'DEFAULT_INCOMING_REMOVE_HEADERS', 'DEFAULT_INCOMING_ALLOW_HEADERS', 'DEFAULT_OUTGOING_REMOVE_HEADERS', 'DEFAULT_OUTGOING_ALLOW_HEADERS'] import binascii from calendar import timegm import functools import hashlib from os.path import basename from time import time, strftime, strptime, gmtime from six.moves.urllib.parse import parse_qs from six.moves.urllib.parse import urlencode from swift.proxy.controllers.base import get_account_info, get_container_info from swift.common.header_key_dict import HeaderKeyDict from swift.common.swob import header_to_environ_key, HTTPUnauthorized, \ HTTPBadRequest from swift.common.utils import split_path, get_valid_utf8_str, \ register_swift_info, get_hmac, streq_const_time, quote, get_logger, \ strict_b64decode DISALLOWED_INCOMING_HEADERS = 'x-object-manifest x-symlink-target' #: Default headers to remove from incoming requests. Simply a whitespace #: delimited list of header names and names can optionally end with '*' to #: indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS is a list of #: exceptions to these removals. DEFAULT_INCOMING_REMOVE_HEADERS = 'x-timestamp' #: Default headers as exceptions to DEFAULT_INCOMING_REMOVE_HEADERS. Simply a #: whitespace delimited list of header names and names can optionally end with #: '*' to indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS = '' #: Default headers to remove from outgoing responses. Simply a whitespace #: delimited list of header names and names can optionally end with '*' to #: indicate a prefix match. DEFAULT_OUTGOING_ALLOW_HEADERS is a list of #: exceptions to these removals. DEFAULT_OUTGOING_REMOVE_HEADERS = 'x-object-meta-*' #: Default headers as exceptions to DEFAULT_OUTGOING_REMOVE_HEADERS. Simply a #: whitespace delimited list of header names and names can optionally end with #: '*' to indicate a prefix match. 
DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*' DEFAULT_ALLOWED_DIGESTS = 'sha1 sha256 sha512' SUPPORTED_DIGESTS = set(DEFAULT_ALLOWED_DIGESTS.split()) CONTAINER_SCOPE = 'container' ACCOUNT_SCOPE = 'account' EXPIRES_ISO8601_FORMAT = '%Y-%m-%dT%H:%M:%SZ' def get_tempurl_keys_from_metadata(meta): """ Extracts the tempurl keys from metadata. :param meta: account metadata :returns: list of keys found (possibly empty if no keys set) Example: meta = get_account_info(...)['meta'] keys = get_tempurl_keys_from_metadata(meta) """ return [get_valid_utf8_str(value) for key, value in meta.items() if key.lower() in ('temp-url-key', 'temp-url-key-2')] def disposition_format(disposition_type, filename): # Content-Disposition in HTTP is defined in # https://tools.ietf.org/html/rfc6266 and references # https://tools.ietf.org/html/rfc5987#section-3.2 # to explain the filename*= encoding format. The summary # is that it's the charset, then an optional (and empty) language # then the filename. Looks funny, but it's right. return '''%s; filename="%s"; filename*=UTF-8''%s''' % ( disposition_type, quote(filename, safe=' /'), quote(filename)) def authorize_same_account(account_to_match): def auth_callback_same_account(req): try: _ver, acc, _rest = req.split_path(2, 3, True) except ValueError: return HTTPUnauthorized(request=req) if acc == account_to_match: return None else: return HTTPUnauthorized(request=req) return auth_callback_same_account def authorize_same_container(account_to_match, container_to_match): def auth_callback_same_container(req): try: _ver, acc, con, _rest = req.split_path(3, 4, True) except ValueError: return HTTPUnauthorized(request=req) if acc == account_to_match and con == container_to_match: return None else: return HTTPUnauthorized(request=req) return auth_callback_same_container class TempURL(object): """ WSGI Middleware to grant temporary URLs specific access to Swift resources. See the overview for more information. The proxy logs created for any subrequests made will have swift.source set to "TU". :param app: The next WSGI filter or app in the paste.deploy chain. :param conf: The configuration dict for the middleware. """ def __init__(self, app, conf): #: The next WSGI application/filter in the paste.deploy pipeline. self.app = app #: The filter configuration dict. self.conf = conf self.allowed_digests = conf.get( 'allowed_digests', DEFAULT_ALLOWED_DIGESTS.split()) self.disallowed_headers = set( header_to_environ_key(h) for h in DISALLOWED_INCOMING_HEADERS.split()) headers = [header_to_environ_key(h) for h in conf.get('incoming_remove_headers', DEFAULT_INCOMING_REMOVE_HEADERS.split())] #: Headers to remove from incoming requests. Uppercase WSGI env style, #: like `HTTP_X_PRIVATE`. self.incoming_remove_headers = \ [h for h in headers if not h.endswith('*')] #: Header with match prefixes to remove from incoming requests. #: Uppercase WSGI env style, like `HTTP_X_SENSITIVE_*`. self.incoming_remove_headers_startswith = \ [h[:-1] for h in headers if h.endswith('*')] headers = [header_to_environ_key(h) for h in conf.get('incoming_allow_headers', DEFAULT_INCOMING_ALLOW_HEADERS.split())] #: Headers to allow in incoming requests. Uppercase WSGI env style, #: like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY`. self.incoming_allow_headers = \ [h for h in headers if not h.endswith('*')] #: Header with match prefixes to allow in incoming requests. Uppercase #: WSGI env style, like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY_*`. 
        self.incoming_allow_headers_startswith = \
            [h[:-1] for h in headers if h.endswith('*')]

        headers = [h.title() for h in
                   conf.get('outgoing_remove_headers',
                            DEFAULT_OUTGOING_REMOVE_HEADERS.split())]
        #: Headers to remove from outgoing responses. Title-cased, like
        #: `X-Account-Meta-Temp-Url-Key`.
        self.outgoing_remove_headers = \
            [h for h in headers if not h.endswith('*')]
        #: Header with match prefixes to remove from outgoing responses.
        #: Title-cased, like `X-Account-Meta-Private-*`.
        self.outgoing_remove_headers_startswith = \
            [h[:-1] for h in headers if h.endswith('*')]

        headers = [h.title() for h in
                   conf.get('outgoing_allow_headers',
                            DEFAULT_OUTGOING_ALLOW_HEADERS.split())]
        #: Headers to allow in outgoing responses. Title-cased, like
        #: `X-Matches-Remove-Prefix-But-Okay`.
        self.outgoing_allow_headers = \
            [h for h in headers if not h.endswith('*')]
        #: Header with match prefixes to allow in outgoing responses.
        #: Title-cased, like `X-Matches-Remove-Prefix-But-Okay-*`.
        self.outgoing_allow_headers_startswith = \
            [h[:-1] for h in headers if h.endswith('*')]
        #: HTTP user agent to use for subrequests.
        self.agent = '%(orig)s TempURL'

    def __call__(self, env, start_response):
        """
        Main hook into the WSGI paste.deploy filter/app pipeline.

        :param env: The WSGI environment dict.
        :param start_response: The WSGI start_response hook.
        :returns: Response as per WSGI.
        """
        if env['REQUEST_METHOD'] == 'OPTIONS':
            return self.app(env, start_response)
        info = self._get_temp_url_info(env)
        temp_url_sig, temp_url_expires, temp_url_prefix, filename, \
            inline_disposition = info
        if temp_url_sig is None and temp_url_expires is None:
            return self.app(env, start_response)
        if not temp_url_sig or not temp_url_expires:
            return self._invalid(env, start_response)

        if ':' in temp_url_sig:
            hash_algorithm, temp_url_sig = temp_url_sig.split(':', 1)
            if ('-' in temp_url_sig or '_' in temp_url_sig) and not (
                    '+' in temp_url_sig or '/' in temp_url_sig):
                temp_url_sig = temp_url_sig.replace('-', '+').replace(
                    '_', '/')
            try:
                temp_url_sig = binascii.hexlify(strict_b64decode(
                    temp_url_sig + '=='))
            except ValueError:
                return self._invalid(env, start_response)
        elif len(temp_url_sig) == 40:
            hash_algorithm = 'sha1'
        elif len(temp_url_sig) == 64:
            hash_algorithm = 'sha256'
        else:
            return self._invalid(env, start_response)
        if hash_algorithm not in self.allowed_digests:
            return self._invalid(env, start_response)

        account, container, obj = self._get_path_parts(env)
        if not account:
            return self._invalid(env, start_response)

        keys = self._get_keys(env)
        if not keys:
            return self._invalid(env, start_response)

        if temp_url_prefix is None:
            path = '/v1/%s/%s/%s' % (account, container, obj)
        else:
            if not obj.startswith(temp_url_prefix):
                return self._invalid(env, start_response)
            path = 'prefix:/v1/%s/%s/%s' % (account, container,
                                            temp_url_prefix)
        if env['REQUEST_METHOD'] == 'HEAD':
            hmac_vals = [
                hmac for method in ('HEAD', 'GET', 'POST', 'PUT')
                for hmac in self._get_hmacs(
                    env, temp_url_expires, path, keys, hash_algorithm,
                    request_method=method)]
        else:
            hmac_vals = self._get_hmacs(
                env, temp_url_expires, path, keys, hash_algorithm)

        is_valid_hmac = False
        hmac_scope = None
        for hmac, scope in hmac_vals:
            # While it's true that we short-circuit, this doesn't affect the
            # timing-attack resistance since the only way this will
            # short-circuit is when a valid signature is passed in.
if streq_const_time(temp_url_sig, hmac): is_valid_hmac = True hmac_scope = scope break if not is_valid_hmac: return self._invalid(env, start_response) # disallowed headers prevent accidentally allowing upload of a pointer # to data that the PUT tempurl would not otherwise allow access for. # It should be safe to provide a GET tempurl for data that an # untrusted client just uploaded with a PUT tempurl. resp = self._clean_disallowed_headers(env, start_response) if resp: return resp self._clean_incoming_headers(env) if hmac_scope == ACCOUNT_SCOPE: env['swift.authorize'] = authorize_same_account(account) else: env['swift.authorize'] = authorize_same_container(account, container) env['swift.authorize_override'] = True env['REMOTE_USER'] = '.wsgi.tempurl' qs = {'temp_url_sig': temp_url_sig, 'temp_url_expires': temp_url_expires} if temp_url_prefix is not None: qs['temp_url_prefix'] = temp_url_prefix if filename: qs['filename'] = filename env['QUERY_STRING'] = urlencode(qs) def _start_response(status, headers, exc_info=None): headers = self._clean_outgoing_headers(headers) if env['REQUEST_METHOD'] in ('GET', 'HEAD') and status[0] == '2': # figure out the right value for content-disposition # 1) use the value from the query string # 2) use the value from the object metadata # 3) use the object name (default) out_headers = [] existing_disposition = None for h, v in headers: if h.lower() != 'content-disposition': out_headers.append((h, v)) else: existing_disposition = v if inline_disposition: if filename: disposition_value = disposition_format('inline', filename) else: disposition_value = 'inline' elif filename: disposition_value = disposition_format('attachment', filename) elif existing_disposition: disposition_value = existing_disposition else: name = basename(env['PATH_INFO'].rstrip('/')) disposition_value = disposition_format('attachment', name) # this is probably just paranoia, I couldn't actually get a # newline into existing_disposition value = disposition_value.replace('\n', '%0A') out_headers.append(('Content-Disposition', value)) # include Expires header for better cache-control out_headers.append(('Expires', strftime( "%a, %d %b %Y %H:%M:%S GMT", gmtime(temp_url_expires)))) headers = out_headers return start_response(status, headers, exc_info) return self.app(env, _start_response) def _get_path_parts(self, env): """ Return the account, container and object name for the request, if it's an object request and one of the configured methods; otherwise, None is returned. :param env: The WSGI environment for the request. :returns: (Account str, container str, object str) or (None, None, None). """ if env['REQUEST_METHOD'] in self.conf['methods']: try: ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True) except ValueError: return (None, None, None) if ver == 'v1' and obj.strip('/'): return (acc, cont, obj) return (None, None, None) def _get_temp_url_info(self, env): """ Returns the provided temporary URL parameters (sig, expires, prefix), if given and syntactically valid. Either sig, expires or prefix could be None if not provided. If provided, expires is also converted to an int if possible or 0 if not, and checked for expiration (returns 0 if expired). :param env: The WSGI environment for the request. :returns: (sig, expires, prefix, filename, inline) as described above. 
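        For instance (an illustrative query string, not taken from the
        source), given::

            temp_url_sig=da39...&temp_url_expires=1323479485&inline

        this method would return ``('da39...', 1323479485, None, None,
        True)``, provided the timestamp lies in the future.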
""" temp_url_sig = temp_url_expires = temp_url_prefix = filename =\ inline = None qs = parse_qs(env.get('QUERY_STRING', ''), keep_blank_values=True) if 'temp_url_sig' in qs: temp_url_sig = qs['temp_url_sig'][0] if 'temp_url_expires' in qs: try: temp_url_expires = int(qs['temp_url_expires'][0]) except ValueError: try: temp_url_expires = timegm(strptime( qs['temp_url_expires'][0], EXPIRES_ISO8601_FORMAT)) except ValueError: temp_url_expires = 0 if temp_url_expires < time(): temp_url_expires = 0 if 'temp_url_prefix' in qs: temp_url_prefix = qs['temp_url_prefix'][0] if 'filename' in qs: filename = qs['filename'][0] if 'inline' in qs: inline = True return (temp_url_sig, temp_url_expires, temp_url_prefix, filename, inline) def _get_keys(self, env): """ Returns the X-[Account|Container]-Meta-Temp-URL-Key[-2] header values for the account or container, or an empty list if none are set. Each value comes as a 2-tuple (key, scope), where scope is either CONTAINER_SCOPE or ACCOUNT_SCOPE. Returns 0-4 elements depending on how many keys are set in the account's or container's metadata. :param env: The WSGI environment for the request. :returns: [ (X-Account-Meta-Temp-URL-Key str value, ACCOUNT_SCOPE) if set, (X-Account-Meta-Temp-URL-Key-2 str value, ACCOUNT_SCOPE if set, (X-Container-Meta-Temp-URL-Key str value, CONTAINER_SCOPE) if set, (X-Container-Meta-Temp-URL-Key-2 str value, CONTAINER_SCOPE if set, ] """ account_info = get_account_info(env, self.app, swift_source='TU') account_keys = get_tempurl_keys_from_metadata(account_info['meta']) container_info = get_container_info(env, self.app, swift_source='TU') container_keys = get_tempurl_keys_from_metadata( container_info.get('meta', [])) return ([(ak, ACCOUNT_SCOPE) for ak in account_keys] + [(ck, CONTAINER_SCOPE) for ck in container_keys]) def _get_hmacs(self, env, expires, path, scoped_keys, hash_algorithm, request_method=None): """ :param env: The WSGI environment for the request. :param expires: Unix timestamp as an int for when the URL expires. :param path: The path which is used for hashing. :param scoped_keys: (key, scope) tuples like _get_keys() returns :param hash_algorithm: The hash algorithm to use. :param request_method: Optional override of the request in the WSGI env. For example, if a HEAD does not match, you may wish to override with GET to still allow the HEAD. :returns: a list of (hmac, scope) 2-tuples """ if not request_method: request_method = env['REQUEST_METHOD'] digest = functools.partial(hashlib.new, hash_algorithm) return [ (get_hmac(request_method, path, expires, key, digest), scope) for (key, scope) in scoped_keys] def _invalid(self, env, start_response): """ Performs the necessary steps to indicate a WSGI 401 Unauthorized response to the request. :param env: The WSGI environment for the request. :param start_response: The WSGI start_response hook. :returns: 401 response as per WSGI. """ if env['REQUEST_METHOD'] == 'HEAD': body = None else: body = '401 Unauthorized: Temp URL invalid\n' return HTTPUnauthorized(body=body)(env, start_response) def _clean_disallowed_headers(self, env, start_response): """ Validate the absence of disallowed headers for "unsafe" operations. :returns: None for safe operations or swob.HTTPBadResponse if the request includes disallowed headers. 
""" if env['REQUEST_METHOD'] in ('GET', 'HEAD', 'OPTIONS'): return for h in env: if h in self.disallowed_headers: return HTTPBadRequest( body='The header %r is not allowed in this tempurl' % h[len('HTTP_'):].title().replace('_', '-'))( env, start_response) def _clean_incoming_headers(self, env): """ Removes any headers from the WSGI environment as per the middleware configuration for incoming requests. :param env: The WSGI environment for the request. """ for h in env.keys(): if h in self.incoming_allow_headers: continue for p in self.incoming_allow_headers_startswith: if h.startswith(p): break else: if h in self.incoming_remove_headers: del env[h] continue for p in self.incoming_remove_headers_startswith: if h.startswith(p): del env[h] break def _clean_outgoing_headers(self, headers): """ Removes any headers as per the middleware configuration for outgoing responses. :param headers: A WSGI start_response style list of headers, [('header1', 'value), ('header2', 'value), ...] :returns: The same headers list, but with some headers removed as per the middlware configuration for outgoing responses. """ headers = HeaderKeyDict(headers) for h in headers.keys(): if h in self.outgoing_allow_headers: continue for p in self.outgoing_allow_headers_startswith: if h.startswith(p): break else: if h in self.outgoing_remove_headers: del headers[h] continue for p in self.outgoing_remove_headers_startswith: if h.startswith(p): del headers[h] break return headers.items() def filter_factory(global_conf, **local_conf): """Returns the WSGI filter for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) defaults = { 'methods': 'GET HEAD PUT POST DELETE', 'incoming_remove_headers': DEFAULT_INCOMING_REMOVE_HEADERS, 'incoming_allow_headers': DEFAULT_INCOMING_ALLOW_HEADERS, 'outgoing_remove_headers': DEFAULT_OUTGOING_REMOVE_HEADERS, 'outgoing_allow_headers': DEFAULT_OUTGOING_ALLOW_HEADERS, 'allowed_digests': DEFAULT_ALLOWED_DIGESTS, } info_conf = {k: conf.get(k, v).split() for k, v in defaults.items()} allowed_digests = set(digest.lower() for digest in info_conf['allowed_digests']) not_supported = allowed_digests - SUPPORTED_DIGESTS if not_supported: logger = get_logger(conf, log_route='tempurl') logger.warning('The following digest algorithms are configured but ' 'not supported: %s', ', '.join(not_supported)) allowed_digests -= not_supported if not allowed_digests: raise ValueError('No valid digest algorithms are configured ' 'for tempurls') info_conf['allowed_digests'] = sorted(allowed_digests) register_swift_info('tempurl', **info_conf) conf.update(info_conf) return lambda app: TempURL(app, conf) swift-2.17.0/swift/common/middleware/account_quotas.py0000666000175100017510000001150413236061617023134 0ustar zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ ``account_quotas`` is a middleware which blocks write requests (PUT, POST) if a given account quota (in bytes) is exceeded while DELETE requests are still allowed. 
``account_quotas`` uses the ``x-account-meta-quota-bytes`` metadata entry to store the quota. Write requests to this metadata entry are only permitted for resellers. There is no quota limit if ``x-account-meta-quota-bytes`` is not set. The ``account_quotas`` middleware should be added to the pipeline in your ``/etc/swift/proxy-server.conf`` file just after any auth middleware. For example:: [pipeline:main] pipeline = catch_errors cache tempauth account_quotas proxy-server [filter:account_quotas] use = egg:swift#account_quotas To set the quota on an account:: swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \ post -m quota-bytes:10000 Remove the quota:: swift -A http://127.0.0.1:8080/auth/v1.0 -U account:reseller -K secret \ post -m quota-bytes: The same limitations apply for the account quotas as for the container quotas. For example, when uploading an object without a content-length header the proxy server doesn't know the final size of the currently uploaded object and the upload will be allowed if the current account size is within the quota. Due to the eventual consistency further uploads might be possible until the account size has been updated. """ from swift.common.swob import HTTPForbidden, HTTPBadRequest, \ HTTPRequestEntityTooLarge, wsgify from swift.common.utils import register_swift_info from swift.proxy.controllers.base import get_account_info class AccountQuotaMiddleware(object): """Account quota middleware See above for a full description. """ def __init__(self, app, *args, **kwargs): self.app = app @wsgify def __call__(self, request): if request.method not in ("POST", "PUT"): return self.app try: ver, account, container, obj = request.split_path( 2, 4, rest_with_last=True) except ValueError: return self.app if not container: # account request, so we pay attention to the quotas new_quota = request.headers.get( 'X-Account-Meta-Quota-Bytes') remove_quota = request.headers.get( 'X-Remove-Account-Meta-Quota-Bytes') else: # container or object request; even if the quota headers are set # in the request, they're meaningless new_quota = remove_quota = None if remove_quota: new_quota = 0 # X-Remove dominates if both are present if request.environ.get('reseller_request') is True: if new_quota and not new_quota.isdigit(): return HTTPBadRequest() return self.app # deny quota set for non-reseller if new_quota is not None: return HTTPForbidden() if request.method == "POST" or not obj: return self.app content_length = (request.content_length or 0) account_info = get_account_info(request.environ, self.app) if not account_info or not account_info['bytes']: return self.app try: quota = int(account_info['meta'].get('quota-bytes', -1)) except ValueError: return self.app if quota < 0: return self.app new_size = int(account_info['bytes']) + content_length if quota < new_size: resp = HTTPRequestEntityTooLarge(body='Upload exceeds quota.') if 'swift.authorize' in request.environ: orig_authorize = request.environ['swift.authorize'] def reject_authorize(*args, **kwargs): aresp = orig_authorize(*args, **kwargs) if aresp: return aresp return resp request.environ['swift.authorize'] = reject_authorize else: return resp return self.app def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" register_swift_info('account_quotas') def account_quota_filter(app): return AccountQuotaMiddleware(app) return account_quota_filter swift-2.17.0/swift/common/middleware/tempauth.py0000666000175100017510000010720013236061617021732 0ustar 
zuulzuul00000000000000# Copyright (c) 2011-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Test authentication and authorization system.

Add to your pipeline in proxy-server.conf, such as::

    [pipeline:main]
    pipeline = catch_errors cache tempauth proxy-server

Set account auto creation to true in proxy-server.conf::

    [app:proxy-server]
    account_autocreate = true

And add a tempauth filter section, such as::

    [filter:tempauth]
    use = egg:swift#tempauth
    user_admin_admin = admin .admin .reseller_admin
    user_test_tester = testing .admin
    user_test2_tester2 = testing2 .admin
    user_test_tester3 = testing3
    # To allow accounts/users with underscores you can base64 encode them.
    # Here is the account "under_score" and username "a_b" (note the lack
    # of padding equal signs):
    user64_dW5kZXJfc2NvcmU_YV9i = testing4

See the proxy-server.conf-sample for more information.

Account/User List
^^^^^^^^^^^^^^^^^

All accounts/users are listed in the filter section. The format is::

    user_<account>_<user> = <key> [group] [group] [...] [storage_url]

If you want to be able to include underscores in the ``<account>`` or
``<user>`` portions, you can base64 encode them (with *no* equal signs) in
a line like this::

    user64_<account_b64>_<user_b64> = <key> [group] [...] [storage_url]

There are two special groups:

* ``.reseller_admin`` -- can do anything to any account for this auth
* ``.admin`` -- can do anything within the account

If neither of these groups are specified, the user can only access
containers that have been explicitly allowed for them by a ``.admin`` or
``.reseller_admin``.

The trailing optional ``storage_url`` allows you to specify an alternate
URL to hand back to the user upon authentication. If not specified, this
defaults to::

    $HOST/v1/<reseller_prefix>_<account>

Where ``$HOST`` will do its best to resolve to what the requester would
need to use to reach this host, ``<reseller_prefix>`` is from this section,
and ``<account>`` is from the ``user_<account>_<user>`` name. Note that
``$HOST`` cannot possibly handle when you have a load balancer in front of
it that does https while TempAuth itself runs with http; in such a case,
you'll have to specify the ``storage_url_scheme`` configuration value as an
override.

Multiple Reseller Prefix Items
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The reseller prefix specifies which parts of the account namespace this
middleware is responsible for managing authentication and authorization.
By default, the prefix is 'AUTH' so accounts and tokens are prefixed by
'AUTH\_'. When a request's token and/or path start with 'AUTH\_', this
middleware knows it is responsible.

We allow the reseller prefix to be a list. In tempauth, the first item in
the list is used as the prefix for tokens and user groups. The other
prefixes provide alternate accounts that users can access. For example, if
the reseller prefix list is 'AUTH, OTHER', a user with admin access to
'AUTH_account' also has admin access to 'OTHER_account'.

Required Group
^^^^^^^^^^^^^^

The group ``.admin`` is normally needed to access an account (ACLs provide
an additional way to access an account). You can specify the
``require_group`` parameter.
This means that you also need the named group to access an account. If you have several reseller prefix items, prefix the ``require_group`` parameter with the appropriate prefix. X-Service-Token ^^^^^^^^^^^^^^^ If an ``X-Service-Token`` is presented in the request headers, the groups derived from the token are appended to the roles derived from ``X-Auth-Token``. If ``X-Auth-Token`` is missing or invalid, ``X-Service-Token`` is not processed. The ``X-Service-Token`` is useful when combined with multiple reseller prefix items. In the following configuration, accounts prefixed ``SERVICE\_`` are only accessible if ``X-Auth-Token`` is from the end-user and ``X-Service-Token`` is from the ``glance`` user:: [filter:tempauth] use = egg:swift#tempauth reseller_prefix = AUTH, SERVICE SERVICE_require_group = .service user_admin_admin = admin .admin .reseller_admin user_joeacct_joe = joepw .admin user_maryacct_mary = marypw .admin user_glance_glance = glancepw .service The name ``.service`` is an example. Unlike ``.admin`` and ``.reseller_admin`` it is not a reserved name. Please note that ACLs can be set on service accounts and are matched against the identity validated by ``X-Auth-Token``. As such ACLs can grant access to a service account's container without needing to provide a service token, just like any other cross-reseller request using ACLs. Account ACLs ^^^^^^^^^^^^ If a swift_owner issues a POST or PUT to the account with the ``X-Account-Access-Control`` header set in the request, then this may allow certain types of access for additional users. * Read-Only: Users with read-only access can list containers in the account, list objects in any container, retrieve objects, and view unprivileged account/container/object metadata. * Read-Write: Users with read-write access can (in addition to the read-only privileges) create objects, overwrite existing objects, create new containers, and set unprivileged container/object metadata. * Admin: Users with admin access are swift_owners and can perform any action, including viewing/setting privileged metadata (e.g. changing account ACLs). To generate headers for setting an account ACL:: from swift.common.middleware.acl import format_acl acl_data = { 'admin': ['alice'], 'read-write': ['bob', 'carol'] } header_value = format_acl(version=2, acl_dict=acl_data) To generate a curl command line from the above:: token=... storage_url=... 
python -c ' from swift.common.middleware.acl import format_acl acl_data = { 'admin': ['alice'], 'read-write': ['bob', 'carol'] } headers = {'X-Account-Access-Control': format_acl(version=2, acl_dict=acl_data)} header_str = ' '.join(["-H '%s: %s'" % (k, v) for k, v in headers.items()]) print('curl -D- -X POST -H "x-auth-token: $token" %s ' '$storage_url' % header_str) ' """ from __future__ import print_function from time import time from traceback import format_exc from uuid import uuid4 import base64 from eventlet import Timeout import six from swift.common.swob import Response, Request from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \ HTTPUnauthorized from swift.common.request_helpers import get_sys_meta_prefix from swift.common.middleware.acl import ( clean_acl, parse_acl, referrer_allowed, acls_from_account_info) from swift.common.utils import cache_from_env, get_logger, \ split_path, config_true_value, register_swift_info from swift.common.utils import config_read_reseller_options from swift.proxy.controllers.base import get_account_info DEFAULT_TOKEN_LIFE = 86400 class TempAuth(object): """ :param app: The next WSGI app in the pipeline :param conf: The dict of configuration values from the Paste config file """ def __init__(self, app, conf): self.app = app self.conf = conf self.logger = get_logger(conf, log_route='tempauth') self.log_headers = config_true_value(conf.get('log_headers', 'f')) self.reseller_prefixes, self.account_rules = \ config_read_reseller_options(conf, dict(require_group='')) self.reseller_prefix = self.reseller_prefixes[0] self.logger.set_statsd_prefix('tempauth.%s' % ( self.reseller_prefix if self.reseller_prefix else 'NONE',)) self.auth_prefix = conf.get('auth_prefix', '/auth/') if not self.auth_prefix or not self.auth_prefix.strip('/'): self.logger.warning('Rewriting invalid auth prefix "%s" to ' '"/auth/" (Non-empty auth prefix path ' 'is required)' % self.auth_prefix) self.auth_prefix = '/auth/' if not self.auth_prefix.startswith('/'): self.auth_prefix = '/' + self.auth_prefix if not self.auth_prefix.endswith('/'): self.auth_prefix += '/' self.token_life = int(conf.get('token_life', DEFAULT_TOKEN_LIFE)) self.allow_overrides = config_true_value( conf.get('allow_overrides', 't')) self.storage_url_scheme = conf.get('storage_url_scheme', 'default') self.users = {} for conf_key in conf: if conf_key.startswith('user_') or conf_key.startswith('user64_'): account, username = conf_key.split('_', 1)[1].split('_') if conf_key.startswith('user64_'): # Because trailing equal signs would screw up config file # parsing, we auto-pad with '=' chars. account += '=' * (len(account) % 4) account = base64.b64decode(account) username += '=' * (len(username) % 4) username = base64.b64decode(username) values = conf[conf_key].split() if not values: raise ValueError('%s has no key set' % conf_key) key = values.pop(0) if values and ('://' in values[-1] or '$HOST' in values[-1]): url = values.pop() else: url = '$HOST/v1/%s%s' % (self.reseller_prefix, account) self.users[account + ':' + username] = { 'key': key, 'url': url, 'groups': values} def __call__(self, env, start_response): """ Accepts a standard WSGI application call, authenticating the request and installing callback hooks for authorization and ACL header validation. For an authenticated request, REMOTE_USER will be set to a comma separated list of the user's groups. 
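        For example (illustrative values), for user ``test:tester`` in
        account ``test`` with the ``.admin`` group and the default ``AUTH_``
        reseller prefix, REMOTE_USER would be set to::

            test,test:tester,AUTH_test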
With a non-empty reseller prefix, acts as the definitive auth service for just tokens and accounts that begin with that prefix, but will deny requests outside this prefix if no other auth middleware overrides it. With an empty reseller prefix, acts as the definitive auth service only for tokens that validate to a non-empty set of groups. For all other requests, acts as the fallback auth service when no other auth middleware overrides it. Alternatively, if the request matches the self.auth_prefix, the request will be routed through the internal auth request handler (self.handle). This is to handle granting tokens, etc. """ if self.allow_overrides and env.get('swift.authorize_override', False): return self.app(env, start_response) if env.get('PATH_INFO', '').startswith(self.auth_prefix): return self.handle(env, start_response) s3 = env.get('swift3.auth_details') token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN')) service_token = env.get('HTTP_X_SERVICE_TOKEN') if s3 or (token and token.startswith(self.reseller_prefix)): # Note: Empty reseller_prefix will match all tokens. groups = self.get_groups(env, token) if service_token: service_groups = self.get_groups(env, service_token) if groups and service_groups: groups += ',' + service_groups if groups: user = groups and groups.split(',', 1)[0] or '' trans_id = env.get('swift.trans_id') self.logger.debug('User: %s uses token %s (trans_id %s)' % (user, 's3' if s3 else token, trans_id)) env['REMOTE_USER'] = groups env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl if '.reseller_admin' in groups: env['reseller_request'] = True else: # Unauthorized token if self.reseller_prefix and not s3: # Because I know I'm the definitive auth for this token, I # can deny it outright. self.logger.increment('unauthorized') try: vrs, realm, rest = split_path(env['PATH_INFO'], 2, 3, True) except ValueError: realm = 'unknown' return HTTPUnauthorized(headers={ 'Www-Authenticate': 'Swift realm="%s"' % realm})( env, start_response) # Because I'm not certain if I'm the definitive auth for empty # reseller_prefixed tokens, I won't overwrite swift.authorize. elif 'swift.authorize' not in env: env['swift.authorize'] = self.denied_response else: if self._is_definitive_auth(env.get('PATH_INFO', '')): # Handle anonymous access to accounts I'm the definitive # auth for. env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl elif self.reseller_prefix == '': # Because I'm not certain if I'm the definitive auth, I won't # overwrite swift.authorize. if 'swift.authorize' not in env: env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl else: # Not my token, not my account, I can't authorize this request, # deny all is a good idea if not already set... if 'swift.authorize' not in env: env['swift.authorize'] = self.denied_response return self.app(env, start_response) def _is_definitive_auth(self, path): """ Determine if we are the definitive auth Determines if we are the definitive auth for a given path. If the account name is prefixed with something matching one of the reseller_prefix items, then we are the auth (return True) Non-matching: we are not the auth. However, one of the reseller_prefix items can be blank. If so, we cannot always be definite so return False. 
        :param path: A path (e.g., /v1/AUTH_joesaccount/c/o)
        :returns: True if we are the definitive auth
        """
        try:
            version, account, rest = split_path(path, 1, 3, True)
        except ValueError:
            return False
        if account:
            return bool(self._get_account_prefix(account))
        return False

    def _non_empty_reseller_prefixes(self):
        return iter([pre for pre in self.reseller_prefixes if pre != ''])

    def _get_account_prefix(self, account):
        """
        Get the prefix of an account

        Determines which reseller prefix matches the account and returns
        that prefix. If account does not start with one of the known
        reseller prefixes, returns None.

        :param account: Account name (e.g., AUTH_joesaccount) or None
        :returns: The prefix string (examples: 'AUTH_', 'SERVICE_', '').
                  If we can't match the prefix of the account, return None.
        """
        if account is None:
            return None
        # Empty prefix matches everything, so try to match others first
        for prefix in self._non_empty_reseller_prefixes():
            if account.startswith(prefix):
                return prefix
        if '' in self.reseller_prefixes:
            return ''
        return None

    def _dot_account(self, account):
        """
        Detect if account starts with dot character after the prefix

        :param account: account in path (e.g., AUTH_joesaccount)
        :returns: True if the name starts with a dot character
        """
        prefix = self._get_account_prefix(account)
        return prefix is not None and account[len(prefix)] == '.'

    def _get_user_groups(self, account, account_user, account_id):
        """
        :param account: example: test
        :param account_user: example: test:tester
        :param account_id: example: AUTH_test
        :returns: a comma separated string of group names. The group names
                  are as follows: account,account_user,groups...
                  If .admin is in the groups, this is replaced by all the
                  possible account ids. For example, for user joe, account
                  acct and resellers AUTH_, OTHER_, the returned string is
                  as follows: acct,acct:joe,AUTH_acct,OTHER_acct
        """
        groups = [account, account_user]
        groups.extend(self.users[account_user]['groups'])
        if '.admin' in groups:
            groups.remove('.admin')
            for prefix in self._non_empty_reseller_prefixes():
                groups.append('%s%s' % (prefix, account))
            if account_id not in groups:
                groups.append(account_id)
        groups = ','.join(groups)
        return groups

    def get_groups(self, env, token):
        """
        Get groups for the given token.

        :param env: The current WSGI environment dictionary.
        :param token: Token to validate and return a group string for.
        :returns: None if the token is invalid or a string containing a
                  comma separated list of groups the authenticated user is a
                  member of. The first group in the list is also considered
                  a unique identifier for that user.
""" groups = None memcache_client = cache_from_env(env) if not memcache_client: raise Exception('Memcache required') memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token) cached_auth_data = memcache_client.get(memcache_token_key) if cached_auth_data: expires, groups = cached_auth_data if expires < time(): groups = None s3_auth_details = env.get('swift3.auth_details') if s3_auth_details: if 'check_signature' not in s3_auth_details: self.logger.warning( 'Swift3 did not provide a check_signature function; ' 'upgrade Swift3 if you want to use it with tempauth') return None account_user = s3_auth_details['access_key'] if account_user not in self.users: return None user = self.users[account_user] account = account_user.split(':', 1)[0] account_id = user['url'].rsplit('/', 1)[-1] if not s3_auth_details['check_signature'](user['key']): return None env['PATH_INFO'] = env['PATH_INFO'].replace( account_user, account_id, 1) groups = self._get_user_groups(account, account_user, account_id) return groups def account_acls(self, req): """ Return a dict of ACL data from the account server via get_account_info. Auth systems may define their own format, serialization, structure, and capabilities implemented in the ACL headers and persisted in the sysmeta data. However, auth systems are strongly encouraged to be interoperable with Tempauth. Account ACLs are set and retrieved via the header X-Account-Access-Control For header format and syntax, see: * :func:`swift.common.middleware.acl.parse_acl()` * :func:`swift.common.middleware.acl.format_acl()` """ info = get_account_info(req.environ, self.app, swift_source='TA') try: acls = acls_from_account_info(info) except ValueError as e1: self.logger.warning("Invalid ACL stored in metadata: %r" % e1) return None except NotImplementedError as e2: self.logger.warning( "ACL version exceeds middleware version: %r" % e2) return None return acls def extract_acl_and_report_errors(self, req): """ Return a user-readable string indicating the errors in the input ACL, or None if there are no errors. """ acl_header = 'x-account-access-control' acl_data = req.headers.get(acl_header) result = parse_acl(version=2, data=acl_data) if result is None: return 'Syntax error in input (%r)' % acl_data tempauth_acl_keys = 'admin read-write read-only'.split() for key in result: # While it is possible to construct auth systems that collaborate # on ACLs, TempAuth is not such an auth system. At this point, # it thinks it is authoritative. if key not in tempauth_acl_keys: return "Key '%s' not recognized" % key for key in tempauth_acl_keys: if key not in result: continue if not isinstance(result[key], list): return "Value for key '%s' must be a list" % key for grantee in result[key]: if not isinstance(grantee, six.string_types): return "Elements of '%s' list must be strings" % key # Everything looks fine, no errors found internal_hdr = get_sys_meta_prefix('account') + 'core-access-control' req.headers[internal_hdr] = req.headers.pop(acl_header) return None def authorize(self, req): """ Returns None if the request is authorized to continue or a standard WSGI response callable if not. """ try: _junk, account, container, obj = req.split_path(1, 4, True) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if self._get_account_prefix(account) is None: self.logger.debug("Account name: %s doesn't start with " "reseller_prefix(s): %s." 
% (account, ','.join(self.reseller_prefixes))) return self.denied_response(req) # At this point, TempAuth is convinced that it is authoritative. # If you are sending an ACL header, it must be syntactically valid # according to TempAuth's rules for ACL syntax. acl_data = req.headers.get('x-account-access-control') if acl_data is not None: error = self.extract_acl_and_report_errors(req) if error: msg = 'X-Account-Access-Control invalid: %s\n\nInput: %s\n' % ( error, acl_data) headers = [('Content-Type', 'text/plain; charset=UTF-8')] return HTTPBadRequest(request=req, headers=headers, body=msg) user_groups = (req.remote_user or '').split(',') account_user = user_groups[1] if len(user_groups) > 1 else None if '.reseller_admin' in user_groups and \ account not in self.reseller_prefixes and \ not self._dot_account(account): req.environ['swift_owner'] = True self.logger.debug("User %s has reseller admin authorizing." % account_user) return None if account in user_groups and \ (req.method not in ('DELETE', 'PUT') or container): # The user is admin for the account and is not trying to do an # account DELETE or PUT account_prefix = self._get_account_prefix(account) require_group = self.account_rules.get(account_prefix).get( 'require_group') if require_group and require_group in user_groups: req.environ['swift_owner'] = True self.logger.debug("User %s has admin and %s group." " Authorizing." % (account_user, require_group)) return None elif not require_group: req.environ['swift_owner'] = True self.logger.debug("User %s has admin authorizing." % account_user) return None if (req.environ.get('swift_sync_key') and (req.environ['swift_sync_key'] == req.headers.get('x-container-sync-key', None)) and 'x-timestamp' in req.headers): self.logger.debug("Allow request with container sync-key: %s." % req.environ['swift_sync_key']) return None if req.method == 'OPTIONS': # allow OPTIONS requests to proceed as normal self.logger.debug("Allow OPTIONS request.") return None referrers, groups = parse_acl(getattr(req, 'acl', None)) if referrer_allowed(req.referer, referrers): if obj or '.rlistings' in groups: self.logger.debug("Allow authorizing %s via referer ACL." % req.referer) return None for user_group in user_groups: if user_group in groups: self.logger.debug("User %s allowed in ACL: %s authorizing." % (account_user, user_group)) return None # Check for access via X-Account-Access-Control acct_acls = self.account_acls(req) if acct_acls: # At least one account ACL is set in this account's sysmeta data, # so we should see whether this user is authorized by the ACLs. user_group_set = set(user_groups) if user_group_set.intersection(acct_acls['admin']): req.environ['swift_owner'] = True self.logger.debug('User %s allowed by X-Account-Access-Control' ' (admin)' % account_user) return None if (user_group_set.intersection(acct_acls['read-write']) and (container or req.method in ('GET', 'HEAD'))): # The RW ACL allows all operations to containers/objects, but # only GET/HEAD to accounts (and OPTIONS, above) self.logger.debug('User %s allowed by X-Account-Access-Control' ' (read-write)' % account_user) return None if (user_group_set.intersection(acct_acls['read-only']) and req.method in ('GET', 'HEAD')): self.logger.debug('User %s allowed by X-Account-Access-Control' ' (read-only)' % account_user) return None return self.denied_response(req) def denied_response(self, req): """ Returns a standard WSGI response callable with the status of 403 or 401 depending on whether the REMOTE_USER is set or not. 
""" if req.remote_user: self.logger.increment('forbidden') return HTTPForbidden(request=req) else: self.logger.increment('unauthorized') return HTTPUnauthorized(request=req) def handle(self, env, start_response): """ WSGI entry point for auth requests (ones that match the self.auth_prefix). Wraps env in swob.Request object and passes it down. :param env: WSGI environment dictionary :param start_response: WSGI callable """ try: req = Request(env) if self.auth_prefix: req.path_info_pop() req.bytes_transferred = '-' req.client_disconnect = False if 'x-storage-token' in req.headers and \ 'x-auth-token' not in req.headers: req.headers['x-auth-token'] = req.headers['x-storage-token'] return self.handle_request(req)(env, start_response) except (Exception, Timeout): print("EXCEPTION IN handle: %s: %s" % (format_exc(), env)) self.logger.increment('errors') start_response('500 Server Error', [('Content-Type', 'text/plain')]) return ['Internal server error.\n'] def handle_request(self, req): """ Entry point for auth requests (ones that match the self.auth_prefix). Should return a WSGI-style callable (such as swob.Response). :param req: swob.Request object """ req.start_time = time() handler = None try: version, account, user, _junk = split_path(req.path_info, 1, 4, True) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if version in ('v1', 'v1.0', 'auth'): if req.method == 'GET': handler = self.handle_get_token if not handler: self.logger.increment('errors') req.response = HTTPBadRequest(request=req) else: req.response = handler(req) return req.response def handle_get_token(self, req): """ Handles the various `request for token and service end point(s)` calls. There are various formats to support the various auth servers in the past. Examples:: GET /v1//auth X-Auth-User: : or X-Storage-User: X-Auth-Key: or X-Storage-Pass: GET /auth X-Auth-User: : or X-Storage-User: : X-Auth-Key: or X-Storage-Pass: GET /v1.0 X-Auth-User: : or X-Storage-User: : X-Auth-Key: or X-Storage-Pass: On successful authentication, the response will have X-Auth-Token and X-Storage-Token set to the token to use with Swift and X-Storage-URL set to the URL to the default Swift cluster to use. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with data set as explained above. 
""" # Validate the request info try: pathsegs = split_path(req.path_info, 1, 3, True) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if pathsegs[0] == 'v1' and pathsegs[2] == 'auth': account = pathsegs[1] user = req.headers.get('x-storage-user') if not user: user = req.headers.get('x-auth-user') if not user or ':' not in user: self.logger.increment('token_denied') auth = 'Swift realm="%s"' % account return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) account2, user = user.split(':', 1) if account != account2: self.logger.increment('token_denied') auth = 'Swift realm="%s"' % account return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) key = req.headers.get('x-storage-pass') if not key: key = req.headers.get('x-auth-key') elif pathsegs[0] in ('auth', 'v1.0'): user = req.headers.get('x-auth-user') if not user: user = req.headers.get('x-storage-user') if not user or ':' not in user: self.logger.increment('token_denied') auth = 'Swift realm="unknown"' return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) account, user = user.split(':', 1) key = req.headers.get('x-auth-key') if not key: key = req.headers.get('x-storage-pass') else: return HTTPBadRequest(request=req) if not all((account, user, key)): self.logger.increment('token_denied') realm = account or 'unknown' return HTTPUnauthorized(request=req, headers={'Www-Authenticate': 'Swift realm="%s"' % realm}) # Authenticate user account_user = account + ':' + user if account_user not in self.users: self.logger.increment('token_denied') auth = 'Swift realm="%s"' % account return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) if self.users[account_user]['key'] != key: self.logger.increment('token_denied') auth = 'Swift realm="unknown"' return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) account_id = self.users[account_user]['url'].rsplit('/', 1)[-1] # Get memcache client memcache_client = cache_from_env(req.environ) if not memcache_client: raise Exception('Memcache required') # See if a token already exists and hasn't expired token = None memcache_user_key = '%s/user/%s' % (self.reseller_prefix, account_user) candidate_token = memcache_client.get(memcache_user_key) if candidate_token: memcache_token_key = \ '%s/token/%s' % (self.reseller_prefix, candidate_token) cached_auth_data = memcache_client.get(memcache_token_key) if cached_auth_data: expires, old_groups = cached_auth_data old_groups = old_groups.split(',') new_groups = self._get_user_groups(account, account_user, account_id) if expires > time() and \ set(old_groups) == set(new_groups.split(',')): token = candidate_token # Create a new token if one didn't exist if not token: # Generate new token token = '%stk%s' % (self.reseller_prefix, uuid4().hex) expires = time() + self.token_life groups = self._get_user_groups(account, account_user, account_id) # Save token memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token) memcache_client.set(memcache_token_key, (expires, groups), time=float(expires - time())) # Record the token with the user info for future use. 
            memcache_user_key = \
                '%s/user/%s' % (self.reseller_prefix, account_user)
            memcache_client.set(memcache_user_key, token,
                                time=float(expires - time()))
        resp = Response(request=req, headers={
            'x-auth-token': token, 'x-storage-token': token,
            'x-auth-token-expires': str(int(expires - time()))})
        url = self.users[account_user]['url'].replace('$HOST', resp.host_url)
        if self.storage_url_scheme != 'default':
            url = self.storage_url_scheme + ':' + url.split(':', 1)[1]
        resp.headers['x-storage-url'] = url
        return resp


def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    register_swift_info('tempauth', account_acls=True)

    def auth_filter(app):
        return TempAuth(app, conf)
    return auth_filter
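

# A minimal sketch of how this filter is typically wired into
# proxy-server.conf via paste.deploy; the pipeline and the
# user_test_tester entry below are illustrative values, not requirements:
#
#   [pipeline:main]
#   pipeline = catch_errors cache tempauth proxy-server
#
#   [filter:tempauth]
#   use = egg:swift#tempauth
#   user_test_tester = testing .admin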
swift-2.17.0/swift/common/middleware/cname_lookup.py0000666000175100017510000002031413236061617022557 0ustar zuulzuul00000000000000
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
CNAME Lookup Middleware

Middleware that translates an unknown domain in the host header to
something that ends with the configured storage_domain by looking up the
given domain's CNAME record in DNS.

This middleware will continue to follow a CNAME chain in DNS until it
finds a record ending in the configured storage domain or it reaches the
configured maximum lookup depth. If a match is found, the environment's
Host header is rewritten and the request is passed further down the WSGI
chain.
"""

from six.moves import range

from swift import gettext_ as _

try:
    import dns.resolver
    import dns.exception
except ImportError:
    # catch this to allow docs to be built without the dependency
    MODULE_DEPENDENCY_MET = False
else:  # executed if the try block finishes with no errors
    MODULE_DEPENDENCY_MET = True

from swift.common.middleware import RewriteContext
from swift.common.swob import Request, HTTPBadRequest
from swift.common.utils import cache_from_env, get_logger, is_valid_ip, \
    list_from_csv, parse_socket_string, register_swift_info


def lookup_cname(domain, resolver):  # pragma: no cover
    """
    Given a domain, returns its DNS CNAME mapping and DNS ttl.

    :param domain: domain to query on
    :param resolver: dns.resolver.Resolver() instance used for executing
                     DNS queries
    :returns: (ttl, result)
    """
    try:
        answer = resolver.query(domain, 'CNAME').rrset
        ttl = answer.ttl
        result = answer.items[0].to_text()
        result = result.rstrip('.')
        return ttl, result
    except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):
        # As the memcache lib returns None when nothing is found in cache,
        # returning false helps to distinguish between "nothing in cache"
        # (None) and "nothing to cache" (False).
        return 60, False
    except (dns.exception.DNSException):
        return 0, None


class _CnameLookupContext(RewriteContext):
    base_re = r'^(https?://)%s(/.*)?$'


class CNAMELookupMiddleware(object):
    """
    CNAME Lookup Middleware

    See above for a full description.

    :param app: The next WSGI filter or app in the paste.deploy chain.
    :param conf: The configuration dict for the middleware.
    """

    def __init__(self, app, conf):
        if not MODULE_DEPENDENCY_MET:
            # reraise the exception if the dependency wasn't met
            raise ImportError('dnspython is required for this module')

        self.app = app
        storage_domain = conf.get('storage_domain', 'example.com')
        self.storage_domain = ['.' + s for s in
                               list_from_csv(storage_domain)
                               if not s.startswith('.')]
        self.storage_domain += [s for s in list_from_csv(storage_domain)
                                if s.startswith('.')]
        self.lookup_depth = int(conf.get('lookup_depth', '1'))
        nameservers = list_from_csv(conf.get('nameservers'))
        try:
            for i, server in enumerate(nameservers):
                ip_or_host, maybe_port = nameservers[i] = \
                    parse_socket_string(server, None)
                if not is_valid_ip(ip_or_host):
                    raise ValueError
                if maybe_port is not None:
                    int(maybe_port)
        except ValueError:
            raise ValueError('Invalid cname_lookup/nameservers configuration '
                             'found. All nameservers must be valid IPv4 or '
                             'IPv6, followed by an optional :<integer> port.')
        self.resolver = dns.resolver.Resolver()
        if nameservers:
            self.resolver.nameservers = [ip for (ip, port) in nameservers]
            self.resolver.nameserver_ports = {
                ip: int(port) for (ip, port) in nameservers
                if port is not None}
        self.memcache = None
        self.logger = get_logger(conf, log_route='cname-lookup')

    def _domain_endswith_in_storage_domain(self, a_domain):
        a_domain = '.' + a_domain
        for domain in self.storage_domain:
            if a_domain.endswith(domain):
                return True
        return False
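
    # Illustration (hypothetical values): with storage_domain set to
    # "example.com", self.storage_domain is ['.example.com'], so a host of
    # "swift.example.com" matches while "notexample.com" does not; the
    # leading dot prepended to a_domain blocks bare-suffix spoofing while
    # still letting "example.com" itself match.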

    def __call__(self, env, start_response):
        if not self.storage_domain:
            return self.app(env, start_response)
        if 'HTTP_HOST' in env:
            requested_host = given_domain = env['HTTP_HOST']
        else:
            requested_host = given_domain = env['SERVER_NAME']
        port = ''
        if ':' in given_domain:
            given_domain, port = given_domain.rsplit(':', 1)
        if is_valid_ip(given_domain):
            return self.app(env, start_response)
        a_domain = given_domain
        if not self._domain_endswith_in_storage_domain(a_domain):
            if self.memcache is None:
                self.memcache = cache_from_env(env)
            error = True
            for tries in range(self.lookup_depth):
                found_domain = None
                if self.memcache:
                    memcache_key = ''.join(['cname-', a_domain])
                    found_domain = self.memcache.get(memcache_key)
                if found_domain is None:
                    ttl, found_domain = lookup_cname(a_domain, self.resolver)
                    if self.memcache and ttl > 0:
                        memcache_key = ''.join(['cname-', given_domain])
                        self.memcache.set(memcache_key, found_domain,
                                          time=ttl)
                if not found_domain or found_domain == a_domain:
                    # no CNAME records or we're at the last lookup
                    error = True
                    found_domain = None
                    break
                elif self._domain_endswith_in_storage_domain(found_domain):
                    # Found it!
                    self.logger.info(
                        _('Mapped %(given_domain)s to %(found_domain)s') %
                        {'given_domain': given_domain,
                         'found_domain': found_domain})
                    if port:
                        env['HTTP_HOST'] = ':'.join([found_domain, port])
                    else:
                        env['HTTP_HOST'] = found_domain
                    error = False
                    break
                else:
                    # try one more deep in the chain
                    self.logger.debug(
                        _('Following CNAME chain for '
                          '%(given_domain)s to %(found_domain)s') %
                        {'given_domain': given_domain,
                         'found_domain': found_domain})
                    a_domain = found_domain
            if error:
                if found_domain:
                    msg = 'CNAME lookup failed after %d tries' % \
                        self.lookup_depth
                else:
                    msg = 'CNAME lookup failed to resolve to a valid domain'
                resp = HTTPBadRequest(request=Request(env), body=msg,
                                      content_type='text/plain')
                return resp(env, start_response)
            else:
                context = _CnameLookupContext(self.app, requested_host,
                                              env['HTTP_HOST'])
                return context.handle_request(env, start_response)
        return self.app(env, start_response)


def filter_factory(global_conf, **local_conf):  # pragma: no cover
    conf = global_conf.copy()
    conf.update(local_conf)

    register_swift_info('cname_lookup',
                        lookup_depth=int(conf.get('lookup_depth', '1')))

    def cname_filter(app):
        return CNAMELookupMiddleware(app, conf)
    return cname_filter
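

# A hedged configuration sketch for this filter in proxy-server.conf (the
# domain, depth, and nameserver values are examples only):
#
#   [filter:cname_lookup]
#   use = egg:swift#cname_lookup
#   storage_domain = example.com
#   lookup_depth = 1
#   # nameservers = 127.0.0.1:5353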
swift-2.17.0/swift/common/middleware/staticweb.py0000666000175100017510000005655013236061617022103 0ustar zuulzuul00000000000000
# Copyright (c) 2010-2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
This StaticWeb WSGI middleware will serve container data as a static web
site with index file and error file resolution and optional file listings.
This mode is normally only active for anonymous requests. When using
keystone for authentication set ``delay_auth_decision = true`` in the
authtoken middleware configuration in your ``/etc/swift/proxy-server.conf``
file. If you want to use it with authenticated requests, set the
``X-Web-Mode: true`` header on the request.

The ``staticweb`` filter should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware. Also,
the configuration section for the ``staticweb`` middleware itself needs to
be added. For example::

    [DEFAULT]
    ...

    [pipeline:main]
    pipeline = catch_errors healthcheck proxy-logging cache ratelimit
               tempauth staticweb proxy-logging proxy-server

    ...

    [filter:staticweb]
    use = egg:swift#staticweb

Any publicly readable containers (for example, ``X-Container-Read: .r:*``,
see :ref:`acls` for more information on this) will be checked for
X-Container-Meta-Web-Index and X-Container-Meta-Web-Error header values::

    X-Container-Meta-Web-Index  <index>
    X-Container-Meta-Web-Error  <error>

If X-Container-Meta-Web-Index is set, any <index> files will be served
without having to specify the <index> part. For instance, setting
``X-Container-Meta-Web-Index: index.html`` will be able to serve the object
.../pseudo/path/index.html with just .../pseudo/path or .../pseudo/path/

If X-Container-Meta-Web-Error is set, any errors (currently just 401
Unauthorized and 404 Not Found) will instead serve the .../<status><error>
object. For instance, setting ``X-Container-Meta-Web-Error: error.html``
will serve .../404error.html for requests for paths not found.

For pseudo paths that have no <index>, this middleware can serve HTML file
listings if you set the ``X-Container-Meta-Web-Listings: true`` metadata
item on the container.

If listings are enabled, the listings can have a custom style sheet by
setting the X-Container-Meta-Web-Listings-CSS header. For instance, setting
``X-Container-Meta-Web-Listings-CSS: listing.css`` will make listings link
to the .../listing.css style sheet. If you "view source" in your browser on
a listing page, you will see the well defined document structure that can
be styled.

By default, the listings will be rendered with a label of
"Listing of /v1/account/container/path". This can be altered by setting a
``X-Container-Meta-Web-Listings-Label: