swift-2.7.0/0000775000567000056710000000000012675204211014015 5ustar jenkinsjenkins00000000000000swift-2.7.0/MANIFEST.in0000664000567000056710000000047112675204037015563 0ustar jenkinsjenkins00000000000000include AUTHORS LICENSE .functests .unittests .probetests test/__init__.py include CHANGELOG CONTRIBUTING.md README.md include babel.cfg include test/sample.conf include tox.ini include requirements.txt test-requirements.txt graft doc graft etc graft locale graft test/functional graft test/probe graft test/unit swift-2.7.0/swift.egg-info/0000775000567000056710000000000012675204211016643 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift.egg-info/SOURCES.txt0000664000567000056710000003370112675204211020533 0ustar jenkinsjenkins00000000000000.alltests .coveragerc .functests .mailmap .manpages .probetests .testr.conf .unittests AUTHORS CHANGELOG CONTRIBUTING.md LICENSE MANIFEST.in README.md babel.cfg bandit.yaml requirements.txt setup.cfg setup.py test-requirements.txt tox.ini bin/swift-account-audit bin/swift-account-auditor bin/swift-account-info bin/swift-account-reaper bin/swift-account-replicator bin/swift-account-server bin/swift-config bin/swift-container-auditor bin/swift-container-info bin/swift-container-reconciler bin/swift-container-replicator bin/swift-container-server bin/swift-container-sync bin/swift-container-updater bin/swift-dispersion-populate bin/swift-dispersion-report bin/swift-drive-audit bin/swift-form-signature bin/swift-get-nodes bin/swift-init bin/swift-object-auditor bin/swift-object-expirer bin/swift-object-info bin/swift-object-reconstructor bin/swift-object-replicator bin/swift-object-server bin/swift-object-updater bin/swift-oldies bin/swift-orphans bin/swift-proxy-server bin/swift-recon bin/swift-recon-cron bin/swift-reconciler-enqueue bin/swift-ring-builder bin/swift-ring-builder-analyzer bin/swift-temp-url doc/manpages/account-server.conf.5 doc/manpages/container-server.conf.5 doc/manpages/dispersion.conf.5 doc/manpages/object-expirer.conf.5 doc/manpages/object-server.conf.5 doc/manpages/proxy-server.conf.5 doc/manpages/swift-account-auditor.1 doc/manpages/swift-account-info.1 doc/manpages/swift-account-reaper.1 doc/manpages/swift-account-replicator.1 doc/manpages/swift-account-server.1 doc/manpages/swift-container-auditor.1 doc/manpages/swift-container-info.1 doc/manpages/swift-container-replicator.1 doc/manpages/swift-container-server.1 doc/manpages/swift-container-sync.1 doc/manpages/swift-container-updater.1 doc/manpages/swift-dispersion-populate.1 doc/manpages/swift-dispersion-report.1 doc/manpages/swift-get-nodes.1 doc/manpages/swift-init.1 doc/manpages/swift-object-auditor.1 doc/manpages/swift-object-expirer.1 doc/manpages/swift-object-info.1 doc/manpages/swift-object-replicator.1 doc/manpages/swift-object-server.1 doc/manpages/swift-object-updater.1 doc/manpages/swift-orphans.1 doc/manpages/swift-proxy-server.1 doc/manpages/swift-recon.1 doc/manpages/swift-ring-builder.1 doc/saio/rsyncd.conf doc/saio/bin/remakerings doc/saio/bin/resetswift doc/saio/bin/startmain doc/saio/bin/startrest doc/saio/rsyslog.d/10-swift.conf doc/saio/swift/container-reconciler.conf doc/saio/swift/container-sync-realms.conf doc/saio/swift/object-expirer.conf doc/saio/swift/proxy-server.conf doc/saio/swift/swift.conf doc/saio/swift/account-server/1.conf doc/saio/swift/account-server/2.conf doc/saio/swift/account-server/3.conf doc/saio/swift/account-server/4.conf doc/saio/swift/container-server/1.conf doc/saio/swift/container-server/2.conf 
doc/saio/swift/container-server/3.conf doc/saio/swift/container-server/4.conf doc/saio/swift/object-server/1.conf doc/saio/swift/object-server/2.conf doc/saio/swift/object-server/3.conf doc/saio/swift/object-server/4.conf doc/source/account.rst doc/source/admin_guide.rst doc/source/apache_deployment_guide.rst doc/source/associated_projects.rst doc/source/conf.py doc/source/container.rst doc/source/cors.rst doc/source/crossdomain.rst doc/source/db.rst doc/source/deployment_guide.rst doc/source/development_auth.rst doc/source/development_guidelines.rst doc/source/development_middleware.rst doc/source/development_ondisk_backends.rst doc/source/development_saio.rst doc/source/first_contribution_swift.rst doc/source/getting_started.rst doc/source/howto_installmultinode.rst doc/source/index.rst doc/source/logs.rst doc/source/middleware.rst doc/source/misc.rst doc/source/object.rst doc/source/overview_architecture.rst doc/source/overview_auth.rst doc/source/overview_backing_store.rst doc/source/overview_container_sync.rst doc/source/overview_erasure_code.rst doc/source/overview_expiring_objects.rst doc/source/overview_large_objects.rst doc/source/overview_object_versioning.rst doc/source/overview_policies.rst doc/source/overview_reaper.rst doc/source/overview_replication.rst doc/source/overview_ring.rst doc/source/policies_saio.rst doc/source/proxy.rst doc/source/ratelimit.rst doc/source/replication_network.rst doc/source/ring.rst doc/source/test-cors.html doc/source/api/authentication.rst doc/source/api/container_quotas.rst doc/source/api/discoverability.rst doc/source/api/form_post_middleware.rst doc/source/api/large_objects.rst doc/source/api/object_api_v1_overview.rst doc/source/api/object_versioning.rst doc/source/api/temporary_url_middleware.rst doc/source/api/use_content-encoding_metadata.rst doc/source/api/use_the_content-disposition_metadata.rst doc/source/images/ec_overview.png doc/source/ops_runbook/diagnose.rst doc/source/ops_runbook/index.rst doc/source/ops_runbook/maintenance.rst doc/source/ops_runbook/procedures.rst doc/source/ops_runbook/troubleshooting.rst etc/account-server.conf-sample etc/container-reconciler.conf-sample etc/container-server.conf-sample etc/container-sync-realms.conf-sample etc/dispersion.conf-sample etc/drive-audit.conf-sample etc/internal-client.conf-sample etc/memcache.conf-sample etc/mime.types-sample etc/object-expirer.conf-sample etc/object-server.conf-sample etc/proxy-server.conf-sample etc/rsyncd.conf-sample etc/swift-rsyslog.conf-sample etc/swift.conf-sample examples/apache2/account-server.template examples/apache2/container-server.template examples/apache2/object-server.template examples/apache2/proxy-server.template examples/wsgi/account-server.wsgi.template examples/wsgi/container-server.wsgi.template examples/wsgi/object-server.wsgi.template examples/wsgi/proxy-server.wsgi.template swift/__init__.py swift.egg-info/PKG-INFO swift.egg-info/SOURCES.txt swift.egg-info/dependency_links.txt swift.egg-info/entry_points.txt swift.egg-info/not-zip-safe swift.egg-info/pbr.json swift.egg-info/requires.txt swift.egg-info/top_level.txt swift/account/__init__.py swift/account/auditor.py swift/account/backend.py swift/account/reaper.py swift/account/replicator.py swift/account/server.py swift/account/utils.py swift/cli/__init__.py swift/cli/form_signature.py swift/cli/info.py swift/cli/recon.py swift/cli/ring_builder_analyzer.py swift/cli/ringbuilder.py swift/common/__init__.py swift/common/base_storage_server.py swift/common/bufferedhttp.py 
swift/common/constraints.py swift/common/container_sync_realms.py swift/common/daemon.py swift/common/db.py swift/common/db_replicator.py swift/common/direct_client.py swift/common/exceptions.py swift/common/header_key_dict.py swift/common/http.py swift/common/internal_client.py swift/common/manager.py swift/common/memcached.py swift/common/request_helpers.py swift/common/splice.py swift/common/storage_policy.py swift/common/swob.py swift/common/utils.py swift/common/wsgi.py swift/common/middleware/__init__.py swift/common/middleware/account_quotas.py swift/common/middleware/acl.py swift/common/middleware/bulk.py swift/common/middleware/catch_errors.py swift/common/middleware/cname_lookup.py swift/common/middleware/container_quotas.py swift/common/middleware/container_sync.py swift/common/middleware/crossdomain.py swift/common/middleware/dlo.py swift/common/middleware/domain_remap.py swift/common/middleware/formpost.py swift/common/middleware/gatekeeper.py swift/common/middleware/healthcheck.py swift/common/middleware/keystoneauth.py swift/common/middleware/list_endpoints.py swift/common/middleware/memcache.py swift/common/middleware/name_check.py swift/common/middleware/proxy_logging.py swift/common/middleware/ratelimit.py swift/common/middleware/recon.py swift/common/middleware/slo.py swift/common/middleware/staticweb.py swift/common/middleware/tempauth.py swift/common/middleware/tempurl.py swift/common/middleware/versioned_writes.py swift/common/middleware/xprofile.py swift/common/middleware/x_profile/__init__.py swift/common/middleware/x_profile/exceptions.py swift/common/middleware/x_profile/html_viewer.py swift/common/middleware/x_profile/profile_model.py swift/common/ring/__init__.py swift/common/ring/builder.py swift/common/ring/ring.py swift/common/ring/utils.py swift/container/__init__.py swift/container/auditor.py swift/container/backend.py swift/container/reconciler.py swift/container/replicator.py swift/container/server.py swift/container/sync.py swift/container/sync_store.py swift/container/updater.py swift/locale/swift.pot swift/locale/de/LC_MESSAGES/swift.po swift/locale/es/LC_MESSAGES/swift.po swift/locale/fr/LC_MESSAGES/swift.po swift/locale/it/LC_MESSAGES/swift.po swift/locale/ja/LC_MESSAGES/swift.po swift/locale/ko_KR/LC_MESSAGES/swift.po swift/locale/pt_BR/LC_MESSAGES/swift.po swift/locale/ru/LC_MESSAGES/swift.po swift/locale/tr_TR/LC_MESSAGES/swift.po swift/locale/zh_CN/LC_MESSAGES/swift.po swift/locale/zh_TW/LC_MESSAGES/swift.po swift/obj/__init__.py swift/obj/auditor.py swift/obj/diskfile.py swift/obj/expirer.py swift/obj/mem_diskfile.py swift/obj/mem_server.py swift/obj/reconstructor.py swift/obj/replicator.py swift/obj/server.py swift/obj/ssync_receiver.py swift/obj/ssync_sender.py swift/obj/updater.py swift/proxy/__init__.py swift/proxy/server.py swift/proxy/controllers/__init__.py swift/proxy/controllers/account.py swift/proxy/controllers/base.py swift/proxy/controllers/container.py swift/proxy/controllers/info.py swift/proxy/controllers/obj.py test/__init__.py test/sample.conf test/functional/__init__.py test/functional/swift_test_client.py test/functional/test_access_control.py test/functional/test_account.py test/functional/test_container.py test/functional/test_object.py test/functional/tests.py test/probe/__init__.py test/probe/brain.py test/probe/common.py test/probe/test_account_failures.py test/probe/test_account_get_fake_responses_match.py test/probe/test_account_reaper.py test/probe/test_container_failures.py 
test/probe/test_container_merge_policy_index.py test/probe/test_container_sync.py test/probe/test_empty_device_handoff.py test/probe/test_object_async_update.py test/probe/test_object_expirer.py test/probe/test_object_failures.py test/probe/test_object_handoff.py test/probe/test_object_metadata_replication.py test/probe/test_reconstructor_durable.py test/probe/test_reconstructor_rebuild.py test/probe/test_reconstructor_revert.py test/probe/test_replication_servers_working.py test/probe/test_wsgi_servers.py test/unit/__init__.py test/unit/account/__init__.py test/unit/account/test_auditor.py test/unit/account/test_backend.py test/unit/account/test_reaper.py test/unit/account/test_replicator.py test/unit/account/test_server.py test/unit/account/test_utils.py test/unit/cli/__init__.py test/unit/cli/test_form_signature.py test/unit/cli/test_info.py test/unit/cli/test_recon.py test/unit/cli/test_ring_builder_analyzer.py test/unit/cli/test_ringbuilder.py test/unit/common/__init__.py test/unit/common/corrupted_example.db test/unit/common/malformed_example.db test/unit/common/test_base_storage_server.py test/unit/common/test_bufferedhttp.py test/unit/common/test_constraints.py test/unit/common/test_container_sync_realms.py test/unit/common/test_daemon.py test/unit/common/test_db.py test/unit/common/test_db_replicator.py test/unit/common/test_direct_client.py test/unit/common/test_exceptions.py test/unit/common/test_header_key_dict.py test/unit/common/test_internal_client.py test/unit/common/test_manager.py test/unit/common/test_memcached.py test/unit/common/test_request_helpers.py test/unit/common/test_splice.py test/unit/common/test_storage_policy.py test/unit/common/test_swob.py test/unit/common/test_utils.py test/unit/common/test_wsgi.py test/unit/common/middleware/__init__.py test/unit/common/middleware/helpers.py test/unit/common/middleware/test_account_quotas.py test/unit/common/middleware/test_acl.py test/unit/common/middleware/test_bulk.py test/unit/common/middleware/test_cname_lookup.py test/unit/common/middleware/test_container_sync.py test/unit/common/middleware/test_crossdomain.py test/unit/common/middleware/test_dlo.py test/unit/common/middleware/test_domain_remap.py test/unit/common/middleware/test_except.py test/unit/common/middleware/test_formpost.py test/unit/common/middleware/test_gatekeeper.py test/unit/common/middleware/test_healthcheck.py test/unit/common/middleware/test_keystoneauth.py test/unit/common/middleware/test_list_endpoints.py test/unit/common/middleware/test_memcache.py test/unit/common/middleware/test_name_check.py test/unit/common/middleware/test_proxy_logging.py test/unit/common/middleware/test_quotas.py test/unit/common/middleware/test_ratelimit.py test/unit/common/middleware/test_recon.py test/unit/common/middleware/test_slo.py test/unit/common/middleware/test_staticweb.py test/unit/common/middleware/test_tempauth.py test/unit/common/middleware/test_tempurl.py test/unit/common/middleware/test_versioned_writes.py test/unit/common/middleware/test_xprofile.py test/unit/common/ring/__init__.py test/unit/common/ring/test_builder.py test/unit/common/ring/test_ring.py test/unit/common/ring/test_utils.py test/unit/container/__init__.py test/unit/container/test_auditor.py test/unit/container/test_backend.py test/unit/container/test_reconciler.py test/unit/container/test_replicator.py test/unit/container/test_server.py test/unit/container/test_sync.py test/unit/container/test_sync_store.py test/unit/container/test_updater.py test/unit/obj/__init__.py 
test/unit/obj/common.py test/unit/obj/test_auditor.py test/unit/obj/test_diskfile.py test/unit/obj/test_expirer.py test/unit/obj/test_reconstructor.py test/unit/obj/test_replicator.py test/unit/obj/test_server.py test/unit/obj/test_ssync.py test/unit/obj/test_ssync_receiver.py test/unit/obj/test_ssync_sender.py test/unit/obj/test_updater.py test/unit/proxy/__init__.py test/unit/proxy/test_mem_server.py test/unit/proxy/test_server.py test/unit/proxy/test_sysmeta.py test/unit/proxy/controllers/__init__.py test/unit/proxy/controllers/test_account.py test/unit/proxy/controllers/test_base.py test/unit/proxy/controllers/test_container.py test/unit/proxy/controllers/test_info.py test/unit/proxy/controllers/test_obj.py test/unit/test_locale/README test/unit/test_locale/__init__.py test/unit/test_locale/eo.po test/unit/test_locale/messages.mo test/unit/test_locale/test_locale.py test/unit/test_locale/eo/LC_MESSAGES/swift.moswift-2.7.0/swift.egg-info/entry_points.txt0000664000567000056710000000344712675204210022150 0ustar jenkinsjenkins00000000000000[paste.app_factory] account = swift.account.server:app_factory container = swift.container.server:app_factory mem_object = swift.obj.mem_server:app_factory object = swift.obj.server:app_factory proxy = swift.proxy.server:app_factory [paste.filter_factory] account_quotas = swift.common.middleware.account_quotas:filter_factory bulk = swift.common.middleware.bulk:filter_factory catch_errors = swift.common.middleware.catch_errors:filter_factory cname_lookup = swift.common.middleware.cname_lookup:filter_factory container_quotas = swift.common.middleware.container_quotas:filter_factory container_sync = swift.common.middleware.container_sync:filter_factory crossdomain = swift.common.middleware.crossdomain:filter_factory dlo = swift.common.middleware.dlo:filter_factory domain_remap = swift.common.middleware.domain_remap:filter_factory formpost = swift.common.middleware.formpost:filter_factory gatekeeper = swift.common.middleware.gatekeeper:filter_factory healthcheck = swift.common.middleware.healthcheck:filter_factory keystoneauth = swift.common.middleware.keystoneauth:filter_factory list_endpoints = swift.common.middleware.list_endpoints:filter_factory memcache = swift.common.middleware.memcache:filter_factory name_check = swift.common.middleware.name_check:filter_factory proxy_logging = swift.common.middleware.proxy_logging:filter_factory ratelimit = swift.common.middleware.ratelimit:filter_factory recon = swift.common.middleware.recon:filter_factory slo = swift.common.middleware.slo:filter_factory staticweb = swift.common.middleware.staticweb:filter_factory tempauth = swift.common.middleware.tempauth:filter_factory tempurl = swift.common.middleware.tempurl:filter_factory versioned_writes = swift.common.middleware.versioned_writes:filter_factory xprofile = swift.common.middleware.xprofile:filter_factory swift-2.7.0/swift.egg-info/requires.txt0000664000567000056710000000032612675204210021243 0ustar jenkinsjenkins00000000000000eventlet>=0.17.4 greenlet>=0.3.1 netifaces>=0.5,!=0.10.0,!=0.10.1 pastedeploy>=1.3.3 six>=1.9.0 xattr>=0.4 PyECLib>=1.2.0 [:(python_version<'3.0')] dnspython>=1.12.0 [:(python_version>='3.0')] dnspython3>=1.12.0 swift-2.7.0/swift.egg-info/pbr.json0000664000567000056710000000005612675204210020321 0ustar jenkinsjenkins00000000000000{"git_version": "e0bac5e", "is_release": true}swift-2.7.0/swift.egg-info/PKG-INFO0000664000567000056710000001074612675204210017747 0ustar jenkinsjenkins00000000000000Metadata-Version: 1.1 Name: swift Version: 
2.7.0 Summary: OpenStack Object Storage Home-page: http://www.openstack.org/ Author: OpenStack Author-email: openstack-dev@lists.openstack.org License: UNKNOWN Description: # Swift A distributed object storage system designed to scale from a single machine to thousands of servers. Swift is optimized for multi-tenancy and high concurrency. Swift is ideal for backups, web and mobile content, and any other unstructured data that can grow without bound. Swift provides a simple, REST-based API fully documented at http://docs.openstack.org/. Swift was originally developed as the basis for Rackspace's Cloud Files and was open-sourced in 2010 as part of the OpenStack project. It has since grown to include contributions from many companies and has spawned a thriving ecosystem of 3rd party tools. Swift's contributors are listed in the AUTHORS file. ## Docs To build documentation install sphinx (`pip install sphinx`), run `python setup.py build_sphinx`, and then browse to /doc/build/html/index.html. These docs are auto-generated after every commit and available online at http://docs.openstack.org/developer/swift/. ## For Developers The best place to get started is the ["SAIO - Swift All In One"](http://docs.openstack.org/developer/swift/development_saio.html). This document will walk you through setting up a development cluster of Swift in a VM. The SAIO environment is ideal for running small-scale tests against swift and trying out new features and bug fixes. You can run unit tests with `.unittests` and functional tests with `.functests`. If you would like to start contributing, check out these [notes](CONTRIBUTING.md) to help you get started. ### Code Organization * bin/: Executable scripts that are the processes run by the deployer * doc/: Documentation * etc/: Sample config files * swift/: Core code * account/: account server * common/: code shared by different modules * middleware/: "standard", officially-supported middleware * ring/: code implementing Swift's ring * container/: container server * obj/: object server * proxy/: proxy server * test/: Unit and functional tests ### Data Flow Swift is a WSGI application and uses eventlet's WSGI server. After the processes are running, the entry point for new requests is the `Application` class in `swift/proxy/server.py`. From there, a controller is chosen, and the request is processed. The proxy may choose to forward the request to a back-end server. For example, the entry point for requests to the object server is the `ObjectController` class in `swift/obj/server.py`. ## For Deployers Deployer docs are also available at http://docs.openstack.org/developer/swift/. A good starting point is at http://docs.openstack.org/developer/swift/deployment_guide.html You can run functional tests against a swift cluster with `.functests`. These functional tests require `/etc/swift/test.conf` to run. A sample config file can be found in this source tree in `test/sample.conf`. ## For Client Apps For client applications, official Python language bindings are provided at http://github.com/openstack/python-swiftclient. Complete API documentation at http://docs.openstack.org/api/openstack-object-storage/1.0/content/ ---- For more information come hang out in #openstack-swift on freenode. 
Thanks, The Swift Development Team Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 swift-2.7.0/swift.egg-info/dependency_links.txt0000664000567000056710000000000112675204210022710 0ustar jenkinsjenkins00000000000000 swift-2.7.0/swift.egg-info/not-zip-safe0000664000567000056710000000000112675204203021072 0ustar jenkinsjenkins00000000000000 swift-2.7.0/swift.egg-info/top_level.txt0000664000567000056710000000000612675204210021370 0ustar jenkinsjenkins00000000000000swift swift-2.7.0/swift/0000775000567000056710000000000012675204211015151 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/0000775000567000056710000000000012675204211016410 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/ru/0000775000567000056710000000000012675204211017036 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/ru/LC_MESSAGES/0000775000567000056710000000000012675204211020623 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/ru/LC_MESSAGES/swift.po0000664000567000056710000012727612675204037022344 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Filatov Sergey , 2016. #zanata # Grigory Mokhin , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 03:44+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-21 07:06+0000\n" "Last-Translator: Grigory Mokhin \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 
2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Russian\n" msgid "" "\n" "user quit" msgstr "" "\n" "Завершение работы пользователя" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - параллельно, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "Проверено суффиксов: %(checked)d - хэшировано: %(hashed).2f%%, " "синхронизировано: %(synced).2f%%" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "Ответили как размонтированные: %(ip)s/%(device)s" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "Реконструированно разделов: %(reconstructed)d/%(total)d (%(percentage).2f%%) " "partitions of %(device)d/%(dtotal)d (%(dpercentage).2f%%) за время " "%(time).2fs (%(rate).2f/sec, осталось: %(remaining)s)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "Реплицировано разделов: %(replicated)d/%(total)d (%(percentage).2f%%) за " "время %(time).2f с (%(rate).2f/с, осталось: %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s успешно, %(failure)s с ошибками" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s возвратил 503 для %(statuses)s" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d не запущен (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "Возможно, %s (%s) остановлен" #, python-format msgid "%s already started..." msgstr "%s уже запущен..." 
#, python-format msgid "%s does not exist" msgstr "%s не существует" #, python-format msgid "%s is not mounted" msgstr "%s не смонтирован" #, python-format msgid "%s responded as unmounted" msgstr "%s ответил как размонтированный" #, python-format msgid "%s running (%s - %s)" msgstr "%s выполняется (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: соединение сброшено на другой стороне" #, python-format msgid ", %s containers deleted" msgstr ", удалено контейнеров: %s" #, python-format msgid ", %s containers possibly remaining" msgstr ", осталось контейнеров (возможно): %s" #, python-format msgid ", %s containers remaining" msgstr ", осталось контейнеров: %s" #, python-format msgid ", %s objects deleted" msgstr ", удалено объектов: %s" #, python-format msgid ", %s objects possibly remaining" msgstr ", осталось объектов (возможно): %s" #, python-format msgid ", %s objects remaining" msgstr ", осталось объектов: %s" #, python-format msgid ", elapsed: %.02fs" msgstr ", прошло: %.02fs" msgid ", return codes: " msgstr ", коды возврата: " msgid "Account" msgstr "Учетная запись" #, python-format msgid "Account %s has not been reaped since %s" msgstr "Учетная запись %s не очищалась после %s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Проверка учетной записи в \"однократном\" режиме завершена: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Проход контроля учетной записи выполнен: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Попытка репликации %(count)d баз данных за %(time).5f секунд (%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "Контроль %s не выполнен: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Неправильный код возврата rsync: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Начать проверку учетной записи в \"однократном\" режиме" msgid "Begin account audit pass." msgstr "Начать проход проверки учетной записи." msgid "Begin container audit \"once\" mode" msgstr "Начать проверку контейнера в \"однократном\" режиме" msgid "Begin container audit pass." msgstr "Начать проход проверки контейнера." msgid "Begin container sync \"once\" mode" msgstr "Начать синхронизацию контейнера в \"однократном\" режиме" msgid "Begin container update single threaded sweep" msgstr "Начать однонитевую сплошную проверку обновлений контейнера" msgid "Begin container update sweep" msgstr "Начать сплошную проверку обновлений контейнера" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "Начать проверку объекта в режиме \"%s\" (%s%s)" msgid "Begin object update single threaded sweep" msgstr "Начать однонитевую сплошную проверку обновлений объекта" msgid "Begin object update sweep" msgstr "Начать сплошную проверку обновлений объекта" #, python-format msgid "Beginning pass on account %s" msgstr "Начинается проход для учетной записи %s" msgid "Beginning replication run" msgstr "Запуск репликации" msgid "Broker error trying to rollback locked connection" msgstr "Ошибка посредника при попытке отката заблокированного соединения" #, python-format msgid "Can not access the file %s." msgstr "Отсутствует доступ к файлу %s." #, python-format msgid "Can not load profile data from %s." msgstr "Не удается загрузить данные профайла из %s." 
#, python-format msgid "Client did not read from proxy within %ss" msgstr "Клиент не прочитал данные из proxy в %ss" msgid "Client disconnected on read" msgstr "Клиент отключен во время чтения" msgid "Client disconnected without sending enough data" msgstr "Клиент отключен без отправки данных" msgid "Client disconnected without sending last chunk" msgstr "Клиент отключился, не отправив последний фрагмент данных" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Путь клиента %(client)s не соответствует пути в метаданных объекта %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Опция internal_client_conf_path конфигурации не определена. Используется " "конфигурация по умолчанию. Используйте intenal-client.conf-sample для " "информации об опциях" msgid "Connection refused" msgstr "Соединение отклонено" msgid "Connection timeout" msgstr "Тайм-аут соединения" msgid "Container" msgstr "контейнер" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Проверка контейнера в \"однократном\" режиме завершена: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Проход проверки контейнера завершен: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Синхронизация контейнера в \"однократном\" режиме завершена: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Сплошная однонитевая проверка обновлений контейнера завершена: " "%(elapsed).02fs, успешно: %(success)s, сбоев: %(fail)s, без изменений: " "%(no_change)s" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Сплошная проверка обновлений контейнера завершена: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Сплошная проверка обновлений контейнера в %(path)s завершена: " "%(elapsed).02fs, успешно: %(success)s, сбоев: %(fail)s, без изменений: " "%(no_change)s" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "Не удалось подключиться к порту %s:%s по истечении %s секунд" #, python-format msgid "Could not load %r: %s" msgstr "Не удалось загрузить %r: %s" #, python-format msgid "Data download error: %s" msgstr "Ошибка загрузки данных: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Проход устройств выполнен: %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "Каталог %r не связан со стратегией policy (%s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "Ошибка %(status)d %(body)s из сервера %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "Ошибка %(status)d %(body)s, ответ от сервера объекта: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "Ошибка %(status)d. Ожидаемое значение от сервера объекта: 100-continue" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" "Ошибка %(status)d. 
попытка выполнить метод %(method)s %(path)s из сервера " "контейнера" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/" "%(device)s (операция будет повторена позднее): Ответ: %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "Ошибка: обновление учетной записи не выполнено, в запросе указано разное " "число хостов и устройств: \"%s\" и \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "Ошибка: Неправильный запрос %(status)s из %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "Ошибка: тайм-аут чтения клиента (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "Ошибка. Обновление контейнера не выполнено (сохранение асинхронных " "обновлений будет выполнено позднее): %(status)d ответ от %(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "Ошибка: обновление контейнера не выполнено, в запросе указано разное число " "хостов и устройств: \"%s\" и \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "Ошибка: не удалось получить сведения об учетной записи %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "Ошибка: не удалось получить информацию о контейнере %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "Ошибка: ошибка закрытия DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "Ошибка. Исключительная ситуация при отключении клиента" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ОШИБКА. Исключительная ситуация при передаче данных на серверы объектов %s" msgid "ERROR Failed to get my own IPs?" msgstr "Ошибка: не удалось получить собственные IP-адреса?" msgid "ERROR Insufficient Storage" msgstr "Ошибка - недостаточно памяти" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "Ошибка: контроль объекта %(obj)s не выполнен, объект помещен в карантин: " "%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "Ошибка Pickle, %s помещается в карантин" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "Ошибка: удаленный накопитель не смонтирован %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "Ошибка синхронизации %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "Ошибка синхронизации %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "Ошибка при попытке контроля %s" msgid "ERROR Unhandled exception in request" msgstr "Ошибка. 
Необрабатываемая исключительная ситуация в запросе" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "Ошибка: ошибка __call__ в %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/" "%(device)s (операция будет повторена позднее)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/" "%(device)s (операция будет повторена позднее): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "Ошибка выполнения асинхронной передачи ожидающего файла с непредвиденным " "именем %s" msgid "ERROR auditing" msgstr "ОШИБКА контроля" #, python-format msgid "ERROR auditing: %s" msgstr "Ошибка контроля: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "Ошибка. Обновление контейнера не выполнена с %(ip)s:%(port)s/%(dev)s " "(сохранение асинхронного обновления будет выполнено позднее)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "Ошибка чтения ответа HTTP из %s" #, python-format msgid "ERROR reading db %s" msgstr "Ошибка чтения базы данных %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "Ошибка: команда rsync не выполнена с кодом %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "Ошибка синхронизации %(file)s с узлом %(node)s" msgid "ERROR trying to replicate" msgstr "Ошибка при попытке репликации" #, python-format msgid "ERROR while trying to clean up %s" msgstr "Ошибка при попытке очистки %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "Ошибка с сервером %(type)s %(ip)s:%(port)s/%(device)s, возврат: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "Ошибка при загрузки скрытых объектов из %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "Ошибка с удаленным сервером %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "Ошибка: не удалось получить пути к разделам накопителей: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "Ошибка: ошибка при извлечении сегментов" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "Ошибка: не удалось получить доступ к %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "Ошибка: не удалось запустить процесс контроля: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "Ошибка действия %(action)s для сохранения в кэш памяти: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "Ошибка кодирования в UTF-8: %s" msgid "Error hashing suffix" msgstr "Ошибка хэширования суффикса" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "Ошибка в %r с mtime_check_interval: %s" #, python-format msgid "Error limiting server %s" msgstr "Ошибка ограничения сервера %s" msgid "Error listing devices" msgstr "Ошибка при выводе списка устройств" #, python-format msgid "Error on render profiling results: %s" msgstr "Ошибка при выводе результатов профилирования: %s" msgid "Error parsing recon cache file" msgstr "Ошибка анализа файла 
кэша recon" msgid "Error reading recon cache file" msgstr "Ошибка чтения файла кэша recon" msgid "Error reading ringfile" msgstr "Ошибка при чтении ringfile" msgid "Error reading swift.conf" msgstr "Ошибка чтения swift.conf" msgid "Error retrieving recon data" msgstr "Ошибка при получении данных recon" msgid "Error syncing handoff partition" msgstr "Ошибка при синхронизации раздела передачи управления" msgid "Error syncing partition" msgstr "Ошибка синхронизации раздела" #, python-format msgid "Error syncing with node: %s" msgstr "Ошибка синхронизации с узлом %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Ошибка при попытке перекомпоновки стратегии %(path)s: номер#%(policy)d " "фрагмент#%(frag_index)s" msgid "Error: An error occurred" msgstr "Ошибка: произошла ошибка" msgid "Error: missing config path argument" msgstr "Ошибка: отсутствует аргумент пути конфигурации" #, python-format msgid "Error: unable to locate %s" msgstr "Ошибка: не удалось найти %s" msgid "Exception dumping recon cache" msgstr "Исключительная ситуация при создании кэша recon" msgid "Exception in top-level account reaper loop" msgstr "" "Исключительная ситуация в цикле чистильщика учетных записей верхнего уровня" msgid "Exception in top-level replication loop" msgstr "Исключительная ситуация в цикле репликации верхнего уровня" msgid "Exception in top-levelreconstruction loop" msgstr "Исключение в цикле реконструкции верхнего уровня" #, python-format msgid "Exception while deleting container %s %s" msgstr "Исключительная ситуация во время удаления контейнера %s %s" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "Исключительная ситуация во время удаления объекта %s %s %s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Исключительная ситуация в %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Исключительная ситуация в учетной записи %s" #, python-format msgid "Exception with containers for account %s" msgstr "Исключительная ситуация в контейнерах для учетной записи %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Исключительная ситуация в объектах для контейнера %(container)s для учетной " "записи %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Ожидаемое значение: 100-continue в %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Следующая цепочка CNAME для %(given_domain)s в %(found_domain)s" msgid "Found configs:" msgstr "Обнаружены конфигурации:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "В режиме передачи управления не все операции завершены. Принудительное " "завершение текущего прохода репликации." 
msgid "Host unreachable" msgstr "Хост недоступен" #, python-format msgid "Incomplete pass on account %s" msgstr "Не завершен проход для учетной записи %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Недопустимый формат X-Container-Sync-To %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Недопустимый хост %r в X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Недопустимая ожидающая запись %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Недопустимый ответ %(resp)s от %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Недопустимый ответ %(resp)s от %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Недопустимая схема %r в X-Container-Sync-To, допустимые значения: \"//\", " "\"http\" или \"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Принудительное завершение долго выполняющегося rsync: %s" msgid "Lockup detected.. killing live coros." msgstr "Обнаружена блокировка.. принудительное завершение работающих модулей." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Преобразовано %(given_domain)s в %(found_domain)s" #, python-format msgid "No %s running" msgstr "%s не выполняется" #, python-format msgid "No cluster endpoint for %r %r" msgstr "Отсутствует конечная точка кластера для %r %r" #, python-format msgid "No permission to signal PID %d" msgstr "Нет прав доступа для отправки сигнала в PID %d" #, python-format msgid "No policy with index %s" msgstr "Не найдено стратегии с индексом %s" #, python-format msgid "No realm key for %r" msgstr "Отсутствует ключ области для %r" #, python-format msgid "No space left on device for %s (%s)" msgstr "Не устройстве %s (%s) закончилось место" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Ограниченная ошибка узла %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Недостаточное число подтверждений с серверов объектов (получено %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Не найдено: %(sync_from)r => %(sync_to)r - объект " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Ничего не реконструировано за %s с." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Ничего не реплицировано за %s с." msgid "Object" msgstr "Объект" msgid "Object PUT" msgstr "Функция PUT объекта" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Функция PUT объекта возвратила 202 для 409: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Функция PUT объекта возвратила 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Контроль объекта (%(type)s) в режиме \"%(mode)s\" завершен: %(elapsed).02fs. 
" "Всего в карантине: %(quars)d, всего ошибок: %(errors)d, всего файлов/с: " "%(frate).2f, всего байт/с: %(brate).2f, время контроля: %(audit).2f, " "скорость: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Проверка объекта (%(type)s). После %(start_time)s: локально: успешно - " "%(passes)d, в карантине - %(quars)d, файлов с ошибками %(errors)d в секунду: " "%(frate).2f , байт/с: %(brate).2f, общее время: %(total).2f, время контроля: " "%(audit).2f, скорость: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Состояние контроля объекта: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Реконструкция объекта выполнена. (%.02f мин.)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Репликация объекта выполнена. (%.02f мин.)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Серверы объектов вернули несоответствующие etag: %s" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Сплошная однонитевая проверка обновлений объекта завершена: %(elapsed).02fs, " "%(success)s успешно, %(fail)s с ошибками" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Сплошная проверка обновлений объекта завершена: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Сплошная проверка обновлений объекта на устройстве %(device)s завершена: " "%(elapsed).02fs, успешно: %(success)s, ошибка: %(fail)s" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "В X-Container-Sync-To не разрешены параметры, запросы и фрагменты" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Время раздела: максимум: %(max).4fs, минимум: %(min).4fs, среднее: %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "Проход запущен; возможных контейнеров: %s; возможных объектов: %s" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "Проход выполнен за %ds; устарело объектов: %d" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "Проход выполняется до настоящего времени %ds; устарело объектов: %d" msgid "Path required in X-Container-Sync-To" msgstr "Требуется путь в X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Неполадка при очистке %s" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "Возникла проблема при очистке %s (%s)" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "Возникла неполадка при записи файла сохраняемого состояния %s (%s)" #, python-format msgid "Profiling Error: %s" msgstr "Ошибка профилирования: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s помещен в 
карантин в %(quar_path)s, так как не является " "каталогом" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s помещен в карантин в %(quar_path)s, так как не является " "каталогом" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "%s помещено в карантин %s из-за базы данных %s" #, python-format msgid "Quarantining DB %s" msgstr "БД %s помещена в карантин" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Протокол тайм-аута при ограничении скорости %(sleep)s для %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Удалено баз данных: %(remove)d" #, python-format msgid "Removing %s objects" msgstr "Удаление объектов %s" #, python-format msgid "Removing partition: %s" msgstr "Удаление раздела: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Удаление файла pid %(pid_file)s с ошибочным pid %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Удаление pid файла %s с неверным pid-ом" #, python-format msgid "Removing stale pid file %s" msgstr "Удаление устаревшего файла pid %s" msgid "Replication run OVER" msgstr "Репликация запущена поверх" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Возвращено 497 из-за черного списка: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Возвращено 498 для %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(максимальная задержка): %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Обнаружено изменение кольца. Принудительное завершение текущего прохода " "реконструкции." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Обнаружено кольцевое изменение. Принудительное завершение текущего прохода " "репликации." #, python-format msgid "Running %s once" msgstr "Однократное выполнение %s" msgid "Running object reconstructor in script mode." msgstr "Запуск утилиты реконструкции объектов в режиме скрипта." msgid "Running object replicator in script mode." msgstr "Запуск утилиты репликации объектов в режиме сценариев." #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "Сигнал: %s, pid: %s, сигнал: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "За %(time)s операций синхронизировано %(sync)s [удалено: %(delete)s, " "добавлено: %(put)s], пропущено: %(skip)s, ошибки: %(fail)s" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Выполнено проверок учетной записи: %(time)s, из них успешно: %(passed)s, с " "ошибками: %(failed)s " #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Выполнено проверок контейнера: %(time)s, из них успешно: %(pass)s, с " "ошибками: %(fail)s " #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s будет пропущен, так как он не смонтирован" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s будет пропущен, так как он не смонтирован" #, python-format msgid "Starting %s" msgstr "Запуск %s" msgid "Starting object reconstruction pass." msgstr "Запуск прохода реконструкции объектов." 
msgid "Starting object reconstructor in daemon mode." msgstr "Запуск утилиты реконструкции объектов в режиме демона." msgid "Starting object replication pass." msgstr "Запуск прохода репликации объектов." msgid "Starting object replicator in daemon mode." msgstr "Запуск утилиты репликации объектов в режиме демона." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Успешное выполнение rsync для %(src)s на %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Запрещен доступ к этому типу файла!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Общее число %(key)s для контейнера (%(total)s) не соответствует сумме " "%(key)s в стратегиях (%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Тайм-аут действия %(action)s для сохранения в кэш памяти: %(server)s" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Исключение по таймауту %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Попытка выполнения метода %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Попытка GET-запроса %(full_path)s" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "Попытка получения состояния %s операции PUT в %s" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Попытка получения конечного состояния PUT в %s" msgid "Trying to read during GET" msgstr "Попытка чтения во время операции GET" msgid "Trying to read during GET (retrying)" msgstr "Попытка чтения во время операции GET (выполняется повтор)" msgid "Trying to send to client" msgstr "Попытка отправки клиенту" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Попытка синхронизации суффиксов с %s" #, python-format msgid "Trying to write to %s" msgstr "Попытка записи в %s" msgid "UNCAUGHT EXCEPTION" msgstr "Необрабатываемая исключительная ситуация" #, python-format msgid "Unable to find %s config section in %s" msgstr "Не удалось найти раздел конфигурации %s в %s" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "Не удалось загрузить клиент из конфигурации: %r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Не удалось найти %s в libc. Оставлено как no-op." #, python-format msgid "Unable to locate config for %s" msgstr "Не удалось найти конфигурационный файл для %s" #, python-format msgid "Unable to locate config number %s for %s" msgstr "Не удается найти конфигурации с номером %s для %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Не удалось найти fallocate, posix_fallocate в libc. Оставлено как no-op." 
#, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "Не удалось выполнить функцию fsync() для каталога %s: %s" #, python-format msgid "Unable to read config from %s" msgstr "Не удалось прочитать конфигурацию из %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Синхронизация %(sync_from)r => %(sync_to)r без прав доступа" #, python-format msgid "Unexpected response: %s" msgstr "Непредвиденный ответ: %s" msgid "Unhandled exception" msgstr "Необработанная исключительная ситуация" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Неизвестное исключение в GET-запросе: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Отчет об обновлении для %(container)s %(dbfile)s не выполнен" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Отчет об обновлении отправлен для %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "Предупреждение: SSL должен быть включен только в целях тестирования. " "Используйте внешнее завершение SSL для развертывания в рабочем режиме." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "Предупреждение: не удалось изменить предельное значение для дескриптора " "файла. Запущен без прав доступа root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "Предупреждение: не удалось изменить предельное значение для числа процессов. " "Запущен без прав доступа root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "Предупреждение: не удалось изменить предельное значение для памяти. Запущен " "без прав доступа root?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "Система ожидала %s секунд для %s завершения; освобождение" #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "Система ожидала %s секунд для %s завершения; Принудительное завершение" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Предупреждение: не удается ограничить скорость без клиента с кэшированием " "памяти" #, python-format msgid "method %s is not allowed." msgstr "Метод %s не разрешен." msgid "no log file found" msgstr "Не найден файл протокола" msgid "odfpy not installed." msgstr "Библиотека odfpy не установлена." #, python-format msgid "plotting results failed due to %s" msgstr "Ошибка в результатах plotting из-за %s" msgid "python-matplotlib not installed." msgstr "Библиотека python-matplotlib не установлена." swift-2.7.0/swift/locale/zh_TW/0000775000567000056710000000000012675204211017443 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/zh_TW/LC_MESSAGES/0000775000567000056710000000000012675204211021230 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/zh_TW/LC_MESSAGES/swift.po0000664000567000056710000010160012675204037022730 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Jennifer , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-19 12:55+0000\n" "Last-Translator: Jennifer \n" "Language: zh-TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (Taiwan)\n" msgid "" "\n" "user quit" msgstr "" "\n" "使用者退出" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - 平行,%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "已檢查 %(checked)d 個字尾 - %(hashed).2f%% 個已雜湊,%(synced).2f%% 個已同步" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s 已回應為未裝載" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "在 %(time).2fs 內重新建構了 %(device)d/%(dtotal)d (%(dpercentage).2f%%) 個裝" "置的 %(reconstructed)d/%(total)d (%(percentage).2f%%) 個分割區(%(rate).2f/" "秒,剩餘 %(remaining)s)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "已抄寫 %(replicated)d/%(total)d (%(percentage).2f%%) 個分割區(在 " "%(time).2fs 內,%(rate).2f/秒,剩餘 %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s 個成功,%(failure)s 個失敗" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 針對 %(statuses)s 正在傳回 503" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d 未在執行中 (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) 似乎已停止" #, python-format msgid "%s already started..." msgstr "%s 已啟動..." 
#, python-format msgid "%s does not exist" msgstr "%s 不存在" #, python-format msgid "%s is not mounted" msgstr "未裝載 %s" #, python-format msgid "%s responded as unmounted" msgstr "%s 已回應為未裝載" #, python-format msgid "%s running (%s - %s)" msgstr "%s 在執行中 (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s:%s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由對等項目重設連線" #, python-format msgid ", %s containers deleted" msgstr ",已刪除 %s 個儲存器" #, python-format msgid ", %s containers possibly remaining" msgstr ",可能剩餘 %s 個儲存器" #, python-format msgid ", %s containers remaining" msgstr ",剩餘 %s 個儲存器" #, python-format msgid ", %s objects deleted" msgstr ",已刪除 %s 個物件" #, python-format msgid ", %s objects possibly remaining" msgstr ",可能剩餘 %s 個物件" #, python-format msgid ", %s objects remaining" msgstr ",剩餘 %s 個物件" #, python-format msgid ", elapsed: %.02fs" msgstr ",經歷時間:%.02fs" msgid ", return codes: " msgstr ",回覆碼:" msgid "Account" msgstr "帳戶" #, python-format msgid "Account %s has not been reaped since %s" msgstr "尚未回收帳戶 %s(自 %s 之後)" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "帳戶審核「一次性」模式已完成:%.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "帳戶審核通過已完成:%.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "已嘗試在 %(time).5f 秒內抄寫 %(count)d 個資料庫 (%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "%s 的審核失敗:%s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "不當的遠端同步回覆碼:%(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "開始帳戶審核「一次性」模式" msgid "Begin account audit pass." msgstr "開始帳戶審核通過。" msgid "Begin container audit \"once\" mode" msgstr "開始儲存器審核「一次性」模式" msgid "Begin container audit pass." msgstr "開始儲存器審核通過。" msgid "Begin container sync \"once\" mode" msgstr "開始儲存器同步「一次性」模式" msgid "Begin container update single threaded sweep" msgstr "開始儲存器更新單一執行緒清理" msgid "Begin container update sweep" msgstr "開始儲存器更新清理" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "開始物件審核 \"%s\" 模式 (%s%s)" msgid "Begin object update single threaded sweep" msgstr "開始物件更新單一執行緒清理" msgid "Begin object update sweep" msgstr "開始物件更新清理" #, python-format msgid "Beginning pass on account %s" msgstr "正在開始帳戶 %s 上的通過" msgid "Beginning replication run" msgstr "正在開始抄寫執行" msgid "Broker error trying to rollback locked connection" msgstr "嘗試回復已鎖定的連線時發生分配管理系統錯誤" #, python-format msgid "Can not access the file %s." msgstr "無法存取檔案 %s。" #, python-format msgid "Can not load profile data from %s." msgstr "無法從 %s 中載入設定檔資料。" #, python-format msgid "Client did not read from proxy within %ss" msgstr "用戶端未在 %s 秒內從 Proxy 中讀取" msgid "Client disconnected on read" msgstr "用戶端在讀取時中斷連線" msgid "Client disconnected without sending enough data" msgstr "用戶端已中斷連線,未傳送足夠的資料" msgid "Client disconnected without sending last chunk" msgstr "用戶端已中斷連線,未傳送最後一個片段" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "用戶端路徑 %(client)s 不符合物件 meta 資料%(meta)s 中儲存的路徑" msgid "" "Configuration option internal_client_conf_path not defined. 
Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "為定義配置選項 internal_client_conf_path。將使用預設配置,請參閱 internal-" "client.conf-sample 以取得選項" msgid "Connection refused" msgstr "連線遭拒" msgid "Connection timeout" msgstr "連線逾時" msgid "Container" msgstr "容器" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "儲存器審核「一次性」模式已完成:%.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "儲存器審核通過已完成:%.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "儲存器同步「一次性」模式已完成:%.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "儲存器更新單一執行緒清理已完成:%(elapsed).02fs,%(success)s 個成" "功,%(fail)s 個失敗,%(no_change)s 個無變更" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "儲存器更新清理已完成:%.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s 的儲存器更新清理已完成:%(elapsed).02fs,%(success)s 個成" "功,%(fail)s 個失敗,%(no_change)s 個無變更" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "嘗試 %s 秒後仍無法連結至 %s:%s" #, python-format msgid "Could not load %r: %s" msgstr "無法載入 %r:%s" #, python-format msgid "Data download error: %s" msgstr "資料下載錯誤:%s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "裝置通過已完成:%.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "目錄 %r 未對映至有效的原則 (%s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "錯誤:%(db_file)s:%(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "錯誤:%(status)d %(body)s 來自 %(type)s 伺服器" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "錯誤:%(status)d %(body)s 來自物件伺服器 re:%(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "錯誤:%(status)d 預期:100 繼續自物件伺服器" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "錯誤:%(status)d 正在嘗試從儲存器伺服器 %(method)s %(path)s" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將稍後重試):回應 " "%(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "錯誤:帳戶更新失敗:要求中的主機與裝置數目不同:\"%s\" 對 \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "錯誤:來自 %(host)s 的回應 %(status)s 不當" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "錯誤:用戶端讀取逾時 (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "錯誤:儲存器更新失敗(儲存以稍後進行非同步更新):%(status)d回應(來自 " "%(ip)s:%(port)s/%(dev)s)" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "錯誤:儲存器更新失敗:要求中的主機與裝置數目不同:\"%s\" 對 \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "錯誤:無法取得帳戶資訊 %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "錯誤:無法取得儲存器資訊 %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "錯誤:磁碟檔 
%(data_file)s 關閉失敗:%(exc)s:%(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "錯誤:異常狀況造成用戶端中斷連線" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "錯誤:將資料轉送至物件伺服器 %s 時發生異常狀況" msgid "ERROR Failed to get my own IPs?" msgstr "錯誤:無法取得我自己的 IP?" msgid "ERROR Insufficient Storage" msgstr "錯誤:儲存體不足" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "錯誤:物件 %(obj)s 審核失敗,已隔離:%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "錯誤:挑選問題,正在隔離 %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "錯誤:未裝載遠端磁碟機 %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "同步 %(db_file)s %(row)s 時發生錯誤" #, python-format msgid "ERROR Syncing %s" msgstr "同步 %s 時發生錯誤" #, python-format msgid "ERROR Trying to audit %s" msgstr "嘗試審核 %s 時發生錯誤" msgid "ERROR Unhandled exception in request" msgstr "錯誤:要求中有無法處理的異常狀況" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "錯誤:%(method)s %(path)s 發生呼叫錯誤" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將稍後重試)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將稍後重試):" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "錯誤:具有非預期名稱 %s 的非同步擱置檔案" msgid "ERROR auditing" msgstr "審核時發生錯誤" #, python-format msgid "ERROR auditing: %s" msgstr "審核時發生錯誤:%s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "錯誤:%(ip)s:%(port)s/%(dev)s 的儲存器更新失敗(儲存以稍後進行非同步更新)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "從 %s 讀取 HTTP 回應時發生錯誤" #, python-format msgid "ERROR reading db %s" msgstr "讀取資料庫 %s 時發生錯誤" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "錯誤:遠端同步失敗,%(code)s:%(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "同步 %(file)s 與節點 %(node)s 時發生錯誤" msgid "ERROR trying to replicate" msgstr "嘗試抄寫時發生錯誤" #, python-format msgid "ERROR while trying to clean up %s" msgstr "嘗試清除 %s 時發生錯誤" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s 伺服器發生錯誤:%(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "從 %s 載入抑制時發生錯誤:" #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "遠端伺服器發生錯誤:%(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "錯誤:無法取得磁碟機分割區的路徑:%s" msgid "ERROR: An error occurred while retrieving segments" msgstr "錯誤:擷取區段時發生錯誤" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "錯誤:無法存取 %(path)s:%(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "錯誤:無法執行審核:%s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "對 memcached %(server)s 執行%(action)s作業時發生錯誤" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "編碼為 UTF-8 時發生錯誤:%s" msgid "Error hashing suffix" msgstr "混合字尾時發生錯誤" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "在 mtime_check_interval 中,%r 發生錯誤:%s" #, python-format msgid "Error limiting server %s" msgstr "限制伺服器 %s 時發生錯誤" msgid "Error listing devices" msgstr "列出裝置時發生錯誤" #, python-format msgid "Error on 
render profiling results: %s" msgstr "呈現側寫結果時發生錯誤:%s" msgid "Error parsing recon cache file" msgstr "剖析 recon 快取檔案時發生錯誤" msgid "Error reading recon cache file" msgstr "讀取 recon 快取檔案時發生錯誤" msgid "Error reading ringfile" msgstr "讀取 ringfile 時發生錯誤" msgid "Error reading swift.conf" msgstr "讀取 swift.conf 時發生錯誤" msgid "Error retrieving recon data" msgstr "擷取 recon 資料時發生錯誤" msgid "Error syncing handoff partition" msgstr "同步遞交分割區時發生錯誤" msgid "Error syncing partition" msgstr "同步分割區時發生錯誤" #, python-format msgid "Error syncing with node: %s" msgstr "與節點同步時發生錯誤:%s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "嘗試重建 %(path)s 原則 #%(policy)d 分段 #%(frag_index)s 時發生錯誤" msgid "Error: An error occurred" msgstr "錯誤:發生錯誤" msgid "Error: missing config path argument" msgstr "錯誤:遺漏配置路徑引數" #, python-format msgid "Error: unable to locate %s" msgstr "錯誤:找不到 %s" msgid "Exception dumping recon cache" msgstr "傾出 recon 快取時發生異常狀況" msgid "Exception in top-level account reaper loop" msgstr "最上層帳戶 Reaper 迴圈發生異常狀況" msgid "Exception in top-level replication loop" msgstr "最上層抄寫迴圈中發生異常狀況" msgid "Exception in top-levelreconstruction loop" msgstr "最上層重新建構迴圈中發生異常狀況" #, python-format msgid "Exception while deleting container %s %s" msgstr "刪除儲存器 %s %s 時發生異常狀況" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "刪除物件 %s %s %s 時發生異常狀況" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 發生異常狀況" #, python-format msgid "Exception with account %s" msgstr "帳戶 %s 發生異常狀況" #, python-format msgid "Exception with containers for account %s" msgstr "帳戶 %s 的儲存器發生異常狀況" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "帳戶 %(account)s 儲存器 %(container)s 的物件發生異常狀況" #, python-format msgid "Expect: 100-continue on %s" msgstr "預期 100 - 在 %s 上繼續" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "遵循 %(given_domain)s 到 %(found_domain)s 的 CNAME 鏈" msgid "Found configs:" msgstr "找到配置:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "「遞交作業最先」模式仍有剩餘的遞交作業。正在中斷現行抄寫傳遞。" msgid "Host unreachable" msgstr "無法抵達主機" #, python-format msgid "Incomplete pass on account %s" msgstr "帳戶 %s 上的通過未完成" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "無效的 X-Container-Sync-To 格式 %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To 中的主機 %r 無效" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無效的擱置項目 %(file)s:%(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "來自 %(full_path)s 的回應 %(resp)s 無效" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "來自 %(ip)s 的回應 %(resp)s 無效" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To 中的架構 %r 無效,必須是 \"//\"、\"http\" 或\"https\"。" #, python-format msgid "Killing long-running rsync: %s" msgstr "正在結束長時間執行的遠端同步:%s" msgid "Lockup detected.. killing live coros." 
msgstr "偵測到鎖定。正在結束即時 coro。" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "已將 %(given_domain)s 對映至 %(found_domain)s" #, python-format msgid "No %s running" msgstr "沒有 %s 在執行中" #, python-format msgid "No cluster endpoint for %r %r" msgstr "沒有 %r %r 的叢集端點" #, python-format msgid "No permission to signal PID %d" msgstr "沒有信號 PID %d 的許可權" #, python-format msgid "No policy with index %s" msgstr "沒有具有索引 %s 的原則" #, python-format msgid "No realm key for %r" msgstr "沒有 %r 的範圍金鑰" #, python-format msgid "No space left on device for %s (%s)" msgstr "裝置上沒有用於 %s 的剩餘空間 (%s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "節點錯誤限制 %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "未確認足夠的物件伺服器(已取得 %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "找不到 %(sync_from)r => %(sync_to)r - 物件%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%s 秒未重新建構任何內容。" #, python-format msgid "Nothing replicated for %s seconds." msgstr "未抄寫任何項目達 %s 秒。" msgid "Object" msgstr "物件" msgid "Object PUT" msgstr "物件 PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "物件 PUT 針對 409 正在傳回 202:%(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "物件 PUT 正在傳回 412,%(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "物件審核 (%(type)s) \"%(mode)s\" 模式已完成:%(elapsed).02fs。已隔離總計:" "%(quars)d,錯誤總計:%(errors)d,檔案/秒總計:%(frate).2f,位元組/秒總計:" "%(brate).2f,審核時間:%(audit).2f,速率:%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "物件審核 (%(type)s)。自 %(start_time)s 以來:本端:%(passes)d 個已通" "過,%(quars)d 個已隔離,%(errors)d 個錯誤,檔案/秒:%(frate).2f,位元組數/" "秒:%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:" "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "物件審核統計資料:%s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "物件重新建構完成(一次性)。(%.02f 分鐘)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "物件重新建構完成。(%.02f 分鐘)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "物件抄寫完成(一次性)。(%.02f 分鐘)" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "物件抄寫完成。(%.02f 分鐘)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "物件伺服器已傳回 %s 個不符 etag" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "物件更新單一執行緒清理已完成:%(elapsed).02fs,%(success)s 個成功,%(fail)s " "個失敗" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "物件更新清理已完成:%.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "%(device)s 的物件更新清理已完成:%(elapsed).02fs,%(success)s個成" "功,%(fail)s 個失敗" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-To 中不容許參數、查詢及片段" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "分割區時間:最大 %(max).4fs,最小 %(min).4fs,中間 %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "通過正在開始;%s 個可能儲存器;%s 個可能物件" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "已完成通過 %ds 個;%d 個物件已過期" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "目前為止通過 %ds 個;%d 個物件過期" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To 中需要路徑" #, python-format msgid "Problem cleaning up %s" msgstr "清除 %s 時發生問題" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "清除 %s 時發生問題 (%s)" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "寫入可延續狀態檔 %s 時發生問題 (%s)" #, python-format msgid "Profiling Error: %s" msgstr "側寫錯誤:%s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "已將 %(hsh_path)s 隔離至 %(quar_path)s,原因是它不是目錄" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "已將 %(object_path)s 隔離至 %(quar_path)s,原因是它不是目錄" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "已將 %s 隔離至 %s,原因是 %s 資料庫" #, python-format msgid "Quarantining DB %s" msgstr "正在隔離資料庫 %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "%(account)s/%(container)s/%(object)s 的 ratelimit 休眠日誌:%(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "已移除 %(remove)d 個資料庫" #, python-format msgid "Removing %s objects" msgstr "正在移除 %s 物件" #, python-format msgid "Removing partition: %s" msgstr "正在移除分割區:%s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "正在移除具有錯誤 PID %(pid)d 的 PID 檔 %(pid_file)s" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "正在移除具有無效 PID 的 PID 檔 %s" #, python-format msgid "Removing stale pid file %s" msgstr "正在移除過時 PID 檔案 %s" msgid "Replication run OVER" msgstr "抄寫執行結束" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "由於黑名單,正在傳回 497:%s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "正在將 %(meth)s 的 498 傳回至 %(acc)s/%(cont)s/%(obj)s。Ratelimit(休眠上" "限)%(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "偵測到環變更。正在中斷現行重新建構傳遞。" msgid "Ring change detected. Aborting current replication pass." msgstr "偵測到環變更。正在中斷現行抄寫傳遞。" #, python-format msgid "Running %s once" msgstr "正在執行 %s 一次" msgid "Running object reconstructor in script mode." msgstr "正在 Script 模式下執行物件重新建構器。" msgid "Running object replicator in script mode." 
msgstr "正在 Script 模式下執行物件抄寫器" #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "信號 %s PID:%s 信號:%s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "自 %(time)s 以來:已同步 %(sync)s 個 [已刪除 [%(delete)s 個,已放置 %(put)s " "個],已跳過 %(skip)s 個,%(fail)s 個失敗" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "自 %(time)s 以來:帳戶審核:%(passed)s 個已通過審核,%(failed)s 個失敗審核" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "自 %(time)s 以來:儲存器審核:%(pass)s 個已通過審核,%(fail)s 個失敗審核" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "正在跳過 %(device)s,原因是它未裝載" #, python-format msgid "Skipping %s as it is not mounted" msgstr "正在跳過 %s,原因是它未裝載" #, python-format msgid "Starting %s" msgstr "正在啟動 %s" msgid "Starting object reconstruction pass." msgstr "正在啟動物件重新建構傳遞。" msgid "Starting object reconstructor in daemon mode." msgstr "正在常駐程式模式下啟動物件重新建構器。" msgid "Starting object replication pass." msgstr "正在啟動物件抄寫傳遞。" msgid "Starting object replicator in daemon mode." msgstr "正在常駐程式模式下啟動物件抄寫器。" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "已順利遠端同步 %(dst)s 中的 %(src)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "此檔案類型禁止存取!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "儲存器的 %(key)s 總計 (%(total)s) 不符合原則中的 %(key)s 總和 (%(sum)s) " #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "對 memcached %(server)s 執行%(action)s作業時逾時" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 發生逾時異常狀況" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "正在嘗試 %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "正在嘗試對 %(full_path)s 執行 GET 動作" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "正在嘗試使 PUT 的 %s 狀態為 %s" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "正在嘗試使 PUT 的最終狀態為 %s" msgid "Trying to read during GET" msgstr "正在嘗試於 GET 期間讀取" msgid "Trying to read during GET (retrying)" msgstr "正在嘗試於 GET 期間讀取(正在重試)" msgid "Trying to send to client" msgstr "正在嘗試傳送至用戶端" #, python-format msgid "Trying to sync suffixes with %s" msgstr "正在嘗試與 %s 同步字尾" #, python-format msgid "Trying to write to %s" msgstr "正在嘗試寫入至 %s" msgid "UNCAUGHT EXCEPTION" msgstr "未捕捉的異常狀況" #, python-format msgid "Unable to find %s config section in %s" msgstr "找不到 %s 配置區段(在 %s 中)" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "無法從配置載入內部用戶端:%r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "在 libc 中找不到 %s。保留為 no-op。" #, python-format msgid "Unable to locate config for %s" msgstr "找不到 %s 的配置" #, python-format msgid "Unable to locate config number %s for %s" msgstr "找不到配置號碼 %s(針對 %s)" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "在 libc 中找不到 fallocate、posix_fallocate。保留為 no-op。" #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "無法在目錄 %s 上執行 fsync():%s" #, python-format msgid "Unable to read config from %s" msgstr "無法從 %s 讀取配置" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未鑑別 %(sync_from)r => %(sync_to)r" #, python-format msgid "Unexpected response: %s" msgstr "非預期的回應:%s" msgid "Unhandled exception" msgstr "無法處理的異常狀況" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "嘗試執行 GET 動作時發生不明異常狀況:%(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s 的更新報告失敗" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "已傳送 %(container)s %(dbfile)s 的更新報告" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "警告:應該僅啟用 SSL 以用於測試目的。使用外部SSL 終止以進行正式作業部署。" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:無法修改檔案描述子限制。以非 root 使用者身分執行?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:無法修改處理程序數上限限制。以非 root 使用者身分執行?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:無法修改記憶體限制。以非 root 使用者身分執行?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "已等待 %s 秒以讓 %s 當掉;正在放棄" #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "已等待 %s 秒以讓 %s 當掉" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:無法在沒有 memcached 用戶端的情況下限制速率" #, python-format msgid "method %s is not allowed." msgstr "不容許方法 %s。" msgid "no log file found" msgstr "找不到日誌檔" msgid "odfpy not installed." msgstr "未安裝 odfpy。" #, python-format msgid "plotting results failed due to %s" msgstr "由於 %s,繪製結果失敗" msgid "python-matplotlib not installed." msgstr "未安裝 python-matplotlib。" swift-2.7.0/swift/locale/tr_TR/0000775000567000056710000000000012675204211017442 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/tr_TR/LC_MESSAGES/0000775000567000056710000000000012675204211021227 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/tr_TR/LC_MESSAGES/swift.po0000664000567000056710000010041712675204037022734 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # İşbaran Akçayır , 2015 # OpenStack Infra , 2015. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-04 07:42+0000\n" "Last-Translator: İşbaran Akçayır \n" "Language: tr-TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Turkish (Turkey)\n" msgid "" "\n" "user quit" msgstr "" "\n" "kullanıcı çıktı" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sonek kontrol edildi - %(hashed).2f%% özetlenen, %(synced).2f%% " "eşzamanlanan" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s bağlı değil olarak yanıt verdi" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(device)d/%(dtotal)d (%(dpercentage).2f%%) aygıtın %(reconstructed)d/" "%(total)d (%(percentage).2f%%) bölümü %(time).2fs (%(rate).2f/sn, " "%(remaining)s kalan) içinde yeniden oluşturuldu" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) bölüm %(time).2fs (%(rate).2f/" "sn, %(remaining)s kalan) içinde çoğaltıldı" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s başarı, %(failure)s başarısızlık" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s %(statuses)s için 503 döndürüyor" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d çalışmıyor (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) durmuş gibi görünüyor" #, python-format msgid "%s already started..." msgstr "%s zaten başlatıldı..." 
#, python-format msgid "%s does not exist" msgstr "%s mevcut değil" #, python-format msgid "%s is not mounted" msgstr "%s bağlı değil" #, python-format msgid "%s responded as unmounted" msgstr "%s bağlı değil olarak yanıt verdi" #, python-format msgid "%s running (%s - %s)" msgstr "%s çalışıyor (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Bağlantı eş tarafından sıfırlandı" #, python-format msgid ", %s containers deleted" msgstr ", %s kap silindi" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s kap kaldı muhtemelen" #, python-format msgid ", %s containers remaining" msgstr ", %s kap kaldı" #, python-format msgid ", %s objects deleted" msgstr ", %s nesne silindi" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s nesne kaldı muhtemelen" #, python-format msgid ", %s objects remaining" msgstr ", %s nesne kaldı" #, python-format msgid ", elapsed: %.02fs" msgstr ", geçen süre: %.02fs" msgid ", return codes: " msgstr ", dönen kodlar: " msgid "Account" msgstr "Hesap" #, python-format msgid "Account %s has not been reaped since %s" msgstr "Hesap %s %s'den beri biçilmedi" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Hesap denetimi \"bir kere\" kipi tamamlandı: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Hesap denetimi geçişi tamamlandı: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(count)d db %(time).5f saniyede çoğaltılmaya çalışıldı (%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "Denetim %s için başarısız: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Kötü rsync dönüş kodu: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Hesap denetimi \"bir kere\" kipini başlat" msgid "Begin account audit pass." msgstr "Hesap denetimi başlatma geçildi." msgid "Begin container audit \"once\" mode" msgstr "Kap denetimine \"bir kere\" kipinde başla" msgid "Begin container audit pass." msgstr "Kap denetimi geçişini başlat." msgid "Begin container sync \"once\" mode" msgstr "Kap eşzamanlamayı \"bir kere\" kipinde başlat" msgid "Begin container update single threaded sweep" msgstr "Kap güncelleme tek iş iplikli süpürmeye başla" msgid "Begin container update sweep" msgstr "Kap güncelleme süpürmesine başla" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "Nesne denetimini \"%s\" kipinde başlat (%s%s)" msgid "Begin object update single threaded sweep" msgstr "Nesne güncelleme tek iş iplikli süpürmeye başla" msgid "Begin object update sweep" msgstr "Nesne güncelleme süpürmesine başla" #, python-format msgid "Beginning pass on account %s" msgstr "%s hesabı üzerinde geçiş başlatılıyor" msgid "Beginning replication run" msgstr "Çoğaltmanın çalıştırılmasına başlanıyor" msgid "Broker error trying to rollback locked connection" msgstr "Kilitli bağlantı geri alınmaya çalışılırken vekil hatası" #, python-format msgid "Can not access the file %s." msgstr "%s dosyasına erişilemiyor." #, python-format msgid "Can not load profile data from %s." msgstr "%s'den profil verisi yüklenemiyor." 
#, python-format msgid "Client did not read from proxy within %ss" msgstr "İstemci %ss içinde vekilden okumadı" msgid "Client disconnected on read" msgstr "İstemci okuma sırasında bağlantıyı kesti" msgid "Client disconnected without sending enough data" msgstr "İstemci yeterli veri göndermeden bağlantıyı kesti" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "İstemci yolu %(client)s nesne metadata'sında kayıtlı yol ile eşleşmiyor " "%(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Yapılandırma seçeneği internal_client_conf_path belirtilmemiş. Varsayılan " "yapılandırma kullanılıyor, seçenekleri çin internal-client.conf-sample'a " "bakın" msgid "Connection refused" msgstr "Bağlantı reddedildi" msgid "Connection timeout" msgstr "Bağlantı zaman aşımına uğradı" msgid "Container" msgstr "Kap" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Kap denetimi \"bir kere\" kipinde tamamlandı: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Kap denetim geçişi tamamlandı: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Kap eşzamanlama \"bir kere\" kipinde tamamlandı: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Kap güncelleme tek iş iplikli süpürme tamamlandı: %(elapsed).02fs, " "%(success)s başarılı, %(fail)s başarısız, %(no_change)s değişiklik yok" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Kap güncelleme süpürme tamamlandı: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s in kap güncelleme süpürmesi tamamlandı: %(elapsed).02fs, " "%(success)s başarılı, %(fail)s başarısız, %(no_change)s değişiklik yok" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "%s:%s'e bağlanılamadı, %s saniye beklendi" #, python-format msgid "Could not load %r: %s" msgstr "%r yüklenemedi: %s" #, python-format msgid "Data download error: %s" msgstr "Veri indirme hatası: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Aygıtlar geçişi tamamlandı: %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "Dizin %r geçerli bir ilkeye eşleştirilmemiş (%s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "HATA %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "HATA %(status)d %(body)s %(type)s Sunucudan" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "HATA %(status)d %(body)s Nesne Sunucu re'den: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "HATA %(status)d Beklenen: 100-Nesne Sunucusundan devam et" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "HATA %(status)d Kap Sunucusundan %(method)s %(path)s denenirken" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "HATA %(ip)s:%(port)s/%(device)s ile hesap 
güncelleme başarısız (sonra tekrar " "denenecek): Yanıt %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "HATA Hesap güncelleme başarısız: istekte farklı sayıda istemci ve aygıt " "var: \"%s\" \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "HATA %(host)s dan kötü yanıt %(status)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "HATA İstemci okuma zaman aşımına uğradı (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "HATA Kap güncelleme başarısız (daha sonraki async güncellemesi için " "kaydediliyor): %(ip)s:%(port)s/%(dev)s den %(status)d yanıtı" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "HATA Kap güncelleme başarısız: istekte farklı sayıda istemci ve aygıt var: " "\"%s\" e karşı \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "HATA hesap bilgisi %s alınamadı" #, python-format msgid "ERROR Could not get container info %s" msgstr "HATA %s kap bilgisi alınamadı" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "HATA %(data_file)s disk dosyası kapatma başarısız: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "HATA İstisna istemci bağlantısının kesilmesine neden oluyor" msgid "ERROR Failed to get my own IPs?" msgstr "Kendi IP'lerimi alırken HATA?" msgid "ERROR Insufficient Storage" msgstr "HATA Yetersiz Depolama" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "HATA Nesne %(obj)s denetimde başarısız oldu ve karantinaya alındı: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "HATA Picke problemi, %s karantinaya alınıyor" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "HATA Uzak sürücü bağlı değil %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "HATA %(db_file)s %(row)s eşzamanlamada" #, python-format msgid "ERROR Syncing %s" msgstr "HATA %s Eşzamanlama" #, python-format msgid "ERROR Trying to audit %s" msgstr "HATA %s denetimi denemesinde" msgid "ERROR Unhandled exception in request" msgstr "HATA İstekte ele alınmayan istisna var" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ hatası %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme başarısız (sonra " "yeniden denenecek)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "HATA hesap güncelleme başarısız %(ip)s:%(port)s/%(device)s (sonra tekrar " "denenecek):" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "HATA beklenmeyen isimli async bekleyen dosya %s" msgid "ERROR auditing" msgstr "denetlemede HATA" #, python-format msgid "ERROR auditing: %s" msgstr "HATA denetim: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "HATA kap güncelleme %(ip)s:%(port)s/%(dev)s ile başarısız oldu (sonraki " "async güncellemesi için kaydediliyor)" #, python-format msgid "ERROR reading HTTP 
response from %s" msgstr "%s'den HTTP yanıtı okumada HATA" #, python-format msgid "ERROR reading db %s" msgstr "%s veri tabanı okumada HATA" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "HATA rsync %(code)s ile başarısız oldu: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "%(node)s düğümlü %(file)s eş zamanlamada HATA" msgid "ERROR trying to replicate" msgstr "Çoğaltmaya çalışmada HATA" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s temizlenmeye çalışırken HATA" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "HATA %(type)s sunucusu %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "HATA %s den baskılamaların yüklenmesinde: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "HATA uzuk sunucuda %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "HATA: Sürücü bölümlerine olan yollar alınamadı: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "HATA: Dilimler alınırken bir hata oluştu" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "HATA: %(path)s e erişilemiyor: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "HATA: Denetim çalıştırılamıyor: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "Memcached'e hata %(action)s: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "UTF-8 ile kodlama hatası: %s" msgid "Error hashing suffix" msgstr "Sonek özetini çıkarmada hata" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "mtime_check_interval ile %r de hata: %s" #, python-format msgid "Error limiting server %s" msgstr "%s sunucusu sınırlandırılırken hata" msgid "Error listing devices" msgstr "Aygıtları listelemede hata" #, python-format msgid "Error on render profiling results: %s" msgstr "Profilleme sonuçlarının gerçeklenmesinde hata: %s" msgid "Error parsing recon cache file" msgstr "Recon zula dosyasını ayrıştırmada hata" msgid "Error reading recon cache file" msgstr "Recon zula dosyası okumada hata" msgid "Error reading ringfile" msgstr "Halka dosyası okunurken hata" msgid "Error reading swift.conf" msgstr "swift.conf okunurken hata" msgid "Error retrieving recon data" msgstr "Recon verisini almada hata" msgid "Error syncing handoff partition" msgstr "Devir bölümünü eş zamanlamada hata" msgid "Error syncing partition" msgstr "Bölüm eşzamanlamada hata" #, python-format msgid "Error syncing with node: %s" msgstr "Düğüm ile eş zamanlamada hata: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Yeniden inşa denenirken hata %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Hata: Bir hata oluştu" msgid "Error: missing config path argument" msgstr "Hata: yapılandırma yolu değişkeni eksik" #, python-format msgid "Error: unable to locate %s" msgstr "Hata: %s bulunamıyor" msgid "Exception dumping recon cache" msgstr "Yeniden bağlanma zulasının dökümünde istisna" msgid "Exception in top-level account reaper loop" msgstr "Üst seviye hesap biçme döngüsünde istisna" msgid "Exception in top-level replication loop" msgstr "Üst seviye çoğaltma döngüsünde istisna" msgid "Exception in top-levelreconstruction loop" msgstr "Üst seviye yeniden oluşturma döngüsünde istisna" 
#, python-format msgid "Exception while deleting container %s %s" msgstr "%s %s kabı silinirken istisna" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "%s %s %s nesnesi silinirken istisna" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ile istisna" #, python-format msgid "Exception with account %s" msgstr "%s hesabında istisna" #, python-format msgid "Exception with containers for account %s" msgstr "%s hesabı için kaplarla ilgili istisna" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "%(account)s hesabı için %(container)s kabı için nesneler için istisna" #, python-format msgid "Expect: 100-continue on %s" msgstr "Beklenen: 100-%s üzerinden devam et" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s den %(found_domain)s e CNAME zinciri takip ediliyor" msgid "Found configs:" msgstr "Yapılandırmalar bulundu:" msgid "Host unreachable" msgstr "İstemci erişilebilir değil" #, python-format msgid "Incomplete pass on account %s" msgstr "%s hesabından tamamlanmamış geçiş" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Geçersix X-Container-Sync-To biçimi %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To'da geçersiz istemci %r" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Geçersiz bekleyen girdi %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "%(full_path)s den geçersiz yanıt %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s den geçersiz yanıt %(resp)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To'da geçersiz şema %r, \"//\", \"http\", veya \"https\" " "olmalı." #, python-format msgid "Killing long-running rsync: %s" msgstr "Uzun süre çalışan rsync öldürülüyor: %s" msgid "Lockup detected.. killing live coros." msgstr "Kilitleme algılandı.. canlı co-rutinler öldürülüyor." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s %(found_domain)s eşleştirildi" #, python-format msgid "No %s running" msgstr "Çalışan %s yok" #, python-format msgid "No cluster endpoint for %r %r" msgstr "%r %r için küme uç noktası yok" #, python-format msgid "No permission to signal PID %d" msgstr "%d PID'ine sinyalleme izni yok" #, python-format msgid "No policy with index %s" msgstr "%s indisine sahip ilke yok" #, python-format msgid "No realm key for %r" msgstr "%r için realm anahtarı yok" #, python-format msgid "No space left on device for %s (%s)" msgstr "Aygıtta %s için boş alan kalmadı (%s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Düğüm hatası sınırlandı %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Yeterince nesne sunucu ack'lenmedi (%d alındı)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Bulunamadı %(sync_from)r => %(sync_to)r - nesne %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%s saniye boyunca hiçbir şey yeniden oluşturulmadı." #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s saniyedir hiçbir şey çoğaltılmadı." 
msgid "Object" msgstr "Nesne" msgid "Object PUT" msgstr "Nesne PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "Nesne PUT 409 için 202 döndürüyor: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Nesne PUT 412 döndürüyor, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Nesne denetimi (%(type)s) \"%(mode)s\" kipinde tamamlandı: %(elapsed).02fs. " "Toplam karantina: %(quars)d, Toplam hata: %(errors)d, Toplam dosya/sn: " "%(frate).2f, Toplam bayt/sn: %(brate).2f, Denetleme zamanı: %(audit).2f, " "Oran: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Nesne denetim istatistikleri: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Nesne yeniden oluşturma tamamlandı (bir kere). (%.02f dakika)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Nesne yeniden oluşturma tamamlandı. (%.02f dakika)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Nesne çoğaltma tamamlandı (bir kere). (%.02f dakika)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Nesne çoğaltma tamamlandı. (%.02f dakika)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Nesne sunucuları %s eşleşmeyen etag döndürdü" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Nesne güncelleme tek iş iplikli süpürme tamamlandı: %(elapsed).02fs, " "%(success)s başarılı, %(fail)s başarısız" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Nesne güncelleme süpürmesi tamamlandı: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "%(device)s ın nesne güncelleme süpürmesi tamamlandı: %(elapsed).02fs, " "%(success)s başarılı, %(fail)s başarısız" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-To'da parametre, sorgular, ve parçalara izin verilmez" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Bölüm zamanları: azami %(max).4fs, asgari %(min).4fs, ortalama %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "Geçiş başlıyor; %s olası kap; %s olası nesne" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "Geçiş %ds de tamamlandı; %d nesnenin süresi doldu" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "Şimdiye kadarki geçiş %ds; %d nesnenin süresi doldu" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To'de yol gerekli" #, python-format msgid "Problem cleaning up %s" msgstr "%s temizliğinde problem" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "%s temizlemede problem (%s)" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "Dayanıklı durum dosyas %s ile ilgili problem (%s)" #, python-format msgid "Profiling Error: %s" msgstr "Profilleme Hatası: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a 
directory" msgstr "%(hsh_path)s %(quar_path)s karantinasına alındı çünkü bir dizin değil" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Bir dizin olmadığından %(object_path)s %(quar_path)s e karantinaya alındı" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "%s %s'e karantinaya alındı %s veri tabanı sebebiyle" #, python-format msgid "Quarantining DB %s" msgstr "DB %s karantinaya alınıyor" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Oran sınırı uyku kaydı: %(account)s/%(container)s/%(object)s için %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d db silindi" #, python-format msgid "Removing %s objects" msgstr "%s nesne kaldırılıyor" #, python-format msgid "Removing partition: %s" msgstr "Bölüm kaldırılıyor: %s" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Geçersiz pid'e sahip pid dosyası %s siliniyor" #, python-format msgid "Removing stale pid file %s" msgstr "Askıdaki pid dosyası siliniyor %s" msgid "Replication run OVER" msgstr "Çoğaltma çalışması BİTTİ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Kara listeleme yüzünden 497 döndürülüyor: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s ye %(meth)s için 498 döndürülüyor. Oran sınırı " "(Azami uyku) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Zincir değişikliği algılandı. Mevcut yeniden oluşturma geçişi iptal ediliyor." msgid "Ring change detected. Aborting current replication pass." msgstr "Zincir değişimi algılandı. Mevcut çoğaltma geçişi iptal ediliyor." #, python-format msgid "Running %s once" msgstr "%s bir kere çalıştırılıyor" msgid "Running object reconstructor in script mode." msgstr "Nesne yeniden oluşturma betik kipinde çalıştırılıyor." msgid "Running object replicator in script mode." msgstr "Nesne çoğaltıcı betik kipinde çalıştırılıyor." #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "Sinyal %s pid: %s sinyal: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s den beri: %(sync)s eşzamanlandı [%(delete)s silme, %(put)s koyma], " "%(skip)s atlama, %(fail)s başarısız" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "%(time)s den beri: Hesap denetimleri: %(passed)s denetimi geçti, %(failed)s " "denetimi geçemedi" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "%(time)s den beri: Kap denetimleri: %(pass)s denetimi geçti, %(fail)s " "denetimde başarısız" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Bağlı olmadığından %(device)s atlanıyor" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Bağlı olmadığından %s atlanıyor" #, python-format msgid "Starting %s" msgstr "%s başlatılıyor" msgid "Starting object reconstruction pass." msgstr "Nesne yeniden oluşturma geçişi başlatılıyor." msgid "Starting object reconstructor in daemon mode." msgstr "Nesne yeniden oluşturma artalan işlemi kipinde başlatılıyor." msgid "Starting object replication pass." msgstr "Nesne çoğaltma geçişi başlatılıyor." msgid "Starting object replicator in daemon mode." 
msgstr "Nesne çoğaltıcı artalan işlemi kipinde başlatılıyor." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s (%(time).03f) de %(src)s başarılı rsync'i" msgid "The file type are forbidden to access!" msgstr "Dosya türüne erişim yasaklanmış!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "(%(total)s) kabı için %(key)s toplamı ilkeler arasındaki %(key)s toplamıyla " "eşleşmiyor (%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Memcached'e zaman aşımı %(action)s: %(server)s" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ile zaman aşımı istisnası" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s deneniyor" #, python-format msgid "Trying to GET %(full_path)s" msgstr "%(full_path)s GET deneniyor" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "%s'e PUT'un %s durumu alınmaya çalışılıyor" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "%s'e PUT için son durum alınmaya çalışılıyor" msgid "Trying to read during GET" msgstr "GET sırasında okuma deneniyor" msgid "Trying to read during GET (retrying)" msgstr "GET sırasında okuma deneniyor (yeniden deneniyor)" msgid "Trying to send to client" msgstr "İstemciye gönderilmeye çalışılıyor" #, python-format msgid "Trying to sync suffixes with %s" msgstr "%s e sahip son ekler eşzamanlanmaya çalışılıyor" #, python-format msgid "Trying to write to %s" msgstr "%s'e yazmaya çalışılıyor" msgid "UNCAUGHT EXCEPTION" msgstr "YAKALANMAYAN İSTİSNA" #, python-format msgid "Unable to find %s config section in %s" msgstr "%s yapılandırma kısmı %s'de bulunamıyor" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "Yapılandırmadan dahili istemci yüklenemedi: %r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "%s libc'de bulunamadı. No-op olarak çıkılıyor." #, python-format msgid "Unable to locate config for %s" msgstr "%s için yapılandırma bulunamıyor" #, python-format msgid "Unable to locate config number %s for %s" msgstr "Yapılandırma sayısı %s %s için bulunamıyor" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate libc'de bulunamadı. No-op olarak çıkılıyor." #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "%s dizininde fsynıc() yapılamıyor: %s" #, python-format msgid "Unable to read config from %s" msgstr "%s'den yapılandırma okunamıyor" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r yetki al" #, python-format msgid "Unexpected response: %s" msgstr "Beklenmeyen yanıt: %s" msgid "Unhandled exception" msgstr "Yakalanmamış istisna" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "GET sırasında bilinmeyen istisna: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s için güncelleme raporu başarısız" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s için güncelleme raporu gönderildi" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." 
msgstr "" "UYARI: SSL yalnızca test amaçlı etkinleştirilmelidir. Üretim için kurulumda " "harici SSL sonlandırma kullanın." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "UYARI: Dosya göstericisi sınırı değiştirilemiyor. Root değil misiniz?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "UYARI: Azami süreç limiti değiştirilemiyor. Root değil misiniz?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "UYARI: Hafıza sınırı değiştirilemiyor. Root değil misiniz?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "%s saniye %s'in ölmesi için beklendi; vaz geçiliyor" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Uyarı: Memcached istemcisi olmadan oran sınırlama yapılamaz" #, python-format msgid "method %s is not allowed." msgstr "%s metoduna izin verilmez." msgid "no log file found" msgstr "kayıt dosyası bulunamadı" msgid "odfpy not installed." msgstr "odfpy kurulu değil." #, python-format msgid "plotting results failed due to %s" msgstr "çizdirme sonuçlaru %s sebebiyle başarısız" msgid "python-matplotlib not installed." msgstr "python-matplotlib kurulu değil." swift-2.7.0/swift/locale/it/0000775000567000056710000000000012675204211017024 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/it/LC_MESSAGES/0000775000567000056710000000000012675204211020611 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/it/LC_MESSAGES/swift.po0000664000567000056710000010755512675204037022330 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Alessandra , 2016. #zanata # Remo Mattei , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 05:31+0000\n" "Last-Translator: Remo Mattei \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Italian\n" msgid "" "\n" "user quit" msgstr "" "\n" "l'utente è uscito" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffissi controllati - %(hashed).2f%% con hash, %(synced).2f%% " "sincronizzati" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s ha risposto come smontato" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partizioni di %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) dispositivi ricostruiti in %(time).2fs " "(%(rate).2f/sec, %(remaining)s rimanenti)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partizioni replicate in " "%(time).2fs (%(rate).2f/sec, %(remaining)s rimanenti)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s operazioni con esito positivo, %(failure)s errori" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s restituisce 503 per %(statuses)s" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d non in esecuzione (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) sembra essere stato arrestato" #, python-format msgid "%s already started..." msgstr "%s già avviato..." 
#, python-format msgid "%s does not exist" msgstr "%s non esiste" #, python-format msgid "%s is not mounted" msgstr "%s non è montato" #, python-format msgid "%s responded as unmounted" msgstr "%s ha risposto come smontato" #, python-format msgid "%s running (%s - %s)" msgstr "%s in esecuzione (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Connessione reimpostata dal peer" #, python-format msgid ", %s containers deleted" msgstr ", %s contenitori eliminati" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s contenitori probabilmente rimanenti" #, python-format msgid ", %s containers remaining" msgstr ", %s contenitori rimanenti" #, python-format msgid ", %s objects deleted" msgstr ", %s oggetti eliminati" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s oggetti probabilmente rimanenti" #, python-format msgid ", %s objects remaining" msgstr ", %s oggetti rimanenti" #, python-format msgid ", elapsed: %.02fs" msgstr ", trascorso: %.02fs" msgid ", return codes: " msgstr ", codici di ritorno: " msgid "Account" msgstr "Conto" #, python-format msgid "Account %s has not been reaped since %s" msgstr "Account %s non utilizzato da %s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Modalità \"once\" verifica account completata: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Trasmissione verifica account completata: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "È stato eseguito un tentativo di replicare %(count)d dbs in %(time).5f " "secondi (%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "Verifica non riuscita per %s: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Codice di ritorno rsync errato: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Avvio modalità \"once\" verifica account" msgid "Begin account audit pass." msgstr "Avvio trasmissione verifica account." msgid "Begin container audit \"once\" mode" msgstr "Avvio modalità \"once\" verifica contenitore" msgid "Begin container audit pass." msgstr "Avvio trasmissione verifica contenitore." msgid "Begin container sync \"once\" mode" msgstr "Avvio della modalità \"once\" di sincronizzazione contenitore" msgid "Begin container update single threaded sweep" msgstr "Avvio pulizia a singolo thread aggiornamento contenitore" msgid "Begin container update sweep" msgstr "Avvio pulizia aggiornamento contenitore" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "Avvio modalità \"%s\" verifica oggetto (%s%s)" msgid "Begin object update single threaded sweep" msgstr "Avvio pulizia a singolo thread aggiornamento oggetto" msgid "Begin object update sweep" msgstr "Avvio pulizia aggiornamento oggetto" #, python-format msgid "Beginning pass on account %s" msgstr "Avvio della trasmissione sull'account %s" msgid "Beginning replication run" msgstr "Avvio replica" msgid "Broker error trying to rollback locked connection" msgstr "" "Errore del broker durante il tentativo di eseguire il rollback della " "connessione bloccata" #, python-format msgid "Can not access the file %s." msgstr "Impossibile accedere al file %s." #, python-format msgid "Can not load profile data from %s." msgstr "Impossibile caricare i dati del profilo da %s." 
#, python-format msgid "Cannot read %s (%s)" msgstr "Non e' possibile leggere %s (%s)" #, python-format msgid "Cannot write %s (%s)" msgstr "Non e' possibile scriver %s (%s)" #, python-format msgid "Client did not read from proxy within %ss" msgstr "Il client non ha eseguito la lettura dal proxy in %ss" msgid "Client disconnected on read" msgstr "Client scollegato alla lettura" msgid "Client disconnected without sending enough data" msgstr "Client disconnesso senza inviare dati sufficienti" msgid "Client disconnected without sending last chunk" msgstr "Client disconnesso senza inviare l'ultima porzione" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Il percorso del client %(client)s non corrisponde al percorso memorizzato " "nei metadati dell'oggetto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Opzione di configurazione internal_client_conf_path non definita. Viene " "utilizzata la configurazione predefinita, vedere l'esempio internal-client." "conf-sample per le opzioni" msgid "Connection refused" msgstr "Connessione rifiutata" msgid "Connection timeout" msgstr "Timeout della connessione" msgid "Container" msgstr "Contenitore" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Modalità \"once\" verifica contenitore completata: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Trasmissione verifica contenitore completata: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" "Modalità \"once\" di sincronizzazione del contenitore completata: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Pulizia a singolo thread aggiornamento contenitore completata: " "%(elapsed).02fs, %(success)s operazioni con esito positivo, %(fail)s errori, " "%(no_change)s senza modifiche" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Pulizia aggiornamento contenitore completata: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Pulizia aggiornamento contenitore di %(path)s completata: %(elapsed).02fs, " "%(success)s operazioni con esito positivo, %(fail)s errori, %(no_change)s " "senza modifiche" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "" "Impossibile effettuare il bind a %s:%s dopo aver provato per %s secondi" #, python-format msgid "Could not load %r: %s" msgstr "Impossibile caricare %r: %s" #, python-format msgid "Data download error: %s" msgstr "Errore di download dei dati: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Trasmissione dei dispositivi completata: %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "La directory %r non è associata ad una politica valida (%s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERRORE %(status)d %(body)s dal server %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERRORE %(status)d 
%(body)s Dal server degli oggetti re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRORE %(status)d Previsto: 100-continue dal server degli oggetti" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" "ERRORE %(status)d Tentativo di %(method)s %(path)s dal server contenitore" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERRORE Aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente): Risposta " "%(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERRORE Aggiornamento dell'account non riuscito: numero differente di host e " "dispositivi nella richiesta: \"%s\" vs \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERRORE Risposta errata %(status)s da %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERRORE Timeout di lettura del client (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERRORE Aggiornamento del contenitore non riuscito (salvataggio per " "l'aggiornamento asincrono successivamente): %(status)d risposta da %(ip)s:" "%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERRORE Aggiornamento del contenitore non riuscito: numero differente di host " "e dispositivi nella richiesta: \"%s\" vs \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERRORE Impossibile ottenere le informazioni sull'account %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERRORE Impossibile ottenere le informazioni sul contenitore %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERRORE Errore di chiusura DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERRORE Eccezione che causa la disconnessione del client" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ERRORE Eccezione durante il trasferimento di dati nel server degli oggetti %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERRORE Impossibile ottenere i propri IP?" 
msgid "ERROR Insufficient Storage" msgstr "ERRORE Memoria insufficiente" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERRORE L'oggetto %(obj)s non ha superato la verifica ed è stato inserito " "nella quarantena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERRORE Problema relativo a pickle, inserimento di %s nella quarantena" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERRORE Unità remota non montata %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERRORE durante la sincronizzazione di %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERRORE durante la sincronizzazione di %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERRORE durante il tentativo di eseguire la verifica %s" msgid "ERROR Unhandled exception in request" msgstr "ERRORE Eccezione non gestita nella richiesta" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERRORE errore __call__ con %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERRORE file in sospeso asincrono con nome non previsto %s" msgid "ERROR auditing" msgstr "ERRORE durante la verifica" #, python-format msgid "ERROR auditing: %s" msgstr "ERRORE durante la verifica: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERRORE aggiornamento del contenitore non riuscito con %(ip)s:%(port)s/" "%(dev)s (salvataggio per aggiornamento asincrono successivamente)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERRORE durante la lettura della risposta HTTP da %s" #, python-format msgid "ERROR reading db %s" msgstr "ERRORE durante la lettura del db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERRORE rsync non riuscito con %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERRORE durante la sincronizzazione di %(file)s con il nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERRORE durante il tentativo di eseguire la replica" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERRORE durante il tentativo di ripulire %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "ERRORE relativo al server %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERRORE relativo al caricamento delle eliminazioni da %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERRORE relativo al server remoto %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERRORE: Impossibile ottenere i percorsi per gestire le partizioni: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "ERRORE: Si è verificato un errore 
durante il richiamo dei segmenti" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERRORE: Impossibile accedere a %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERRORE: Impossibile eseguire la verifica: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "Errore di %(action)s su memcached: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "Errore durante la codifica in UTF-8: %s" msgid "Error hashing suffix" msgstr "Errore durante l'hash del suffisso" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "Errore in %r con mtime_check_interval: %s" #, python-format msgid "Error limiting server %s" msgstr "Errore durante la limitazione del server %s" msgid "Error listing devices" msgstr "Errore durante l'elenco dei dispositivi" #, python-format msgid "Error on render profiling results: %s" msgstr "" "Errore durante la visualizzazione dei risultati della creazione dei profili: " "%s" msgid "Error parsing recon cache file" msgstr "Errore durante l'analisi del file della cache di riconoscimento" msgid "Error reading recon cache file" msgstr "Errore durante la lettura del file della cache di riconoscimento" msgid "Error reading ringfile" msgstr "Errore durante la lettura del ringfile" msgid "Error reading swift.conf" msgstr "Errore durante la lettura di swift.conf" msgid "Error retrieving recon data" msgstr "Errore durante il richiamo dei dati di riconoscimento" msgid "Error syncing handoff partition" msgstr "Errore durante la sincronizzazione della partizione di passaggio" msgid "Error syncing partition" msgstr "Errore durante la sincronizzazione della partizione" #, python-format msgid "Error syncing with node: %s" msgstr "Errore durante la sincronizzazione con il nodo: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Errore nel tentativo di ricreare %(path)s policy#%(policy)d frag#" "%(frag_index)s" msgid "Error: An error occurred" msgstr "Errore: Si è verificato un errore" msgid "Error: missing config path argument" msgstr "Errore: Argomento path della configurazione mancante" #, python-format msgid "Error: unable to locate %s" msgstr "Errore: impossibile individuare %s" msgid "Exception dumping recon cache" msgstr "Eccezione durante il dump della cache di recon" msgid "Exception in top-level account reaper loop" msgstr "Eccezione nel loop reaper dell'account di livello superiore" msgid "Exception in top-level replication loop" msgstr "Eccezione nel loop di replica di livello superiore" msgid "Exception in top-levelreconstruction loop" msgstr "Eccezione nel loop di ricostruzione di livello superiore" #, python-format msgid "Exception while deleting container %s %s" msgstr "Eccezione durante l'eliminazione del contenitore %s %s" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "Eccezione durante l'eliminazione dell'oggetto %s %s %s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Eccezione relativa a %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Eccezione relativa all'account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Eccezione relativa ai contenitori per l'account %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Eccezione relativa agli oggetti per il contenitore %(container)s per " "l'account 
%(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Previsto: 100-continue su %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" "Viene seguita la catena CNAME per %(given_domain)s verso %(found_domain)s" msgid "Found configs:" msgstr "Configurazioni trovate:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Nella prima modalità di passaggio ci sono ancora passaggi restanti. " "Interruzione del passaggio di replica corrente." msgid "Host unreachable" msgstr "Host non raggiungibile" #, python-format msgid "Incomplete pass on account %s" msgstr "Trasmissione non completa sull'account %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato X-Container-Sync-To non valido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host non valido %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Voce in sospeso non valida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Risposta non valida %(resp)s da %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Risposta non valida %(resp)s da %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Schema non valido %r in X-Container-Sync-To, deve essere \"//\", \"http\" " "oppure \"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Chiusura rsync ad elaborazione prolungata: %s" #, python-format msgid "Loading JSON from %s failed (%s)" msgstr "Caricamento JSON dal %s fallito (%s)" msgid "Lockup detected.. killing live coros." msgstr "Blocco rilevato... chiusura dei coros attivi." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s associato a %(found_domain)s" #, python-format msgid "No %s running" msgstr "Nessun %s in esecuzione" #, python-format msgid "No cluster endpoint for %r %r" msgstr "Nessun endpoint del cluster per %r %r" #, python-format msgid "No permission to signal PID %d" msgstr "Nessuna autorizzazione per la segnalazione del PID %d" #, python-format msgid "No policy with index %s" msgstr "Nessuna politica con indice %s" #, python-format msgid "No realm key for %r" msgstr "Nessuna chiave dell'area di autenticazione per %r" #, python-format msgid "No space left on device for %s (%s)" msgstr "Nessuno spazio rimasto sul dispositivo per %s (%s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Errore del nodo limitato %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Server degli oggetti riconosciuti non sufficienti (got %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "%(sync_from)r => %(sync_to)r non trovato - oggetto %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nessun elemento ricostruito per %s secondi." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nessun elemento replicato per %s secondi." 
msgid "Object" msgstr "Oggetto" msgid "Object PUT" msgstr "PUT dell'oggetto" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Il PUT dell'oggetto ha restituito 202 per 409: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Il PUT dell'oggetto ha restituito 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Modalità \"%(mode)s\" (%(type)s) verifica oggetto completata: " "%(elapsed).02fs. Totale in quarantena: %(quars)d, Totale errori: %(errors)d, " "Totale file/sec: %(frate).2f, Totale byte/sec: %(brate).2f, Tempo verifica: " "%(audit).2f, Velocità: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Verifica oggetto (%(type)s). A partire da %(start_time)s: In locale: " "%(passes)d passati, %(quars)d in quarantena, %(errors)d errori file/sec: " "%(frate).2f , byte/sec: %(brate).2f, Tempo totale: %(total).2f, Tempo " "verifica: %(audit).2f, Velocità: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Statistiche verifica oggetto: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Ricostruzione dell'oggetto completata (una volta). (%.02f minuti)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Ricostruzione dell'oggetto completata. (%.02f minuti)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replica dell'oggetto completata (una volta). (%.02f minuti)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Replica dell'oggetto completata. 
(%.02f minuti)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "I server dell'oggetto hanno restituito %s etag senza corrispondenza" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Pulizia a singolo thread aggiornamento oggetto completata: %(elapsed).02fs, " "%(success)s operazioni con esito positivo, %(fail)s errori" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Pulizia aggiornamento oggetto completata: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Pulizia aggiornamento oggetto di %(device)s completata: %(elapsed).02fs, " "%(success)s operazioni con esito positivo, %(fail)s errori" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Parametri, query e frammenti non consentiti in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "Tempi partizione: max %(max).4fs, min %(min).4fs, med %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "" "Avvio della trasmissione; %s contenitori possibili; %s oggetti possibili" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "Trasmissione completata in %ds; %d oggetti scaduti" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "Trasmissione eseguita fino ad ora %ds; %d oggetti scaduti" msgid "Path required in X-Container-Sync-To" msgstr "Percorso richiesto in X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problema durante la ripulitura di %s" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "Problema durante la ripulitura di %s (%s)" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "Problema durante la scrittura del file obsoleto duraturo %s (%s)" #, python-format msgid "Profiling Error: %s" msgstr "Errore di creazione dei profili: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s inserito in quarantena in %(quar_path)s perché non è una " "directory" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s inserito in quarantena in %(quar_path)s perché non è una " "directory" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "%s inserito in quarantena in %s a causa del database %s" #, python-format msgid "Quarantining DB %s" msgstr "Inserimento in quarantena del DB %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Log di sospensione Ratelimit: %(sleep)s per %(account)s/%(container)s/" "%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Rimossi %(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "Rimozione di oggetti %s" #, python-format msgid "Removing partition: %s" msgstr "Rimozione della partizione: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Rimozione del file pid %(pid_file)s con pid non valido %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Rimozione del file pid %s con pid non valido" #, python-format msgid "Removing stale pid file %s" msgstr "Rimozione del file pid %s obsoleto in corso" msgid 
"Replication run OVER" msgstr "Esecuzione della replica TERMINATA" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Viene restituito il codice 497 a causa della blacklist: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Viene restituito 498 per %(meth)s a %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(numero massimo sospensioni) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Modifica ring rilevata. Interruzione della trasmissione della ricostruzione " "corrente." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Modifica ring rilevata. Interruzione della trasmissione della replica " "corrente." #, python-format msgid "Running %s once" msgstr "Esecuzione di %s una volta" msgid "Running object reconstructor in script mode." msgstr "" "Esecuzione del programma di ricostruzione dell'oggetto in modalità script." msgid "Running object replicator in script mode." msgstr "Esecuzione del programma di replica dell'oggetto in modalità script." #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "Segnale %s pid: %s segnale: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "A partire da %(time)s: %(sync)s sincronizzati [%(delete)s eliminazioni, " "%(put)s inserimenti], %(skip)s ignorati, %(fail)s non riusciti" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "A partire da %(time)s: Verifiche account: %(passed)s verifiche superate, " "%(failed)s verifiche non superate" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "A partire da %(time)s: Verifiche contenitore: %(pass)s verifiche superate, " "%(fail)s verifiche non superate" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s viene ignorato perché non è montato" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s viene ignorato perché non è montato" #, python-format msgid "Starting %s" msgstr "Avvio di %s" msgid "Starting object reconstruction pass." msgstr "Avvio della trasmissione della ricostruzione dell'oggetto." msgid "Starting object reconstructor in daemon mode." msgstr "Avvio del programma di ricostruzione dell'oggetto in modalità daemon." msgid "Starting object replication pass." msgstr "Avvio della trasmissione della replica dell'oggetto." msgid "Starting object replicator in daemon mode." msgstr "Avvio del programma di replica dell'oggetto in modalità daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Rsync di %(src)s eseguito correttamente su %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Non è consentito l'accesso a questo tipo di file!" 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Il numero totale di %(key)s per il contenitore (%(total)s) non corrisponde " "alla somma di %(key)s tra le politiche (%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Timeout di %(action)s su memcached: %(server)s" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Eccezione di timeout con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentativo di %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentativo di eseguire GET %(full_path)s" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "Tentativo di acquisire lo stato %s di PUT su %s" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentativo di acquisire lo stato finale di PUT su %s" msgid "Trying to read during GET" msgstr "Tentativo di lettura durante GET" msgid "Trying to read during GET (retrying)" msgstr "Tentativo di lettura durante GET (nuovo tentativo)" msgid "Trying to send to client" msgstr "Tentativo di invio al client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentativo di sincronizzazione dei suffissi con %s" #, python-format msgid "Trying to write to %s" msgstr "Tentativo di scrittura in %s" msgid "UNCAUGHT EXCEPTION" msgstr "ECCEZIONE NON RILEVATA" #, python-format msgid "Unable to find %s config section in %s" msgstr "Impossibile trovare la sezione di configurazione %s in %s" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "Impossibile caricare il client interno dalla configurazione: %r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Impossibile individuare %s in libc. Lasciato come no-op." #, python-format msgid "Unable to locate config for %s" msgstr "Impossibile individuare la configurazione per %s" #, python-format msgid "Unable to locate config number %s for %s" msgstr "Impossibile individuare il numero di configurazione %s per %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossibile individuare fallocate, posix_fallocate in libc. Lasciato come " "no-op." #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "Impossibile eseguire fsync() sulla directory %s: %s" #, python-format msgid "Unable to read config from %s" msgstr "Impossibile leggere la configurazione da %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r non autorizzato" #, python-format msgid "Unexpected response: %s" msgstr "Risposta imprevista: %s" msgid "Unhandled exception" msgstr "Eccezione non gestita" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Eccezione imprevista nel tentativo di eseguire GET: %(account)r " "%(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Report di aggiornamento non riuscito per %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Report di aggiornamento inviato per %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVVERTENZA: SSL deve essere abilitato solo per scopi di test. 
Utilizzare la " "terminazione SSL esterna per una distribuzione di produzione." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite del descrittore del file. " "Eseguire come non-root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite del numero massimo di processi. " "Eseguire come non-root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite di memoria. Eseguire come non-" "root?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "" "Sono trascorsi %s secondi in attesa che %s venga interrotto; operazione " "terminata" #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "" "Sono trascorsi %s secondi in attesa che %s venga interrotto; operazione " "terminata" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avvertenza: impossibile eseguire ratelimit senza un client memcached" #, python-format msgid "method %s is not allowed." msgstr "il metodo %s non è consentito." msgid "no log file found" msgstr "nessun file di log trovato" msgid "odfpy not installed." msgstr "odfpy non installato." #, python-format msgid "plotting results failed due to %s" msgstr "tracciamento dei risultati non riuscito a causa di %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib non installato." swift-2.7.0/swift/locale/de/0000775000567000056710000000000012675204211017000 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/de/LC_MESSAGES/0000775000567000056710000000000012675204211020565 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/de/LC_MESSAGES/swift.po0000664000567000056710000010676112675204037022302 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2014 # Ettore Atalan , 2014-2015 # Jonas John , 2015 # Frank Kloeker , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-20 07:32+0000\n" "Last-Translator: Monika Wolf \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: German\n" msgid "" "\n" "user quit" msgstr "" "\n" "Durch Benutzer beendet" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d Suffixe überprüft - %(hashed).2f%% hashverschlüsselt, " "%(synced).2f%% synchronisiert" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s zurückgemeldet als ausgehängt" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) Partitionen von %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) Geräten rekonstruiert in %(time).2fs " "(%(rate).2f/sec, %(remaining)s verbleibend)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) Partitionen repliziert in " "%(time).2fs (%(rate).2f/s, %(remaining)s verbleibend)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s Erfolge, %(failure)s Fehlschläge" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s gab 503 für %(statuses)s zurück" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d läuft nicht (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) scheinbar gestoppt" #, python-format msgid "%s already started..." msgstr "%s bereits gestartet..." 
#, python-format msgid "%s does not exist" msgstr "%s existiert nicht" #, python-format msgid "%s is not mounted" msgstr "%s ist nicht eingehängt" #, python-format msgid "%s responded as unmounted" msgstr "%s zurückgemeldet als ausgehängt" #, python-format msgid "%s running (%s - %s)" msgstr "%s läuft (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Verbindung zurückgesetzt durch Peer" #, python-format msgid ", %s containers deleted" msgstr ", %s Container gelöscht" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s Container möglicherweise verbleibend" #, python-format msgid ", %s containers remaining" msgstr ", %s Container verbleibend" #, python-format msgid ", %s objects deleted" msgstr ", %s Objekte gelöscht" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s Objekte möglicherweise verbleibend" #, python-format msgid ", %s objects remaining" msgstr ", %s Objekte verbleibend" #, python-format msgid ", elapsed: %.02fs" msgstr ", vergangen: %.02fs" msgid ", return codes: " msgstr ", Rückgabecodes: " msgid "Account" msgstr "Konto" #, python-format msgid "Account %s has not been reaped since %s" msgstr "Konto %s wurde nicht aufgeräumt seit %s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Kontoprüfungsdurchlauf abgeschlossen: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Versuch, %(count)d Datenbanken in %(time).5f Sekunden zu replizieren " "(%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "Prüfung fehlgeschlagen für %s: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Falscher rsync-Rückgabecode: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Kontoprüfungsmodus \"once\" wird gestartet" msgid "Begin account audit pass." msgstr "Kontoprüfungsdurchlauf wird gestartet." msgid "Begin container audit \"once\" mode" msgstr "Containerprüfungsmodus \"once\" wird gestartet" msgid "Begin container audit pass." msgstr "Containerprüfungsdurchlauf wird gestartet." msgid "Begin container sync \"once\" mode" msgstr "Containersynchronisationsmodus \"once\" wird gestartet" msgid "Begin container update single threaded sweep" msgstr "Einzelthread-Scanvorgang für Containeraktualisierung wird gestartet" msgid "Begin container update sweep" msgstr "Scanvorgang für Containeraktualisierung wird gestartet" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "Objektprüfung mit \"%s\"-Modus wird gestartet (%s%s)" msgid "Begin object update single threaded sweep" msgstr "Einzelthread-Scanvorgang für Objektaktualisierung wird gestartet" msgid "Begin object update sweep" msgstr "Scanvorgang für Objektaktualisierung wird gestartet" #, python-format msgid "Beginning pass on account %s" msgstr "Durchlauf für Konto %s wird gestartet" msgid "Beginning replication run" msgstr "Replizierungsdurchlauf wird gestartet" msgid "Broker error trying to rollback locked connection" msgstr "" "Brokerfehler beim Versuch, für eine gesperrte Verbindung ein Rollback " "durchzuführen" #, python-format msgid "Can not access the file %s." msgstr "Kann nicht auf die Datei %s zugreifen." #, python-format msgid "Can not load profile data from %s." msgstr "Die Profildaten von %s können nicht geladen werden." 
#, python-format msgid "Client did not read from proxy within %ss" msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen" msgid "Client disconnected on read" msgstr "Client beim Lesen getrennt" msgid "Client disconnected without sending enough data" msgstr "Client getrennt ohne dem Senden von genügend Daten" msgid "Client disconnected without sending last chunk" msgstr "" "Die Verbindung zum Client wurde getrennt, bevor der letzte Chunk gesendet " "wurde. " #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Clientpfad %(client)s entspricht nicht dem in den Objektmetadaten " "gespeicherten Pfad %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Konfigurationsoption internal_client_conf_path nicht definiert. " "Standardkonfiguration wird verwendet. Informationen zu den Optionen finden " "Sie in internal-client.conf-sample." msgid "Connection refused" msgstr "Verbindung abgelehnt" msgid "Connection timeout" msgstr "Verbindungszeitüberschreitung" msgid "Container" msgstr "Container" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Containerprüfungsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Containerprüfungsdurchlauf abgeschlossen: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Containersynchronisationsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Einzelthread-Scanvorgang für Containeraktualisierung abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne " "Änderungen" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Scanvorgang für Containeraktualisierung abgeschlossen: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Scanvorgang für Containeraktualisierung von %(path)s abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne " "Änderungen" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "Keine Bindung an %s:%s möglich nach Versuch über %s Sekunden" #, python-format msgid "Could not load %r: %s" msgstr "Konnte %r nicht laden: %s" #, python-format msgid "Data download error: %s" msgstr "Fehler beim Downloaden von Daten: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Gerätedurchgang abgeschlossen: %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" "Das Verzeichnis %r kann keiner gültigen Richtlinie (%s) zugeordnet werden." 
#, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "FEHLER %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "FEHLER %(status)d %(body)s von %(type)s Server" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "FEHLER %(status)d %(body)s Vom Objektserver bezüglich: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "FEHLER %(status)d Erwartet: 100-continue von Objektserver" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "FEHLER %(status)d Versuch, %(method)s %(path)sAus Container-Server" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird zu einem späteren Zeitpunkt erneut versucht): Antwort %(status)s " "%(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen: Unterschiedliche Anzahl von Hosts " "und Einheiten in der Anforderung: \"%s\" contra \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "FEHLER Falsche Rückmeldung %(status)s von %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "FEHLER Client-Lesezeitüberschreitung (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen (wird für asynchrone " "Aktualisierung zu einem späteren Zeitpunkt gespeichert): %(status)d Antwort " "von %(ip)s:%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen: Unterschiedliche Anzahl von " "Hosts und Einheiten in der Anforderung: \"%s\" contra \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "FEHLER Kontoinfo %s konnte nicht abgerufen werden" #, python-format msgid "ERROR Could not get container info %s" msgstr "FEHLER Containerinformation %s konnte nicht geholt werden" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "FEHLER Fehler beim Schließen von DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "" "FEHLER Ausnahme, die zu einer Unterbrechung der Verbindung zum Client führt" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "FEHLER: Ausnahme bei der Übertragung von Daten an die Ojektserver %s" msgid "ERROR Failed to get my own IPs?" msgstr "FEHLER Eigene IPs konnten nicht abgerufen werden?" 
msgid "ERROR Insufficient Storage" msgstr "FEHLER Nicht genügend Speicher" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "FEHLER Objekt %(obj)s hat die Prüfung nicht bestanden und wurde unter " "Quarantäne gestellt: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "FEHLER Pickle-Problem, %s wird unter Quarantäne gestellt" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "FEHLER Entferntes Laufwerk nicht eingehängt %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "FEHLER beim Synchronisieren %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "FEHLER beim Synchronisieren %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "FEHLER beim Versuch, %s zu prüfen" msgid "ERROR Unhandled exception in request" msgstr "FEHLER Nicht behandelte Ausnahme in Anforderung" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "FEHLER __call__-Fehler mit %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird zu einem späteren Zeitpunkt erneut versucht)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird später erneut versucht): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "FEHLER asynchrone anstehende Datei mit unerwartetem Namen %s" msgid "ERROR auditing" msgstr "FEHLER bei der Prüfung" #, python-format msgid "ERROR auditing: %s" msgstr "FEHLER bei der Prüfung: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(dev)s " "(wird für asynchrone Aktualisierung zu einem späteren Zeitpunkt gespeichert)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "FEHLER beim Lesen der HTTP-Antwort von %s" #, python-format msgid "ERROR reading db %s" msgstr "FEHLER beim Lesen der Datenbank %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "FEHLER rsync fehlgeschlagen mit %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "" "FEHLER beim Synchronisieren von %(file)s Dateien mit dem Knoten %(node)s" msgid "ERROR trying to replicate" msgstr "FEHLER beim Versuch zu replizieren" #, python-format msgid "ERROR while trying to clean up %s" msgstr "FEHLER beim Versuch, %s zu bereinigen" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "FEHLER mit %(type)s Server %(ip)s:%(port)s/%(device)s AW: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "FEHLER beim Laden von Unterdrückungen von %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "FEHLER mit entferntem Server %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "FEHLER: Pfade zu Laufwerkpartitionen konnten nicht abgerufen werden: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "FEHLER: Beim Abrufen von Segmenten ist ein Fehler aufgetreten" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" 
msgstr "FEHLER: Auf %(path)s kann nicht zugegriffen werden: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "FEHLER: Prüfung konnte nicht durchgeführt werden: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "Fehler %(action)s für memcached: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "Fehler beim Kodieren nach UTF-8: %s" msgid "Error hashing suffix" msgstr "Fehler beim Hashing des Suffix" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "Fehler in %r mit mtime_check_interval: %s" #, python-format msgid "Error limiting server %s" msgstr "Fehler beim Begrenzen des Servers %s" msgid "Error listing devices" msgstr "Fehler beim Auflisten der Geräte" #, python-format msgid "Error on render profiling results: %s" msgstr "Fehler beim Wiedergeben der Profilerstellungsergebnisse: %s" msgid "Error parsing recon cache file" msgstr "Fehler beim Analysieren von recon-Zwischenspeicherdatei" msgid "Error reading recon cache file" msgstr "Fehler beim Lesen von recon-Zwischenspeicherdatei" msgid "Error reading ringfile" msgstr "Fehler beim Lesen der Ringdatei" msgid "Error reading swift.conf" msgstr "Fehler beim Lesen der swift.conf" msgid "Error retrieving recon data" msgstr "Fehler beim Abrufen der recon-Daten" msgid "Error syncing handoff partition" msgstr "Fehler bei der Synchronisierung der Übergabepartition" msgid "Error syncing partition" msgstr "Fehler beim Syncen der Partition" #, python-format msgid "Error syncing with node: %s" msgstr "Fehler beim Synchronisieren mit Knoten: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Fehler bei Versuch, erneuten Build zu erstellen für %(path)s policy#" "%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Fehler: Ein Fehler ist aufgetreten" msgid "Error: missing config path argument" msgstr "Fehler: fehlendes Konfigurationspfadargument" #, python-format msgid "Error: unable to locate %s" msgstr "Fehler: %s kann nicht lokalisiert werden" msgid "Exception dumping recon cache" msgstr "Ausnahme beim Löschen von recon-Cache" msgid "Exception in top-level account reaper loop" msgstr "Ausnahme in Reaper-Loop für Konto der höchsten Ebene" msgid "Exception in top-level replication loop" msgstr "Ausnahme in Replizierungsloop der höchsten Ebene" msgid "Exception in top-levelreconstruction loop" msgstr "Ausnahme in Rekonstruktionsloop der höchsten Ebene" #, python-format msgid "Exception while deleting container %s %s" msgstr "Ausnahme beim Löschen von Container %s %s" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "Ausnahme beim Löschen von Objekt %s %s %s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Ausnahme bei %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Ausnahme mit Account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Ausnahme bei Containern für Konto %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Ausnahme bei Objekten für Container %(container)s für Konto %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Erwartet: 100-continue auf %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "CNAME-Kette für %(given_domain)s bis %(found_domain)s wird gefolgt" msgid "Found configs:" msgstr "Gefundene 
Konfigurationen:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der aktuelle " "Replikationsdurchgang wird abgebrochen." msgid "Host unreachable" msgstr "Host nicht erreichbar" #, python-format msgid "Incomplete pass on account %s" msgstr "Unvollständiger Durchgang auf Konto %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Ungültiges X-Container-Sync-To-Format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Ungültiger Host %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Ungültiger ausstehender Eintrag %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Ungültige Rückmeldung %(resp)s von %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Ungültige Rückmeldung %(resp)s von %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Ungültiges Schema %r in X-Container-Sync-To, muss \"//\", \"http\" oder " "\"https\" sein." #, python-format msgid "Killing long-running rsync: %s" msgstr "Lange laufendes rsync wird gekillt: %s" msgid "Lockup detected.. killing live coros." msgstr "Suche erkannt. Live-Coros werden gelöscht." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s zugeordnet zu %(found_domain)s" #, python-format msgid "No %s running" msgstr "Kein %s läuft" #, python-format msgid "No cluster endpoint for %r %r" msgstr "Kein Cluster-Endpunkt für %r %r" #, python-format msgid "No permission to signal PID %d" msgstr "Keine Berechtigung zu Signal-Programmkennung %d" #, python-format msgid "No policy with index %s" msgstr "Keine Richtlinie mit Index %s" #, python-format msgid "No realm key for %r" msgstr "Kein Bereichsschlüssel für %r" #, python-format msgid "No space left on device for %s (%s)" msgstr "Kein freier Speicherplatz im Gerät für %s (%s) vorhanden." #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Es wurden nicht genügend Objektserver bestätigt (got %d)." #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Nicht gefunden %(sync_from)r => %(sync_to)r - Objekt " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Für %s Sekunden nichts rekonstruiert." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Für %s Sekunden nichts repliziert." msgid "Object" msgstr "Objekt" msgid "Object PUT" msgstr "Objekt PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "PUT-Operation für ein Objekt gibt 202 für 409 zurück: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Objekt PUT Rückgabe 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Objektprüfung (%(type)s) \"%(mode)s\" Modus abgeschlossen: %(elapsed).02fs. 
" "Unter Quarantäne gestellt insgesamt: %(quars)d, Fehler insgesamt: " "%(errors)d, Dateien/s insgesamt: %(frate).2f, Bytes/s insgesamt: " "%(brate).2f, Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Objektprüfung (%(type)s). Seit %(start_time)s: Lokal: %(passes)d übergeben, " "%(quars)d unter Quarantäne gestellt, %(errors)d Fehler, Dateien/s: " "%(frate).2f, Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, " "Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Objektprüfungsstatistik: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Objektrekonstruktion vollständig (einmal). (%.02f Minuten)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Objektrekonstruktion vollständig. (%.02f Minuten)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Objektreplizierung abgeschlossen (einmal). (%.02f Minuten)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Objektreplikation vollständig. (%.02f Minuten)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Objektserver haben %s nicht übereinstimmende Etags zurückgegeben" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Einzelthread-Scanvorgang für Objektaktualisierung abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Scanvorgang für Objektaktualisierung abgeschlossen: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Scanvorgang für Objektaktualisierung von %(device)s abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parameter, Abfragen und Fragmente nicht zulässig in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Partitionszeiten: max. %(max).4fs, min. %(min).4fs, durchschnittl. 
%(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "Durchlauf wird gestartet; %s mögliche Container; %s mögliche Objekte" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "Durchgang abgeschlossen in %ds; %d Objekte abgelaufen" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "Bisherige Durchgänge %ds; %d Objekte abgelaufen" msgid "Path required in X-Container-Sync-To" msgstr "Pfad in X-Container-Sync-To ist erforderlich" #, python-format msgid "Problem cleaning up %s" msgstr "Problem bei der Bereinigung von %s" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "Problem bei der Bereinigung von %s (%s)" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "Problem beim Schreiben der langlebigen Statusdatei %s (%s)" #, python-format msgid "Profiling Error: %s" msgstr "Fehler bei der Profilerstellung: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es sich " "nicht um ein Verzeichnis handelt" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es " "sich nicht um ein Verzeichnis handelt" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "%s unter Quarantäne gestellt in %s aufgrund von %s-Datenbank" #, python-format msgid "Quarantining DB %s" msgstr "Datenbank %s wird unter Quarantäne gestellt" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Inaktivitätsprotokoll für Geschwindigkeitsbegrenzung: %(sleep)s für " "%(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d Datenbanken entfernt" #, python-format msgid "Removing %s objects" msgstr "%s Objekte werden entfernt" #, python-format msgid "Removing partition: %s" msgstr "Partition wird entfernt: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "PID-Datei %(pid_file)s mit falscher PID %(pid)d wird entfernt" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "PID-Datei %s mit ungültiger PID wird entfernt." #, python-format msgid "Removing stale pid file %s" msgstr "Veraltete PID-Datei %s wird entfernt" msgid "Replication run OVER" msgstr "Replizierungsdurchlauf ABGESCHLOSSEN" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "497 wird aufgrund von Blacklisting zurückgegeben: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "498 wird für %(meth)s auf %(acc)s/%(cont)s/%(obj)s zurückgegeben. " "Geschwindigkeitsbegrenzung (Max. Inaktivität) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Ringänderung erkannt. Aktueller Rekonstruktionsdurchgang wird abgebrochen." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Ringänderung erkannt. Aktueller Replizierungsdurchlauf wird abgebrochen." #, python-format msgid "Running %s once" msgstr "%s läuft einmal" msgid "Running object reconstructor in script mode." msgstr "Objektrekonstruktor läuft im Skriptmodus." msgid "Running object replicator in script mode." msgstr "Objektreplikator läuft im Skriptmodus." 
#, python-format msgid "Signal %s pid: %s signal: %s" msgstr "Signal %s PID: %s Signal: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Seit %(time)s: %(sync)s synchronisiert [%(delete)s Löschungen, %(put)s " "Puts], %(skip)s übersprungen, %(fail)s fehlgeschlagen" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Seit %(time)s: Kontoprüfungen: %(passed)s bestandene Prüfung,%(failed)s " "nicht bestandene Prüfung" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Seit %(time)s: Containerprüfungen: %(pass)s bestandene Prüfung, %(fail)s " "nicht bestandene Prüfung" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s wird übersprungen, da nicht angehängt" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s wird übersprungen, weil es nicht eingehängt ist" #, python-format msgid "Starting %s" msgstr "%s wird gestartet" msgid "Starting object reconstruction pass." msgstr "Objektrekonstruktionsdurchgang wird gestartet." msgid "Starting object reconstructor in daemon mode." msgstr "Objektrekonstruktor wird im Daemon-Modus gestartet." msgid "Starting object replication pass." msgstr "Objektreplikationsdurchgang wird gestartet." msgid "Starting object replicator in daemon mode." msgstr "Objektreplikator wird im Dämonmodus gestartet." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Erfolgreiches rsync von %(src)s um %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Auf den Dateityp darf nicht zugegriffen werden!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Die Gesamtsumme an %(key)s für den Container (%(total)s) entspricht nicht " "der Summe der %(key)s für alle Richtlinien (%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Zeitlimit %(action)s für memcached: %(server)s" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Zeitüberschreitungsausnahme bei %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Versuch, %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Versuch, %(full_path)s mit GET abzurufen" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "Es wird versucht, %s-Status von PUT für %s abzurufen." #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Versuch, den finalen Status von PUT für %s abzurufen" msgid "Trying to read during GET" msgstr "Versuch, während des GET-Vorgangs zu lesen" msgid "Trying to read during GET (retrying)" msgstr "Versuch, während des GET-Vorgangs zu lesen (Wiederholung)" msgid "Trying to send to client" msgstr "Versuch, an den Client zu senden" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Es wird versucht, Suffixe mit %s zu synchronisieren." 
#, python-format msgid "Trying to write to %s" msgstr "Versuch, an %s zu schreiben" msgid "UNCAUGHT EXCEPTION" msgstr "NICHT ABGEFANGENE AUSNAHME" #, python-format msgid "Unable to find %s config section in %s" msgstr "%s-Konfigurationsabschnitt in %s kann nicht gefunden werden" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" "Interner Client konnte nicht aus der Konfiguration geladen werden: %r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" "%s konnte nicht in libc gefunden werden. Wird als Nullbefehl verlassen." #, python-format msgid "Unable to locate config for %s" msgstr "Konfiguration für %s wurde nicht gefunden." #, python-format msgid "Unable to locate config number %s for %s" msgstr "Konfigurationsnummer %s für %s wurde nicht gefunden." msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate konnte nicht in libc gefunden werden. Wird als " "Nullbefehl verlassen." #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "fsync() kann für Verzeichnis %s nicht ausgeführt werden: %s" #, python-format msgid "Unable to read config from %s" msgstr "Konfiguration aus %s kann nicht gelesen werden" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Nicht genehmigte %(sync_from)r => %(sync_to)r" #, python-format msgid "Unexpected response: %s" msgstr "Unerwartete Antwort: %s" msgid "Unhandled exception" msgstr "Nicht behandelte Exception" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Unbekannte Ausnahme bei GET-Versuch: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Aktualisierungsbericht fehlgeschlagen für %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Aktualisierungsbericht gesendet für %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "WARNUNG: SSL sollte nur zu Testzwecken aktiviert werden. Verwenden Sie die " "externe SSL-Beendigung für eine Implementierung in der Produktionsumgebung." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für Dateideskriptoren kann nicht geändert werden. Wird " "nicht als Root ausgeführt?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für maximale Verarbeitung kann nicht geändert werden. " "Wird nicht als Root ausgeführt?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für Speicher kann nicht geändert werden. Wird nicht als " "Root ausgeführt?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet; Gibt auf" #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "Hat %s Sekunden für %s zum Erlöschen gewartet. Wird abgebrochen." msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client " "durchgeführt werden" #, python-format msgid "method %s is not allowed." msgstr "Methode %s ist nicht erlaubt." msgid "no log file found" msgstr "keine Protokolldatei gefunden" msgid "odfpy not installed." msgstr "odfpy ist nicht installiert." 
#, python-format msgid "plotting results failed due to %s" msgstr "" "Die grafische Darstellung der Ergebnisse ist fehlgeschlagen aufgrund von %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib ist nicht installiert." swift-2.7.0/swift/locale/ja/0000775000567000056710000000000012675204211017002 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/ja/LC_MESSAGES/0000775000567000056710000000000012675204211020567 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/ja/LC_MESSAGES/swift.po0000664000567000056710000010044312675204037022273 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Sasuke(Kyohei MORIYAMA) <>, 2015 # Akihiro Motoki , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # 笹原 昌美 , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-23 02:20+0000\n" "Last-Translator: 笹原 昌美 \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Japanese\n" msgid "" "\n" "user quit" msgstr "" "\n" "ユーザー終了" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - パラレル、%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d サフィックスが検査されました - ハッシュ済み %(hashed).2f%%、同期" "済み %(synced).2f%%" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s はアンマウントとして応答しました" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) パーティションが%(time).2fs で" "複製されました (%(rate).2f/秒、残り %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "成功 %(success)s、失敗 %(failure)s" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s が %(statuses)s について 503 を返しています" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d が実行されていません (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) が停止された可能性があります" #, python-format msgid "%s already started..." msgstr "%s は既に開始されています..." 
#, python-format msgid "%s does not exist" msgstr "%s が存在しません" #, python-format msgid "%s is not mounted" msgstr "%s がマウントされていません" #, python-format msgid "%s running (%s - %s)" msgstr "%s が実行中 (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 接続がピアによってリセットされました" #, python-format msgid ", %s containers deleted" msgstr "、%s コンテナーが削除されました" #, python-format msgid ", %s containers possibly remaining" msgstr "、%s コンテナーが残っていると思われます" #, python-format msgid ", %s containers remaining" msgstr "、%s コンテナーが残っています" #, python-format msgid ", %s objects deleted" msgstr "、%s オブジェクトが削除されました" #, python-format msgid ", %s objects possibly remaining" msgstr "、%s オブジェクトが残っていると思われます" #, python-format msgid ", %s objects remaining" msgstr "、%s オブジェクトが残っています" #, fuzzy, python-format msgid ", elapsed: %.02fs" msgstr "、経過時間: %.02fs" msgid ", return codes: " msgstr "、戻りコード: " msgid "Account" msgstr "アカウント" #, python-format msgid "Account %s has not been reaped since %s" msgstr "アカウント %s は %s 以降リープされていません" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "アカウント監査 \"once\" モードが完了しました: %.02fs" #, fuzzy, python-format msgid "Account audit pass completed: %.02fs" msgstr "アカウント監査パスが完了しました: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f 秒で %(count)d 個の DB の複製を試行しました (%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "%s の監査が失敗しました: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "正しくない再同期戻りコード: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "アカウント監査 \"once\" モードの開始" msgid "Begin account audit pass." msgstr "アカウント監査パスを開始します。" msgid "Begin container audit \"once\" mode" msgstr "コンテナー監査「once」モードの開始" msgid "Begin container audit pass." msgstr "コンテナー監査パスを開始します。" msgid "Begin container sync \"once\" mode" msgstr "コンテナー同期「once」モードの開始" msgid "Begin container update single threaded sweep" msgstr "コンテナー更新単一スレッド化スイープの開始" msgid "Begin container update sweep" msgstr "コンテナー更新スイープの開始" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "オブジェクト監査「%s」モードの開始 (%s%s)" msgid "Begin object update single threaded sweep" msgstr "オブジェクト更新単一スレッド化スイープの開始" msgid "Begin object update sweep" msgstr "オブジェクト更新スイープの開始" #, python-format msgid "Beginning pass on account %s" msgstr "アカウント %s でパスを開始中" msgid "Beginning replication run" msgstr "複製の実行を開始中" msgid "Broker error trying to rollback locked connection" msgstr "ロック済み接続のロールバックを試行中のブローカーエラー" #, python-format msgid "Can not access the file %s." msgstr "ファイル %s にアクセスできません。" #, python-format msgid "Can not load profile data from %s." 
msgstr "プロファイルデータを %s からロードできません。" #, python-format msgid "Client did not read from proxy within %ss" msgstr "クライアントは %s 内のプロキシーからの読み取りを行いませんでした" msgid "Client disconnected on read" msgstr "クライアントが読み取り時に切断されました" msgid "Client disconnected without sending enough data" msgstr "十分なデータを送信せずにクライアントが切断されました" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "クライアントパス %(client)s はオブジェクトメタデータ %(meta)s に保管されたパ" "スに一致しません" msgid "Connection refused" msgstr "接続が拒否されました" msgid "Connection timeout" msgstr "接続がタイムアウトになりました" msgid "Container" msgstr "コンテナー" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "コンテナー監査「once」モードが完了しました: %.02fs" #, fuzzy, python-format msgid "Container audit pass completed: %.02fs" msgstr "コンテナー監査パスが完了しました: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "コンテナー同期「once」モードが完了しました: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "コンテナー更新単一スレッド化スイープが完了しました: %(elapsed).02fs、成功 " "%(success)s、失敗 %(fail)s、未変更 %(no_change)s" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "コンテナー更新スイープが完了しました: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s のコンテナー更新スイープが完了しました: %(elapsed).02fs、成功 " "%(success)s、失敗 %(fail)s、未変更 %(no_change)s" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "%s 秒間の試行後に %s:%s にバインドできませんでした" #, python-format msgid "Could not load %r: %s" msgstr "%r をロードできませんでした: %s" #, python-format msgid "Data download error: %s" msgstr "データダウンロードエラー: %s" #, fuzzy, python-format msgid "Devices pass completed: %.02fs" msgstr "デバイスパスが完了しました: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "エラー %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "エラー %(status)d: %(type)s サーバーからの %(body)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "エラー %(status)d: オブジェクトサーバーからの %(body)s、re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "エラー %(status)d: 予期: オブジェクトサーバーからの 100-continue" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "エラー %(status)d: コンテナーサーバーから %(method)s %(path)s を試行中" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "エラー: アカウント更新が %(ip)s:%(port)s/%(device)s で失敗しました(後で再試行" "されます): 応答 %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "エラー: アカウント更新に失敗しました。要求内のホスト数およびデバイス数が異な" "ります: 「%s」vs「%s」" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "エラー: ホスト %(host)s からの応答 %(status)s が正しくありません" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "エラー: クライアント読み取りがタイムアウトになりました (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "エラー: コンテナー更新に失敗しました (後の非同期更新のために保存中): %(ip)s:" "%(port)s/%(dev)s からの %(status)d 応答" #, 
python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "エラー: コンテナー更新に失敗しました。要求内のホスト数およびデバイス数が異な" "ります: 「%s」vs「%s」" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR アカウント情報 %s が取得できませんでした" #, python-format msgid "ERROR Could not get container info %s" msgstr "エラー: コンテナー情報 %s を取得できませんでした" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "エラー: DiskFile %(data_file)s を閉じることができません: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "エラー: 例外によりクライアントが切断されています" msgid "ERROR Failed to get my own IPs?" msgstr "エラー: 自分の IP の取得に失敗?" msgid "ERROR Insufficient Storage" msgstr "エラー: ストレージが不足しています" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "エラー: オブジェクト %(obj)s は監査に失敗し、検疫されました: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "エラー: ピックルの問題、%s を検疫します" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "エラー: リモートドライブに %s がマウントされていません" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "%(db_file)s %(row)s の同期エラー" #, python-format msgid "ERROR Syncing %s" msgstr "%s の同期エラー" #, python-format msgid "ERROR Trying to audit %s" msgstr "%s の監査を試行中にエラーが発生しました" msgid "ERROR Unhandled exception in request" msgstr "エラー: 要求で未処理例外が発生しました" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "エラー: %(method)s %(path)s での __call__ エラー" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "エラー: アカウント更新が %(ip)s:%(port)s/%(device)s で失敗しました(後で再試行" "されます)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "エラー: アカウント更新が %(ip)s:%(port)s/%(device)s で失敗しました(後で再試行" "されます): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "エラー: 予期しない名前 %s を持つファイルを非同期保留中" msgid "ERROR auditing" msgstr "監査エラー" #, python-format msgid "ERROR auditing: %s" msgstr "監査エラー: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "エラー: コンテナー更新が %(ip)s:%(port)s/%(dev)s で失敗しました (後の非同期更" "新のために保存中)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "%s からの HTTP 応答の読み取りエラー" #, python-format msgid "ERROR reading db %s" msgstr "DB %s の読み取りエラー" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "エラー: %(code)s との再同期に失敗しました: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ノード %(node)s との %(file)s の同期エラー" msgid "ERROR trying to replicate" msgstr "複製の試行エラー" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s のクリーンアップを試行中にエラーが発生しました" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "%(type)s サーバー %(ip)s:%(port)s/%(device)s でのエラー、返された値: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "%s からの抑止のロードでエラーが発生しました: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "リモートサーバー %(ip)s:%(port)s/%(device)s でのエラー" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "エラー: ドライブパーティションに対するパスの取得に失敗しました: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "エラー: セグメントの取得中にエラーが発生しました" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr 
"エラー: %(path)s にアクセスできません: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "エラー: 監査を実行できません: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "memcached %(server)s に対する %(action)s がエラーになりました" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "UTF-8 へのエンコードエラー: %s" msgid "Error hashing suffix" msgstr "サフィックスのハッシュエラー" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "mtime_check_interval で %r にエラーがあります: %s" #, python-format msgid "Error limiting server %s" msgstr "サーバー %s の制限エラー" msgid "Error listing devices" msgstr "デバイスのリストエラー" #, python-format msgid "Error on render profiling results: %s" msgstr "レンダリングプロファイル結果でのエラー: %s" msgid "Error parsing recon cache file" msgstr "再構成キャッシュファイルの構文解析エラー" msgid "Error reading recon cache file" msgstr "再構成キャッシュファイルの読み取りエラー" msgid "Error reading ringfile" msgstr "リングファイルの読み取りエラー" msgid "Error reading swift.conf" msgstr "swift.conf の読み取りエラー" msgid "Error retrieving recon data" msgstr "再構成データの取得エラー" msgid "Error syncing handoff partition" msgstr "ハンドオフパーティションの同期エラー" msgid "Error syncing partition" msgstr "パーティションとの同期エラー" #, python-format msgid "Error syncing with node: %s" msgstr "ノードとの同期エラー: %s" msgid "Error: An error occurred" msgstr "エラー: エラーが発生しました" msgid "Error: missing config path argument" msgstr "エラー: 構成パス引数がありません" #, python-format msgid "Error: unable to locate %s" msgstr "エラー: %s が見つかりません" msgid "Exception dumping recon cache" msgstr "再構成キャッシュのダンプで例外が発生しました" msgid "Exception in top-level account reaper loop" msgstr "最上位アカウントリーパーループで例外が発生しました" msgid "Exception in top-level replication loop" msgstr "最上位複製ループで例外が発生しました" #, python-format msgid "Exception while deleting container %s %s" msgstr "コンテナー %s %s の削除中に例外が発生しました" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "オブジェクト %s %s %s の削除中に例外が発生しました" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s で例外が発生しました" #, python-format msgid "Exception with account %s" msgstr "アカウント %s で例外が発生しました" #, python-format msgid "Exception with containers for account %s" msgstr "アカウント %s のコンテナーで例外が発生しました" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "アカウント %(account)s のコンテナー %(container)s のオブジェクトで例外が発生" "しました" #, python-format msgid "Expect: 100-continue on %s" msgstr "予期: %s での 100-continue" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s から %(found_domain)s へ CNAME チェーンをフォロー中" msgid "Found configs:" msgstr "構成が見つかりました:" msgid "Host unreachable" msgstr "ホストが到達不能です" #, python-format msgid "Incomplete pass on account %s" msgstr "アカウント %s での不完全なパス" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "X-Container-Sync-To 形式 %r が無効です" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "無効なホスト %r が X-Container-Sync-To にあります" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無効な保留中項目 %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s からの応答 %(resp)s が無効です" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "無効なスキーム %r が X-Container-Sync-To にあります。「//」、「http」、" "「https」のいずれかでなければなりません。" #, python-format msgid "Killing long-running rsync: %s" msgstr "長期実行の再同期を強制終了中: %s" msgid "Lockup detected.. killing live coros." msgstr "ロックが検出されました.. 
ライブ coros を強制終了中" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s が %(found_domain)s にマップされました" #, python-format msgid "No %s running" msgstr "%s が実行されていません" #, python-format msgid "No cluster endpoint for %r %r" msgstr "%r %r のエンドポイントクラスターがありません" #, python-format msgid "No permission to signal PID %d" msgstr "PID %d にシグナル通知する許可がありません" #, python-format msgid "No realm key for %r" msgstr "%r のレルムキーがありません" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "ノードエラー制限 %(ip)s:%(port)s (%(device)s)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "不検出 %(sync_from)r => %(sync_to)r - オブジェクト " "%(obj_name)r" #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s 秒間で何も複製されませんでした。" msgid "Object" msgstr "オブジェクト" msgid "Object PUT" msgstr "オブジェクト PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "オブジェクト PUT が 409 に対して 202 を返しています: %(req_timestamp)s<= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "オブジェクト PUT が 412 を返しています。%(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "オブジェクト監査 (%(type)s) 「%(mode)s」モード完了: %(elapsed).02fs。合計検疫" "済み: %(quars)d、合計エラー: %(errors)d、合計ファイル/秒: %(frate).2f、合計バ" "イト/秒: %(brate).2f、監査時間: %(audit).2f、率: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "オブジェクト監査統計: %s" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "オブジェクト複製が完了しました (1 回)。(%.02f 分)" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "オブジェクト複製が完了しました。(%.02f 分)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "オブジェクトサーバーが %s 個の不一致 etag を返しました" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "オブジェクト更新単一スレッド化スイープが完了しました: %(elapsed).02fs、成功 " "%(success)s、失敗 %(fail)s" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "オブジェクト更新スイープが完了しました: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "%(device)s のオブジェクト更新スイープが完了しました: %(elapsed).02fs、成功 " "%(success)s、失敗 %(fail)s" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "パラメーター、照会、およびフラグメントは X-Container-Sync-To で許可されていま" "せん" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "パーティション時間: 最大 %(max).4fs、最小 %(min).4fs、中間 %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "" "パスの開始中。%s コンテナーおよび %s オブジェクトが存在する可能性があります" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "%d でパスが完了しました。%d オブジェクトの有効期限が切れました" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "現在までのパス %d。%d オブジェクトの有効期限が切れました" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To にパスが必要です" #, python-format msgid "Problem cleaning up %s" msgstr "%s のクリーンアップ中に問題が発生しました" #, python-format msgid "Profiling Error: %s" msgstr "プロファイル作成エラー: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "ディレクトリーではないため、%(hsh_path)s は %(quar_path)s へ検疫されました" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "ディレクトリーではないため、%(object_path)s は %(quar_path)s へ検疫されました" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "%s から %s が検疫されました (%s データベースが原因)" #, python-format msgid "Quarantining DB %s" msgstr "DB %s の検疫中" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ratelimit スリープログ: %(account)s/%(container)s/%(object)s の %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d 個の DB が削除されました" #, python-format msgid "Removing %s objects" msgstr "%s オブジェクトの削除中" #, python-format msgid "Removing partition: %s" msgstr "パーティションの削除中: %s" #, python-format msgid "Removing stale pid file %s" msgstr "失効した pid ファイル %s を削除中" msgid "Replication run OVER" msgstr "複製の実行が終了しました" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "ブラックリスティングのため 497 を返しています: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s に対する %(meth)s に関して 498 を返しています。" "Ratelimit (最大スリープ) %(e)s" msgid "Ring change detected. Aborting current replication pass." msgstr "リング変更が検出されました。現行複製パスを打ち切ります。" #, python-format msgid "Running %s once" msgstr "%s を 1 回実行中" msgid "Running object replicator in script mode." 
msgstr "スクリプトモードでオブジェクトレプリケーターを実行中です。" #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "%s のシグナル通知、pid: %s シグナル: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s 以降: 同期済み %(sync)s [削除 %(delete)s、書き込み %(put)s]、スキッ" "プ %(skip)s、失敗 %(fail)s" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "%(time)s 以降: アカウント監査: 合格した監査 %(passed)s、不合格の監" "査%(failed)s" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "%(time)s 以降: コンテナー監査: 合格した監査 %(pass)s、不合格の監査%(fail)s" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s はマウントされていないため、スキップされます" #, python-format msgid "Skipping %s as it is not mounted" msgstr "マウントされていないため、 %s をスキップします" #, python-format msgid "Starting %s" msgstr "%s を開始しています" msgid "Starting object replication pass." msgstr "オブジェクト複製パスを開始中です。" msgid "Starting object replicator in daemon mode." msgstr "オブジェクトレプリケーターをデーモンモードで開始中です。" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s での %(src)s の再同期が成功しました (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "このファイルタイプにはアクセスが禁止されています" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "コンテナーの合計 %(key)s (%(total)s) がポリシー全体の合計 %(key)s(%(sum)s) に" "一致しません" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "memcached %(server)s に対する %(action)s がタイムアウトになりました" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s のタイムアウト例外" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s を試行中" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "%s への PUT の最終状況の取得を試行中" msgid "Trying to read during GET" msgstr "GET 時に読み取りを試行中" msgid "Trying to read during GET (retrying)" msgstr "GET 時に読み取りを試行中 (再試行中)" msgid "Trying to send to client" msgstr "クライアントへの送信を試行中" #, python-format msgid "Trying to write to %s" msgstr "%s への書き込みを試行中" msgid "UNCAUGHT EXCEPTION" msgstr "キャッチされていない例外" #, python-format msgid "Unable to find %s config section in %s" msgstr "%s 構成セクションが %s に見つかりません" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "%s が libc に見つかりません。no-op として終了します。" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate、posix_fallocate が libc に見つかりません。no-op として終了します。" #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "ディレクトリー %s で fsync() を実行できません: %s" #, python-format msgid "Unable to read config from %s" msgstr "構成を %s から読み取ることができません" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "非認証 %(sync_from)r => %(sync_to)r" #, python-format msgid "Unexpected response: %s" msgstr "予期しない応答: %s" msgid "Unhandled exception" msgstr "未処理例外" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s に関する更新レポートが失敗しました" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s に関する更新レポートが送信されました" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." 
msgstr "" "警告: SSL を有効にするのはテスト目的のみでなければなりません。製品のデプロイ" "には外部 SSL 終端を使用してください。" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告: ファイル記述子制限を変更できません。非ルートとして実行しますか?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告: 最大処理限界を変更できません。非ルートとして実行しますか?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告: メモリー制限を変更できません。非ルートとして実行しますか?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "%s 秒間、%s の停止を待機しました。中止します" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告: memcached クライアントなしで ratelimit を行うことはできません" #, python-format msgid "method %s is not allowed." msgstr "メソッド %s は許可されていません。" msgid "no log file found" msgstr "ログファイルが見つかりません" msgid "odfpy not installed." msgstr "odfpy がインストールされていません。" #, python-format msgid "plotting results failed due to %s" msgstr "%s が原因で結果のプロットに失敗しました" msgid "python-matplotlib not installed." msgstr "python-matplotlib がインストールされていません。" swift-2.7.0/swift/locale/pt_BR/0000775000567000056710000000000012675204211017416 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/pt_BR/LC_MESSAGES/0000775000567000056710000000000012675204211021203 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/pt_BR/LC_MESSAGES/swift.po0000664000567000056710000010567112675204037022717 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andre Campos Bezerra , 2015 # Lucas Ribeiro , 2014 # thiagol , 2015 # Volmar Oliveira Junior , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Carlos Marques , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 02:07+0000\n" "Last-Translator: Carlos Marques \n" "Language: pt-BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Portuguese (Brazil)\n" msgid "" "\n" "user quit" msgstr "" "\n" "encerramento do usuário" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sufixos verificados – %(hashed).2f%% de hash, %(synced).2f%% " "sincronizados" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s respondeu como desmontado" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partições de %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) dispositivos reconstruídos em %(time).2fs " "(%(rate).2f/sec, %(remaining)s restantes)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partições replicadas em " "%(time).2fs (%(rate).2f/seg, 
%(remaining)s restantes)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s sucessos, %(failure)s falhas" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s retornando 503 para %(statuses)s" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d não está em execução (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) parece ter parado" #, python-format msgid "%s already started..." msgstr "%s já iniciado..." #, python-format msgid "%s does not exist" msgstr "%s não existe" #, python-format msgid "%s is not mounted" msgstr "%s não está montado" #, python-format msgid "%s responded as unmounted" msgstr "%s respondeu como não montado" #, python-format msgid "%s running (%s - %s)" msgstr "%s em execução (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Reconfiguração da conexão por peer" #, python-format msgid ", %s containers deleted" msgstr ", %s containers apagados" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s containers possivelmente restando" #, python-format msgid ", %s containers remaining" msgstr ", %s containers restando" #, python-format msgid ", %s objects deleted" msgstr ", %s objetos apagados" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objetos possivelmente restando" #, python-format msgid ", %s objects remaining" msgstr ", %s objetos restando" #, python-format msgid ", elapsed: %.02fs" msgstr ", passados: %.02fs" msgid ", return codes: " msgstr ", códigos de retorno:" msgid "Account" msgstr "Conta" #, python-format msgid "Account %s has not been reaped since %s" msgstr "As contas %s não foram colhidas desde %s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoria de conta em modo \"único\" finalizado: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Passo de auditoria de conta finalizado: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Tentativa de replicação do %(count)d dbs em%(time).5f segundos (%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "Auditoria Falhou para %s: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de ressincronização inválido: %(ret)d <-%(args)s" msgid "Begin account audit \"once\" mode" msgstr "Iniciar auditoria de conta em modo \"único\"" msgid "Begin account audit pass." msgstr "Iniciando passo de auditoria de conta." msgid "Begin container audit \"once\" mode" msgstr "Inicie o modo \"único\" da auditoria do contêiner" msgid "Begin container audit pass." msgstr "Inicie a aprovação da auditoria do contêiner." 
msgid "Begin container sync \"once\" mode" msgstr "Inicie o modo \"único\" de sincronização do contêiner" msgid "Begin container update single threaded sweep" msgstr "Inicie a varredura de encadeamento único da atualização do contêiner" msgid "Begin container update sweep" msgstr "Inicie a varredura de atualização do contêiner" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "Inicie o modo \"%s\" da auditoria (%s%s)" msgid "Begin object update single threaded sweep" msgstr "Inicie a varredura de encadeamento único da atualização do objeto" msgid "Begin object update sweep" msgstr "Inicie a varredura da atualização do objeto" #, python-format msgid "Beginning pass on account %s" msgstr "Iniciando a estapa nas contas %s" msgid "Beginning replication run" msgstr "Começando execução de replicação" msgid "Broker error trying to rollback locked connection" msgstr "Erro do Broker ao tentar retroceder a conexão bloqueada" #, python-format msgid "Can not access the file %s." msgstr "Não é possível acessar o arquivo %s." #, python-format msgid "Can not load profile data from %s." msgstr "Não é possível carregar dados do perfil a partir de %s." #, python-format msgid "Cannot read %s (%s)" msgstr "Não é possível ler %s (%s)" #, python-format msgid "Cannot write %s (%s)" msgstr "Não é possível gravar %s (%s)" #, python-format msgid "Client did not read from proxy within %ss" msgstr "O cliente não leu no proxy dentro de %ss" msgid "Client disconnected on read" msgstr "Cliente desconectado durante leitura" msgid "Client disconnected without sending enough data" msgstr "Cliente desconecatdo sem ter enviado dados suficientes" msgid "Client disconnected without sending last chunk" msgstr "Cliente desconectado sem ter enviado o último chunk" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Caminho do cliente %(client)s não corresponde ao caminho armazenado nos " "metadados do objeto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Opção de configuração internal_client_conf_path não definida. Usando a " "configuração padrão. 
Consulte internal-client.conf-sample para obter opções" msgid "Connection refused" msgstr "Conexão recusada" msgid "Connection timeout" msgstr "Tempo limite de conexão" msgid "Container" msgstr "Contêiner" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Modo \"único\" da auditoria do contêiner concluído: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Aprovação da auditoria do contêiner concluída: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Modo \"único\" de sincronização do contêiner concluído: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Varredura de encadeamento único da atualização do contêiner concluída: " "%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s " "sem alterações" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Varredura da atualização do contêiner concluída: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Varredura da atualização do contêiner de %(path)s concluída: " "%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s " "sem alterações" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "Não foi possível conectar a %s:%s após tentar por %s segundos" #, python-format msgid "Could not load %r: %s" msgstr "Não é possível carregar %r: %s" #, python-format msgid "Data download error: %s" msgstr "Erro ao fazer download de dados: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Dispositivos finalizados: %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "O diretório %r não está mapeado para uma política válida (%s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERRO %(status)d %(body)s Do Servidor %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERRO %(status)d %(body)s No Servidor de Objetos re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRO %(status)d Expectativa: 100-continuar Do Servidor de Objeto" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "ERRO %(status)d Tentando %(method)s %(path)s Do Servidor de Contêiner" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde): Resposta %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERRO A atualização da conta falhou: números diferentes de hosts e " "dispositivos na solicitação: \"%s\" vs \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERRO Resposta inválida %(status)s a partir de %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERRO Tempo limite de leitura do cliente (%ss)" #, python-format msgid "" "ERROR 
Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERRO A atualização do contêiner falhou (salvando para atualização assíncrona " "posterior): %(status)d resposta do %(ip)s:%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERRO A atualização do contêiner falhou: números diferentes de hosts e " "dispositivos na solicitação: \"%s\" vs \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERRO Não foi possível recuperar as informações da conta %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERRO Não foi possível obter informações do contêiner %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERROR DiskFile %(data_file)s falha ao fechar: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERRO Exceção causando clientes a desconectar" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "ERRO Exceção ao transferir dados para os servidores de objeto %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERRO Falha ao pegar meu próprio IPs?" msgid "ERROR Insufficient Storage" msgstr "ERRO Capacidade insuficiente" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "ERRO O objeto %(obj)s falhou ao auditar e ficou em quarentena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERRO Problema de seleção, em quarentena %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERRO Drive remoto não montado %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERRO Sincronizando %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERRO Sincronizando %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERRO Tentando auditar %s" msgid "ERROR Unhandled exception in request" msgstr "ERRO Exceção não manipulada na solicitação" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ erro com %(method)s %(path)s" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERRO arquivo pendente assíncrono com nome inesperado %s" msgid "ERROR auditing" msgstr "Erro auditando" #, python-format msgid "ERROR auditing: %s" msgstr "ERRO auditoria: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERRO A atualização de contêiner falhou com %(ip)s:%(port)s/%(dev)s (salvando " "para atualização assíncrona posterior)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERRO lendo resposta HTTP de %s" #, python-format msgid "ERROR reading db %s" msgstr "ERRO lendo db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERRO rsync falhou com %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERRO 
sincronizando %(file)s com nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERRO tentando replicar" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERRO enquanto tentava limpar %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERRO com %(type)s do servidor %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERRO com as supressões de carregamento a partir de %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERRO com o servidor remoto %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERRO: Falha ao obter caminhos para partições de unidade: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "ERRO: Ocorreu um erro ao recuperar segmentos" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERRO: Não é possível acessar %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERRO: Não é possível executar a auditoria: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "Erro %(action)s para memcached: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "Erro encodificando para UTF-8: %s" msgid "Error hashing suffix" msgstr "Erro ao efetuar hash do sufixo" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "Erro em %r com mtime_check_interval: %s" #, python-format msgid "Error limiting server %s" msgstr "Erro ao limitar o servidor %s" msgid "Error listing devices" msgstr "Erro ao listar dispositivos" #, python-format msgid "Error on render profiling results: %s" msgstr "Erro na renderização de resultados de criação de perfil: %s" msgid "Error parsing recon cache file" msgstr "Erro ao analisar o arquivo de cache de reconhecimento" msgid "Error reading recon cache file" msgstr "Erro ao ler o arquivo de cache de reconhecimento" msgid "Error reading ringfile" msgstr "Erro na leitura do ringfile" msgid "Error reading swift.conf" msgstr "Erro ao ler swift.conf" msgid "Error retrieving recon data" msgstr "Erro ao recuperar dados de reconhecimento" msgid "Error syncing handoff partition" msgstr "Erro ao sincronizar a partição de handoff" msgid "Error syncing partition" msgstr "Erro ao sincronizar partição" #, python-format msgid "Error syncing with node: %s" msgstr "Erro ao sincronizar com o nó: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Erro ao tentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Erro: Ocorreu um erro" msgid "Error: missing config path argument" msgstr "Erro: argumento do caminho de configuração ausente" #, python-format msgid "Error: unable to locate %s" msgstr "Erro: não é possível localizar %s" msgid "Exception dumping recon cache" msgstr "Exceção dump de cache de reconhecimento" msgid "Exception in top-level account reaper loop" msgstr "Exceção no loop do removedor da conta de nível superior" msgid "Exception in top-level replication loop" msgstr "Exceção no loop de replicação de nível superior" msgid "Exception in top-levelreconstruction loop" msgstr "Exceção no loop de reconstrução de nível superior" #, python-format msgid "Exception while deleting container %s %s" msgstr "Exceção ao excluir contêiner %s %s" #, python-format msgid "Exception while deleting object %s %s %s"
msgstr "Exceção ao excluir objeto %s %s %s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exceção com %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Exceção com a conta %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exceção com os containers para a conta %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exceção com objetos para o container %(container)s para conta %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Expectativa: 100-continuar em %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Cadeia CNAME a seguir para %(given_domain)s para%(found_domain)s" msgid "Found configs:" msgstr "Localizados arquivos de configuração:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "O primeiro modo de handoffs ainda possui handoffs. Interrompendo a aprovação " "da replicação atual." msgid "Host unreachable" msgstr "Destino inalcançável" #, python-format msgid "Incomplete pass on account %s" msgstr "Estapa incompleta nas contas %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato X-Container-Sync-To inválido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host inválido %r em X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendente inválida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Resposta inválida %(resp)s a partir de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Resposta inválida %(resp)s a partir de %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Esquema inválido %r em X-Container-Sync-To, deve ser \" // \", \"http\" ou " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Eliminando a ressincronização de longa execução: %s" #, python-format msgid "Loading JSON from %s failed (%s)" msgstr "Falha ao carregar JSON a partir do %s (%s)" msgid "Lockup detected.. killing live coros." msgstr "Bloqueio detectado... eliminando núcleos em tempo real." 
#, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s mapeado para %(found_domain)s" #, python-format msgid "No %s running" msgstr "Nenhum %s rodando" #, python-format msgid "No cluster endpoint for %r %r" msgstr "Nenhum terminal de cluster para %r %r" #, python-format msgid "No permission to signal PID %d" msgstr "Nenhuma permissão para PID do sinal %d" #, python-format msgid "No policy with index %s" msgstr "Nenhuma política com índice %s" #, python-format msgid "No realm key for %r" msgstr "Nenhuma chave do domínio para %r" #, python-format msgid "No space left on device for %s (%s)" msgstr "Nenhum espaço deixado no dispositivo para %s (%s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Erro de nó limitado %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" "Insuficiente número de servidores de objeto confirmaram (%d confirmados)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Não localizado %(sync_from)r => %(sync_to)r – objeto " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nada foi reconstruído durante %s segundos." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nada foi replicado para %s segundos." msgid "Object" msgstr "Objeto" msgid "Object PUT" msgstr "PUT de objeto" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Objeto PUT retornando 202 para a versão 409: %(req_timestamp)s < = " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "PUT de objeto retornando 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Modo \"%(mode)s\" da auditoria de objeto (%(type)s) concluído: " "%(elapsed).02fs. Total em quarentena: %(quars)d, Total de erros: %(errors)d, " "Total de arquivos/seg: %(frate).2f, Total de bytes/seg: %(brate).2f, Tempo " "de auditoria: %(audit).2f, Taxa: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoria de objeto (%(type)s). Desde %(start_time)s: Localmente: %(passes)d " "aprovado, %(quars)d em quarentena, %(errors)d erros, arquivos/s: " "%(frate).2f, bytes/seg: %(brate).2f, Tempo total: %(total).2f, Tempo de " "auditoria: %(audit).2f, Taxa: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Estatísticas de auditoria do objeto: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Reconstrução do objeto concluída (única). (%.02f minutos)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstrução do objeto concluída. (%.02f minutos)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replicação completa do objeto (única). (%.02f minutos)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Replicação completa do objeto. 
(%.02f minutos)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Servidores de objeto retornaram %s etags incompatíveis" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Varredura de encadeamento único da atualização do objeto concluída: " "%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Varredura da atualização de objeto concluída: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Varredura da atualização do objeto de %(device)s concluída: %(elapsed).02fs, " "%(success)s com êxito, %(fail)s com falha" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parâmetros, consultas e fragmentos não permitidos em X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Tempos de partição: máximo %(max).4fs, mínimo %(min).4fs, médio %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "Início da aprovação; %s contêineres possíveis; %s objetos possíveis" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "Aprovação concluída em %ds; %d objetos expirados" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "Aprovados até aqui %ds; %d objetos expirados" msgid "Path required in X-Container-Sync-To" msgstr "Caminho necessário em X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problema ao limpar %s" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "Problema ao limpar %s (%s)" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "Problema ao gravar arquivo de estado durável %s (%s)" #, python-format msgid "Profiling Error: %s" msgstr "Erro da Criação de Perfil: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Em quarentena %(hsh_path)s para %(quar_path)s porque ele não é um diretório" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Em quarentena %(object_path)s para %(quar_path)s porque ele não é um " "diretório" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "Em quarentena %s para %s devido a %s do banco de dados" #, python-format msgid "Quarantining DB %s" msgstr "Quarentenando BD %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Log de suspensão do limite de taxa: %(sleep)s para %(account)s/%(container)s/" "%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Dbs %(remove)d removido" #, python-format msgid "Removing %s objects" msgstr "Removendo %s objetos" #, python-format msgid "Removing partition: %s" msgstr "Removendo partição: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Removendo arquivo pid %(pid_file)s com pid errado %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Removendo o arquivo pid %s com pid inválido" #, python-format msgid "Removing stale pid file %s" msgstr "Removendo o arquivo pid %s antigo" msgid "Replication run OVER" msgstr "Execução de replicação TERMINADA" #, python-format msgid "Returning 497 because of blacklisting: %s" 
msgstr "Retornando 497 por causa da listagem negra: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Retornando 498 para %(meth)s para %(acc)s/%(cont)s/%(obj)s. Limite de taxa " "(Suspensão Máxima) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Mudança no anel detectada. Interrompendo a aprovação da recosntrução atual." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Alteração do anel detectada. Interrompendo a aprovação da replicação atual." #, python-format msgid "Running %s once" msgstr "Executando %s uma vez," msgid "Running object reconstructor in script mode." msgstr "Executando o reconstrutor do objeto no modo de script." msgid "Running object replicator in script mode." msgstr "Executando replicador do objeto no modo de script." #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "PID %s do sinal: %s sinal: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Desde %(time)s: %(sync)s sincronizados [%(delete)s exclui, %(put)s coloca], " "%(skip)s ignorados, %(fail)s com falha" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Desde %(time)s: Auditoria de contas: %(passed)s auditorias passaram," "%(failed)s auditorias falharam" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Desde %(time)s: As auditorias do contêiner: %(pass)s de auditoria aprovada, " "%(fail)s com falha auditoria" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Pulando %(device)s porque não está montado" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Pulando %s porque não está montado" #, python-format msgid "Starting %s" msgstr "Iniciando %s" msgid "Starting object reconstruction pass." msgstr "Iniciando a aprovação da reconstrução de objeto." msgid "Starting object reconstructor in daemon mode." msgstr "Iniciando o reconstrutor do objeto no modo daemon." msgid "Starting object replication pass." msgstr "Iniciando a aprovação da replicação de objeto." msgid "Starting object replicator in daemon mode." msgstr "Iniciando o replicador do objeto no modo daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Ressincronização bem-sucedida de %(src)s em %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "O tipo de arquivo é de acesso proibido!" 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "O total %(key)s para o container (%(total)s) não confere com a soma %(key)s " "pelas politicas (%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Tempo limite %(action)s para memcached: %(server)s" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exceção de tempo limite com %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentando %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentando GET %(full_path)s" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "Tentando obter o status %s do PUT para o %s" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentando obter o status final do PUT para o %s" msgid "Trying to read during GET" msgstr "Tentando ler durante GET" msgid "Trying to read during GET (retrying)" msgstr "Tentando ler durante GET (tentando novamente)" msgid "Trying to send to client" msgstr "Tentando enviar para o cliente" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentando sincronizar sufixos com %s" #, python-format msgid "Trying to write to %s" msgstr "Tentando escrever para %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEÇÃO NÃO CAPTURADA" #, python-format msgid "Unable to find %s config section in %s" msgstr "Não é possível localizar %s da seção de configuração em %s" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" "Não é possível carregar cliente interno a partir da configuração: %r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Não é possível localizar %s em libc. Saindo como um não operacional." #, python-format msgid "Unable to locate config for %s" msgstr "Não é possível localizar configuração para %s" #, python-format msgid "Unable to locate config number %s for %s" msgstr "Não é possível localizar o número de configuração %s para %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Não é possível localizar fallocate, posix_fallocate em libc. Saindo como um " "não operacional." #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "Não é possível executar fsync() no diretório %s: %s" #, python-format msgid "Unable to read config from %s" msgstr "Não é possível ler a configuração a partir de %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Não autorizado %(sync_from)r => %(sync_to)r" #, python-format msgid "Unexpected response: %s" msgstr "Resposta inesperada: %s" msgid "Unhandled exception" msgstr "Exceção não-tratada" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "Exceção inesperada ao tentar GET: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Atualize o relatório com falha para %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Atualize o relatório enviado para %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVISO: SSL deve ser ativada somente para fins de teste. Use rescisão SSL " "externa para uma implementação de produção." 
msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite do descritor de arquivo. Executar " "como não raiz?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite máximo do processo. Executar como " "não raiz?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite de memória. Executar como não raiz?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "Esperou %s segundos para %s eliminar; desistindo" #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "Esperou %s segundos para %s eliminar; eliminando" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Aviso: Não é possível um limite de taxa sem um cliente memcached" #, python-format msgid "method %s is not allowed." msgstr "o método %s não é permitido." msgid "no log file found" msgstr "Nenhum arquivo de log encontrado" msgid "odfpy not installed." msgstr "odfpy não está instalado." #, python-format msgid "plotting results failed due to %s" msgstr "plotar resultados falhou devido a %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib não instalado." swift-2.7.0/swift/locale/swift.pot0000664000567000056710000007665312675204037020317 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2016 ORGANIZATION # This file is distributed under the same license as the swift project. # FIRST AUTHOR , 2016. # #, fuzzy msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev244\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 06:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=utf-8\n" "Content-Transfer-Encoding: 8bit\n" "Generated-By: Babel 2.2.0\n" #: swift/account/auditor.py:59 #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed" " audit" msgstr "" #: swift/account/auditor.py:82 msgid "Begin account audit pass." 
msgstr "" #: swift/account/auditor.py:88 swift/container/auditor.py:86 msgid "ERROR auditing" msgstr "" #: swift/account/auditor.py:93 #, python-format msgid "Account audit pass completed: %.02fs" msgstr "" #: swift/account/auditor.py:99 msgid "Begin account audit \"once\" mode" msgstr "" #: swift/account/auditor.py:104 #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "" #: swift/account/auditor.py:123 #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of" " %(key)s across policies (%(sum)s)" msgstr "" #: swift/account/auditor.py:148 #, python-format msgid "Audit Failed for %s: %s" msgstr "" #: swift/account/auditor.py:152 #, python-format msgid "ERROR Could not get account info %s" msgstr "" #: swift/account/reaper.py:139 swift/common/utils.py:2342 #: swift/obj/diskfile.py:361 swift/obj/updater.py:88 swift/obj/updater.py:131 #, python-format msgid "Skipping %s as it is not mounted" msgstr "" #: swift/account/reaper.py:143 msgid "Exception in top-level account reaper loop" msgstr "" #: swift/account/reaper.py:146 #, python-format msgid "Devices pass completed: %.02fs" msgstr "" #: swift/account/reaper.py:254 #, python-format msgid "Beginning pass on account %s" msgstr "" #: swift/account/reaper.py:279 #, python-format msgid "Exception with containers for account %s" msgstr "" #: swift/account/reaper.py:286 #, python-format msgid "Exception with account %s" msgstr "" #: swift/account/reaper.py:287 #, python-format msgid "Incomplete pass on account %s" msgstr "" #: swift/account/reaper.py:289 #, python-format msgid ", %s containers deleted" msgstr "" #: swift/account/reaper.py:291 #, python-format msgid ", %s objects deleted" msgstr "" #: swift/account/reaper.py:293 #, python-format msgid ", %s containers remaining" msgstr "" #: swift/account/reaper.py:296 #, python-format msgid ", %s objects remaining" msgstr "" #: swift/account/reaper.py:298 #, python-format msgid ", %s containers possibly remaining" msgstr "" #: swift/account/reaper.py:301 #, python-format msgid ", %s objects possibly remaining" msgstr "" #: swift/account/reaper.py:304 msgid ", return codes: " msgstr "" #: swift/account/reaper.py:308 #, python-format msgid ", elapsed: %.02fs" msgstr "" #: swift/account/reaper.py:314 #, python-format msgid "Account %s has not been reaped since %s" msgstr "" #: swift/account/reaper.py:373 swift/account/reaper.py:427 #: swift/account/reaper.py:503 swift/container/updater.py:307 #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "" #: swift/account/reaper.py:380 swift/account/reaper.py:436 #: swift/account/reaper.py:514 #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "" #: swift/account/reaper.py:397 #, python-format msgid "Exception with objects for container %(container)s for account %(account)s" msgstr "" #: swift/account/server.py:276 swift/container/server.py:607 #: swift/obj/server.py:1038 #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "" #: swift/common/bufferedhttp.py:206 swift/common/bufferedhttp.py:211 #, python-format msgid "Error encoding to UTF-8: %s" msgstr "" #: swift/common/container_sync_realms.py:60 #: swift/common/container_sync_realms.py:69 #, python-format msgid "Could not load %r: %s" msgstr "" #: swift/common/container_sync_realms.py:82 #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "" #: swift/common/db.py:353 #, python-format msgid "Quarantined %s to %s due to %s database" msgstr 
"" #: swift/common/db.py:408 msgid "Broker error trying to rollback locked connection" msgstr "" #: swift/common/db.py:611 #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "" #: swift/common/db_replicator.py:144 #, python-format msgid "ERROR reading HTTP response from %s" msgstr "" #: swift/common/db_replicator.py:208 #, python-format msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" #: swift/common/db_replicator.py:214 #, python-format msgid "Removed %(remove)d dbs" msgstr "" #: swift/common/db_replicator.py:215 swift/obj/replicator.py:514 #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "" #: swift/common/db_replicator.py:262 #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "" #: swift/common/db_replicator.py:326 #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "" #: swift/common/db_replicator.py:496 swift/common/db_replicator.py:766 #, python-format msgid "Quarantining DB %s" msgstr "" #: swift/common/db_replicator.py:499 #, python-format msgid "ERROR reading db %s" msgstr "" #: swift/common/db_replicator.py:552 #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "" #: swift/common/db_replicator.py:554 #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "" #: swift/common/db_replicator.py:593 #, python-format msgid "ERROR while trying to clean up %s" msgstr "" #: swift/common/db_replicator.py:621 msgid "ERROR Failed to get my own IPs?" msgstr "" #: swift/common/db_replicator.py:637 #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "" #: swift/common/db_replicator.py:650 msgid "Beginning replication run" msgstr "" #: swift/common/db_replicator.py:655 msgid "Replication run OVER" msgstr "" #: swift/common/db_replicator.py:668 msgid "ERROR trying to replicate" msgstr "" #: swift/common/internal_client.py:196 #, python-format msgid "Unexpected response: %s" msgstr "" #: swift/common/manager.py:68 msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" #: swift/common/manager.py:75 msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" #: swift/common/manager.py:82 msgid "WARNING: Unable to modify max process limit. Running as non-root?" 
msgstr "" #: swift/common/manager.py:241 msgid "" "\n" "user quit" msgstr "" #: swift/common/manager.py:278 swift/common/manager.py:622 #, python-format msgid "No %s running" msgstr "" #: swift/common/manager.py:291 #, python-format msgid "%s (%s) appears to have stopped" msgstr "" #: swift/common/manager.py:303 #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "" #: swift/common/manager.py:307 swift/common/manager.py:559 #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "" #: swift/common/manager.py:317 #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "" #: swift/common/manager.py:501 #, python-format msgid "Unable to locate config number %s for %s" msgstr "" #: swift/common/manager.py:504 #, python-format msgid "Unable to locate config for %s" msgstr "" #: swift/common/manager.py:507 msgid "Found configs:" msgstr "" #: swift/common/manager.py:554 #, python-format msgid "Removing pid file %s with invalid pid" msgstr "" #: swift/common/manager.py:564 #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "" #: swift/common/manager.py:571 #, python-format msgid "Removing stale pid file %s" msgstr "" #: swift/common/manager.py:574 #, python-format msgid "No permission to signal PID %d" msgstr "" #: swift/common/manager.py:619 #, python-format msgid "%s #%d not running (%s)" msgstr "" #: swift/common/manager.py:626 swift/common/manager.py:719 #: swift/common/manager.py:723 #, python-format msgid "%s running (%s - %s)" msgstr "" #: swift/common/manager.py:726 #, python-format msgid "%s already started..." msgstr "" #: swift/common/manager.py:735 #, python-format msgid "Running %s once" msgstr "" #: swift/common/manager.py:737 #, python-format msgid "Starting %s" msgstr "" #: swift/common/manager.py:744 #, python-format msgid "%s does not exist" msgstr "" #: swift/common/memcached.py:166 #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "" #: swift/common/memcached.py:169 #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "" #: swift/common/memcached.py:194 #, python-format msgid "Error limiting server %s" msgstr "" #: swift/common/request_helpers.py:109 #, python-format msgid "No policy with index %s" msgstr "" #: swift/common/request_helpers.py:456 msgid "ERROR: An error occurred while retrieving segments" msgstr "" #: swift/common/utils.py:397 #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" #: swift/common/utils.py:591 msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "" #: swift/common/utils.py:675 #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "" #: swift/common/utils.py:1244 #, python-format msgid "%s: Connection reset by peer" msgstr "" #: swift/common/utils.py:1246 swift/common/utils.py:1249 #, python-format msgid "%s: %s" msgstr "" #: swift/common/utils.py:1497 msgid "Connection refused" msgstr "" #: swift/common/utils.py:1499 msgid "Host unreachable" msgstr "" #: swift/common/utils.py:1501 msgid "Connection timeout" msgstr "" #: swift/common/utils.py:1779 msgid "UNCAUGHT EXCEPTION" msgstr "" #: swift/common/utils.py:1834 msgid "Error: missing config path argument" msgstr "" #: swift/common/utils.py:1839 #, python-format msgid "Error: unable to locate %s" msgstr "" #: swift/common/utils.py:2200 #, python-format msgid "Unable to read config from %s" msgstr "" #: swift/common/utils.py:2206 #, python-format msgid "Unable to find %s config section in %s" msgstr "" #: swift/common/utils.py:2591 #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "" #: swift/common/utils.py:2596 #, python-format msgid "No realm key for %r" msgstr "" #: swift/common/utils.py:2600 #, python-format msgid "No cluster endpoint for %r %r" msgstr "" #: swift/common/utils.py:2609 #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" #: swift/common/utils.py:2613 msgid "Path required in X-Container-Sync-To" msgstr "" #: swift/common/utils.py:2616 msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" #: swift/common/utils.py:2621 #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "" #: swift/common/utils.py:2815 msgid "Exception dumping recon cache" msgstr "" #: swift/common/wsgi.py:199 #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "" #: swift/common/wsgi.py:209 msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external " "SSL termination for a production deployment." msgstr "" #: swift/common/middleware/catch_errors.py:43 msgid "Error: An error occurred" msgstr "" #: swift/common/middleware/cname_lookup.py:146 #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "" #: swift/common/middleware/cname_lookup.py:158 #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" #: swift/common/middleware/ratelimit.py:248 #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "" #: swift/common/middleware/ratelimit.py:263 #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" #: swift/common/middleware/ratelimit.py:271 #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . 
Ratelimit (Max " "Sleep) %(e)s" msgstr "" #: swift/common/middleware/ratelimit.py:293 msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" #: swift/common/middleware/recon.py:85 msgid "Error reading recon cache file" msgstr "" #: swift/common/middleware/recon.py:87 msgid "Error parsing recon cache file" msgstr "" #: swift/common/middleware/recon.py:89 msgid "Error retrieving recon data" msgstr "" #: swift/common/middleware/recon.py:163 msgid "Error listing devices" msgstr "" #: swift/common/middleware/recon.py:265 msgid "Error reading ringfile" msgstr "" #: swift/common/middleware/recon.py:279 msgid "Error reading swift.conf" msgstr "" #: swift/common/middleware/xprofile.py:226 #, python-format msgid "Error on render profiling results: %s" msgstr "" #: swift/common/middleware/x_profile/exceptions.py:25 #, python-format msgid "Profiling Error: %s" msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:306 #, python-format msgid "method %s is not allowed." msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:317 #, python-format msgid "Can not load profile data from %s." msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:369 #: swift/common/middleware/x_profile/html_viewer.py:399 msgid "no log file found" msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:392 #, python-format msgid "Data download error: %s" msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:397 msgid "python-matplotlib not installed." msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:433 #, python-format msgid "plotting results failed due to %s" msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:444 msgid "The file type are forbidden to access!" msgstr "" #: swift/common/middleware/x_profile/html_viewer.py:465 #, python-format msgid "Can not access the file %s." msgstr "" #: swift/common/middleware/x_profile/profile_model.py:128 msgid "odfpy not installed." msgstr "" #: swift/container/auditor.py:58 #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" #: swift/container/auditor.py:80 msgid "Begin container audit pass." msgstr "" #: swift/container/auditor.py:91 #, python-format msgid "Container audit pass completed: %.02fs" msgstr "" #: swift/container/auditor.py:97 msgid "Begin container audit \"once\" mode" msgstr "" #: swift/container/auditor.py:102 #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "" #: swift/container/auditor.py:123 #, python-format msgid "ERROR Could not get container info %s" msgstr "" #: swift/container/server.py:186 #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" #: swift/container/server.py:231 #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" #: swift/container/server.py:240 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" #: swift/container/sync.py:225 msgid "" "Configuration option internal_client_conf_path not defined. 
Using default" " configuration, See internal-client.conf-sample for options" msgstr "" #: swift/container/sync.py:238 #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" #: swift/container/sync.py:269 msgid "Begin container sync \"once\" mode" msgstr "" #: swift/container/sync.py:278 #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" #: swift/container/sync.py:286 #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], " "%(skip)s skipped, %(fail)s failed" msgstr "" #: swift/container/sync.py:352 #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "" #: swift/container/sync.py:408 #, python-format msgid "ERROR Syncing %s" msgstr "" #: swift/container/sync.py:492 #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" #: swift/container/sync.py:525 #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "" #: swift/container/sync.py:531 #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" #: swift/container/sync.py:538 swift/container/sync.py:545 #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "" #: swift/container/updater.py:78 #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" #: swift/container/updater.py:92 swift/obj/reconstructor.py:822 #: swift/obj/replicator.py:598 swift/obj/replicator.py:715 #, python-format msgid "%s is not mounted" msgstr "" #: swift/container/updater.py:111 #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "" #: swift/container/updater.py:121 msgid "Begin container update sweep" msgstr "" #: swift/container/updater.py:155 #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" #: swift/container/updater.py:169 #, python-format msgid "Container update sweep completed: %.02fs" msgstr "" #: swift/container/updater.py:181 msgid "Begin container update single threaded sweep" msgstr "" #: swift/container/updater.py:189 #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" #: swift/container/updater.py:244 #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "" #: swift/container/updater.py:253 #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "" #: swift/container/updater.py:295 #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" #: swift/obj/auditor.py:78 #, python-format msgid " - parallel, %s" msgstr "" #: swift/obj/auditor.py:80 #, python-format msgid " - %s" msgstr "" #: swift/obj/auditor.py:81 #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "" #: swift/obj/auditor.py:110 #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d " "passed, %(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f," " bytes/sec: %(brate).2f, Total time: %(total).2f, Auditing time: " "%(audit).2f, Rate: %(audit_rate).2f" msgstr "" #: swift/obj/auditor.py:144 #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. 
" "Total quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, " "Rate: %(audit_rate).2f" msgstr "" #: swift/obj/auditor.py:159 #, python-format msgid "Object audit stats: %s" msgstr "" #: swift/obj/auditor.py:190 #, python-format msgid "ERROR Trying to audit %s" msgstr "" #: swift/obj/auditor.py:227 #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" #: swift/obj/auditor.py:279 #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "" #: swift/obj/auditor.py:350 swift/obj/auditor.py:371 #, python-format msgid "ERROR auditing: %s" msgstr "" #: swift/obj/diskfile.py:371 swift/obj/updater.py:162 #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "" #: swift/obj/diskfile.py:413 #, python-format msgid "Cannot read %s (%s)" msgstr "" #: swift/obj/diskfile.py:418 #, python-format msgid "Loading JSON from %s failed (%s)" msgstr "" #: swift/obj/diskfile.py:433 #, python-format msgid "Cannot write %s (%s)" msgstr "" #: swift/obj/diskfile.py:904 #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" #: swift/obj/diskfile.py:1024 msgid "Error hashing suffix" msgstr "" #: swift/obj/diskfile.py:1188 #, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" #: swift/obj/diskfile.py:1441 #, python-format msgid "Problem cleaning up %s" msgstr "" #: swift/obj/diskfile.py:1786 #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" #: swift/obj/diskfile.py:2114 #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata " "%(meta)s" msgstr "" #: swift/obj/diskfile.py:2522 #, python-format msgid "No space left on device for %s (%s)" msgstr "" #: swift/obj/diskfile.py:2531 #, python-format msgid "Problem cleaning up %s (%s)" msgstr "" #: swift/obj/diskfile.py:2534 #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "" #: swift/obj/expirer.py:80 #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "" #: swift/obj/expirer.py:87 #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "" #: swift/obj/expirer.py:171 #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "" #: swift/obj/expirer.py:197 #, python-format msgid "Exception while deleting container %s %s" msgstr "" #: swift/obj/expirer.py:202 swift/obj/expirer.py:219 msgid "Unhandled exception" msgstr "" #: swift/obj/expirer.py:269 #, python-format msgid "Exception while deleting object %s %s %s" msgstr "" #: swift/obj/reconstructor.py:213 swift/obj/reconstructor.py:499 #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "" #: swift/obj/reconstructor.py:221 #, python-format msgid "Trying to GET %(full_path)s" msgstr "" #: swift/obj/reconstructor.py:328 #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" #: swift/obj/reconstructor.py:355 #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of " "%(device)d/%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" #: swift/obj/reconstructor.py:376 swift/obj/replicator.py:519 #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% " "synced" msgstr 
"" #: swift/obj/reconstructor.py:383 swift/obj/replicator.py:526 #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" #: swift/obj/reconstructor.py:391 #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "" #: swift/obj/reconstructor.py:420 swift/obj/replicator.py:563 msgid "Lockup detected.. killing live coros." msgstr "" #: swift/obj/reconstructor.py:467 #, python-format msgid "Trying to sync suffixes with %s" msgstr "" #: swift/obj/reconstructor.py:492 #, python-format msgid "%s responded as unmounted" msgstr "" #: swift/obj/reconstructor.py:893 swift/obj/replicator.py:369 #, python-format msgid "Removing partition: %s" msgstr "" #: swift/obj/reconstructor.py:909 msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" #: swift/obj/reconstructor.py:928 msgid "Exception in top-levelreconstruction loop" msgstr "" #: swift/obj/reconstructor.py:938 msgid "Running object reconstructor in script mode." msgstr "" #: swift/obj/reconstructor.py:947 #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "" #: swift/obj/reconstructor.py:954 msgid "Starting object reconstructor in daemon mode." msgstr "" #: swift/obj/reconstructor.py:958 msgid "Starting object reconstruction pass." msgstr "" #: swift/obj/reconstructor.py:963 #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "" #: swift/obj/replicator.py:183 #, python-format msgid "Killing long-running rsync: %s" msgstr "" #: swift/obj/replicator.py:197 #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "" #: swift/obj/replicator.py:204 swift/obj/replicator.py:208 #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" #: swift/obj/replicator.py:335 #, python-format msgid "Removing %s objects" msgstr "" #: swift/obj/replicator.py:356 msgid "Error syncing handoff partition" msgstr "" #: swift/obj/replicator.py:434 #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "" #: swift/obj/replicator.py:441 #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "" #: swift/obj/replicator.py:485 #, python-format msgid "Error syncing with node: %s" msgstr "" #: swift/obj/replicator.py:490 msgid "Error syncing partition" msgstr "" #: swift/obj/replicator.py:505 #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" #: swift/obj/replicator.py:534 #, python-format msgid "Nothing replicated for %s seconds." msgstr "" #: swift/obj/replicator.py:721 msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" #: swift/obj/replicator.py:727 msgid "Ring change detected. Aborting current replication pass." msgstr "" #: swift/obj/replicator.py:755 msgid "Exception in top-level replication loop" msgstr "" #: swift/obj/replicator.py:765 msgid "Running object replicator in script mode." msgstr "" #: swift/obj/replicator.py:783 #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" #: swift/obj/replicator.py:794 msgid "Starting object replicator in daemon mode." msgstr "" #: swift/obj/replicator.py:798 msgid "Starting object replication pass." msgstr "" #: swift/obj/replicator.py:803 #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "" #: swift/obj/server.py:241 #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d" " response from %(ip)s:%(port)s/%(dev)s" msgstr "" #: swift/obj/server.py:248 #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for " "async update later)" msgstr "" #: swift/obj/server.py:284 #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" #: swift/obj/updater.py:63 #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "" #: swift/obj/updater.py:78 msgid "Begin object update sweep" msgstr "" #: swift/obj/updater.py:104 #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s" " successes, %(fail)s failures" msgstr "" #: swift/obj/updater.py:113 #, python-format msgid "Object update sweep completed: %.02fs" msgstr "" #: swift/obj/updater.py:122 msgid "Begin object update single threaded sweep" msgstr "" #: swift/obj/updater.py:136 #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures" msgstr "" #: swift/obj/updater.py:180 #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" #: swift/obj/updater.py:210 #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "" #: swift/obj/updater.py:275 #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "" #: swift/proxy/server.py:418 msgid "ERROR Unhandled exception in request" msgstr "" #: swift/proxy/server.py:473 #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" #: swift/proxy/server.py:490 swift/proxy/server.py:508 #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "" #: swift/proxy/server.py:531 #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" #: swift/proxy/controllers/account.py:67 msgid "Account" msgstr "" #: swift/proxy/controllers/base.py:813 swift/proxy/controllers/base.py:852 #: swift/proxy/controllers/base.py:944 swift/proxy/controllers/obj.py:340 #: swift/proxy/controllers/obj.py:885 swift/proxy/controllers/obj.py:934 #: swift/proxy/controllers/obj.py:948 swift/proxy/controllers/obj.py:1769 #: swift/proxy/controllers/obj.py:2007 swift/proxy/controllers/obj.py:2145 #: swift/proxy/controllers/obj.py:2379 msgid "Object" msgstr "" #: swift/proxy/controllers/base.py:814 swift/proxy/controllers/base.py:853 msgid "Trying to read during GET (retrying)" msgstr "" #: swift/proxy/controllers/base.py:945 msgid "Trying to read during GET" msgstr "" #: swift/proxy/controllers/base.py:949 #, python-format msgid "Client did not read from proxy within %ss" msgstr "" #: swift/proxy/controllers/base.py:954 msgid "Client disconnected on read" msgstr "" #: swift/proxy/controllers/base.py:956 msgid "Trying to send to client" msgstr "" #: swift/proxy/controllers/base.py:998 swift/proxy/controllers/base.py:1437 #, python-format msgid "Trying to %(method)s %(path)s" msgstr "" #: swift/proxy/controllers/base.py:1037 swift/proxy/controllers/base.py:1425 #: swift/proxy/controllers/obj.py:364 swift/proxy/controllers/obj.py:925 #: swift/proxy/controllers/obj.py:2137 swift/proxy/controllers/obj.py:2424 msgid "ERROR Insufficient Storage" msgstr "" #: swift/proxy/controllers/base.py:1040 #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr 
"" #: swift/proxy/controllers/base.py:1428 #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" #: swift/proxy/controllers/base.py:1558 #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "" #: swift/proxy/controllers/container.py:100 msgid "Container" msgstr "" #: swift/proxy/controllers/obj.py:341 #, python-format msgid "Trying to get final status of PUT to %s" msgstr "" #: swift/proxy/controllers/obj.py:368 swift/proxy/controllers/obj.py:2429 #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "" #: swift/proxy/controllers/obj.py:579 #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" #: swift/proxy/controllers/obj.py:592 #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" #: swift/proxy/controllers/obj.py:929 swift/proxy/controllers/obj.py:2140 #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" #: swift/proxy/controllers/obj.py:935 swift/proxy/controllers/obj.py:2146 #, python-format msgid "Expect: 100-continue on %s" msgstr "" #: swift/proxy/controllers/obj.py:949 swift/proxy/controllers/obj.py:1770 #, python-format msgid "Trying to write to %s" msgstr "" #: swift/proxy/controllers/obj.py:1000 swift/proxy/controllers/obj.py:2311 #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "" #: swift/proxy/controllers/obj.py:1008 swift/proxy/controllers/obj.py:2317 msgid "Client disconnected without sending last chunk" msgstr "" #: swift/proxy/controllers/obj.py:1013 swift/proxy/controllers/obj.py:2324 msgid "ERROR Exception causing client disconnect" msgstr "" #: swift/proxy/controllers/obj.py:1017 swift/proxy/controllers/obj.py:2328 #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" #: swift/proxy/controllers/obj.py:1023 swift/proxy/controllers/obj.py:2242 msgid "Client disconnected without sending enough data" msgstr "" #: swift/proxy/controllers/obj.py:1069 #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" #: swift/proxy/controllers/obj.py:1073 swift/proxy/controllers/obj.py:2288 #: swift/proxy/controllers/obj.py:2513 msgid "Object PUT" msgstr "" #: swift/proxy/controllers/obj.py:2281 #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" #: swift/proxy/controllers/obj.py:2380 #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "" swift-2.7.0/swift/locale/fr/0000775000567000056710000000000012675204211017017 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/fr/LC_MESSAGES/0000775000567000056710000000000012675204211020604 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/fr/LC_MESSAGES/swift.po0000664000567000056710000011001712675204037022306 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Maxime COQUEREL , 2014 # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata # Angelique Pillal , 2016. #zanata # Gael Rehault , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev235\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-18 23:11+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-18 03:55+0000\n" "Last-Translator: Angelique Pillal \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: French\n" msgid "" "\n" "user quit" msgstr "" "\n" "l'utilisateur quitte le programme" #, python-format msgid " - %s" msgstr "- %s" #, python-format msgid " - parallel, %s" msgstr "- parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffixe(s) vérifié(s) - %(hashed).2f%% haché(s), %(synced).2f%% " "synchronisé(s)" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s démonté (d'après la réponse)" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions sur %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) périphériques reconstruites en %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions répliquées en " "%(time).2fs (%(rate).2f/sec ; %(remaining)s restante(s))" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s succès, %(failure)s échec(s)" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s : renvoi de l'erreur 503 pour %(statuses)s" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d n'est pas demarré (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) semble s'être arrêté" #, python-format msgid "%s already started..." msgstr "%s déjà démarré..." 
#, python-format msgid "%s does not exist" msgstr "%s n'existe pas" #, python-format msgid "%s is not mounted" msgstr "%s n'est pas monté" #, python-format msgid "%s responded as unmounted" msgstr "%s ont été identifié(es) comme étant démonté(es)" #, python-format msgid "%s running (%s - %s)" msgstr "%s en cours d'exécution (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s : %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s : Connexion réinitialisée par l'homologue" #, python-format msgid ", %s containers deleted" msgstr ", %s containers supprimés" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s conteneur(s) restant(s), le cas échéant" #, python-format msgid ", %s containers remaining" msgstr ", %s conteneur(s) restant(s)" #, python-format msgid ", %s objects deleted" msgstr ", %s objets supprimés" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objet(s) restant(s), le cas échéant" #, python-format msgid ", %s objects remaining" msgstr ", %s objet(s) restant(s)" #, python-format msgid ", elapsed: %.02fs" msgstr ", temps écoulé : %.02fs" msgid ", return codes: " msgstr ", return codes: " msgid "Account" msgstr "Compte" #, python-format msgid "Account %s has not been reaped since %s" msgstr "Le compte %s n'a pas été collecté depuis %s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Audit de compte en mode \"Once\" terminé : %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Session d'audit de compte terminée : %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Tentative de réplication de %(count)d bases de données en %(time).5f " "secondes (%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "Echec de l'audit pour %s : %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Code retour Rsync non valide : %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Démarrer l'audit de compte en mode \"Once\" (une fois)" msgid "Begin account audit pass." msgstr "Démarrer la session d'audit de compte." msgid "Begin container audit \"once\" mode" msgstr "Démarrer l'audit de conteneur en mode \"Once\" (une fois)" msgid "Begin container audit pass." msgstr "Démarrer la session d'audit de conteneur." msgid "Begin container sync \"once\" mode" msgstr "Démarrer la synchronisation de conteneurs en mode \"Once\" (une fois)" msgid "Begin container update single threaded sweep" msgstr "" "Démarrer le balayage des mises à jour du conteneur (unité d'exécution unique)" msgid "Begin container update sweep" msgstr "Démarrer le balayage des mises à jour du conteneur" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "Démarrer l'audit d'objet en mode \"%s\" (%s%s)" msgid "Begin object update single threaded sweep" msgstr "" "Démarrer le balayage des mises à jour d'objet (unité d'exécution unique)" msgid "Begin object update sweep" msgstr "Démarrer le balayage des mises à jour d'objet" #, python-format msgid "Beginning pass on account %s" msgstr "Démarrage de la session d'audit sur le compte %s" msgid "Beginning replication run" msgstr "Démarrage du cycle de réplication" msgid "Broker error trying to rollback locked connection" msgstr "" "Erreur de courtier lors d'une tentative d'annulation d'une connexion " "verrouillée" #, python-format msgid "Can not access the file %s." msgstr "Ne peut pas accéder au fichier %s." 
#, python-format msgid "Can not load profile data from %s." msgstr "Impossible de charger des données de profil depuis %s." #, python-format msgid "Client did not read from proxy within %ss" msgstr "Le client n'a pas lu les données du proxy en %s s" msgid "Client disconnected on read" msgstr "Client déconnecté lors de la lecture" msgid "Client disconnected without sending enough data" msgstr "Client déconnecté avant l'envoi de toutes les données requises" msgid "Client disconnected without sending last chunk" msgstr "Le client a été déconnecté avant l'envoi du dernier bloc" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Le chemin d'accès au client %(client)s ne correspond pas au chemin stocké " "dans les métadonnées d'objet %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "L'option de configuration internal_client_conf_path n'a pas été définie. La " "configuration par défaut est utilisée. Consultez les options dans internal-" "client.conf-sample." msgid "Connection refused" msgstr "Connexion refusée" msgid "Connection timeout" msgstr "Dépassement du délai d'attente de connexion" msgid "Container" msgstr "Containeur" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Audit de conteneur en mode \"Once\" terminé : %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Session d'audit de conteneur terminée : %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Synchronisation de conteneurs en mode \"Once\" terminée : %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Le balayage des mises à jour du conteneur (unité d'exécution unique) est " "terminé : %(elapsed).02fs, %(success)s succès, %(fail)s échec(s), " "%(no_change)s inchangé(s)" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Le balayage des mises à jour du conteneur est terminé : %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Le balayage des mises à jour du conteneur (%(path)s) est terminé : " "%(elapsed).02fs, %(success)s succès, %(fail)s échec(s), %(no_change)s " "inchangé(s)" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "Liaison impossible à %s:%s après une tentative de %s secondes" #, python-format msgid "Could not load %r: %s" msgstr "Impossible de charger %r: %s" #, python-format msgid "Data download error: %s" msgstr "Erreur de téléchargement des données: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Session d'audit d'unité terminée : %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "Le répertoire %r n'est pas mappé à une stratégie valide (%s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERREUR %(status)d %(body)s depuis le serveur %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERREUR %(status)d %(body)s depuis le serveur d'objets. Réf. 
: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" "ERREUR %(status)d Attendu(s) : 100 - poursuivre depuis le serveur d'objets" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" "ERREUR %(status)d Tentative d'exécution de %(method)s %(path)s à partir du " "serveur de conteneur" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement). Réponse %(status)s " "%(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERREUR Echec de la mise à jour du compte. Le nombre d'hôtes et le nombre " "d'unités diffèrent dans la demande : \"%s\" / \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERREUR Réponse incorrecte %(status)s de %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERREUR Dépassement du délai de lecture du client (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERREUR Echec de la mise à jour du conteneur (sauvegarde pour mise à jour " "asynchrone ultérieure) : réponse %(status)d renvoyée par %(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERREUR Echec de la mise à jour du conteneur. Le nombre d'hôtes et le nombre " "d'unités diffèrent dans la demande : \"%s\" / \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERREUR Impossible d'obtenir les infos de compte %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERREUR Impossible d'obtenir les infos de conteneur %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "ERREUR Incident de fermeture du fichier disque %(data_file)s : %(exc)s : " "%(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERREUR Exception entraînant la déconnexion du client" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ERREUR Exception lors du transfert de données vers des serveurs d'objets %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERREUR Obtention impossible de mes propres adresses IP ?" msgid "ERROR Insufficient Storage" msgstr "ERREUR Stockage insuffisant" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERREUR L'objet %(obj)s a échoué à l'audit et a été en quarantaine : %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERREUR Problème lié à Pickle. 
Mise en quarantaine de %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERREUR Unité distante %s non montée" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERREUR lors de la synchronisation de %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERREUR lors de la synchronisation de %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERREUR lors de la tentative d'audit de %s" msgid "ERROR Unhandled exception in request" msgstr "ERREUR Exception non gérée dans la demande" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ error sur %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement) : " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "ERREUR Le fichier des mises à jour asynchrones en attente porte un nom " "inattendu %s" msgid "ERROR auditing" msgstr "Erreur d'audit" #, python-format msgid "ERROR auditing: %s" msgstr "ERREUR d'audit : %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERREUR Echec de la mise à jour du conteneur avec %(ip)s:%(port)s/%(dev)s " "(sauvegarde pour mise à jour asynchrone ultérieure)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "Erreur de lecture de la réponse HTTP depuis %s" #, python-format msgid "ERROR reading db %s" msgstr "ERREUR de lecture de db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERREUR Echec de Rsync avec %(code)s : %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERREUR de synchronisation de %(file)s avec le noeud %(node)s" msgid "ERROR trying to replicate" msgstr "ERREUR lors de la tentative de réplication" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERREUR pendant le nettoyage %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "ERREUR liée au serveur %(type)s %(ip)s:%(port)s/%(device)s. Réf. 
: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERREUR de chargement des suppressions de %s : " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERREUR liée au serveur distant %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "ERREUR : Echec de l'obtention des chemins d'accès aux partitions d'unité : %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "ERREUR : Une erreur s'est produite lors de l'extraction de segments" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERREUR : Impossible d'accéder à %(path)s : %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERREUR : Impossible d'exécuter l'audit : %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "Erreur de %(action)s dans memcached : %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "Erreur encodage UTF-8: %s" msgid "Error hashing suffix" msgstr "Erreur suffixe hashing" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "Erreur dans %r liée à mtime_check_interval : %s" #, python-format msgid "Error limiting server %s" msgstr "Erreur limitation du serveur %s" msgid "Error listing devices" msgstr "Erreur lors du listage des unités" #, python-format msgid "Error on render profiling results: %s" msgstr "Erreur de rendu des résultats de profilage : %s" msgid "Error parsing recon cache file" msgstr "Erreur lors de l'analyse syntaxique du fichier cache Recon" msgid "Error reading recon cache file" msgstr "Erreur de lecture du fichier cache Recon" msgid "Error reading ringfile" msgstr "Erreur de lecture du fichier Ring" msgid "Error reading swift.conf" msgstr "Erreur de lecture de swift.conf" msgid "Error retrieving recon data" msgstr "Erreur lors de l'extraction des données Recon" msgid "Error syncing handoff partition" msgstr "Erreur lors de la synchronisation de la partition de transfert" msgid "Error syncing partition" msgstr "Erreur de synchronisation de la partition" #, python-format msgid "Error syncing with node: %s" msgstr "Erreur de synchronisation avec le noeud : %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Une erreur est survenue lors de la tentative de régénération de %(path)s " "policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Erreur : une erreur s'est produite" msgid "Error: missing config path argument" msgstr "Erreur: Manque argument de configuration du chemin" #, python-format msgid "Error: unable to locate %s" msgstr "Erreur: impossible de localiser %s" msgid "Exception dumping recon cache" msgstr "Exception lors du vidage de cache Recon" msgid "Exception in top-level account reaper loop" msgstr "Exception dans la boucle de collecteur de compte de niveau supérieur" msgid "Exception in top-level replication loop" msgstr "Exception dans la boucle de réplication de niveau supérieur" msgid "Exception in top-levelreconstruction loop" msgstr "Exception dans la boucle de reconstruction de niveau supérieur" #, python-format msgid "Exception while deleting container %s %s" msgstr "Exception lors de la suppression du conteneur %s %s" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "Exception lors de la suppression de l'objet %s %s %s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exception liée à 
%(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Exception avec le compte %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exception avec les containers pour le compte %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exception liée aux objets pour le conteneur %(container)s et le compte " "%(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Attendus(s) : 100 - poursuivre sur %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" "Suivi de la chaîne CNAME pour %(given_domain)s jusqu'à %(found_domain)s" msgid "Found configs:" msgstr "Configurations trouvées :" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Le premier mode de transferts contient d'autres transferts. Abandon de la " "session de réplication en cours." msgid "Host unreachable" msgstr "Hôte inaccessible" #, python-format msgid "Incomplete pass on account %s" msgstr "Session d'audit incomplète sur le compte %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Non valide X-Container-Sync-To format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Hôte %r non valide dans X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrée en attente non valide %(file)s : %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Réponse %(resp)s non valide de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Réponse %(resp)s non valide de %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Schéma %r non valide dans X-Container-Sync-To. Doit être \"//\", \"http\" ou " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Arrêt de l'opération Rsync à exécution longue : %s" msgid "Lockup detected.. killing live coros." msgstr "Blocage détecté. Arrêt des coroutines actives." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s mappé avec %(found_domain)s" #, python-format msgid "No %s running" msgstr "Non démarré %s" #, python-format msgid "No cluster endpoint for %r %r" msgstr "Aucun noeud final de cluster pour %r %r" #, python-format msgid "No permission to signal PID %d" msgstr "Aucun droit pour signaler le PID %d" #, python-format msgid "No policy with index %s" msgstr "Aucune statégie avec un index de type %s" #, python-format msgid "No realm key for %r" msgstr "Aucune clé de domaine pour %r" #, python-format msgid "No space left on device for %s (%s)" msgstr "Plus d'espace disponible sur le périphérique pour %s (%s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" "Noeud marqué avec limite d'erreurs (error_limited) %(ip)s:%(port)s " "(%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" "Le nombre de serveurs d'objets reconnus n'est pas suffisant (%d obtenus)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Introuvable : %(sync_from)r => %(sync_to)r - objet " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Aucun élément reconstruit pendant %s secondes." #, python-format msgid "Nothing replicated for %s seconds." 
msgstr "Aucun élément répliqué pendant %s secondes." msgid "Object" msgstr "Objet" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 202 pour 409 : " "%(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" "L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 412. %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "L'audit d'objet (%(type)s) en mode \"%(mode)s\" est terminé : " "%(elapsed).02fs. Nombre total mis en quarantaine : %(quars)d. Nombre total " "d'erreurs : %(errors)d. Nombre total de fichiers/sec : %(frate).2f. Nombre " "total d'octets/sec : %(brate).2f. Durée d'audit : %(audit).2f. Taux : " "%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Audit d'objet (%(type)s). Depuis %(start_time)s, localement : %(passes)d " "succès. %(quars)d en quarantaine. %(errors)d erreurs. Fichiers/sec : " "%(frate).2f. Octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée " "d'audit : %(audit).2f. Taux : %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Statistiques de l'audit d'objet : %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "" "La reconstruction d'objet en mode Once (une fois) est terminée. (%.02f " "minutes)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstruction d'objet terminée. (%.02f minutes)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" "La réplication d'objet en mode Once (une fois) est terminée. (%.02f minutes)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Réplication d'objet terminée. 
(%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Des serveurs d'objets ont renvoyé %s en-têtes Etag non concordants" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Le balayage des mises à jour d'objet (unité d'exécution unique) est " "terminé : %(elapsed).02fs, %(success)s succès, %(fail)s échec(s)" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Le balayage des mises à jour d'objet est terminé : %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Le balayage des mises à jour d'objet (%(device)s) est terminé : " "%(elapsed).02fs, %(success)s succès, %(fail)s échec(s)" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Paramètres, requêtes et fragments interdits dans X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Temps de partition : maximum %(max).4fs, minimum %(min).4fs, moyenne " "%(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "Début de session. %s conteneur(s) possible(s). %s objet(s) possible(s)" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "Session terminée dans %ds. %d objet(s) arrivé(s) à expiration" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "Session jusqu'à %ds. %d objet(s) arrivé(s) à expiration" msgid "Path required in X-Container-Sync-To" msgstr "Chemin requis dans X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problème lors du nettoyage de %s" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "Problème lors du nettoyage de %s (%s)" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "" "Un problème est survenu lors de l'écriture du fichier d'état durable %s (%s)" #, python-format msgid "Profiling Error: %s" msgstr "Erreur de profilage : %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s n'est pas un répertoire et a donc été mis en quarantaine dans " "%(quar_path)s" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s n'est pas un répertoire et a donc été mis en quarantaine " "dans %(quar_path)s" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "En quarantaine de %s à %s en raison de la base de données %s" #, python-format msgid "Quarantining DB %s" msgstr "Mise en quarantaine de la base de données %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Journal de mise en veille Ratelimit : %(sleep)s pour %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d bases de données ont été retirées" #, python-format msgid "Removing %s objects" msgstr "Suppression de %s objets" #, python-format msgid "Removing partition: %s" msgstr "Suppression partition: %s" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Suppression du fichier pid %s comportant un pid non valide" #, python-format msgid "Removing stale pid file %s" msgstr "Suppression du fichier PID %s périmé" msgid "Replication run OVER" msgstr "Le cycle de réplication est terminé" #, 
python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Renvoi de 497 en raison du placement sur liste noire : %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Renvoi de 498 pour %(meth)s jusqu'à %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(Max Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Changement d'anneau détecté. Abandon de la session de reconstruction en " "cours." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Changement d'anneau détecté. Abandon de la session de réplication en cours." #, python-format msgid "Running %s once" msgstr "Exécution unique de %s" msgid "Running object reconstructor in script mode." msgstr "Exécution du reconstructeur d'objet en mode script." msgid "Running object replicator in script mode." msgstr "Exécution du réplicateur d'objet en mode script." #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "Signal %s pid: %s signal: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Depuis %(time)s : %(sync)s synchronisé(s) [%(delete)s suppression(s), " "%(put)s insertion(s)], %(skip)s ignoré(s), %(fail)s échec(s)" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Depuis %(time)s : audits de compte : %(passed)s succès, %(failed)s échec(s)" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Depuis %(time)s : audits de conteneur : %(pass)s succès, %(fail)s échec(s)" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s est ignoré car il n'est pas monté" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s est ignoré car il n'est pas monté" #, python-format msgid "Starting %s" msgstr "Démarrage %s" msgid "Starting object reconstruction pass." msgstr "Démarrage de la session de reconstruction d'objet." msgid "Starting object reconstructor in daemon mode." msgstr "Démarrage du reconstructeur d'objet en mode démon." msgid "Starting object replication pass." msgstr "Démarrage de la session de réplication d'objet." msgid "Starting object replicator in daemon mode." msgstr "Démarrage du réplicateur d'objet en mode démon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Succès de Rsync pour %(src)s dans %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" 
msgstr "Accès interdit au type de fichier" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Le total %(key)s du conteneur (%(total)s) ne correspond pas à la somme des " "clés %(key)s des différentes règles (%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "Délai d'attente de %(action)s dans memcached : %(server)s" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "" "Exception liée à un dépassement de délai concernant %(ip)s:%(port)s/" "%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentative d'exécution de %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentative de lecture de %(full_path)s" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "Tentative d'obtention du statut de l'opération PUT %s sur %s" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Tentative d'obtention du statut final de l'opération PUT sur %s" msgid "Trying to read during GET" msgstr "Tentative de lecture pendant une opération GET" msgid "Trying to read during GET (retrying)" msgstr "Tentative de lecture pendant une opération GET (nouvelle tentative)" msgid "Trying to send to client" msgstr "Tentative d'envoi au client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentative de synchronisation de suffixes à l'aide de %s" #, python-format msgid "Trying to write to %s" msgstr "Tentative d'écriture sur %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEPTION NON INTERCEPTEE" #, python-format msgid "Unable to find %s config section in %s" msgstr "Impossible de trouver la section de configuration %s dans %s" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "" "Impossible de charger le client interne depuis la configuration : %r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" "Impossible de localiser %s dans libc. Laissé comme action nulle (no-op)." #, python-format msgid "Unable to locate config for %s" msgstr "Impossible de trouver la configuration pour %s" #, python-format msgid "Unable to locate config number %s for %s" msgstr "Impossible de trouver la configuration portant le numéro %s pour %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossible de localiser fallocate, posix_fallocate dans libc. Laissé comme " "action nulle (no-op)." 
#, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "Impossible d'exécuter fsync() dans le répertoire %s : %s" #, python-format msgid "Unable to read config from %s" msgstr "Impossible de lire le fichier de configuration depuis %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Non autorisé : %(sync_from)r => %(sync_to)r" #, python-format msgid "Unexpected response: %s" msgstr "Réponse inattendue : %s" msgid "Unhandled exception" msgstr "Exception non prise en charge" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Une exception inconnue s'est produite pendant une opération GET: %(account)r " "%(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Echec du rapport de mise à jour pour %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Rapport de mise à jour envoyé pour %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVERTISSEMENT : SSL ne doit être activé qu'à des fins de test. Utilisez la " "terminaison SSL externe pour un déploiement en production." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite de descripteur de fichier. " "Exécution en tant que non root ?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite maximale de processus. " "Exécution en tant que non root ?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite de mémoire. Exécution en " "tant que non root ?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "Attente de %s secondes pour la fin de %s ; abandon" #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "Attente de %s secondes pour la fin de %s . En cours d'arrêt" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avertissement : impossible d'appliquer Ratelimit sans client memcached" #, python-format msgid "method %s is not allowed." msgstr "Méthode %s interdite." msgid "no log file found" msgstr "Pas de fichier log trouvé" msgid "odfpy not installed." msgstr "odfpy n'est pas installé." #, python-format msgid "plotting results failed due to %s" msgstr "Echec du traçage des résultats. Cause : %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib non installé." swift-2.7.0/swift/locale/zh_CN/0000775000567000056710000000000012675204211017411 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/zh_CN/LC_MESSAGES/0000775000567000056710000000000012675204211021176 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/zh_CN/LC_MESSAGES/swift.po0000664000567000056710000007706512675204037022717 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Pearl Yajing Tan(Seagate Tech) , 2014 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Linda , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev254\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-22 19:48+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-03-22 10:30+0000\n" "Last-Translator: Linda \n" "Language: zh-CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Chinese (China)\n" msgid "" "\n" "user quit" msgstr "" "\n" "用户退出" #, python-format msgid " - %s" msgstr "- %s" #, python-format msgid " - parallel, %s" msgstr "-平行,%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "%(checked)d后缀已被检查 %(hashed).2f%% hashed, %(synced).2f%% synced" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s的回应为未挂载" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions of %(device)d/" "%(dtotal)d (%(dpercentage).2f%%) devices reconstructed in %(time).2fs " "(%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(device)d/%(dtotal)d (%(dpercentage).2f%%) 设备的 %(reconstructed)d/" "%(total)d (%(percentage).2f%%) 分区已于 %(time).2fs 重构(%(rate).2f/秒,剩" "余 %(remaining)s)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) 分区被复制 持续时间为 \"\n" "\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s成功,%(failure)s失败" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 返回 503 在 %(statuses)s" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d无法运行(%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s)显示已停止" #, python-format msgid "%s already started..." msgstr "%s已启动..." 
#, python-format msgid "%s does not exist" msgstr "%s不存在" #, python-format msgid "%s is not mounted" msgstr "%s未挂载" #, python-format msgid "%s responded as unmounted" msgstr "%s 响应为未安装" #, python-format msgid "%s running (%s - %s)" msgstr "%s运行(%s - %s)" #, python-format msgid "%s: %s" msgstr "%s:%s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由同级重置连接" #, python-format msgid ", %s containers deleted" msgstr ",删除容器%s" #, python-format msgid ", %s containers possibly remaining" msgstr ",可能剩余容器%s" #, python-format msgid ", %s containers remaining" msgstr ",剩余容器%s" #, python-format msgid ", %s objects deleted" msgstr ",删除对象%s" #, python-format msgid ", %s objects possibly remaining" msgstr ",可能剩余对象%s" #, python-format msgid ", %s objects remaining" msgstr ",剩余对象%s" #, python-format msgid ", elapsed: %.02fs" msgstr ",耗时:%.02fs" msgid ", return codes: " msgstr ",返回代码:" msgid "Account" msgstr "账号" #, python-format msgid "Account %s has not been reaped since %s" msgstr "账号%s自%s起未被reaped" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "账号审计\"once\"模式完成: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "账号审计完成:%.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f seconds (%(rate).5f/s)尝试复制%(count)d dbs" #, python-format msgid "Audit Failed for %s: %s" msgstr "审计失败%s: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Bad rsync返还代码:%(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "开始账号审计\"once\"模式" msgid "Begin account audit pass." msgstr "开始账号审计通过" msgid "Begin container audit \"once\" mode" msgstr "开始容器审计\"once\" 模式" msgid "Begin container audit pass." msgstr "开始通过容器审计" msgid "Begin container sync \"once\" mode" msgstr "开始容器同步\"once\"模式" msgid "Begin container update single threaded sweep" msgstr "开始容器更新单线程扫除" msgid "Begin container update sweep" msgstr "开始容器更新扫除" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "开始对象审计\\\"%s\\\" 模式 (%s%s)" msgid "Begin object update single threaded sweep" msgstr "开始对象更新单线程扫除" msgid "Begin object update sweep" msgstr "开始对象更新扫除" #, python-format msgid "Beginning pass on account %s" msgstr "账号%s开始通过" msgid "Beginning replication run" msgstr "开始运行复制" msgid "Broker error trying to rollback locked connection" msgstr "服务器错误并尝试去回滚已经锁住的链接" #, python-format msgid "Can not access the file %s." msgstr "无法访问文件%s" #, python-format msgid "Can not load profile data from %s." msgstr "无法从%s下载分析数据" #, python-format msgid "Cannot read %s (%s)" msgstr "无法读取 %s (%s)" #, python-format msgid "Cannot write %s (%s)" msgstr "无法写入 %s (%s)" #, python-format msgid "Client did not read from proxy within %ss" msgstr "客户尚未从代理处读取%ss" msgid "Client disconnected on read" msgstr "客户读取时中断" msgid "Client disconnected without sending enough data" msgstr "客户中断 尚未发送足够" msgid "Client disconnected without sending last chunk" msgstr "客户机已断开连接而未发送最后一个数据块" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "客户路径%(client)s与对象元数据中存储的路径%(meta)s不符" msgid "" "Configuration option internal_client_conf_path not defined. 
Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "未定义配置选项 internal_client_conf_path。正在使用缺省配置。请参阅 internal-" "client.conf-sample 以了解各个选项" msgid "Connection refused" msgstr "连接被拒绝" msgid "Connection timeout" msgstr "连接超时" msgid "Container" msgstr "容器" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "容器审计\"once\"模式完成:%.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "容器审计通过完成: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "容器同步\"once\"模式完成:%.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "容器更新单线程扫除完成:%(elapsed).02fs, %(success)s 成功, %(fail)s 失败, " "%(no_change)s 无更改" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "容器更新扫除完成:%.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "通过路径%(path)s容器更新扫除完成:%(elapsed).02fs, %(success)s 成功, " "%(fail)s 失败, %(no_change)s 无更改" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "尝试过%s秒后无法捆绑%s:%s" #, python-format msgid "Could not load %r: %s" msgstr "无法下载%r: %s" #, python-format msgid "Data download error: %s" msgstr "数据下载错误:%s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "设备通过完成: %.02fs" #, python-format msgid "Directory %r does not map to a valid policy (%s)" msgstr "目录 %r 未映射至有效策略 (%s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "错误 %(status)d %(body)s 来自 %(type)s 服务器" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "错误 %(status)d %(body)s 来自 对象服务器 re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "发生 %(status)d 错误,需要 100 - 从对象服务器继续" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "尝试从容器服务器执行 %(method)s %(path)s 时发生 %(status)d 错误" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "出现错误 账号更新失败: %(ip)s:%(port)s/%(device)s (稍后尝试): 回应 " "%(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "出现错误 账号更新失败:本机数量与设备数量不符: \"%s\" vs \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "失败响应错误%(status)s来自%(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "错误 客户读取超时(%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "错误 容器更新失败:主机数量和设备数量不符合请求: \"%s\" vs \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "错误:无法获取账号信息%s" #, python-format msgid "ERROR Could not get container info %s" msgstr "错误:无法获取容器%s信息" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr 
"磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "错误 异常导致客户端中断连接" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "错误:向对象服务器 %s 传输数据时发生异常" msgid "ERROR Failed to get my own IPs?" msgstr "错误 无法获得我方IPs?" msgid "ERROR Insufficient Storage" msgstr "错误 存储空间不足" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "错误 Pickle问题 隔离%s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "错误 远程驱动器无法挂载 %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "同步错误 %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "同步时发生错误%s" #, python-format msgid "ERROR Trying to audit %s" msgstr "错误 尝试开始审计%s" msgid "ERROR Unhandled exception in request" msgstr "错误 未处理的异常发出请求" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "%(method)s %(path)s出现错误__call__ error" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "错误 账号更新失败%(ip)s:%(port)s/%(device)s (稍后尝试):" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "执行同步等待文件 文件名不可知%s" msgid "ERROR auditing" msgstr "错误 审计" #, python-format msgid "ERROR auditing: %s" msgstr "审计错误:%s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "读取HTTP错误 响应来源%s" #, python-format msgid "ERROR reading db %s" msgstr "错误 读取db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "错误 rsync失败 %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "错误 同步 %(file)s 和 节点%(node)s" msgid "ERROR trying to replicate" msgstr "尝试复制时发生错误" #, python-format msgid "ERROR while trying to clean up %s" msgstr "清理时出现错误%s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "执行下载压缩时发生错误%s" #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "%s未挂载" msgid "ERROR: An error occurred while retrieving segments" msgstr "错误:检索段时出错" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "出错,无法访问 %(path)s:%(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "错误:无法执行审计:%s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "%(action)s错误 高性能内存对象缓存: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "UTF-8编码错误:%s" msgid "Error hashing suffix" msgstr "执行Hashing后缀时发生错误" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "%r中mtime_check_interval出现错误:%s" #, python-format msgid "Error limiting server %s" msgstr "服务器出现错误%s " msgid "Error listing devices" msgstr "设备列表时出现错误" #, python-format msgid "Error on render profiling results: %s" msgstr "给予分析结果时发生错误:%s" msgid "Error 
parsing recon cache file" msgstr "解析recon cache file时出现错误" msgid "Error reading recon cache file" msgstr "读取recon cache file时出现错误" msgid "Error reading ringfile" msgstr "读取ringfile时出现错误" msgid "Error reading swift.conf" msgstr "读取swift.conf时出现错误" msgid "Error retrieving recon data" msgstr "检索recon data时出现错误" msgid "Error syncing handoff partition" msgstr "执行同步切换分区时发生错误" msgid "Error syncing partition" msgstr "执行同步分区时发生错误" #, python-format msgid "Error syncing with node: %s" msgstr "执行同步时节点%s发生错误" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "尝试重建 %(path)s 策略时出错:#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "错误:一个错误发生了" msgid "Error: missing config path argument" msgstr "错误:设置路径信息丢失" #, python-format msgid "Error: unable to locate %s" msgstr "错误:无法查询到 %s" msgid "Exception dumping recon cache" msgstr "执行dump recon的时候出现异常" msgid "Exception in top-level account reaper loop" msgstr "异常出现在top-level账号reaper环" msgid "Exception in top-level replication loop" msgstr "top-level复制圈出现异常" msgid "Exception in top-levelreconstruction loop" msgstr " top-levelreconstruction 环中发生异常" #, python-format msgid "Exception while deleting container %s %s" msgstr "执行删除容器时出现异常 %s %s" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "执行删除对象时发生异常%s %s %s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s出现异常" #, python-format msgid "Exception with account %s" msgstr "账号%s出现异常" #, python-format msgid "Exception with containers for account %s" msgstr "账号%s内容器出现异常" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "账号%(account)s容器%(container)s的对象出现异常" #, python-format msgid "Expect: 100-continue on %s" msgstr "已知:100-continue on %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "跟随CNAME链从%(given_domain)s到%(found_domain)s" msgid "Found configs:" msgstr "找到配置" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "Handoffs 优先方式仍有 handoffs。正在中止当前复制过程。" msgid "Host unreachable" msgstr "无法连接到主机" #, python-format msgid "Incomplete pass on account %s" msgstr "账号%s未完成通过" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "无效的X-Container-Sync-To格式%r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To中无效主机%r" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "不可用的等待输入%(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "从 %(full_path)s 返回了无效响应 %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "无效的回应%(resp)s来自%(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" #, python-format msgid "Killing long-running rsync: %s" msgstr "终止long-running同步: %s" #, python-format msgid "Loading JSON from %s failed (%s)" msgstr "从 %s 读取 JSON 失败 (%s)" msgid "Lockup detected.. killing live coros." 
msgstr "检测到lockup。终止正在执行的coros" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "集合%(given_domain)s到%(found_domain)s" #, python-format msgid "No %s running" msgstr "无%s账号运行" #, python-format msgid "No cluster endpoint for %r %r" msgstr "%r %r的集群节点不存在" #, python-format msgid "No permission to signal PID %d" msgstr "无权限发送信号PID%d" #, python-format msgid "No policy with index %s" msgstr "没有具备索引 %s 的策略" #, python-format msgid "No realm key for %r" msgstr "%r权限key不存在" #, python-format msgid "No space left on device for %s (%s)" msgstr "设备上没有可容纳 %s (%s) 的空间" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "节点错误极限 %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "没有足够的对象服务器应答(收到 %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "过去 %s 秒未重构任何对象。" #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s秒无复制" msgid "Object" msgstr "对象" msgid "Object PUT" msgstr "对象上传" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "对象 PUT 正在返回 202(对于 409):%(req_timestamp)s 小于或等于 " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "对象PUT返还 412,%(statuses)r " #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "对象审计 (%(type)s) \\\"%(mode)s\\\"模式完成: %(elapsed).02fs 隔离总数: " "%(quars)d, 错误总数: %(errors)d, 文件/秒总和:%(frate).2f, bytes/sec总和: " "%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "对象审计 (%(type)s). 自 %(start_time)s 开始: 本地:%(passes)d 通" "过,%(quars)d 隔离,%(errors)d 错误,文件/秒:%(frate).2f,字节/秒:" "%(brate).2f,总时间:%(total).2f,审计时间:%(audit).2f,速率:" "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "对象审计统计:%s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "对象重构完成(一次)。(%.02f 分钟)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "对象重构完成。(%.02f 分钟)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "对象复制完成(一次)。(%.02f minutes)" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "对象复制完成。(%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "对象服务器返还%s不匹配etags" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "对象更新单线程扫除完成:%(elapsed).02fs,%(success)s 成功, %(fail)s 失败" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "对象更新扫除完成:%.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "%(device)s对象更新扫除完成:%(elapsed).02fs, %(success)s成功, %(fail)s失败" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "在X-Container-Sync-To中,变量,查询和碎片不被允许" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "开始通过;%s可能容器;%s可能对象" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "%ds通过完成; %d对象过期" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "%ds目前通过;%d对象过期" msgid "Path required in X-Container-Sync-To" msgstr "在X-Container-Sync-To中路径是必须的" #, python-format msgid "Problem cleaning up %s" msgstr "问题清除%s" #, python-format msgid "Problem cleaning up %s (%s)" msgstr "清除 %s (%s) 时发生了问题" #, python-format msgid "Problem writing durable state file %s (%s)" msgstr "编写可持续状态文件 %s (%s) 时发生了问题" #, python-format msgid "Profiling Error: %s" msgstr "分析代码时出现错误:%s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(hsh_path)s和%(quar_path)s因为非目录" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(object_path)s和%(quar_path)s因为非目录" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "隔离%s和%s 因为%s数据库" #, python-format msgid "Quarantining DB %s" msgstr "隔离DB%s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "流量控制休眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "删除%(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "正在移除 %s 个对象" #, python-format msgid "Removing partition: %s" msgstr "移除分区:%s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "移除 pid 文件 %(pid_file)s 失败,pid %(pid)d 不正确" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "正在移除带有无效 pid 的 pid 文件 %s" #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" msgid "Replication run OVER" msgstr "复制运行结束" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "返回497因为黑名单:%s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,流量控制(Max \"\n" "\"Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "检测到环更改。正在中止当前重构过程。" msgid "Ring change detected. Aborting current replication pass." msgstr "Ring改变被检测到。退出现有的复制通过" #, python-format msgid "Running %s once" msgstr "运行%s一次" msgid "Running object reconstructor in script mode." msgstr "正以脚本方式运行对象重构程序。" msgid "Running object replicator in script mode." 
msgstr "在加密模式下执行对象复制" #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "发出信号%s pid: %s 信号: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "自%(time)s起:%(sync)s完成同步 [%(delete)s 删除, %(put)s 上传], \"\n" "\"%(skip)s 跳过, %(fail)s 失败" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "自%(time)s开始:账号审计:%(passed)s 通过审计,%(failed)s 失败" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "因无法挂载跳过%(device)s" #, python-format msgid "Skipping %s as it is not mounted" msgstr "挂载失败 跳过%s" #, python-format msgid "Starting %s" msgstr "启动%s" msgid "Starting object reconstruction pass." msgstr "正在启动对象重构过程。" msgid "Starting object reconstructor in daemon mode." msgstr "正以守护程序方式启动对象重构程序。" msgid "Starting object replication pass." msgstr "开始通过对象复制" msgid "Starting object replicator in daemon mode." msgstr "在守护模式下开始对象复制" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "成功的rsync %(src)s at %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "该文件类型被禁止访问!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "容器(%(total)s)内%(key)s总数不符合协议%(key)s总数(%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "%(action)s超时 高性能内存对象缓存: %(server)s" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 发生超时异常" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "尝试执行%(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "正尝试获取 %(full_path)s" #, python-format msgid "Trying to get %s status of PUT to %s" msgstr "正尝试将 PUT 的 %s 状态发送至 %s" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "尝试执行获取最后的PUT状态%s" msgid "Trying to read during GET" msgstr "执行GET时尝试读取" msgid "Trying to read during GET (retrying)" msgstr "执行GET时尝试读取(重新尝试)" msgid "Trying to send to client" msgstr "尝试发送到客户端" #, python-format msgid "Trying to sync suffixes with %s" msgstr "正尝试使后缀与 %s 同步" #, python-format msgid "Trying to write to %s" msgstr "尝试执行书写%s" msgid "UNCAUGHT EXCEPTION" msgstr "未捕获的异常" #, python-format msgid "Unable to find %s config section in %s" msgstr "无法在%s中查找到%s设置部分" #, python-format msgid "Unable to load internal client from config: %r (%s)" msgstr "无法从配置装入内部客户机:%r (%s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "无法查询到%s 保留为no-op" #, python-format msgid "Unable to locate config for %s" msgstr "找不到 %s 的配置" #, python-format msgid "Unable to locate config number %s for %s" msgstr "找不到 %s 的配置编号 %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "无法查询到fallocate, posix_fallocate。保存为no-op" #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "无法在目录 %s 上执行 fsync():%s" #, python-format msgid "Unable to read config from %s" msgstr "无法从%s读取设置" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未授权%(sync_from)r => %(sync_to)r" #, python-format msgid "Unexpected response: %s" msgstr "意外响应:%s" msgid "Unhandled exception" msgstr "未处理的异常" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "尝试获取 %(account)r %(container)r %(object)r 时发生未知异常" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s更新报告失败" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "更新报告发至%(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "警告:SSL仅可以做测试使用。产品部署时请使用外连SSL终端" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:无法修改文件描述限制。是否按非root运行?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:无法修改最大运行极限,是否按非root运行?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:无法修改内存极限,是否按非root运行?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "等待%s秒直到%s停止;放弃" #, python-format msgid "Waited %s seconds for %s to die; killing" msgstr "已消耗 %s 秒等待 %s 终止;正在终止" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:缺失缓存客户端 无法控制流量 " #, python-format msgid "method %s is not allowed." msgstr "方法%s不被允许" msgid "no log file found" msgstr "日志文件丢失" msgid "odfpy not installed." msgstr "odfpy未安装" #, python-format msgid "plotting results failed due to %s" msgstr "绘制结果图标时失败因为%s" msgid "python-matplotlib not installed." msgstr "python-matplotlib未安装" swift-2.7.0/swift/locale/ko_KR/0000775000567000056710000000000012675204211017415 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/ko_KR/LC_MESSAGES/0000775000567000056710000000000012675204211021202 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/ko_KR/LC_MESSAGES/swift.po0000664000567000056710000007332212675204037022713 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Mario Cho , 2014 # Ying Chun Guo , 2015 # Lucas Palm , 2015. #zanata # OpenStack Infra , 2015. #zanata # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev176\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-08 04:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-01-30 06:54+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: ko-KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Korean (South Korea)\n" msgid "" "\n" "user quit" msgstr "" "\n" "사용자 종료" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - 병렬, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d개 접미부를 검사함 - %(hashed).2f%%개 해시됨, %(synced).2f%%개 동" "기화됨" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s에서 마운트 해제된 것으로 응답함" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d(%(percentage).2f%%)개 파티션이 %(time).2f초" "(%(rate).2f/초, %(remaining)s 남음) 안에 복제됨" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s개 성공, %(failure)s개 실패" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s에서 %(statuses)s에 대해 503을 리턴함" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d이(가) 실행되지 않음(%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s(%s)이(가) 중지됨" #, python-format msgid "%s already started..." msgstr "%s이(가) 이미 시작되었음..." #, python-format msgid "%s does not exist" msgstr "%s이(가) 존재하지 않음" #, python-format msgid "%s is not mounted" msgstr "%s이(가) 마운트되지 않음" #, python-format msgid "%s running (%s - %s)" msgstr "%s 실행 중(%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 피어에서 연결 재설정" #, python-format msgid ", %s containers deleted" msgstr ", %s 지워진 컨테이너" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s 여분의 컨테이너" #, python-format msgid ", %s containers remaining" msgstr ", %s 남은 컨테이너" #, python-format msgid ", %s objects deleted" msgstr ", %s 지워진 오브젝트" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s o여분의 오브젝트" #, python-format msgid ", %s objects remaining" msgstr ", %s 남은 오브젝트" #, python-format msgid ", elapsed: %.02fs" msgstr ", 경과됨: %.02fs" msgid ", return codes: " msgstr ", 반환 코드들:" msgid "Account" msgstr "계정" #, python-format msgid "Account %s has not been reaped since %s" msgstr "계정 %s을(를) %s 이후에 얻지 못함" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "계정 감사 \"한 번\"모드가 완료: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "정상으로 판정난 계정: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "%(time).5f초(%(rate).5f/s)에 %(count)d개의 데이터베이스를 복제하려고 함" #, python-format msgid "Audit Failed for %s: %s" msgstr "검사 중 오류 %s: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "잘못된 rsync 리턴 코드: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "계정 감사 \"한 번\"모드로 시작" msgid "Begin account audit pass." msgstr "계정 검사 시작." 
msgid "Begin container audit \"once\" mode" msgstr "컨테이너 감사 \"일 회\" 모드 시작" msgid "Begin container audit pass." msgstr "컨테이너 감사 전달이 시작됩니다." msgid "Begin container sync \"once\" mode" msgstr "컨테이너 동기화 \"일 회\" 모드 시작" msgid "Begin container update single threaded sweep" msgstr "컨테이너 업데이트 단일 스레드 스윕 시작" msgid "Begin container update sweep" msgstr "컨테이너 업데이트 스윕 시작" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "오브젝트 감사 \"%s\" 모드(%s%s) 시작" msgid "Begin object update single threaded sweep" msgstr "오브젝트 업데이트 단일 스레드 스윕 시작" msgid "Begin object update sweep" msgstr "오브젝트 업데이트 스윕 시작" #, python-format msgid "Beginning pass on account %s" msgstr "계정 패스 시작 %s" msgid "Beginning replication run" msgstr "복제 실행 시작" msgid "Broker error trying to rollback locked connection" msgstr "잠긴 연결을 롤백하는 중 브로커 오류 발생" #, python-format msgid "Can not access the file %s." msgstr "파일 %s에 액세스할 수 없습니다." #, python-format msgid "Can not load profile data from %s." msgstr "%s에서 프로파일 데이터를 로드할 수 없습니다." #, python-format msgid "Client did not read from proxy within %ss" msgstr "클라이언트에서 %ss 내에 프록시를 읽을 수 없었음" msgid "Client disconnected on read" msgstr "읽기 시 클라이언트 연결이 끊어짐" msgid "Client disconnected without sending enough data" msgstr "데이터를 모두 전송하기 전에 클라이언트 연결이 끊어짐" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "클라이언트 경로 %(client)s이(가) 오브젝트 메타데이터 %(meta)s에 저장된 경로" "와 일치하지 않음" msgid "Connection refused" msgstr "연결이 거부됨" msgid "Connection timeout" msgstr "연결 제한시간 초과" msgid "Container" msgstr "컨테이너" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "컨테이너 감사 \"일 회\" 모드 완료: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "컨테이너 감사 전달 완료: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "컨테이너 동기화 \"일 회\" 모드 완료: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "컨테이너 업데이트 단일 스레드 스윕 완료: %(elapsed).02fs, %(success)s개 성" "공, %(fail)s개 실패, %(no_change)s개 변경 없음" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "컨테이너 업데이트 스윕 완료: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s의 컨테이너 업데이트 스윕 완료: %(elapsed).02fs, %(success)s개 성공, " "%(fail)s개 실패, %(no_change)s개 변경 없음" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "%s초 동안 시도한 후 %s:%s에 바인드할 수 없음" #, python-format msgid "Could not load %r: %s" msgstr "%r을(를) 로드할 수 없음: %s" #, python-format msgid "Data download error: %s" msgstr "데이터 다운로드 오류: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "장치 패스 완료 : %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "오류 %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "오류 %(status)d %(body)s, %(type)s 서버 발신" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "오류 %(status)d %(body)s, 오브젝트 서버 발신, 회신: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "오류 %(status)d. 
예상: 100-continue, 오브젝트 서버 발신" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "오류 %(status)d, 컨테이너 서버에서 %(method)s %(path)s 시도 중" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 업데이트 실패(나중에 다시 시도): " "응답 %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "오류. 계정 업데이트 실패: 다음 요청에서 호스트 및 디바이스 수가 서로 다름: " "\"%s\" 대 \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "오류. %(host)s의 잘못된 응답 %(status)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR 클라이언트 읽기 시간 초과 (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "오류. 컨테이너 업데이트 실패(이후 비동기 업데이트용으로 저장): %(status)d응" "답. 출처: %(ip)s:%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "오류. 컨테이너 업데이트 실패: 다음 요청에서 호스트 및 디바이스 수가 서로 다" "름: \"%s\" 대 \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "오류는 %s의 계정 정보를 얻을 수 없습니다" #, python-format msgid "ERROR Could not get container info %s" msgstr "오류. 컨테이너 정보 %s을(를) 가져올 수 없음" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "오류. 디스크 파일 %(data_file)s 닫기 실패: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "오류. 예외로 인해 클라이언트 연결이 끊어짐" msgid "ERROR Failed to get my own IPs?" msgstr "오류. 자체 IP를 가져오는 중 오류 발생 여부" msgid "ERROR Insufficient Storage" msgstr "오류. 스토리지 공간이 충분하지 않음" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "오류. 오브젝트 %(obj)s의 감사가 실패하여 격리됨: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "오류. 문제가 발생함, %s 격리 중" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "오류. 원격 드라이브가 마운트되지 않음. %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "%(db_file)s %(row)s 동기화 오류" #, python-format msgid "ERROR Syncing %s" msgstr "%s 동기화 오류" #, python-format msgid "ERROR Trying to audit %s" msgstr "%s 감사 중 오류 발생" msgid "ERROR Unhandled exception in request" msgstr "오류. 요청에 처리되지 않은 예외가 있음" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "오류. %(method)s %(path)s에 __call__ 오류 발생" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 업데이트 실패(나중에 다시 시도)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 업데이트 실패(나중에 다시 시도): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "오류. 비동기 보류 파일에 예상치 못한 이름 %s을(를) 사용함" msgid "ERROR auditing" msgstr "검사 오류" #, python-format msgid "ERROR auditing: %s" msgstr "감사 오류: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "오류. 
%(ip)s:%(port)s/%(dev)s(으)로 컨테이너 업데이트 실패(이후 비동기 업데이" "트용으로 저장)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "%s에서 HTTP 응답을 읽는 중 오류 발생" #, python-format msgid "ERROR reading db %s" msgstr "데이터베이스 %s을(를) 읽는 중 오류 발생" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "오류. %(code)s의 rsync가 실패함: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "%(file)s을(를) 노드 %(node)s과(와) 동기화하는 중 오류 발생" msgid "ERROR trying to replicate" msgstr "복제 중 오류 발생" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s 정리 중 오류 발생" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s 서버 %(ip)s:%(port)s/%(device)s 오류, 회신: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "%s에서 억제를 로드하는 중 오류 발생: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "원격 서버 %(ip)s:%(port)s/%(device)s에 오류 발생" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "오류: 드라이브 파티션에 대한 경로를 가져오지 못함: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "오류: 세그먼트를 검색하는 중 오류 발생" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "오류: %(path)s에 액세스할 수 없음: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "오류: 감사를 실행할 수 없음: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "memcached에 대한 %(action)s 오류: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "UTF-8: %s 으로 변환 오류" msgid "Error hashing suffix" msgstr "접미부를 해싱하는 중 오류 발생" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "%r에서 mtime_check_interval 오류 발생: %s" #, python-format msgid "Error limiting server %s" msgstr "서버 %s 제한 오류" msgid "Error listing devices" msgstr "디바이스 나열 중 오류 발생" #, python-format msgid "Error on render profiling results: %s" msgstr "프로파일링 결과를 렌더링하는 중 오류 발생: %s" msgid "Error parsing recon cache file" msgstr "조정 캐시 파일을 구문 분석하는 중 오류 발생" msgid "Error reading recon cache file" msgstr "조정 캐시 파일을 읽는 중 오류 발생" msgid "Error reading ringfile" msgstr "링 파일을 읽는 중 오류 발생" msgid "Error reading swift.conf" msgstr "swift.conf를 읽는 중 오류 발생" msgid "Error retrieving recon data" msgstr "조정 데이터를 검색하는 중에 오류 발생" msgid "Error syncing handoff partition" msgstr "핸드오프 파티션 동기화 중 오류 발생" msgid "Error syncing partition" msgstr "파티션 동기 오류 " #, python-format msgid "Error syncing with node: %s" msgstr "노드 동기 오류: %s" msgid "Error: An error occurred" msgstr "오류: 오류 발생" msgid "Error: missing config path argument" msgstr "오류: 구성 경로 인수 누락" #, python-format msgid "Error: unable to locate %s" msgstr "오류: %s을(를) 찾을 수 없음" msgid "Exception dumping recon cache" msgstr "조정 캐시 덤프 중 예외 발생" msgid "Exception in top-level account reaper loop" msgstr "최상위 계정 루프의 예외 " msgid "Exception in top-level replication loop" msgstr "최상위 레벨 복제 루프에서 예외 발생" #, python-format msgid "Exception while deleting container %s %s" msgstr "컨테이너 %s %s 삭제 중 예외 발생" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "오브젝트 %s %s %s 삭제 중 예외 발생" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 예외" #, python-format msgid "Exception with account %s" msgstr "예외 계정 %s" #, python-format msgid "Exception with containers for account %s" msgstr "계정 콘테이너의 예외 %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "계정 %(account)s의 컨테이너 %(container)s에 대한 오브젝트에 
예외 발생" #, python-format msgid "Expect: 100-continue on %s" msgstr "%s에서 100-continue 예상" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s에서 %(found_domain)s(으)로의 다음 CNAME 체인" msgid "Found configs:" msgstr "구성 발견:" msgid "Host unreachable" msgstr "호스트 도달 불가능" #, python-format msgid "Incomplete pass on account %s" msgstr "계정 패스 미완료 %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "올바르지 않은 X-Container-Sync-To 형식 %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To에 올바르지 않은 호스트 %r이(가) 있음" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "올바르지 않은 보류 항목 %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s의 올바르지 않은 응답 %(resp)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To 올바르지 않은 스키마 %r이(가) 있습니다. \"//\", \"http\" " "또는 \"https\"여야 합니다." #, python-format msgid "Killing long-running rsync: %s" msgstr "장기 실행 중인 rsync 강제 종료: %s" msgid "Lockup detected.. killing live coros." msgstr "잠금 발견.. 활성 coros를 강제 종료합니다." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s을(를) %(found_domain)s(으)로 맵핑함" #, python-format msgid "No %s running" msgstr "%s이(가) 실행되지 않음" #, python-format msgid "No cluster endpoint for %r %r" msgstr "%r %r에 대한 클러스터 엔드포인트가 없음" #, python-format msgid "No permission to signal PID %d" msgstr "PID %d을(를) 표시할 권한이 없음" #, python-format msgid "No realm key for %r" msgstr "%r에 대한 영역 키가 없음" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "노드 오류로 %(ip)s:%(port)s(%(device)s)이(가) 제한됨" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "찾을 수 없음 %(sync_from)r => %(sync_to)r - 오브젝" "트%(obj_name)r" #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s초 동안 복제된 것이 없습니다." msgid "Object" msgstr "오브젝트" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Object PUT에서 409에 대해 202를 리턴함: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Object PUT에서 412를 리턴함, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "오브젝트 감사(%(type)s) \"%(mode)s\" 모드 완료: %(elapsed).02fs. 총 격리 항" "목: %(quars)d, 총 오류 수: %(errors)d, 총 파일/초: %(frate).2f, 총 바이트/" "초: %(brate).2f, 감사 시간: %(audit).2f, 속도: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "오브젝트 감사 통계: %s" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "오브젝트 복제 완료(일 회). (%.02f분)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "오브젝트 복제 완료. 
(%.02f분)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "오브젝트 서버에서 %s개의 불일치 etag를 리턴함" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "오브젝트 업데이트 단일 스레드 스윕 완료: %(elapsed).02fs, %(success)s개 성" "공, %(fail)s개 실패" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "오브젝트 업데이트 스윕 완료: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "%(device)s의 오브젝트 업데이트 스윕 완료: %(elapsed).02fs, %(success)s개 성" "공, %(fail)s개 실패" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-To에 매개변수, 조회, 단편이 허용되지 않음" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "파티션 시간: 최대 %(max).4f초, 최소 %(min).4f초, 중간 %(med).4f초" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "전달 시작, %s개의 컨테이너 사용 가능, %s개의 오브젝트 사용 가능" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "%d초 안에 전달이 완료됨. %d개의 오브젝트가 만료됨" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "현재 %d개 전달, %d개의 오브젝트가 만료됨" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To에 경로가 필요함" #, python-format msgid "Problem cleaning up %s" msgstr "%s 정리 문제 발생" #, python-format msgid "Profiling Error: %s" msgstr "프로파일링 오류: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "디렉토리가 아니어서 %(hsh_path)s을(를) %(quar_path)s에 격리함" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "디렉토리가 아니어서 %(object_path)s을(를) %(quar_path)s에 격리함" #, python-format msgid "Quarantined %s to %s due to %s database" msgstr "%s을(를) %s에 격리. 원인: %s 데이터베이스" #, python-format msgid "Quarantining DB %s" msgstr "데이터베이스 %s 격리" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "%(account)s/%(container)s/%(object)s에 대한 Ratelimit 휴면 로그: %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d 데이터베이스를 제거함" #, python-format msgid "Removing %s objects" msgstr "%s 오브젝트 제거 중" #, python-format msgid "Removing partition: %s" msgstr "파티션 제거: %s" #, python-format msgid "Removing stale pid file %s" msgstr "시간이 경과된 pid 파일 %s을(를) 제거하는 중 " msgid "Replication run OVER" msgstr "복제 실행 대상" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "블랙리스트 지정으로 인해 497이 리턴됨: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s(으)로 %(meth)s에 대한 498을 리턴합니다. 전송률 제한" "(최대 휴면) %(e)s" msgid "Ring change detected. Aborting current replication pass." msgstr "링 변경이 발견되었습니다. 현재 복제 전달을 중단합니다." #, python-format msgid "Running %s once" msgstr "%s을(를) 한 번 실행" msgid "Running object replicator in script mode." msgstr "오브젝트 복제자를 스크립트 모드로 실행 중입니다." 
#, python-format msgid "Signal %s pid: %s signal: %s" msgstr "신호 %s pid: %s 신호: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s 이후: %(sync)s 동기화됨 [%(delete)s 삭제, %(put)s 배치], %(skip)s 건" "너뜀, %(fail)s 실패" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "검사 경과 시간 %(time)s: 계정 검사A: %(passed)s 정상 ,%(failed)s 실패" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "%(time)s 이후: 컨테이너 감사: %(pass)s 감사 전달, %(fail)s 감사 실패" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "마운트되지 않았으므로 %(device)s을(를) 건너뜀" #, python-format msgid "Skipping %s as it is not mounted" msgstr "마운트되지 않는 %s를 건너 뛰기" #, python-format msgid "Starting %s" msgstr "%s 시작 중" msgid "Starting object replication pass." msgstr "오브젝트 복제 전달을 시작합니다." msgid "Starting object replicator in daemon mode." msgstr "오브젝트 복제자를 디먼 모드로 시작합니다." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s(%(time).03f)에서 %(src)s의 rsync 성공" msgid "The file type are forbidden to access!" msgstr "이 파일 유형에 대한 액세스가 금지되었습니다!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "컨테이너의 총 %(key)s가 (%(total)s) 과 %(key)s의 총합 (%(sum)s)가 일치하지 " "않습니다." #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "memcached에 대한 %(action)s 제한시간 초과: %(server)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s 시도 중" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "PUT의 최종 상태를 %s(으)로 가져오는 중" msgid "Trying to read during GET" msgstr "가져오기 중 읽기를 시도함" msgid "Trying to read during GET (retrying)" msgstr "가져오기(재시도) 중 읽기를 시도함" msgid "Trying to send to client" msgstr "클라이언트로 전송 시도 중" #, python-format msgid "Trying to write to %s" msgstr "%s에 쓰기 시도 중" msgid "UNCAUGHT EXCEPTION" msgstr "미발견 예외" #, python-format msgid "Unable to find %s config section in %s" msgstr "%s 구성 섹션을 %s에서 찾을 수 없음" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "libc에서 %s을(를) 찾을 수 없습니다. no-op로 남겨 둡니다." msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "libc에서 fallocate, posix_fallocate를 찾을 수 없습니다. no-op로 남겨 둡니다." #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "%s 디렉토리에서 fsync()를 수행할 수 없음: %s" #, python-format msgid "Unable to read config from %s" msgstr "%s에서 구성을 읽을 수 없음" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "권한 부여 해제 %(sync_from)r => %(sync_to)r" #, python-format msgid "Unexpected response: %s" msgstr "예상치 않은 응답: %s" msgid "Unhandled exception" msgstr "처리되지 않은 예외" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s의 업데이트 보고서 실패" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s의 업데이트 보고서를 발송함" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "경고: SSL은 테스트용으로만 사용해야 합니다. 프로덕션 배치에는 외부 SSL 종료" "를 사용하십시오." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "경고: 파일 디스크립터 한계를 수정할 수 없습니다. 비루트로 실행 중인지 확인하" "십시오." msgid "WARNING: Unable to modify max process limit. 
Running as non-root?" msgstr "" "경고: 최대 프로세스 한계를 수정할 수 없습니다. 비루트로 실행 중인지 확인하십" "시오." msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "경고: 메모리 한계를 수정할 수 없습니다. 비루트로 실행 중인지 확인하십시오." #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "%s초 동안 %s의 종료를 대기함, 포기하는 중" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "경고: memcached 클라이언트 없이 전송률을 제한할 수 없음" #, python-format msgid "method %s is not allowed." msgstr "메소드 %s이(가) 허용되지 않습니다." msgid "no log file found" msgstr "로그 파일을 찾을 수 없음" msgid "odfpy not installed." msgstr "odfpy가 설치되어 있지 않습니다." #, python-format msgid "plotting results failed due to %s" msgstr "%s(으)로 인해 결과 표시 실패" msgid "python-matplotlib not installed." msgstr "python-matplotlib가 설치되어 있지 않습니다." swift-2.7.0/swift/locale/es/0000775000567000056710000000000012675204211017017 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/es/LC_MESSAGES/0000775000567000056710000000000012675204211020604 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/locale/es/LC_MESSAGES/swift.po0000664000567000056710000007403512675204037022317 0ustar jenkinsjenkins00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Carlos A. Muñoz , 2015. #zanata # OpenStack Infra , 2015. #zanata # Tom Cocozzello , 2015. #zanata msgid "" msgstr "" "Project-Id-Version: swift 2.6.1.dev176\n" "Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" "POT-Creation-Date: 2016-03-08 04:09+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2015-09-09 05:36+0000\n" "Last-Translator: Carlos A. Muñoz \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.7.3\n" "Language-Team: Spanish\n" msgid "" "\n" "user quit" msgstr "" "\n" "salida del usuario" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sufijos comprobados - %(hashed).2f%% con hash, %(synced).2f%% " "sincronizados" #, python-format msgid "%(ip)s/%(device)s responded as unmounted" msgstr "%(ip)s/%(device)s han respondido como desmontados" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) particiones replicadas en " "%(time).2fs (%(rate).2f/segundo, %(remaining)s restantes)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s éxitos, %(failure)s fallos" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s devuelve 503 para %(statuses)s" #, python-format msgid "%s #%d not running (%s)" msgstr "%s #%d no está en ejecución (%s)" #, python-format msgid "%s (%s) appears to have stopped" msgstr "%s (%s) parece haberse detenido" #, python-format msgid "%s already started..." msgstr "%s ya está iniciado..." 
#, python-format msgid "%s does not exist" msgstr "%s no existe" #, python-format msgid "%s is not mounted" msgstr "%s no está montado" #, python-format msgid "%s running (%s - %s)" msgstr "%s en ejecución (%s - %s)" #, python-format msgid "%s: %s" msgstr "%s: %s" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Restablecimiento de conexión por igual" #, python-format msgid ", %s containers deleted" msgstr ", %s contenedores suprimidos" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s contenedores posiblemente restantes" #, fuzzy, python-format msgid ", %s containers remaining" msgstr ", %s contenedores restantes" #, fuzzy, python-format msgid ", %s objects deleted" msgstr ", %s objetos suprimidos" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objetos posiblemente restantes" #, fuzzy, python-format msgid ", %s objects remaining" msgstr ", %s objectos restantes" #, python-format msgid ", elapsed: %.02fs" msgstr ", transcurrido: %.02fs" #, fuzzy msgid ", return codes: " msgstr ", códigos de retorno:" msgid "Account" msgstr "Cuenta" #, python-format msgid "Account %s has not been reaped since %s" msgstr "La cuenta %s no se ha cosechado desde %s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoría de cuenta en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Paso de auditoría de cuenta finalizado: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Se han intentado replicar %(count)d bases de datos en %(time).5f segundos " "(%(rate).5f/s)" #, python-format msgid "Audit Failed for %s: %s" msgstr "La auditoría ha fallado para %s: %s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de resincronización erróneo: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Comenzar auditoría de cuenta en modalidad de \"una vez\"" msgid "Begin account audit pass." msgstr "Comenzar a pasar la auditoría de cuenta." msgid "Begin container audit \"once\" mode" msgstr "Comenzar auditoría de contenedor en modalidad de \"una vez\"" msgid "Begin container audit pass." msgstr "Comenzar a pasar la auditoría de contenedor." msgid "Begin container sync \"once\" mode" msgstr "Comenzar sincronización de contenedor en modalidad de \"una vez\"" msgid "Begin container update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del contenedor" msgid "Begin container update sweep" msgstr "Comenzar el barrido de actualización del contenedor" #, python-format msgid "Begin object audit \"%s\" mode (%s%s)" msgstr "Comenzar auditoría de objetos en modalidad \"%s\" (%s%s)" msgid "Begin object update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del objeto" msgid "Begin object update sweep" msgstr "Comenzar el barrido de actualización del objeto" #, python-format msgid "Beginning pass on account %s" msgstr "Iniciando el paso en la cuenta %s" msgid "Beginning replication run" msgstr "Iniciando la ejecución de la replicación" msgid "Broker error trying to rollback locked connection" msgstr "Error de intermediario al intentar retrotraer una conexión bloqueada" #, python-format msgid "Can not access the file %s." msgstr "No se puede acceder al archivo %s." #, python-format msgid "Can not load profile data from %s." msgstr "No se pueden cargar los datos de perfil desde %s." 
#, python-format msgid "Client did not read from proxy within %ss" msgstr "El cliente pudo realizar la lectura desde el proxy en %ss" msgid "Client disconnected on read" msgstr "El cliente se ha desconectado de la lectura" msgid "Client disconnected without sending enough data" msgstr "El cliente se ha desconectado sin enviar suficientes datos" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "La vía de acceso de cliente %(client)s no coincide con la vía de acceso " "almacenada en los metadatos de objeto %(meta)s" msgid "Connection refused" msgstr "Conexión rechazada" msgid "Connection timeout" msgstr "Tiempo de espera de conexión agotado" msgid "Container" msgstr "Contenedor" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Auditoría de contenedor en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Paso de auditoría de contenedor finalizado: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" "Sincronización de contenedor en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Barrido de hebra única de actualización del contenedor finalizado: " "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin " "cambios" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Barrido de actualización del contenedor finalizado: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Barrido de actualización del contenedor de %(path)s finalizado: " "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin " "cambios" #, python-format msgid "Could not bind to %s:%s after trying for %s seconds" msgstr "No se puede enlazar a %s:%s después de intentar por %s segundos" #, python-format msgid "Could not load %r: %s" msgstr "No se ha podido cargar %r: %s" #, python-format msgid "Data download error: %s" msgstr "Error de descarga de datos: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Paso de dispositivos finalizado: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERROR %(status)d %(body)s Desde el servidor %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERROR %(status)d %(body)s Desde el servidor de objeto re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERROR %(status)d Esperado: 100-continuo Desde el servidor de objeto" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)sFrom Container Server" msgstr "" "ERROR %(status)d Intentando %(method)s %(path)sDesde el servidor de " "contenedor" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde): Respuesta %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different 
numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERROR La actualización de la cuenta ha fallado: hay números distintos de " "hosts y dispositivos en la solicitud: \"%s\" frente a \"%s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERROR Respuesta errónea %(status)s desde %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR Tiempo de espera de lectura de cliente agotado (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERROR La actualización del contenedor ha fallado (guardando para una " "actualización asíncrona posterior): %(status)d respuesta desde %(ip)s:" "%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%s\" vs \"%s\"" msgstr "" "ERROR La actualización del contenedor ha fallado: hay números distintos de " "hosts y dispositivos en la solicitud: \"%s\" frente a \"%s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR No se ha podido obtener la información de cuenta %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERROR No se ha podido obtener la información de contenedor %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "ERROR Fallo al cerrar el archivo de disco %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERROR Excepción que provoca la desconexión del cliente" msgid "ERROR Failed to get my own IPs?" msgstr "ERROR ¿No puedo obtener mis propias IP?" msgid "ERROR Insufficient Storage" msgstr "ERROR No hay suficiente almacenamiento" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERROR La auditoría del objeto %(obj)s ha fallado y se ha puesto en " "cuarentena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERROR Problema de desorden, poniendo %s en cuarentena" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERROR Unidad remota no montada %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERROR al sincronizar %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERROR al sincronizar %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERROR al intentar la auditoría de %s" msgid "ERROR Unhandled exception in request" msgstr "ERROR Excepción no controlada en la solicitud" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR Error de __call__ con %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "ERROR Archivo pendiente de sincronización asíncrona con nombre inesperado %s" msgid "ERROR auditing" msgstr "ERROR de auditoría" #, python-format msgid "ERROR auditing: %s" msgstr "ERROR en la auditoría: %s" #, python-format msgid "" "ERROR container update failed with 
%(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERROR La actualización del contenedor ha fallado con %(ip)s:%(port)s/%(dev)s " "(guardando para una actualización asíncrona posterior)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERROR al leer la respuesta HTTP desde %s" #, python-format msgid "ERROR reading db %s" msgstr "ERROR al leer la base de datos %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERROR La resincronización ha fallado con %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERROR al sincronizar %(file)s con el nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERROR al intentar la replicación" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERROR al intentar limpiar %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERROR con el servidor %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERROR con las supresiones de carga desde %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERROR con el servidor remoto %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "ERROR: No se han podido obtener las vías de acceso a las particiones de " "unidad: %s" msgid "ERROR: An error occurred while retrieving segments" msgstr "ERROR: se ha producido un error al recuperar los segmentos" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERROR: no se ha podido acceder a %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERROR: no se ha podido ejecutar la auditoría: %s" #, python-format msgid "Error %(action)s to memcached: %(server)s" msgstr "%(action)s de error para memcached: %(server)s" #, python-format msgid "Error encoding to UTF-8: %s" msgstr "Error en la codificación a UTF-8: %s" msgid "Error hashing suffix" msgstr "Error en el hash del sufijo" #, python-format msgid "Error in %r with mtime_check_interval: %s" msgstr "Error en %r con mtime_check_interval: %s" #, python-format msgid "Error limiting server %s" msgstr "Error al limitar el servidor %s" msgid "Error listing devices" msgstr "Error al mostrar los dispositivos" #, python-format msgid "Error on render profiling results: %s" msgstr "Error al representar los resultados de perfil: %s" msgid "Error parsing recon cache file" msgstr "Error al analizar el archivo de memoria caché de recon" msgid "Error reading recon cache file" msgstr "Error al leer el archivo de memoria caché de recon" msgid "Error reading ringfile" msgstr "Error al leer el ringfile" msgid "Error reading swift.conf" msgstr "Error al leer swift.conf" msgid "Error retrieving recon data" msgstr "Error al recuperar los datos de recon" msgid "Error syncing handoff partition" msgstr "Error al sincronizar la partición de transferencia" msgid "Error syncing partition" msgstr "Error al sincronizar la partición" #, python-format msgid "Error syncing with node: %s" msgstr "Error en la sincronización con el nodo: %s" msgid "Error: An error occurred" msgstr "Error: se ha producido un error" msgid "Error: missing config path argument" msgstr "Error: falta el argumento de vía de acceso de configuración" #, python-format msgid "Error: unable to locate %s" msgstr "Error: no se ha podido localizar %s" msgid "Exception dumping recon cache" msgstr 
"Excepción al volcar la memoria caché de recon" msgid "Exception in top-level account reaper loop" msgstr "Excepción en el bucle cosechador de cuenta de nivel superior" msgid "Exception in top-level replication loop" msgstr "Excepción en el bucle de réplica de nivel superior" #, python-format msgid "Exception while deleting container %s %s" msgstr "Excepción al suprimir el contenedor %s %s" #, python-format msgid "Exception while deleting object %s %s %s" msgstr "Excepción al suprimir el objeto %s %s %s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Excepción con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Excepción con la cuenta %s" #, python-format msgid "Exception with containers for account %s" msgstr "Excepción con los contenedores para la cuenta %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Excepción con objetos para el contenedor %(container)s para la cuenta " "%(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Esperado: 100-continuo en %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Siguiente cadena CNAME de %(given_domain)s a %(found_domain)s" msgid "Found configs:" msgstr "Configuraciones encontradas:" msgid "Host unreachable" msgstr "Host no alcanzable" #, python-format msgid "Incomplete pass on account %s" msgstr "Paso incompleto en la cuenta %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato de X-Container-Sync-To no válido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host no válido %r en X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendiente no válida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Respuesta no válida %(resp)s desde %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Esquema no válido %r en X-Container-Sync-To, debe ser \"//\", \"http\" o " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Interrumpiendo resincronización (rsync) de larga duración: %s" msgid "Lockup detected.. killing live coros." msgstr "Bloqueo detectado. Interrumpiendo coros activos." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Se ha correlacionado %(given_domain)s con %(found_domain)s" #, python-format msgid "No %s running" msgstr "Ningún %s en ejecución" #, python-format msgid "No cluster endpoint for %r %r" msgstr "No hay punto final de clúster para %r %r" #, python-format msgid "No permission to signal PID %d" msgstr "No hay permiso para señalar el PID %d" #, python-format msgid "No realm key for %r" msgstr "No hay clave de dominio para %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Error de nodo limitado %(ip)s:%(port)s (%(device)s)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "No se ha encontrado %(sync_from)r => %(sync_to)r - " "objeto %(obj_name)rd" #, python-format msgid "Nothing replicated for %s seconds." msgstr "No se ha replicado nada durante %s segundos." 
msgid "Object" msgstr "Objeto" msgid "Object PUT" msgstr "Objeto PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "El objeto PUT devuelve 202 para 409: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "El objeto PUT devuelve 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoría de objetos (%(type)s) en modalidad \"%(mode)s\" finalizada: " "%(elapsed).02fs. Total en cuarentena: %(quars)d, Errores totales: " "%(errors)d, Archivos totales por segundo: %(frate).2f, Bytes totales por " "segundo: %(brate).2f, Tiempo de auditoría: %(audit).2f, Velocidad: " "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Estadísticas de auditoría de objetos: %s" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Réplica de objeto finalizada (una vez). (%.02f minutos)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Réplica de objeto finalizada. (%.02f minutos)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" "Los servidores de objeto han devuelvo %s etiquetas (etags) no coincidentes" #, python-format msgid "" "Object update single threaded sweep completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Barrido de hebra única de actualización del objeto finalizado: " "%(elapsed).02fs, %(success)s éxitos, %(fail)s fallos" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Barrido de actualización del objeto finalizado: %.02fs" #, python-format msgid "" "Object update sweep of %(device)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures" msgstr "" "Barrido de actualización del objeto de %(device)s finalizado: " "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parámetros, consultas y fragmentos no permitidos en X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Tiempos de partición: máximo %(max).4fs, mínimo %(min).4fs, medio %(med).4fs" #, python-format msgid "Pass beginning; %s possible containers; %s possible objects" msgstr "Inicio del paso; %s posibles contenedores; %s posibles objetos" #, python-format msgid "Pass completed in %ds; %d objects expired" msgstr "Paso completado en %ds; %d objetos caducados" #, python-format msgid "Pass so far %ds; %d objects expired" msgstr "Paso hasta ahora %ds; %d objetos caducados" msgid "Path required in X-Container-Sync-To" msgstr "Vía de acceso necesaria en X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problema al limpiar %s" #, python-format msgid "Profiling Error: %s" msgstr "Error de perfil: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Se ha puesto en cuarentena %(hsh_path)s en %(quar_path)s debido a que no es " "un directorio" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Se ha puesto en cuarentena %(object_path)s en %(quar_path)s debido a que no " "es un directorio" #, python-format msgid 
"Quarantined %s to %s due to %s database" msgstr "%s de %s en cuarentena debido a la base de datos %s" #, python-format msgid "Quarantining DB %s" msgstr "Poniendo en cuarentena la base de datos %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ajuste de límite de registro de suspensión: %(sleep)s para %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Se han eliminado %(remove)d bases de datos" #, python-format msgid "Removing %s objects" msgstr "Eliminando %s objetos" #, python-format msgid "Removing partition: %s" msgstr "Eliminando partición: %s" #, python-format msgid "Removing stale pid file %s" msgstr "Eliminando fichero de identificación positiva obsoleto %s" msgid "Replication run OVER" msgstr "Ejecución de la replicación finalizada" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Se devuelven 497 debido a las listas negras: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Se devuelven 498 de %(meth)s a %(acc)s/%(cont)s/%(obj)s. Ajuste de límite " "(suspensión máxima) %(e)s" msgid "Ring change detected. Aborting current replication pass." msgstr "Cambio de anillo detectado. Abortando el pase de réplica actual." #, python-format msgid "Running %s once" msgstr "Ejecutando %s una vez" msgid "Running object replicator in script mode." msgstr "Ejecutando replicador de objeto en modalidad de script." #, python-format msgid "Signal %s pid: %s signal: %s" msgstr "Señal %s pid: %s señal: %s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Desde %(time)s: %(sync)s se han sincronizado [%(delete)s supresiones, " "%(put)s colocaciones], %(skip)s se han omitido, %(fail)s han fallado" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Desde %(time)s: Auditorías de cuenta: %(passed)s han pasado la auditoría," "%(failed)s han fallado la auditoría" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Desde %(time)s: Auditorías de contenedor: %(pass)s han pasado la auditoría," "%(fail)s han fallado la auditoría" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Omitiendo %(device)s, ya que no está montado" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Omitiendo %s, ya que no está montado" #, python-format msgid "Starting %s" msgstr "Iniciando %s" msgid "Starting object replication pass." msgstr "Iniciando el paso de réplica de objeto." msgid "Starting object replicator in daemon mode." msgstr "Iniciando replicador de objeto en modalidad de daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" "Resincronización de %(src)s realizada con éxito en %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "El acceso al tipo de archivo está prohibido." 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "El total de %(key)s del contenedor (%(total)s) no coincide con la suma de " "%(key)s en las políticas (%(sum)s)" #, python-format msgid "Timeout %(action)s to memcached: %(server)s" msgstr "%(action)s de tiempo de espera para memcached: %(server)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Intentando %(method)s %(path)s" #, python-format msgid "Trying to get final status of PUT to %s" msgstr "Intentando obtener el estado final de PUT en %s" msgid "Trying to read during GET" msgstr "Intentado leer durante GET" msgid "Trying to read during GET (retrying)" msgstr "Intentando leer durante GET (reintento)" msgid "Trying to send to client" msgstr "Intentando enviar al cliente" #, python-format msgid "Trying to write to %s" msgstr "Intentando escribir en %s" msgid "UNCAUGHT EXCEPTION" msgstr "UNCAUGHT EXCEPTION" #, python-format msgid "Unable to find %s config section in %s" msgstr "No se ha podido encontrar la sección de configuración %s en %s" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo." msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "No se ha podido localizar fallocate, posix_fallocate en libc. Se dejará como " "no operativo." #, python-format msgid "Unable to perform fsync() on directory %s: %s" msgstr "No se puede realizar fsync() en el directorio %s: %s" #, python-format msgid "Unable to read config from %s" msgstr "No se ha podido leer la configuración de %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r sin autorización" #, python-format msgid "Unexpected response: %s" msgstr "Respuesta inesperada : %s " msgid "Unhandled exception" msgstr "Excepción no controlada" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Informe de actualización fallido para %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Informe de actualización enviado para %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVISO: SSL sólo se debe habilitar con fines de prueba. Utilice la " "terminación de SSL externa para un despliegue de producción." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVISO: no se ha podido modificar el límite del descriptor de archivos. ¿Está " "en ejecución como no root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVISO: no se ha podido modificar el límite máximo de procesos. ¿Está en " "ejecución como no root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVISO: no se ha podido modificar el límite de memoria. ¿Está en ejecución " "como no root?" #, python-format msgid "Waited %s seconds for %s to die; giving up" msgstr "Se han esperado %s segundos a que muriera %s; abandonando" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Aviso: no se puede ajustar el límite sin un cliente almacenado en memoria " "caché" #, python-format msgid "method %s is not allowed." msgstr "el método %s no está permitido." 
msgid "no log file found" msgstr "no se ha encontrado ningún archivo de registro" msgid "odfpy not installed." msgstr "odfpy no está instalado." #, python-format msgid "plotting results failed due to %s" msgstr "error en el trazado de resultados debido a %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib no está instalado." swift-2.7.0/swift/account/0000775000567000056710000000000012675204211016605 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/account/replicator.py0000664000567000056710000000152212675204037021331 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swift.account.backend import AccountBroker, DATADIR from swift.common import db_replicator class AccountReplicator(db_replicator.Replicator): server_type = 'account' brokerclass = AccountBroker datadir = DATADIR default_port = 6002 swift-2.7.0/swift/account/utils.py0000664000567000056710000001054112675204037020326 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import json import time from xml.sax import saxutils from swift.common.swob import HTTPOk, HTTPNoContent from swift.common.utils import Timestamp from swift.common.storage_policy import POLICIES class FakeAccountBroker(object): """ Quacks like an account broker, but doesn't actually do anything. Responds like an account broker would for a real, empty account with no metadata. 
""" def get_info(self): now = Timestamp(time.time()).internal return {'container_count': 0, 'object_count': 0, 'bytes_used': 0, 'created_at': now, 'put_timestamp': now} def list_containers_iter(self, *_, **__): return [] @property def metadata(self): return {} def get_policy_stats(self): return {} def get_response_headers(broker): info = broker.get_info() resp_headers = { 'X-Account-Container-Count': info['container_count'], 'X-Account-Object-Count': info['object_count'], 'X-Account-Bytes-Used': info['bytes_used'], 'X-Timestamp': Timestamp(info['created_at']).normal, 'X-PUT-Timestamp': Timestamp(info['put_timestamp']).normal} policy_stats = broker.get_policy_stats() for policy_idx, stats in policy_stats.items(): policy = POLICIES.get_by_index(policy_idx) if not policy: continue header_prefix = 'X-Account-Storage-Policy-%s-%%s' % policy.name for key, value in stats.items(): header_name = header_prefix % key.replace('_', '-') resp_headers[header_name] = value resp_headers.update((key, value) for key, (value, timestamp) in broker.metadata.items() if value != '') return resp_headers def account_listing_response(account, req, response_content_type, broker=None, limit='', marker='', end_marker='', prefix='', delimiter='', reverse=False): if broker is None: broker = FakeAccountBroker() resp_headers = get_response_headers(broker) account_list = broker.list_containers_iter(limit, marker, end_marker, prefix, delimiter, reverse) if response_content_type == 'application/json': data = [] for (name, object_count, bytes_used, is_subdir) in account_list: if is_subdir: data.append({'subdir': name}) else: data.append({'name': name, 'count': object_count, 'bytes': bytes_used}) account_list = json.dumps(data) elif response_content_type.endswith('/xml'): output_list = ['', '' % saxutils.quoteattr(account)] for (name, object_count, bytes_used, is_subdir) in account_list: if is_subdir: output_list.append( '' % saxutils.quoteattr(name)) else: item = '%s%s' \ '%s' % \ (saxutils.escape(name), object_count, bytes_used) output_list.append(item) output_list.append('') account_list = '\n'.join(output_list) else: if not account_list: resp = HTTPNoContent(request=req, headers=resp_headers) resp.content_type = response_content_type resp.charset = 'utf-8' return resp account_list = '\n'.join(r[0] for r in account_list) + '\n' ret = HTTPOk(body=account_list, request=req, headers=resp_headers) ret.content_type = response_content_type ret.charset = 'utf-8' return ret swift-2.7.0/swift/account/backend.py0000664000567000056710000005730712675204037020570 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Pluggable Back-end for Account Server """ from uuid import uuid4 import time import six.moves.cPickle as pickle import sqlite3 from swift.common.utils import Timestamp from swift.common.db import DatabaseBroker, utf8encode DATADIR = 'accounts' POLICY_STAT_TRIGGER_SCRIPT = """ CREATE TRIGGER container_insert_ps AFTER INSERT ON container BEGIN INSERT OR IGNORE INTO policy_stat (storage_policy_index, container_count, object_count, bytes_used) VALUES (new.storage_policy_index, 0, 0, 0); UPDATE policy_stat SET container_count = container_count + (1 - new.deleted), object_count = object_count + new.object_count, bytes_used = bytes_used + new.bytes_used WHERE storage_policy_index = new.storage_policy_index; END; CREATE TRIGGER container_delete_ps AFTER DELETE ON container BEGIN UPDATE policy_stat SET container_count = container_count - (1 - old.deleted), object_count = object_count - old.object_count, bytes_used = bytes_used - old.bytes_used WHERE storage_policy_index = old.storage_policy_index; END; """ class AccountBroker(DatabaseBroker): """Encapsulates working with an account database.""" db_type = 'account' db_contains_type = 'container' db_reclaim_timestamp = 'delete_timestamp' def _initialize(self, conn, put_timestamp, **kwargs): """ Create a brand new account database (tables, indices, triggers, etc.) :param conn: DB connection object :param put_timestamp: put timestamp """ if not self.account: raise ValueError( 'Attempting to create a new database with no account set') self.create_container_table(conn) self.create_account_stat_table(conn, put_timestamp) self.create_policy_stat_table(conn) def create_container_table(self, conn): """ Create container table which is specific to the account DB. :param conn: DB connection object """ conn.executescript(""" CREATE TABLE container ( ROWID INTEGER PRIMARY KEY AUTOINCREMENT, name TEXT, put_timestamp TEXT, delete_timestamp TEXT, object_count INTEGER, bytes_used INTEGER, deleted INTEGER DEFAULT 0, storage_policy_index INTEGER DEFAULT 0 ); CREATE INDEX ix_container_deleted_name ON container (deleted, name); CREATE TRIGGER container_insert AFTER INSERT ON container BEGIN UPDATE account_stat SET container_count = container_count + (1 - new.deleted), object_count = object_count + new.object_count, bytes_used = bytes_used + new.bytes_used, hash = chexor(hash, new.name, new.put_timestamp || '-' || new.delete_timestamp || '-' || new.object_count || '-' || new.bytes_used); END; CREATE TRIGGER container_update BEFORE UPDATE ON container BEGIN SELECT RAISE(FAIL, 'UPDATE not allowed; DELETE and INSERT'); END; CREATE TRIGGER container_delete AFTER DELETE ON container BEGIN UPDATE account_stat SET container_count = container_count - (1 - old.deleted), object_count = object_count - old.object_count, bytes_used = bytes_used - old.bytes_used, hash = chexor(hash, old.name, old.put_timestamp || '-' || old.delete_timestamp || '-' || old.object_count || '-' || old.bytes_used); END; """ + POLICY_STAT_TRIGGER_SCRIPT) def create_account_stat_table(self, conn, put_timestamp): """ Create account_stat table which is specific to the account DB. Not a part of Pluggable Back-ends, internal to the baseline code. 
:param conn: DB connection object :param put_timestamp: put timestamp """ conn.executescript(""" CREATE TABLE account_stat ( account TEXT, created_at TEXT, put_timestamp TEXT DEFAULT '0', delete_timestamp TEXT DEFAULT '0', container_count INTEGER, object_count INTEGER DEFAULT 0, bytes_used INTEGER DEFAULT 0, hash TEXT default '00000000000000000000000000000000', id TEXT, status TEXT DEFAULT '', status_changed_at TEXT DEFAULT '0', metadata TEXT DEFAULT '' ); INSERT INTO account_stat (container_count) VALUES (0); """) conn.execute(''' UPDATE account_stat SET account = ?, created_at = ?, id = ?, put_timestamp = ?, status_changed_at = ? ''', (self.account, Timestamp(time.time()).internal, str(uuid4()), put_timestamp, put_timestamp)) def create_policy_stat_table(self, conn): """ Create policy_stat table which is specific to the account DB. Not a part of Pluggable Back-ends, internal to the baseline code. :param conn: DB connection object """ conn.executescript(""" CREATE TABLE policy_stat ( storage_policy_index INTEGER PRIMARY KEY, container_count INTEGER DEFAULT 0, object_count INTEGER DEFAULT 0, bytes_used INTEGER DEFAULT 0 ); INSERT OR IGNORE INTO policy_stat ( storage_policy_index, container_count, object_count, bytes_used ) SELECT 0, container_count, object_count, bytes_used FROM account_stat WHERE container_count > 0; """) def get_db_version(self, conn): if self._db_version == -1: self._db_version = 0 for row in conn.execute(''' SELECT name FROM sqlite_master WHERE name = 'ix_container_deleted_name' '''): self._db_version = 1 return self._db_version def _delete_db(self, conn, timestamp, force=False): """ Mark the DB as deleted. :param conn: DB connection object :param timestamp: timestamp to mark as deleted """ conn.execute(""" UPDATE account_stat SET delete_timestamp = ?, status = 'DELETED', status_changed_at = ? WHERE delete_timestamp < ? """, (timestamp, timestamp, timestamp)) def _commit_puts_load(self, item_list, entry): """See :func:`swift.common.db.DatabaseBroker._commit_puts_load`""" loaded = pickle.loads(entry.decode('base64')) # check to see if the update includes policy_index or not (name, put_timestamp, delete_timestamp, object_count, bytes_used, deleted) = loaded[:6] if len(loaded) > 6: storage_policy_index = loaded[6] else: # legacy support during upgrade until first non legacy storage # policy is defined storage_policy_index = 0 item_list.append( {'name': name, 'put_timestamp': put_timestamp, 'delete_timestamp': delete_timestamp, 'object_count': object_count, 'bytes_used': bytes_used, 'deleted': deleted, 'storage_policy_index': storage_policy_index}) def empty(self): """ Check if the account DB is empty. :returns: True if the database has no active containers. """ self._commit_puts_stale_ok() with self.get() as conn: row = conn.execute( 'SELECT container_count from account_stat').fetchone() return (row[0] == 0) def make_tuple_for_pickle(self, record): return (record['name'], record['put_timestamp'], record['delete_timestamp'], record['object_count'], record['bytes_used'], record['deleted'], record['storage_policy_index']) def put_container(self, name, put_timestamp, delete_timestamp, object_count, bytes_used, storage_policy_index): """ Create a container with the given attributes. 
:param name: name of the container to create :param put_timestamp: put_timestamp of the container to create :param delete_timestamp: delete_timestamp of the container to create :param object_count: number of objects in the container :param bytes_used: number of bytes used by the container :param storage_policy_index: the storage policy for this container """ if delete_timestamp > put_timestamp and \ object_count in (None, '', 0, '0'): deleted = 1 else: deleted = 0 record = {'name': name, 'put_timestamp': put_timestamp, 'delete_timestamp': delete_timestamp, 'object_count': object_count, 'bytes_used': bytes_used, 'deleted': deleted, 'storage_policy_index': storage_policy_index} self.put_record(record) def _is_deleted_info(self, status, container_count, delete_timestamp, put_timestamp): """ Apply delete logic to database info. :returns: True if the DB is considered to be deleted, False otherwise """ return status == 'DELETED' or ( container_count in (None, '', 0, '0') and Timestamp(delete_timestamp) > Timestamp(put_timestamp)) def _is_deleted(self, conn): """ Check account_stat table and evaluate info. :param conn: database conn :returns: True if the DB is considered to be deleted, False otherwise """ info = conn.execute(''' SELECT put_timestamp, delete_timestamp, container_count, status FROM account_stat''').fetchone() return self._is_deleted_info(**info) def is_status_deleted(self): """Only returns true if the status field is set to DELETED.""" with self.get() as conn: row = conn.execute(''' SELECT put_timestamp, delete_timestamp, status FROM account_stat''').fetchone() return row['status'] == "DELETED" or ( row['delete_timestamp'] > row['put_timestamp']) def get_policy_stats(self, do_migrations=False): """ Get global policy stats for the account. :param do_migrations: boolean, if True the policy stat dicts will always include the 'container_count' key; otherwise it may be omitted on legacy databases until they are migrated. :returns: dict of policy stats where the key is the policy index and the value is a dictionary like {'object_count': M, 'bytes_used': N, 'container_count': L} """ columns = [ 'storage_policy_index', 'container_count', 'object_count', 'bytes_used', ] def run_query(): return (conn.execute(''' SELECT %s FROM policy_stat ''' % ', '.join(columns)).fetchall()) self._commit_puts_stale_ok() info = [] with self.get() as conn: try: info = run_query() except sqlite3.OperationalError as err: if "no such column: container_count" in str(err): if do_migrations: self._migrate_add_container_count(conn) else: columns.remove('container_count') info = run_query() elif "no such table: policy_stat" not in str(err): raise policy_stats = {} for row in info: stats = dict(row) key = stats.pop('storage_policy_index') policy_stats[key] = stats return policy_stats def get_info(self): """ Get global data for the account. :returns: dict with keys: account, created_at, put_timestamp, delete_timestamp, status_changed_at, container_count, object_count, bytes_used, hash, id """ self._commit_puts_stale_ok() with self.get() as conn: return dict(conn.execute(''' SELECT account, created_at, put_timestamp, delete_timestamp, status_changed_at, container_count, object_count, bytes_used, hash, id FROM account_stat ''').fetchone()) def list_containers_iter(self, limit, marker, end_marker, prefix, delimiter, reverse=False): """ Get a list of containers sorted by name starting at marker onward, up to limit entries. Entries will begin with the prefix and will not have the delimiter after the prefix. 
:param limit: maximum number of entries to get :param marker: marker query :param end_marker: end marker query :param prefix: prefix query :param delimiter: delimiter for query :param reverse: reverse the result order. :returns: list of tuples of (name, object_count, bytes_used, 0) """ delim_force_gte = False (marker, end_marker, prefix, delimiter) = utf8encode( marker, end_marker, prefix, delimiter) if reverse: # Reverse the markers if we are reversing the listing. marker, end_marker = end_marker, marker self._commit_puts_stale_ok() if delimiter and not prefix: prefix = '' if prefix: end_prefix = prefix[:-1] + chr(ord(prefix[-1]) + 1) orig_marker = marker with self.get() as conn: results = [] while len(results) < limit: query = """ SELECT name, object_count, bytes_used, 0 FROM container WHERE """ query_args = [] if end_marker and (not prefix or end_marker < end_prefix): query += ' name < ? AND' query_args.append(end_marker) elif prefix: query += ' name < ? AND' query_args.append(end_prefix) if delim_force_gte: query += ' name >= ? AND' query_args.append(marker) # Always set back to False delim_force_gte = False elif marker and marker >= prefix: query += ' name > ? AND' query_args.append(marker) elif prefix: query += ' name >= ? AND' query_args.append(prefix) if self.get_db_version(conn) < 1: query += ' +deleted = 0' else: query += ' deleted = 0' query += ' ORDER BY name %s LIMIT ?' % \ ('DESC' if reverse else '') query_args.append(limit - len(results)) curs = conn.execute(query, query_args) curs.row_factory = None # Delimiters without a prefix is ignored, further if there # is no delimiter then we can simply return the result as # prefixes are now handled in the SQL statement. if prefix is None or not delimiter: return [r for r in curs] # We have a delimiter and a prefix (possibly empty string) to # handle rowcount = 0 for row in curs: rowcount += 1 name = row[0] if reverse: end_marker = name else: marker = name if len(results) >= limit: curs.close() return results end = name.find(delimiter, len(prefix)) if end > 0: if reverse: end_marker = name[:end + 1] else: marker = name[:end] + chr(ord(delimiter) + 1) # we want result to be inclusive of delim+1 delim_force_gte = True dir_name = name[:end + 1] if dir_name != orig_marker: results.append([dir_name, 0, 0, 1]) curs.close() break results.append(row) if not rowcount: break return results def merge_items(self, item_list, source=None): """ Merge items into the container table. :param item_list: list of dictionaries of {'name', 'put_timestamp', 'delete_timestamp', 'object_count', 'bytes_used', 'deleted', 'storage_policy_index'} :param source: if defined, update incoming_sync with the source """ def _really_merge_items(conn): max_rowid = -1 curs = conn.cursor() for rec in item_list: rec.setdefault('storage_policy_index', 0) # legacy record = [rec['name'], rec['put_timestamp'], rec['delete_timestamp'], rec['object_count'], rec['bytes_used'], rec['deleted'], rec['storage_policy_index']] query = ''' SELECT name, put_timestamp, delete_timestamp, object_count, bytes_used, deleted, storage_policy_index FROM container WHERE name = ? 
''' if self.get_db_version(conn) >= 1: query += ' AND deleted IN (0, 1)' curs_row = curs.execute(query, (rec['name'],)) curs_row.row_factory = None row = curs_row.fetchone() if row: row = list(row) for i in range(5): if record[i] is None and row[i] is not None: record[i] = row[i] if row[1] > record[1]: # Keep newest put_timestamp record[1] = row[1] if row[2] > record[2]: # Keep newest delete_timestamp record[2] = row[2] # If deleted, mark as such if record[2] > record[1] and \ record[3] in (None, '', 0, '0'): record[5] = 1 else: record[5] = 0 curs.execute(''' DELETE FROM container WHERE name = ? AND deleted IN (0, 1) ''', (record[0],)) curs.execute(''' INSERT INTO container (name, put_timestamp, delete_timestamp, object_count, bytes_used, deleted, storage_policy_index) VALUES (?, ?, ?, ?, ?, ?, ?) ''', record) if source: max_rowid = max(max_rowid, rec['ROWID']) if source: try: curs.execute(''' INSERT INTO incoming_sync (sync_point, remote_id) VALUES (?, ?) ''', (max_rowid, source)) except sqlite3.IntegrityError: curs.execute(''' UPDATE incoming_sync SET sync_point=max(?, sync_point) WHERE remote_id=? ''', (max_rowid, source)) conn.commit() with self.get() as conn: # create the policy stat table if needed and add spi to container try: _really_merge_items(conn) except sqlite3.OperationalError as err: if 'no such column: storage_policy_index' not in str(err): raise self._migrate_add_storage_policy_index(conn) _really_merge_items(conn) def _migrate_add_container_count(self, conn): """ Add the container_count column to the 'policy_stat' table and update it :param conn: DB connection object """ # add the container_count column curs = conn.cursor() curs.executescript(''' DROP TRIGGER container_delete_ps; DROP TRIGGER container_insert_ps; ALTER TABLE policy_stat ADD COLUMN container_count INTEGER DEFAULT 0; ''' + POLICY_STAT_TRIGGER_SCRIPT) # keep the simple case simple, if there's only one entry in the # policy_stat table we just copy the total container count from the # account_stat table # if that triggers an update then the where changes <> 0 *would* exist # and the insert or replace from the count subqueries won't execute curs.executescript(""" UPDATE policy_stat SET container_count = ( SELECT container_count FROM account_stat) WHERE ( SELECT COUNT(storage_policy_index) FROM policy_stat ) <= 1; INSERT OR REPLACE INTO policy_stat ( storage_policy_index, container_count, object_count, bytes_used ) SELECT p.storage_policy_index, c.count, p.object_count, p.bytes_used FROM ( SELECT storage_policy_index, COUNT(*) as count FROM container WHERE deleted = 0 GROUP BY storage_policy_index ) c JOIN policy_stat p ON p.storage_policy_index = c.storage_policy_index WHERE NOT EXISTS( SELECT changes() as change FROM policy_stat WHERE change <> 0 ); """) conn.commit() def _migrate_add_storage_policy_index(self, conn): """ Add the storage_policy_index column to the 'container' table and set up triggers, creating the policy_stat table if needed. 
:param conn: DB connection object """ try: self.create_policy_stat_table(conn) except sqlite3.OperationalError as err: if 'table policy_stat already exists' not in str(err): raise conn.executescript(''' ALTER TABLE container ADD COLUMN storage_policy_index INTEGER DEFAULT 0; ''' + POLICY_STAT_TRIGGER_SCRIPT) swift-2.7.0/swift/account/server.py0000664000567000056710000003234012675204037020475 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import traceback from swift import gettext_ as _ from eventlet import Timeout import swift.common.db from swift.account.backend import AccountBroker, DATADIR from swift.account.utils import account_listing_response, get_response_headers from swift.common.db import DatabaseConnectionError, DatabaseAlreadyExists from swift.common.request_helpers import get_param, get_listing_content_type, \ split_and_validate_path from swift.common.utils import get_logger, hash_path, public, \ Timestamp, storage_directory, config_true_value, \ json, timing_stats, replication, get_log_line from swift.common.constraints import check_mount, valid_timestamp, check_utf8 from swift.common import constraints from swift.common.db_replicator import ReplicatorRpc from swift.common.base_storage_server import BaseStorageServer from swift.common.swob import HTTPAccepted, HTTPBadRequest, \ HTTPCreated, HTTPForbidden, HTTPInternalServerError, \ HTTPMethodNotAllowed, HTTPNoContent, HTTPNotFound, \ HTTPPreconditionFailed, HTTPConflict, Request, \ HTTPInsufficientStorage, HTTPException from swift.common.request_helpers import is_sys_or_user_meta class AccountController(BaseStorageServer): """WSGI controller for the account server.""" server_type = 'account-server' def __init__(self, conf, logger=None): super(AccountController, self).__init__(conf) self.logger = logger or get_logger(conf, log_route='account-server') self.log_requests = config_true_value(conf.get('log_requests', 'true')) self.root = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.replicator_rpc = ReplicatorRpc(self.root, DATADIR, AccountBroker, self.mount_check, logger=self.logger) self.auto_create_account_prefix = \ conf.get('auto_create_account_prefix') or '.' swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) def _get_account_broker(self, drive, part, account, **kwargs): hsh = hash_path(account) db_dir = storage_directory(DATADIR, part, hsh) db_path = os.path.join(self.root, drive, db_dir, hsh + '.db') kwargs.setdefault('account', account) kwargs.setdefault('logger', self.logger) return AccountBroker(db_path, **kwargs) def _deleted_response(self, broker, req, resp, body=''): # We are here since either the account does not exist or # it exists but marked for deletion. 
headers = {} # Try to check if account exists and is marked for deletion try: if broker.is_status_deleted(): # Account does exist and is marked for deletion headers = {'X-Account-Status': 'Deleted'} except DatabaseConnectionError: # Account does not exist! pass return resp(request=req, headers=headers, charset='utf-8', body=body) @public @timing_stats() def DELETE(self, req): """Handle HTTP DELETE request.""" drive, part, account = split_and_validate_path(req, 3) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) req_timestamp = valid_timestamp(req) broker = self._get_account_broker(drive, part, account) if broker.is_deleted(): return self._deleted_response(broker, req, HTTPNotFound) broker.delete_db(req_timestamp.internal) return self._deleted_response(broker, req, HTTPNoContent) @public @timing_stats() def PUT(self, req): """Handle HTTP PUT request.""" drive, part, account, container = split_and_validate_path(req, 3, 4) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) if container: # put account container if 'x-timestamp' not in req.headers: timestamp = Timestamp(time.time()) else: timestamp = valid_timestamp(req) pending_timeout = None container_policy_index = \ req.headers.get('X-Backend-Storage-Policy-Index', 0) if 'x-trans-id' in req.headers: pending_timeout = 3 broker = self._get_account_broker(drive, part, account, pending_timeout=pending_timeout) if account.startswith(self.auto_create_account_prefix) and \ not os.path.exists(broker.db_file): try: broker.initialize(timestamp.internal) except DatabaseAlreadyExists: pass if req.headers.get('x-account-override-deleted', 'no').lower() != \ 'yes' and broker.is_deleted(): return HTTPNotFound(request=req) broker.put_container(container, req.headers['x-put-timestamp'], req.headers['x-delete-timestamp'], req.headers['x-object-count'], req.headers['x-bytes-used'], container_policy_index) if req.headers['x-delete-timestamp'] > \ req.headers['x-put-timestamp']: return HTTPNoContent(request=req) else: return HTTPCreated(request=req) else: # put account timestamp = valid_timestamp(req) broker = self._get_account_broker(drive, part, account) if not os.path.exists(broker.db_file): try: broker.initialize(timestamp.internal) created = True except DatabaseAlreadyExists: created = False elif broker.is_status_deleted(): return self._deleted_response(broker, req, HTTPForbidden, body='Recently deleted') else: created = broker.is_deleted() broker.update_put_timestamp(timestamp.internal) if broker.is_deleted(): return HTTPConflict(request=req) metadata = {} metadata.update((key, (value, timestamp.internal)) for key, value in req.headers.items() if is_sys_or_user_meta('account', key)) if metadata: broker.update_metadata(metadata, validate_metadata=True) if created: return HTTPCreated(request=req) else: return HTTPAccepted(request=req) @public @timing_stats() def HEAD(self, req): """Handle HTTP HEAD request.""" drive, part, account = split_and_validate_path(req, 3) out_content_type = get_listing_content_type(req) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_account_broker(drive, part, account, pending_timeout=0.1, stale_reads_ok=True) if broker.is_deleted(): return self._deleted_response(broker, req, HTTPNotFound) headers = get_response_headers(broker) headers['Content-Type'] = out_content_type return HTTPNoContent(request=req, 
headers=headers, charset='utf-8') @public @timing_stats() def GET(self, req): """Handle HTTP GET request.""" drive, part, account = split_and_validate_path(req, 3) prefix = get_param(req, 'prefix') delimiter = get_param(req, 'delimiter') if delimiter and (len(delimiter) > 1 or ord(delimiter) > 254): # delimiters can be made more flexible later return HTTPPreconditionFailed(body='Bad delimiter') limit = constraints.ACCOUNT_LISTING_LIMIT given_limit = get_param(req, 'limit') reverse = config_true_value(get_param(req, 'reverse')) if given_limit and given_limit.isdigit(): limit = int(given_limit) if limit > constraints.ACCOUNT_LISTING_LIMIT: return HTTPPreconditionFailed( request=req, body='Maximum limit is %d' % constraints.ACCOUNT_LISTING_LIMIT) marker = get_param(req, 'marker', '') end_marker = get_param(req, 'end_marker') out_content_type = get_listing_content_type(req) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_account_broker(drive, part, account, pending_timeout=0.1, stale_reads_ok=True) if broker.is_deleted(): return self._deleted_response(broker, req, HTTPNotFound) return account_listing_response(account, req, out_content_type, broker, limit, marker, end_marker, prefix, delimiter, reverse) @public @replication @timing_stats() def REPLICATE(self, req): """ Handle HTTP REPLICATE request. Handler for RPC calls for account replication. """ post_args = split_and_validate_path(req, 3) drive, partition, hash = post_args if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) try: args = json.load(req.environ['wsgi.input']) except ValueError as err: return HTTPBadRequest(body=str(err), content_type='text/plain') ret = self.replicator_rpc.dispatch(post_args, args) ret.request = req return ret @public @timing_stats() def POST(self, req): """Handle HTTP POST request.""" drive, part, account = split_and_validate_path(req, 3) req_timestamp = valid_timestamp(req) if self.mount_check and not check_mount(self.root, drive): return HTTPInsufficientStorage(drive=drive, request=req) broker = self._get_account_broker(drive, part, account) if broker.is_deleted(): return self._deleted_response(broker, req, HTTPNotFound) metadata = {} metadata.update((key, (value, req_timestamp.internal)) for key, value in req.headers.items() if is_sys_or_user_meta('account', key)) if metadata: broker.update_metadata(metadata, validate_metadata=True) return HTTPNoContent(request=req) def __call__(self, env, start_response): start_time = time.time() req = Request(env) self.logger.txn_id = req.headers.get('x-trans-id', None) if not check_utf8(req.path_info): res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL') else: try: # disallow methods which are not publicly accessible try: if req.method not in self.allowed_methods: raise AttributeError('Not allowed method.') except AttributeError: res = HTTPMethodNotAllowed() else: method = getattr(self, req.method) res = method(req) except HTTPException as error_response: res = error_response except (Exception, Timeout): self.logger.exception(_('ERROR __call__ error with %(method)s' ' %(path)s '), {'method': req.method, 'path': req.path}) res = HTTPInternalServerError(body=traceback.format_exc()) if self.log_requests: trans_time = time.time() - start_time additional_info = '' if res.headers.get('x-container-timestamp') is not None: additional_info += 'x-container-timestamp: %s' % \ res.headers['x-container-timestamp'] log_msg 
= get_log_line(req, res, trans_time, additional_info) if req.method.upper() == 'REPLICATE': self.logger.debug(log_msg) else: self.logger.info(log_msg) return res(env, start_response) def app_factory(global_conf, **local_conf): """paste.deploy app factory for creating WSGI account server apps""" conf = global_conf.copy() conf.update(local_conf) return AccountController(conf) swift-2.7.0/swift/account/__init__.py0000664000567000056710000000000012675204037020712 0ustar jenkinsjenkins00000000000000swift-2.7.0/swift/account/auditor.py0000664000567000056710000001461412675204037020642 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from swift import gettext_ as _ from random import random import swift.common.db from swift.account.backend import AccountBroker, DATADIR from swift.common.exceptions import InvalidAccountInfo from swift.common.utils import get_logger, audit_location_generator, \ config_true_value, dump_recon_cache, ratelimit_sleep from swift.common.daemon import Daemon from eventlet import Timeout class AccountAuditor(Daemon): """Audit accounts.""" def __init__(self, conf, logger=None): self.conf = conf self.logger = logger or get_logger(conf, log_route='account-auditor') self.devices = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.interval = int(conf.get('interval', 1800)) self.logging_interval = 3600 # once an hour self.account_passes = 0 self.account_failures = 0 self.accounts_running_time = 0 self.max_accounts_per_second = \ float(conf.get('accounts_per_second', 200)) swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.rcache = os.path.join(self.recon_cache_path, "account.recon") def _one_audit_pass(self, reported): all_locs = audit_location_generator(self.devices, DATADIR, '.db', mount_check=self.mount_check, logger=self.logger) for path, device, partition in all_locs: self.account_audit(path) if time.time() - reported >= self.logging_interval: self.logger.info(_('Since %(time)s: Account audits: ' '%(passed)s passed audit,' '%(failed)s failed audit'), {'time': time.ctime(reported), 'passed': self.account_passes, 'failed': self.account_failures}) dump_recon_cache({'account_audits_since': reported, 'account_audits_passed': self.account_passes, 'account_audits_failed': self.account_failures}, self.rcache, self.logger) reported = time.time() self.account_passes = 0 self.account_failures = 0 self.accounts_running_time = ratelimit_sleep( self.accounts_running_time, self.max_accounts_per_second) return reported def run_forever(self, *args, **kwargs): """Run the account audit until stopped.""" reported = time.time() time.sleep(random() * self.interval) while True: self.logger.info(_('Begin account audit pass.')) begin = time.time() try: reported = self._one_audit_pass(reported) except (Exception, Timeout): 
self.logger.increment('errors') self.logger.exception(_('ERROR auditing')) elapsed = time.time() - begin if elapsed < self.interval: time.sleep(self.interval - elapsed) self.logger.info( _('Account audit pass completed: %.02fs'), elapsed) dump_recon_cache({'account_auditor_pass_completed': elapsed}, self.rcache, self.logger) def run_once(self, *args, **kwargs): """Run the account audit once.""" self.logger.info(_('Begin account audit "once" mode')) begin = reported = time.time() self._one_audit_pass(reported) elapsed = time.time() - begin self.logger.info( _('Account audit "once" mode completed: %.02fs'), elapsed) dump_recon_cache({'account_auditor_pass_completed': elapsed}, self.rcache, self.logger) def validate_per_policy_counts(self, broker): info = broker.get_info() policy_stats = broker.get_policy_stats(do_migrations=True) policy_totals = { 'container_count': 0, 'object_count': 0, 'bytes_used': 0, } for policy_stat in policy_stats.values(): for key in policy_totals: policy_totals[key] += policy_stat[key] for key in policy_totals: if policy_totals[key] == info[key]: continue raise InvalidAccountInfo(_( 'The total %(key)s for the container (%(total)s) does not ' 'match the sum of %(key)s across policies (%(sum)s)') % {'key': key, 'total': info[key], 'sum': policy_totals[key]}) def account_audit(self, path): """ Audits the given account path :param path: the path to an account db """ start_time = time.time() try: broker = AccountBroker(path) if not broker.is_deleted(): self.validate_per_policy_counts(broker) self.logger.increment('passes') self.account_passes += 1 self.logger.debug('Audit passed for %s' % broker) except InvalidAccountInfo as e: self.logger.increment('failures') self.account_failures += 1 self.logger.error( _('Audit Failed for %s: %s'), path, str(e)) except (Exception, Timeout): self.logger.increment('failures') self.account_failures += 1 self.logger.exception(_('ERROR Could not get account info %s'), path) self.logger.timing_since('timing', start_time) swift-2.7.0/swift/account/reaper.py0000664000567000056710000005745512675204037020463 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
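# --- Illustrative sketch, not part of the upstream tree ---
# The per-policy consistency check performed by the auditor's
# validate_per_policy_counts() above, reduced to plain dicts with made-up
# numbers so the arithmetic being verified is visible.
info = {'container_count': 3, 'object_count': 15, 'bytes_used': 3072}
policy_stats = {
    0: {'container_count': 2, 'object_count': 10, 'bytes_used': 2048},
    1: {'container_count': 1, 'object_count': 5, 'bytes_used': 1024},
}
policy_totals = {'container_count': 0, 'object_count': 0, 'bytes_used': 0}
for stats in policy_stats.values():
    for key in policy_totals:
        policy_totals[key] += stats[key]
# The auditor raises InvalidAccountInfo when any of these totals disagrees
# with the account-level row; here they match, so the audit would pass.
assert policy_totals == info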
import os import random import socket from swift import gettext_ as _ from logging import DEBUG from math import sqrt from time import time from hashlib import md5 import itertools from eventlet import GreenPool, sleep, Timeout import six import swift.common.db from swift.account.backend import AccountBroker, DATADIR from swift.common.direct_client import direct_delete_container, \ direct_delete_object, direct_get_container from swift.common.exceptions import ClientException from swift.common.ring import Ring from swift.common.ring.utils import is_local_device from swift.common.utils import get_logger, whataremyips, ismount, \ config_true_value, Timestamp from swift.common.daemon import Daemon from swift.common.storage_policy import POLICIES, PolicyError class AccountReaper(Daemon): """ Removes data from status=DELETED accounts. These are accounts that have been asked to be removed by the reseller via services remove_storage_account XMLRPC call. The account is not deleted immediately by the services call, but instead the account is simply marked for deletion by setting the status column in the account_stat table of the account database. This account reaper scans for such accounts and removes the data in the background. The background deletion process will occur on the primary account server for the account. :param server_conf: The [account-server] dictionary of the account server configuration file :param reaper_conf: The [account-reaper] dictionary of the account server configuration file See the etc/account-server.conf-sample for information on the possible configuration parameters. """ def __init__(self, conf, logger=None): self.conf = conf self.logger = logger or get_logger(conf, log_route='account-reaper') self.devices = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.interval = int(conf.get('interval', 3600)) self.swift_dir = conf.get('swift_dir', '/etc/swift') self.account_ring = None self.container_ring = None self.object_ring = None self.node_timeout = float(conf.get('node_timeout', 10)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.myips = whataremyips(conf.get('bind_ip', '0.0.0.0')) self.bind_port = int(conf.get('bind_port', 6002)) self.concurrency = int(conf.get('concurrency', 25)) self.container_concurrency = self.object_concurrency = \ sqrt(self.concurrency) self.container_pool = GreenPool(size=self.container_concurrency) swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) self.delay_reaping = int(conf.get('delay_reaping') or 0) reap_warn_after = float(conf.get('reap_warn_after') or 86400 * 30) self.reap_not_done_after = reap_warn_after + self.delay_reaping self.start_time = time() def get_account_ring(self): """The account :class:`swift.common.ring.Ring` for the cluster.""" if not self.account_ring: self.account_ring = Ring(self.swift_dir, ring_name='account') return self.account_ring def get_container_ring(self): """The container :class:`swift.common.ring.Ring` for the cluster.""" if not self.container_ring: self.container_ring = Ring(self.swift_dir, ring_name='container') return self.container_ring def get_object_ring(self, policy_idx): """ Get the ring identified by the policy index :param policy_idx: Storage policy index :returns: A ring matching the storage policy """ return POLICIES.get_object_ring(policy_idx, self.swift_dir) def run_forever(self, *args, **kwargs): """Main entry point when running the reaper in normal daemon mode. 
This repeatedly calls :func:`reap_once` no quicker than the configuration interval. """ self.logger.debug('Daemon started.') sleep(random.random() * self.interval) while True: begin = time() self.run_once() elapsed = time() - begin if elapsed < self.interval: sleep(self.interval - elapsed) def run_once(self, *args, **kwargs): """ Main entry point when running the reaper in 'once' mode, where it will do a single pass over all accounts on the server. This is called repeatedly by :func:`run_forever`. This will call :func:`reap_device` once for each device on the server. """ self.logger.debug('Begin devices pass: %s', self.devices) begin = time() try: for device in os.listdir(self.devices): if self.mount_check and not ismount( os.path.join(self.devices, device)): self.logger.increment('errors') self.logger.debug( _('Skipping %s as it is not mounted'), device) continue self.reap_device(device) except (Exception, Timeout): self.logger.exception(_("Exception in top-level account reaper " "loop")) elapsed = time() - begin self.logger.info(_('Devices pass completed: %.02fs'), elapsed) def reap_device(self, device): """ Called once per pass for each device on the server. This will scan the accounts directory for the device, looking for partitions this device is the primary for, then looking for account databases that are marked status=DELETED and still have containers and calling :func:`reap_account`. Account databases marked status=DELETED that no longer have containers will eventually be permanently removed by the reclaim process within the account replicator (see :mod:`swift.db_replicator`). :param device: The device to look for accounts to be deleted. """ datadir = os.path.join(self.devices, device, DATADIR) if not os.path.exists(datadir): return for partition in os.listdir(datadir): partition_path = os.path.join(datadir, partition) if not partition.isdigit(): continue nodes = self.get_account_ring().get_part_nodes(int(partition)) if not os.path.isdir(partition_path): continue container_shard = None for container_shard, node in enumerate(nodes): if is_local_device(self.myips, None, node['ip'], None) and \ (not self.bind_port or self.bind_port == node['port']): break else: continue for suffix in os.listdir(partition_path): suffix_path = os.path.join(partition_path, suffix) if not os.path.isdir(suffix_path): continue for hsh in os.listdir(suffix_path): hsh_path = os.path.join(suffix_path, hsh) if not os.path.isdir(hsh_path): continue for fname in sorted(os.listdir(hsh_path), reverse=True): if fname.endswith('.ts'): break elif fname.endswith('.db'): self.start_time = time() broker = \ AccountBroker(os.path.join(hsh_path, fname)) if broker.is_status_deleted() and \ not broker.empty(): self.reap_account( broker, partition, nodes, container_shard=container_shard) def reset_stats(self): self.stats_return_codes = {} self.stats_containers_deleted = 0 self.stats_objects_deleted = 0 self.stats_containers_remaining = 0 self.stats_objects_remaining = 0 self.stats_containers_possibly_remaining = 0 self.stats_objects_possibly_remaining = 0 def reap_account(self, broker, partition, nodes, container_shard=None): """ Called once per pass for each account this server is the primary for and attempts to delete the data for the given account. The reaper will only delete one account at any given time. It will call :func:`reap_container` up to sqrt(self.concurrency) times concurrently while reaping the account. 
If there is any exception while deleting a single container, the process will continue for any other containers and the failed containers will be tried again the next time this function is called with the same parameters. If there is any exception while listing the containers for deletion, the process will stop (but will obviously be tried again the next time this function is called with the same parameters). This isn't likely since the listing comes from the local database. After the process completes (successfully or not) statistics about what was accomplished will be logged. This function returns nothing and should raise no exception but only update various self.stats_* values for what occurs. :param broker: The AccountBroker for the account to delete. :param partition: The partition in the account ring the account is on. :param nodes: The primary node dicts for the account to delete. :param container_shard: int used to shard containers reaped. If None, will reap all containers. .. seealso:: :class:`swift.account.backend.AccountBroker` for the broker class. .. seealso:: :func:`swift.common.ring.Ring.get_nodes` for a description of the node dicts. """ begin = time() info = broker.get_info() if time() - float(Timestamp(info['delete_timestamp'])) <= \ self.delay_reaping: return False account = info['account'] self.logger.info(_('Beginning pass on account %s'), account) self.reset_stats() container_limit = 1000 if container_shard is not None: container_limit *= len(nodes) try: marker = '' while True: containers = \ list(broker.list_containers_iter(container_limit, marker, None, None, None)) if not containers: break try: for (container, _junk, _junk, _junk) in containers: this_shard = int(md5(container).hexdigest(), 16) % \ len(nodes) if container_shard not in (this_shard, None): continue self.container_pool.spawn(self.reap_container, account, partition, nodes, container) self.container_pool.waitall() except (Exception, Timeout): self.logger.exception( _('Exception with containers for account %s'), account) marker = containers[-1][0] if marker == '': break log = 'Completed pass on account %s' % account except (Exception, Timeout): self.logger.exception( _('Exception with account %s'), account) log = _('Incomplete pass on account %s') % account if self.stats_containers_deleted: log += _(', %s containers deleted') % self.stats_containers_deleted if self.stats_objects_deleted: log += _(', %s objects deleted') % self.stats_objects_deleted if self.stats_containers_remaining: log += _(', %s containers remaining') % \ self.stats_containers_remaining if self.stats_objects_remaining: log += _(', %s objects remaining') % self.stats_objects_remaining if self.stats_containers_possibly_remaining: log += _(', %s containers possibly remaining') % \ self.stats_containers_possibly_remaining if self.stats_objects_possibly_remaining: log += _(', %s objects possibly remaining') % \ self.stats_objects_possibly_remaining if self.stats_return_codes: log += _(', return codes: ') for code in sorted(self.stats_return_codes): log += '%s %sxxs, ' % (self.stats_return_codes[code], code) log = log[:-2] log += _(', elapsed: %.02fs') % (time() - begin) self.logger.info(log) self.logger.timing_since('timing', self.start_time) delete_timestamp = Timestamp(info['delete_timestamp']) if self.stats_containers_remaining and \ begin - float(delete_timestamp) >= self.reap_not_done_after: self.logger.warning(_('Account %s has not been reaped since %s') % (account, delete_timestamp.isoformat)) return True def reap_container(self, 
account, account_partition, account_nodes, container): """ Deletes the data and the container itself for the given container. This will call :func:`reap_object` up to sqrt(self.concurrency) times concurrently for the objects in the container. If there is any exception while deleting a single object, the process will continue for any other objects in the container and the failed objects will be tried again the next time this function is called with the same parameters. If there is any exception while listing the objects for deletion, the process will stop (but will obviously be tried again the next time this function is called with the same parameters). This is a possibility since the listing comes from querying just the primary remote container server. Once all objects have been attempted to be deleted, the container itself will be attempted to be deleted by sending a delete request to all container nodes. The format of the delete request is such that each container server will update a corresponding account server, removing the container from the account's listing. This function returns nothing and should raise no exception but only update various self.stats_* values for what occurs. :param account: The name of the account for the container. :param account_partition: The partition for the account on the account ring. :param account_nodes: The primary node dicts for the account. :param container: The name of the container to delete. * See also: :func:`swift.common.ring.Ring.get_nodes` for a description of the account node dicts. """ account_nodes = list(account_nodes) part, nodes = self.get_container_ring().get_nodes(account, container) node = nodes[-1] pool = GreenPool(size=self.object_concurrency) marker = '' while True: objects = None try: headers, objects = direct_get_container( node, part, account, container, marker=marker, conn_timeout=self.conn_timeout, response_timeout=self.node_timeout) self.stats_return_codes[2] = \ self.stats_return_codes.get(2, 0) + 1 self.logger.increment('return_codes.2') except ClientException as err: if self.logger.getEffectiveLevel() <= DEBUG: self.logger.exception( _('Exception with %(ip)s:%(port)s/%(device)s'), node) self.stats_return_codes[err.http_status // 100] = \ self.stats_return_codes.get(err.http_status // 100, 0) + 1 self.logger.increment( 'return_codes.%d' % (err.http_status // 100,)) except (Timeout, socket.error) as err: self.logger.error( _('Timeout Exception with %(ip)s:%(port)s/%(device)s'), node) if not objects: break try: policy_index = headers.get('X-Backend-Storage-Policy-Index', 0) policy = POLICIES.get_by_index(policy_index) if not policy: self.logger.error('ERROR: invalid storage policy index: %r' % policy_index) for obj in objects: if isinstance(obj['name'], six.text_type): obj['name'] = obj['name'].encode('utf8') pool.spawn(self.reap_object, account, container, part, nodes, obj['name'], policy_index) pool.waitall() except (Exception, Timeout): self.logger.exception(_('Exception with objects for container ' '%(container)s for account %(account)s' ), {'container': container, 'account': account}) marker = objects[-1]['name'] if marker == '': break successes = 0 failures = 0 timestamp = Timestamp(time()) for node in nodes: anode = account_nodes.pop() try: direct_delete_container( node, part, account, container, conn_timeout=self.conn_timeout, response_timeout=self.node_timeout, headers={'X-Account-Host': '%(ip)s:%(port)s' % anode, 'X-Account-Partition': str(account_partition), 'X-Account-Device': anode['device'], 
'X-Account-Override-Deleted': 'yes', 'X-Timestamp': timestamp.internal}) successes += 1 self.stats_return_codes[2] = \ self.stats_return_codes.get(2, 0) + 1 self.logger.increment('return_codes.2') except ClientException as err: if self.logger.getEffectiveLevel() <= DEBUG: self.logger.exception( _('Exception with %(ip)s:%(port)s/%(device)s'), node) failures += 1 self.logger.increment('containers_failures') self.stats_return_codes[err.http_status // 100] = \ self.stats_return_codes.get(err.http_status // 100, 0) + 1 self.logger.increment( 'return_codes.%d' % (err.http_status // 100,)) except (Timeout, socket.error) as err: self.logger.error( _('Timeout Exception with %(ip)s:%(port)s/%(device)s'), node) failures += 1 self.logger.increment('containers_failures') if successes > failures: self.stats_containers_deleted += 1 self.logger.increment('containers_deleted') elif not successes: self.stats_containers_remaining += 1 self.logger.increment('containers_remaining') else: self.stats_containers_possibly_remaining += 1 self.logger.increment('containers_possibly_remaining') def reap_object(self, account, container, container_partition, container_nodes, obj, policy_index): """ Deletes the given object by issuing a delete request to each node for the object. The format of the delete request is such that each object server will update a corresponding container server, removing the object from the container's listing. This function returns nothing and should raise no exception but only update various self.stats_* values for what occurs. :param account: The name of the account for the object. :param container: The name of the container for the object. :param container_partition: The partition for the container on the container ring. :param container_nodes: The primary node dicts for the container. :param obj: The name of the object to delete. :param policy_index: The storage policy index of the object's container * See also: :func:`swift.common.ring.Ring.get_nodes` for a description of the container node dicts. 
""" cnodes = itertools.cycle(container_nodes) try: ring = self.get_object_ring(policy_index) except PolicyError: self.stats_objects_remaining += 1 self.logger.increment('objects_remaining') return part, nodes = ring.get_nodes(account, container, obj) successes = 0 failures = 0 timestamp = Timestamp(time()) for node in nodes: cnode = next(cnodes) try: direct_delete_object( node, part, account, container, obj, conn_timeout=self.conn_timeout, response_timeout=self.node_timeout, headers={'X-Container-Host': '%(ip)s:%(port)s' % cnode, 'X-Container-Partition': str(container_partition), 'X-Container-Device': cnode['device'], 'X-Backend-Storage-Policy-Index': policy_index, 'X-Timestamp': timestamp.internal}) successes += 1 self.stats_return_codes[2] = \ self.stats_return_codes.get(2, 0) + 1 self.logger.increment('return_codes.2') except ClientException as err: if self.logger.getEffectiveLevel() <= DEBUG: self.logger.exception( _('Exception with %(ip)s:%(port)s/%(device)s'), node) failures += 1 self.logger.increment('objects_failures') self.stats_return_codes[err.http_status // 100] = \ self.stats_return_codes.get(err.http_status // 100, 0) + 1 self.logger.increment( 'return_codes.%d' % (err.http_status // 100,)) except (Timeout, socket.error) as err: failures += 1 self.logger.increment('objects_failures') self.logger.error( _('Timeout Exception with %(ip)s:%(port)s/%(device)s'), node) if successes > failures: self.stats_objects_deleted += 1 self.logger.increment('objects_deleted') elif not successes: self.stats_objects_remaining += 1 self.logger.increment('objects_remaining') else: self.stats_objects_possibly_remaining += 1 self.logger.increment('objects_possibly_remaining') swift-2.7.0/swift/common/0000775000567000056710000000000012675204211016441 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/common/exceptions.py0000664000567000056710000001244312675204037021206 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from eventlet import Timeout import swift.common.utils class MessageTimeout(Timeout): def __init__(self, seconds=None, msg=None): Timeout.__init__(self, seconds=seconds) self.msg = msg def __str__(self): return '%s: %s' % (Timeout.__str__(self), self.msg) class SwiftException(Exception): pass class PutterConnectError(Exception): def __init__(self, status=None): self.status = status class InvalidTimestamp(SwiftException): pass class InsufficientStorage(SwiftException): pass class FooterNotSupported(SwiftException): pass class MultiphasePUTNotSupported(SwiftException): pass class SuffixSyncError(SwiftException): pass class RangeAlreadyComplete(SwiftException): pass class DiskFileError(SwiftException): pass class DiskFileNotOpen(DiskFileError): pass class DiskFileQuarantined(DiskFileError): pass class DiskFileCollision(DiskFileError): pass class DiskFileNotExist(DiskFileError): pass class DiskFileDeleted(DiskFileNotExist): def __init__(self, metadata=None): self.metadata = metadata or {} self.timestamp = swift.common.utils.Timestamp( self.metadata.get('X-Timestamp', 0)) class DiskFileExpired(DiskFileDeleted): pass class DiskFileNoSpace(DiskFileError): pass class DiskFileDeviceUnavailable(DiskFileError): pass class DiskFileXattrNotSupported(DiskFileError): pass class DeviceUnavailable(SwiftException): pass class InvalidAccountInfo(SwiftException): pass class PathNotDir(OSError): pass class ChunkReadError(SwiftException): pass class ChunkReadTimeout(Timeout): pass class ChunkWriteTimeout(Timeout): pass class ConnectionTimeout(Timeout): pass class ResponseTimeout(Timeout): pass class DriveNotMounted(SwiftException): pass class LockTimeout(MessageTimeout): pass class ThreadPoolDead(SwiftException): pass class RingBuilderError(SwiftException): pass class RingValidationError(RingBuilderError): pass class EmptyRingError(RingBuilderError): pass class DuplicateDeviceError(RingBuilderError): pass class UnPicklingError(SwiftException): pass class FileNotFoundError(SwiftException): pass class PermissionError(SwiftException): pass class ListingIterError(SwiftException): pass class ListingIterNotFound(ListingIterError): pass class ListingIterNotAuthorized(ListingIterError): def __init__(self, aresp): self.aresp = aresp class SegmentError(SwiftException): pass class ReplicationException(Exception): pass class ReplicationLockTimeout(LockTimeout): pass class MimeInvalid(SwiftException): pass class APIVersionError(SwiftException): pass class ClientException(Exception): def __init__(self, msg, http_scheme='', http_host='', http_port='', http_path='', http_query='', http_status=None, http_reason='', http_device='', http_response_content='', http_headers=None): super(ClientException, self).__init__(msg) self.msg = msg self.http_scheme = http_scheme self.http_host = http_host self.http_port = http_port self.http_path = http_path self.http_query = http_query self.http_status = http_status self.http_reason = http_reason self.http_device = http_device self.http_response_content = http_response_content self.http_headers = http_headers or {} def __str__(self): a = self.msg b = '' if self.http_scheme: b += '%s://' % self.http_scheme if self.http_host: b += self.http_host if self.http_port: b += ':%s' % self.http_port if self.http_path: b += self.http_path if self.http_query: b += '?%s' % self.http_query if self.http_status: if b: b = '%s %s' % (b, self.http_status) else: b = str(self.http_status) if self.http_reason: if b: b = '%s %s' % (b, self.http_reason) else: b = '- %s' % self.http_reason if self.http_device: if b: 
b = '%s: device %s' % (b, self.http_device) else: b = 'device %s' % self.http_device if self.http_response_content: if len(self.http_response_content) <= 60: b += ' %s' % self.http_response_content else: b += ' [first 60 chars of response] %s' \ % self.http_response_content[:60] return b and '%s: %s' % (a, b) or a class InvalidPidFileException(Exception): pass swift-2.7.0/swift/common/utils.py0000664000567000056710000041516112675204037020171 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Miscellaneous utility functions for use with Swift.""" from __future__ import print_function import errno import fcntl import grp import hmac import json import math import operator import os import pwd import re import sys import time import uuid import functools import email.parser from hashlib import md5, sha1 from random import random, shuffle from contextlib import contextmanager, closing import ctypes import ctypes.util from optparse import OptionParser from tempfile import mkstemp, NamedTemporaryFile import glob import itertools import stat import datetime import eventlet import eventlet.semaphore from eventlet import GreenPool, sleep, Timeout, tpool, greenthread, \ greenio, event from eventlet.green import socket, threading import eventlet.queue import netifaces import codecs utf8_decoder = codecs.getdecoder('utf-8') utf8_encoder = codecs.getencoder('utf-8') import six from six.moves import cPickle as pickle from six.moves.configparser import (ConfigParser, NoSectionError, NoOptionError, RawConfigParser) from six.moves import range from six.moves.urllib.parse import ParseResult from six.moves.urllib.parse import quote as _quote from six.moves.urllib.parse import urlparse as stdlib_urlparse from swift import gettext_ as _ import swift.common.exceptions from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \ HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE from swift.common.header_key_dict import HeaderKeyDict if six.PY3: stdlib_queue = eventlet.patcher.original('queue') else: stdlib_queue = eventlet.patcher.original('Queue') stdlib_threading = eventlet.patcher.original('threading') # logging doesn't import patched as cleanly as one would like from logging.handlers import SysLogHandler import logging logging.thread = eventlet.green.thread logging.threading = eventlet.green.threading logging._lock = logging.threading.RLock() # setup notice level logging NOTICE = 25 logging.addLevelName(NOTICE, 'NOTICE') SysLogHandler.priority_map['NOTICE'] = 'notice' # These are lazily pulled from libc elsewhere _sys_fallocate = None _posix_fadvise = None _libc_socket = None _libc_bind = None _libc_accept = None # If set to non-zero, fallocate routines will fail based on free space # available being at or below this amount, in bytes. FALLOCATE_RESERVE = 0 # Used by hash_path to offer a bit more security when generating hashes for # paths. 
It simply appends this value to all paths; guessing the hash a path # will end up with would also require knowing this suffix. HASH_PATH_SUFFIX = '' HASH_PATH_PREFIX = '' SWIFT_CONF_FILE = '/etc/swift/swift.conf' # These constants are Linux-specific, and Python doesn't seem to know # about them. We ask anyway just in case that ever gets fixed. # # The values were copied from the Linux 3.0 kernel headers. AF_ALG = getattr(socket, 'AF_ALG', 38) F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031) # Used by the parse_socket_string() function to validate IPv6 addresses IPV6_RE = re.compile("^\[(?P
.*)\](:(?P[0-9]+))?$") class InvalidHashPathConfigError(ValueError): def __str__(self): return "[swift-hash]: both swift_hash_path_suffix and " \ "swift_hash_path_prefix are missing from %s" % SWIFT_CONF_FILE def validate_hash_conf(): global HASH_PATH_SUFFIX global HASH_PATH_PREFIX if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX: hash_conf = ConfigParser() if hash_conf.read(SWIFT_CONF_FILE): try: HASH_PATH_SUFFIX = hash_conf.get('swift-hash', 'swift_hash_path_suffix') except (NoSectionError, NoOptionError): pass try: HASH_PATH_PREFIX = hash_conf.get('swift-hash', 'swift_hash_path_prefix') except (NoSectionError, NoOptionError): pass if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX: raise InvalidHashPathConfigError() try: validate_hash_conf() except InvalidHashPathConfigError: # could get monkey patched or lazy loaded pass def get_hmac(request_method, path, expires, key): """ Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for the request. :param request_method: Request method to allow. :param path: The path to the resource to allow access to. :param expires: Unix timestamp as an int for when the URL expires. :param key: HMAC shared secret. :returns: hexdigest str of the HMAC-SHA1 for the request. """ return hmac.new( key, '%s\n%s\n%s' % (request_method, expires, path), sha1).hexdigest() # Used by get_swift_info and register_swift_info to store information about # the swift cluster. _swift_info = {} _swift_admin_info = {} def get_swift_info(admin=False, disallowed_sections=None): """ Returns information about the swift cluster that has been previously registered with the register_swift_info call. :param admin: boolean value, if True will additionally return an 'admin' section with information previously registered as admin info. :param disallowed_sections: list of section names to be withheld from the information returned. :returns: dictionary of information about the swift cluster. """ disallowed_sections = disallowed_sections or [] info = dict(_swift_info) for section in disallowed_sections: key_to_pop = None sub_section_dict = info for sub_section in section.split('.'): if key_to_pop: sub_section_dict = sub_section_dict.get(key_to_pop, {}) if not isinstance(sub_section_dict, dict): sub_section_dict = {} break key_to_pop = sub_section sub_section_dict.pop(key_to_pop, None) if admin: info['admin'] = dict(_swift_admin_info) info['admin']['disallowed_sections'] = list(disallowed_sections) return info def register_swift_info(name='swift', admin=False, **kwargs): """ Registers information about the swift cluster to be retrieved with calls to get_swift_info. NOTE: Do not use "." in the param: name or any keys in kwargs. "." is used in the disallowed_sections to remove unwanted keys from /info. :param name: string, the section name to place the information under. :param admin: boolean, if True, information will be registered to an admin section which can optionally be withheld when requesting the information. :param kwargs: key value arguments representing the information to be added. :raises ValueError: if name or any of the keys in kwargs has "." in it """ if name == 'admin' or name == 'disallowed_sections': raise ValueError('\'{0}\' is reserved name.'.format(name)) if admin: dict_to_use = _swift_admin_info else: dict_to_use = _swift_info if name not in dict_to_use: if "." in name: raise ValueError('Cannot use "." in a swift_info key: %s' % name) dict_to_use[name] = {} for key, val in kwargs.items(): if "." in key: raise ValueError('Cannot use "." 
in a swift_info key: %s' % key) dict_to_use[name][key] = val def backward(f, blocksize=4096): """ A generator returning lines from a file starting with the last line, then the second last line, etc. i.e., it reads lines backwards. Stops when the first line (if any) is read. This is useful when searching for recent activity in very large files. :param f: file object to read :param blocksize: no of characters to go backwards at each block """ f.seek(0, os.SEEK_END) if f.tell() == 0: return last_row = b'' while f.tell() != 0: try: f.seek(-blocksize, os.SEEK_CUR) except IOError: blocksize = f.tell() f.seek(-blocksize, os.SEEK_CUR) block = f.read(blocksize) f.seek(-blocksize, os.SEEK_CUR) rows = block.split(b'\n') rows[-1] = rows[-1] + last_row while rows: last_row = rows.pop(-1) if rows and last_row: yield last_row yield last_row # Used when reading config values TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y')) def config_true_value(value): """ Returns True if the value is either True or a string in TRUE_VALUES. Returns False otherwise. """ return value is True or \ (isinstance(value, six.string_types) and value.lower() in TRUE_VALUES) def config_auto_int_value(value, default): """ Returns default if value is None or 'auto'. Returns value as an int or raises ValueError otherwise. """ if value is None or \ (isinstance(value, six.string_types) and value.lower() == 'auto'): return default try: value = int(value) except (TypeError, ValueError): raise ValueError('Config option must be an integer or the ' 'string "auto", not "%s".' % value) return value def append_underscore(prefix): if prefix and not prefix.endswith('_'): prefix += '_' return prefix def config_read_reseller_options(conf, defaults): """ Read reseller_prefix option and associated options from configuration Reads the reseller_prefix option, then reads options that may be associated with a specific reseller prefix. Reads options such that an option without a prefix applies to all reseller prefixes unless an option has an explicit prefix. :param conf: the configuration :param defaults: a dict of default values. The key is the option name. The value is either an array of strings or a string :return: tuple of an array of reseller prefixes and a dict of option values """ reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',') reseller_prefixes = [] for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]: if prefix == "''": prefix = '' prefix = append_underscore(prefix) if prefix not in reseller_prefixes: reseller_prefixes.append(prefix) if len(reseller_prefixes) == 0: reseller_prefixes.append('') # Get prefix-using config options associated_options = {} for prefix in reseller_prefixes: associated_options[prefix] = dict(defaults) associated_options[prefix].update( config_read_prefixed_options(conf, '', defaults)) prefix_name = prefix if prefix != '' else "''" associated_options[prefix].update( config_read_prefixed_options(conf, prefix_name, defaults)) return reseller_prefixes, associated_options def config_read_prefixed_options(conf, prefix_name, defaults): """ Read prefixed options from configuration :param conf: the configuration :param prefix_name: the prefix (including, if needed, an underscore) :param defaults: a dict of default values. 
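# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): how the
# config_true_value() and config_auto_int_value() helpers defined above are
# typically used when reading a paste-deploy conf dict. The option names
# 'log_requests' and 'workers' are placeholders, not required options.
# ---------------------------------------------------------------------------
def _example_read_config_options(conf):
    # conf is a plain dict of strings, e.g.
    # {'log_requests': 'yes', 'workers': 'auto'}
    log_requests = config_true_value(conf.get('log_requests', 'true'))
    # 'auto' (or a missing value) falls back to the supplied default;
    # anything else must parse as an integer or ValueError is raised.
    workers = config_auto_int_value(conf.get('workers', 'auto'), 4)
    return log_requests, workers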
The dict supplies the option name and type (string or comma separated string) :return: a dict containing the options """ params = {} for option_name in defaults.keys(): value = conf.get('%s%s' % (prefix_name, option_name)) if value: if isinstance(defaults.get(option_name), list): params[option_name] = [] for role in value.lower().split(','): params[option_name].append(role.strip()) else: params[option_name] = value.strip() return params def noop_libc_function(*args): return 0 def validate_configuration(): try: validate_hash_conf() except InvalidHashPathConfigError as e: sys.exit("Error: %s" % e) def load_libc_function(func_name, log_error=True, fail_if_missing=False): """ Attempt to find the function in libc, otherwise return a no-op func. :param func_name: name of the function to pull from libc. :param log_error: log an error when a function can't be found :param fail_if_missing: raise an exception when a function can't be found. Default behavior is to return a no-op function. """ try: libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) return getattr(libc, func_name) except AttributeError: if fail_if_missing: raise if log_error: logging.warning(_("Unable to locate %s in libc. Leaving as a " "no-op."), func_name) return noop_libc_function def generate_trans_id(trans_id_suffix): return 'tx%s-%010x%s' % ( uuid.uuid4().hex[:21], time.time(), quote(trans_id_suffix)) def get_policy_index(req_headers, res_headers): """ Returns the appropriate index of the storage policy for the request from a proxy server :param req: dict of the request headers. :param res: dict of the response headers. :returns: string index of storage policy, or None """ header = 'X-Backend-Storage-Policy-Index' policy_index = res_headers.get(header, req_headers.get(header)) return str(policy_index) if policy_index is not None else None def get_log_line(req, res, trans_time, additional_info): """ Make a line for logging that matches the documented log line format for backend servers. :param req: the request. :param res: the response. :param trans_time: the time the request took to complete, a float. :param additional_info: a string to log at the end of the line :returns: a properly formatted line for logging. """ policy_index = get_policy_index(req.headers, res.headers) return '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f "%s" %d %s' % ( req.remote_addr, time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()), req.method, req.path, res.status.split()[0], res.content_length or '-', req.referer or '-', req.headers.get('x-trans-id', '-'), req.user_agent or '-', trans_time, additional_info or '-', os.getpid(), policy_index or '-') def get_trans_id_time(trans_id): if len(trans_id) >= 34 and \ trans_id.startswith('tx') and trans_id[23] == '-': try: return int(trans_id[24:34], 16) except ValueError: pass return None class FileLikeIter(object): def __init__(self, iterable): """ Wraps an iterable to behave as a file-like object. The iterable must yield bytes strings. """ self.iterator = iter(iterable) self.buf = None self.closed = False def __iter__(self): return self def next(self): """ next(x) -> the next value, or raise StopIteration """ if self.closed: raise ValueError('I/O operation on closed file') if self.buf: rv = self.buf self.buf = None return rv else: return next(self.iterator) __next__ = next def read(self, size=-1): """ read([size]) -> read at most size bytes, returned as a bytes string. If the size argument is negative or omitted, read until EOF is reached. 
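# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): round-tripping
# a transaction id through generate_trans_id() and get_trans_id_time(),
# which are defined above. The '-example' suffix is only an example value.
# ---------------------------------------------------------------------------
def _example_trans_id_round_trip():
    # A generated transaction id embeds the creation time as ten hex digits
    # after the first dash; get_trans_id_time() recovers it.
    trans_id = generate_trans_id('-example')
    created_at = get_trans_id_time(trans_id)   # int seconds since the epoch
    return trans_id, created_at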
Notice that when in non-blocking mode, less data than what was requested may be returned, even if no size parameter was given. """ if self.closed: raise ValueError('I/O operation on closed file') if size < 0: return b''.join(self) elif not size: chunk = b'' elif self.buf: chunk = self.buf self.buf = None else: try: chunk = next(self.iterator) except StopIteration: return b'' if len(chunk) > size: self.buf = chunk[size:] chunk = chunk[:size] return chunk def readline(self, size=-1): """ readline([size]) -> next line from the file, as a bytes string. Retain newline. A non-negative size argument limits the maximum number of bytes to return (an incomplete line may be returned then). Return an empty string at EOF. """ if self.closed: raise ValueError('I/O operation on closed file') data = b'' while b'\n' not in data and (size < 0 or len(data) < size): if size < 0: chunk = self.read(1024) else: chunk = self.read(size - len(data)) if not chunk: break data += chunk if b'\n' in data: data, sep, rest = data.partition(b'\n') data += sep if self.buf: self.buf = rest + self.buf else: self.buf = rest return data def readlines(self, sizehint=-1): """ readlines([size]) -> list of bytes strings, each a line from the file. Call readline() repeatedly and return a list of the lines so read. The optional size argument, if given, is an approximate bound on the total number of bytes in the lines returned. """ if self.closed: raise ValueError('I/O operation on closed file') lines = [] while True: line = self.readline(sizehint) if not line: break lines.append(line) if sizehint >= 0: sizehint -= len(line) if sizehint <= 0: break return lines def close(self): """ close() -> None or (perhaps) an integer. Close the file. Sets data attribute .closed to True. A closed file cannot be used for further I/O operations. close() may be called more than once without error. Some kinds of file objects (for example, opened by popen()) may return an exit status upon closing. """ self.iterator = None self.closed = True class FallocateWrapper(object): def __init__(self, noop=False): if noop: self.func_name = 'posix_fallocate' self.fallocate = noop_libc_function return # fallocate is preferred because we need the on-disk size to match # the allocated size. Older versions of sqlite require that the # two sizes match. However, fallocate is Linux only. for func in ('fallocate', 'posix_fallocate'): self.func_name = func self.fallocate = load_libc_function(func, log_error=False) if self.fallocate is not noop_libc_function: break if self.fallocate is noop_libc_function: logging.warning(_("Unable to locate fallocate, posix_fallocate in " "libc. Leaving as a no-op.")) def __call__(self, fd, mode, offset, length): """The length parameter must be a ctypes.c_uint64.""" if FALLOCATE_RESERVE > 0: st = os.fstatvfs(fd) free = st.f_frsize * st.f_bavail - length.value if free <= FALLOCATE_RESERVE: raise OSError('FALLOCATE_RESERVE fail %s <= %s' % ( free, FALLOCATE_RESERVE)) args = { 'fallocate': (fd, mode, offset, length), 'posix_fallocate': (fd, offset, length) } return self.fallocate(*args[self.func_name]) def disable_fallocate(): global _sys_fallocate _sys_fallocate = FallocateWrapper(noop=True) def fallocate(fd, size): """ Pre-allocate disk space for a file. 
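# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): wrapping an
# iterator of byte chunks (for example a WSGI app_iter) in FileLikeIter so
# that code expecting a file object can call read()/readline() on it.
# ---------------------------------------------------------------------------
def _example_filelikeiter():
    chunks = iter([b'first line\nsecond ', b'line\n', b'tail'])
    flo = FileLikeIter(chunks)
    first = flo.readline()   # b'first line\n'
    rest = flo.read()        # b'second line\ntail'
    flo.close()
    return first, rest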
:param fd: file descriptor :param size: size to allocate (in bytes) """ global _sys_fallocate if _sys_fallocate is None: _sys_fallocate = FallocateWrapper() if size < 0: size = 0 # 1 means "FALLOC_FL_KEEP_SIZE", which means it pre-allocates invisibly ret = _sys_fallocate(fd, 1, 0, ctypes.c_uint64(size)) err = ctypes.get_errno() if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL): raise OSError(err, 'Unable to fallocate(%s)' % size) def fsync(fd): """ Sync modified file data and metadata to disk. :param fd: file descriptor """ if hasattr(fcntl, 'F_FULLSYNC'): try: fcntl.fcntl(fd, fcntl.F_FULLSYNC) except IOError as e: raise OSError(e.errno, 'Unable to F_FULLSYNC(%s)' % fd) else: os.fsync(fd) def fdatasync(fd): """ Sync modified file data to disk. :param fd: file descriptor """ try: os.fdatasync(fd) except AttributeError: fsync(fd) def fsync_dir(dirpath): """ Sync directory entries to disk. :param dirpath: Path to the directory to be synced. """ dirfd = None try: dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY) fsync(dirfd) except OSError as err: if err.errno == errno.ENOTDIR: # Raise error if someone calls fsync_dir on a non-directory raise logging.warning(_("Unable to perform fsync() on directory %s: %s"), dirpath, os.strerror(err.errno)) finally: if dirfd: os.close(dirfd) def drop_buffer_cache(fd, offset, length): """ Drop 'buffer' cache for the given range of the given file. :param fd: file descriptor :param offset: start offset :param length: length """ global _posix_fadvise if _posix_fadvise is None: _posix_fadvise = load_libc_function('posix_fadvise64') # 4 means "POSIX_FADV_DONTNEED" ret = _posix_fadvise(fd, ctypes.c_uint64(offset), ctypes.c_uint64(length), 4) if ret != 0: logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) " "-> %(ret)s", {'fd': fd, 'offset': offset, 'length': length, 'ret': ret}) NORMAL_FORMAT = "%016.05f" INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x' SHORT_FORMAT = NORMAL_FORMAT + '_%x' MAX_OFFSET = (16 ** 16) - 1 PRECISION = 1e-5 # Setting this to True will cause the internal format to always display # extended digits - even when the value is equivalent to the normalized form. # This isn't ideal during an upgrade when some servers might not understand # the new time format - but flipping it to True works great for testing. FORCE_INTERNAL = False # or True @functools.total_ordering class Timestamp(object): """ Internal Representation of Swift Time. The normalized form of the X-Timestamp header looks like a float with a fixed width to ensure stable string sorting - normalized timestamps look like "1402464677.04188" To support overwrites of existing data without modifying the original timestamp but still maintain consistency a second internal offset vector is append to the normalized timestamp form which compares and sorts greater than the fixed width float format but less than a newer timestamp. The internalized format of timestamps looks like "1402464677.04188_0000000000000000" - the portion after the underscore is the offset and is a formatted hexadecimal integer. The internalized form is not exposed to clients in responses from Swift. Normal client operations will not create a timestamp with an offset. The Timestamp class in common.utils supports internalized and normalized formatting of timestamps and also comparison of timestamp values. 
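# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): the durable
# write pattern built from the fsync() and fsync_dir() helpers above --
# write to a temp file, sync it, rename it into place, then sync the
# directory so the rename itself is durable. path/data are placeholders.
# ---------------------------------------------------------------------------
def _example_durable_write(path, data):
    import os
    from tempfile import mkstemp
    dirname = os.path.dirname(path)
    fd, tmppath = mkstemp(dir=dirname, suffix='.tmp')
    try:
        os.write(fd, data)
        fsync(fd)             # flush file data and metadata (see above)
    finally:
        os.close(fd)
    os.rename(tmppath, path)
    fsync_dir(dirname)        # make the new directory entry durable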
When the offset value of a Timestamp is 0 - it's considered insignificant and need not be represented in the string format; to support backwards compatibility during a Swift upgrade the internalized and normalized form of a Timestamp with an insignificant offset are identical. When a timestamp includes an offset it will always be represented in the internalized form, but is still excluded from the normalized form. Timestamps with an equivalent timestamp portion (the float part) will compare and order by their offset. Timestamps with a greater timestamp portion will always compare and order greater than a Timestamp with a lesser timestamp regardless of it's offset. String comparison and ordering is guaranteed for the internalized string format, and is backwards compatible for normalized timestamps which do not include an offset. """ def __init__(self, timestamp, offset=0, delta=0): """ Create a new Timestamp. :param timestamp: time in seconds since the Epoch, may be any of: * a float or integer * normalized/internalized string * another instance of this class (offset is preserved) :param offset: the second internal offset vector, an int :param delta: deca-microsecond difference from the base timestamp param, an int """ if isinstance(timestamp, six.string_types): parts = timestamp.split('_', 1) self.timestamp = float(parts.pop(0)) if parts: self.offset = int(parts[0], 16) else: self.offset = 0 else: self.timestamp = float(timestamp) self.offset = getattr(timestamp, 'offset', 0) # increment offset if offset >= 0: self.offset += offset else: raise ValueError('offset must be non-negative') if self.offset > MAX_OFFSET: raise ValueError('offset must be smaller than %d' % MAX_OFFSET) self.raw = int(round(self.timestamp / PRECISION)) # add delta if delta: self.raw = self.raw + delta if self.raw <= 0: raise ValueError( 'delta must be greater than %d' % (-1 * self.raw)) self.timestamp = float(self.raw * PRECISION) if self.timestamp < 0: raise ValueError('timestamp cannot be negative') if self.timestamp >= 10000000000: raise ValueError('timestamp too large') def __repr__(self): return INTERNAL_FORMAT % (self.timestamp, self.offset) def __str__(self): raise TypeError('You must specify which string format is required') def __float__(self): return self.timestamp def __int__(self): return int(self.timestamp) def __nonzero__(self): return bool(self.timestamp or self.offset) def __bool__(self): return self.__nonzero__() @property def normal(self): return NORMAL_FORMAT % self.timestamp @property def internal(self): if self.offset or FORCE_INTERNAL: return INTERNAL_FORMAT % (self.timestamp, self.offset) else: return self.normal @property def short(self): if self.offset or FORCE_INTERNAL: return SHORT_FORMAT % (self.timestamp, self.offset) else: return self.normal @property def isoformat(self): t = float(self.normal) if six.PY3: # On Python 3, round manually using ROUND_HALF_EVEN rounding # method, to use the same rounding method than Python 2. Python 3 # used a different rounding method, but Python 3.4.4 and 3.5.1 use # again ROUND_HALF_EVEN as Python 2. 
# See https://bugs.python.org/issue23517 frac, t = math.modf(t) us = round(frac * 1e6) if us >= 1000000: t += 1 us -= 1000000 elif us < 0: t -= 1 us += 1000000 dt = datetime.datetime.utcfromtimestamp(t) dt = dt.replace(microsecond=us) else: dt = datetime.datetime.utcfromtimestamp(t) isoformat = dt.isoformat() # python isoformat() doesn't include msecs when zero if len(isoformat) < len("1970-01-01T00:00:00.000000"): isoformat += ".000000" return isoformat def __eq__(self, other): if other is None: return False if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal == other.internal def __ne__(self, other): if other is None: return True if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal != other.internal def __lt__(self, other): if other is None: return False if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal < other.internal def __hash__(self): return hash(self.internal) def encode_timestamps(t1, t2=None, t3=None, explicit=False): """ Encode up to three timestamps into a string. Unlike a Timestamp object, the encoded string does NOT used fixed width fields and consequently no relative chronology of the timestamps can be inferred from lexicographic sorting of encoded timestamp strings. The format of the encoded string is: [<+/->[<+/->]] i.e. if t1 = t2 = t3 then just the string representation of t1 is returned, otherwise the time offsets for t2 and t3 are appended. If explicit is True then the offsets for t2 and t3 are always appended even if zero. Note: any offset value in t1 will be preserved, but offsets on t2 and t3 are not preserved. In the anticipated use cases for this method (and the inverse decode_timestamps method) the timestamps passed as t2 and t3 are not expected to have offsets as they will be timestamps associated with a POST request. In the case where the encoding is used in a container objects table row, t1 could be the PUT or DELETE time but t2 and t3 represent the content type and metadata times (if different from the data file) i.e. correspond to POST timestamps. In the case where the encoded form is used in a .meta file name, t1 and t2 both correspond to POST timestamps. """ form = '{0}' values = [t1.short] if t2 is not None: t2_t1_delta = t2.raw - t1.raw explicit = explicit or (t2_t1_delta != 0) values.append(t2_t1_delta) if t3 is not None: t3_t2_delta = t3.raw - t2.raw explicit = explicit or (t3_t2_delta != 0) values.append(t3_t2_delta) if explicit: form += '{1:+x}' if t3 is not None: form += '{2:+x}' return form.format(*values) def decode_timestamps(encoded, explicit=False): """ Parses a string of the form generated by encode_timestamps and returns a tuple of the three component timestamps. If explicit is False, component timestamps that are not explicitly encoded will be assumed to have zero delta from the previous component and therefore take the value of the previous component. If explicit is True, component timestamps that are not explicitly encoded will be returned with value None. """ # TODO: some tests, e.g. in test_replicator, put float timestamps values # into container db's, hence this defensive check, but in real world # this may never happen. if not isinstance(encoded, basestring): ts = Timestamp(encoded) return ts, ts, ts parts = [] signs = [] pos_parts = encoded.split('+') for part in pos_parts: # parse time components and their signs # e.g. 
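# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): the normal
# and internal string forms of the Timestamp class described above, and how
# offsets affect ordering. The literal values are only examples.
# ---------------------------------------------------------------------------
def _example_timestamps():
    t = Timestamp(1402464677.04188)
    assert t.normal == '1402464677.04188'
    assert t.internal == '1402464677.04188'   # zero offset => same as normal
    t_offset = Timestamp(t, offset=1)
    assert t_offset.internal == '1402464677.04188_0000000000000001'
    # Offsets break ties between timestamps with the same float part.
    assert t_offset > t
    # A later float part always wins, whatever the offsets are.
    assert Timestamp(1402464677.04189) > t_offset
    return t_offset.internal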
x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1] neg_parts = part.split('-') parts = parts + neg_parts signs = signs + [1] + [-1] * (len(neg_parts) - 1) t1 = Timestamp(parts[0]) t2 = t3 = None if len(parts) > 1: t2 = t1 delta = signs[1] * int(parts[1], 16) # if delta = 0 we want t2 = t3 = t1 in order to # preserve any offset in t1 - only construct a distinct # timestamp if there is a non-zero delta. if delta: t2 = Timestamp((t1.raw + delta) * PRECISION) elif not explicit: t2 = t1 if len(parts) > 2: t3 = t2 delta = signs[2] * int(parts[2], 16) if delta: t3 = Timestamp((t2.raw + delta) * PRECISION) elif not explicit: t3 = t2 return t1, t2, t3 def normalize_timestamp(timestamp): """ Format a timestamp (string or numeric) into a standardized xxxxxxxxxx.xxxxx (10.5) format. Note that timestamps using values greater than or equal to November 20th, 2286 at 17:46 UTC will use 11 digits to represent the number of seconds. :param timestamp: unix timestamp :returns: normalized timestamp as a string """ return Timestamp(timestamp).normal EPOCH = datetime.datetime(1970, 1, 1) def last_modified_date_to_timestamp(last_modified_date_str): """ Convert a last modified date (like you'd get from a container listing, e.g. 2014-02-28T23:22:36.698390) to a float. """ start = datetime.datetime.strptime(last_modified_date_str, '%Y-%m-%dT%H:%M:%S.%f') delta = start - EPOCH # This calculation is based on Python 2.7's Modules/datetimemodule.c, # function delta_to_microseconds(), but written in Python. return Timestamp(delta.total_seconds()) def normalize_delete_at_timestamp(timestamp): """ Format a timestamp (string or numeric) into a standardized xxxxxxxxxx (10) format. Note that timestamps less than 0000000000 are raised to 0000000000 and values greater than November 20th, 2286 at 17:46:39 UTC will be capped at that date and time, resulting in no return value exceeding 9999999999. This cap is because the expirer is already working through a sorted list of strings that were all a length of 10. Adding another digit would mess up the sort and cause the expirer to break from processing early. By 2286, this problem will need to be fixed, probably by creating an additional .expiring_objects account to work from with 11 (or more) digit container names. :param timestamp: unix timestamp :returns: normalized timestamp as a string """ return '%010d' % min(max(0, float(timestamp)), 9999999999) def mkdirs(path): """ Ensures the path is a directory or makes it if not. Errors if the path exists but is a file or on permissions failure. :param path: path to create """ if not os.path.isdir(path): try: os.makedirs(path) except OSError as err: if err.errno != errno.EEXIST or not os.path.isdir(path): raise def makedirs_count(path, count=0): """ Same as os.makedirs() except that this method returns the number of new directories that had to be created. Also, this does not raise an error if target directory already exists. This behaviour is similar to Python 3.x's os.makedirs() called with exist_ok=True. Also similar to swift.common.utils.mkdirs() https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212 """ head, tail = os.path.split(path) if not tail: head, tail = os.path.split(head) if head and tail and not os.path.exists(head): count = makedirs_count(head, count) if tail == os.path.curdir: return try: os.mkdir(path) except OSError as e: # EEXIST may also be raised if path exists as a file # Do not let that pass. 
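# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): encoding and
# decoding a pair of timestamps with encode_timestamps()/decode_timestamps()
# defined above. The concrete values are examples only.
# ---------------------------------------------------------------------------
def _example_encoded_timestamps():
    t1 = Timestamp(1402464677.04188)
    # When all components are equal only t1's short form is stored.
    assert encode_timestamps(t1, t1, t1) == t1.short
    t2 = Timestamp(1402464677.04190)          # two 10-microsecond ticks later
    encoded = encode_timestamps(t1, t2)       # t1.short plus a '+2' hex delta
    back1, back2, _junk = decode_timestamps(encoded)
    assert back1 == t1 and back2 == t2
    return encoded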
if e.errno != errno.EEXIST or not os.path.isdir(path): raise else: count += 1 return count def renamer(old, new, fsync=True): """ Attempt to fix / hide race conditions like empty object directories being removed by backend processes during uploads, by retrying. The containing directory of 'new' and of all newly created directories are fsync'd by default. This _will_ come at a performance penalty. In cases where these additional fsyncs are not necessary, it is expected that the caller of renamer() turn it off explicitly. :param old: old path to be renamed :param new: new path to be renamed to :param fsync: fsync on containing directory of new and also all the newly created directories. """ dirpath = os.path.dirname(new) try: count = makedirs_count(dirpath) os.rename(old, new) except OSError: count = makedirs_count(dirpath) os.rename(old, new) if fsync: # If count=0, no new directories were created. But we still need to # fsync leaf dir after os.rename(). # If count>0, starting from leaf dir, fsync parent dirs of all # directories created by makedirs_count() for i in range(0, count + 1): fsync_dir(dirpath) dirpath = os.path.dirname(dirpath) def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False): """ Validate and split the given HTTP request path. **Examples**:: ['a'] = split_path('/a') ['a', None] = split_path('/a', 1, 2) ['a', 'c'] = split_path('/a/c', 1, 2) ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True) :param path: HTTP Request path to be split :param minsegs: Minimum number of segments to be extracted :param maxsegs: Maximum number of segments to be extracted :param rest_with_last: If True, trailing data will be returned as part of last segment. If False, and there is trailing data, raises ValueError. :returns: list of segments with a length of maxsegs (non-existent segments will return as None) :raises: ValueError if given an invalid path """ if not maxsegs: maxsegs = minsegs if minsegs > maxsegs: raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs)) if rest_with_last: segs = path.split('/', maxsegs) minsegs += 1 maxsegs += 1 count = len(segs) if (segs[0] or count < minsegs or count > maxsegs or '' in segs[1:minsegs]): raise ValueError('Invalid path: %s' % quote(path)) else: minsegs += 1 maxsegs += 1 segs = path.split('/', maxsegs) count = len(segs) if (segs[0] or count < minsegs or count > maxsegs + 1 or '' in segs[1:minsegs] or (count == maxsegs + 1 and segs[maxsegs])): raise ValueError('Invalid path: %s' % quote(path)) segs = segs[1:maxsegs] segs.extend([None] * (maxsegs - 1 - len(segs))) return segs def validate_device_partition(device, partition): """ Validate that a device and a partition are valid and won't lead to directory traversal when used. :param device: device to validate :param partition: partition to validate :raises: ValueError if given an invalid device or partition """ if not device or '/' in device or device in ['.', '..']: raise ValueError('Invalid device: %s' % quote(device or '')) if not partition or '/' in partition or partition in ['.', '..']: raise ValueError('Invalid partition: %s' % quote(partition or '')) class RateLimitedIterator(object): """ Wrap an iterator to only yield elements at a rate of N per second. 
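# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): splitting a
# proxy request path with split_path(), defined above. The path used here is
# a made-up example.
# ---------------------------------------------------------------------------
def _example_split_request_path():
    # With rest_with_last=True any remaining slashes stay in the final
    # segment, which is how object names containing '/' are handled.
    version, account, container, obj = split_path(
        '/v1/AUTH_test/photos/2016/cat.jpg', 4, 4, True)
    # version == 'v1', account == 'AUTH_test', container == 'photos',
    # obj == '2016/cat.jpg'
    return version, account, container, obj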
:param iterable: iterable to wrap :param elements_per_second: the rate at which to yield elements :param limit_after: rate limiting kicks in only after yielding this many elements; default is 0 (rate limit immediately) """ def __init__(self, iterable, elements_per_second, limit_after=0, ratelimit_if=lambda _junk: True): self.iterator = iter(iterable) self.elements_per_second = elements_per_second self.limit_after = limit_after self.running_time = 0 self.ratelimit_if = ratelimit_if def __iter__(self): return self def next(self): next_value = next(self.iterator) if self.ratelimit_if(next_value): if self.limit_after > 0: self.limit_after -= 1 else: self.running_time = ratelimit_sleep(self.running_time, self.elements_per_second) return next_value __next__ = next class GreenthreadSafeIterator(object): """ Wrap an iterator to ensure that only one greenthread is inside its next() method at a time. This is useful if an iterator's next() method may perform network IO, as that may trigger a greenthread context switch (aka trampoline), which can give another greenthread a chance to call next(). At that point, you get an error like "ValueError: generator already executing". By wrapping calls to next() with a mutex, we avoid that error. """ def __init__(self, unsafe_iterable): self.unsafe_iter = iter(unsafe_iterable) self.semaphore = eventlet.semaphore.Semaphore(value=1) def __iter__(self): return self def next(self): with self.semaphore: return next(self.unsafe_iter) __next__ = next class NullLogger(object): """A no-op logger for eventlet wsgi.""" def write(self, *args): # "Logs" the args to nowhere pass class LoggerFileObject(object): def __init__(self, logger, log_type='STDOUT'): self.logger = logger self.log_type = log_type def write(self, value): value = value.strip() if value: if 'Connection reset by peer' in value: self.logger.error( _('%s: Connection reset by peer'), self.log_type) else: self.logger.error(_('%s: %s'), self.log_type, value) def writelines(self, values): self.logger.error(_('%s: %s'), self.log_type, '#012'.join(values)) def close(self): pass def flush(self): pass def __iter__(self): return self def next(self): raise IOError(errno.EBADF, 'Bad file descriptor') __next__ = next def read(self, size=-1): raise IOError(errno.EBADF, 'Bad file descriptor') def readline(self, size=-1): raise IOError(errno.EBADF, 'Bad file descriptor') def tell(self): return 0 def xreadlines(self): return self class StatsdClient(object): def __init__(self, host, port, base_prefix='', tail_prefix='', default_sample_rate=1, sample_rate_factor=1, logger=None): self._host = host self._port = port self._base_prefix = base_prefix self.set_prefix(tail_prefix) self._default_sample_rate = default_sample_rate self._sample_rate_factor = sample_rate_factor self.random = random self.logger = logger # Determine if host is IPv4 or IPv6 addr_info = None try: addr_info = socket.getaddrinfo(host, port, socket.AF_INET) self._sock_family = socket.AF_INET except socket.gaierror: try: addr_info = socket.getaddrinfo(host, port, socket.AF_INET6) self._sock_family = socket.AF_INET6 except socket.gaierror: # Don't keep the server from starting from what could be a # transient DNS failure. Any hostname will get re-resolved as # necessary in the .sendto() calls. # However, we don't know if we're IPv4 or IPv6 in this case, so # we assume legacy IPv4. 
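# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): throttling an
# iterator with the RateLimitedIterator wrapper defined above. The rate and
# limit_after values are only examples.
# ---------------------------------------------------------------------------
def _example_ratelimited_iteration(work_items):
    # Yield at most ~100 items per second; the first 1000 items pass
    # through unthrottled thanks to limit_after.
    for item in RateLimitedIterator(work_items, 100, limit_after=1000):
        yield item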
self._sock_family = socket.AF_INET # NOTE: we use the original host value, not the DNS-resolved one # because if host is a hostname, we don't want to cache the DNS # resolution for the entire lifetime of this process. Let standard # name resolution caching take effect. This should help operators use # DNS trickery if they want. if addr_info is not None: # addr_info is a list of 5-tuples with the following structure: # (family, socktype, proto, canonname, sockaddr) # where sockaddr is the only thing of interest to us, and we only # use the first result. We want to use the originally supplied # host (see note above) and the remainder of the variable-length # sockaddr: IPv4 has (address, port) while IPv6 has (address, # port, flow info, scope id). sockaddr = addr_info[0][-1] self._target = (host,) + (sockaddr[1:]) else: self._target = (host, port) def set_prefix(self, new_prefix): if new_prefix and self._base_prefix: self._prefix = '.'.join([self._base_prefix, new_prefix, '']) elif new_prefix: self._prefix = new_prefix + '.' elif self._base_prefix: self._prefix = self._base_prefix + '.' else: self._prefix = '' def _send(self, m_name, m_value, m_type, sample_rate): if sample_rate is None: sample_rate = self._default_sample_rate sample_rate = sample_rate * self._sample_rate_factor parts = ['%s%s:%s' % (self._prefix, m_name, m_value), m_type] if sample_rate < 1: if self.random() < sample_rate: parts.append('@%s' % (sample_rate,)) else: return if six.PY3: parts = [part.encode('utf-8') for part in parts] # Ideally, we'd cache a sending socket in self, but that # results in a socket getting shared by multiple green threads. with closing(self._open_socket()) as sock: try: return sock.sendto(b'|'.join(parts), self._target) except IOError as err: if self.logger: self.logger.warning( 'Error sending UDP message to %r: %s', self._target, err) def _open_socket(self): return socket.socket(self._sock_family, socket.SOCK_DGRAM) def update_stats(self, m_name, m_value, sample_rate=None): return self._send(m_name, m_value, 'c', sample_rate) def increment(self, metric, sample_rate=None): return self.update_stats(metric, 1, sample_rate) def decrement(self, metric, sample_rate=None): return self.update_stats(metric, -1, sample_rate) def timing(self, metric, timing_ms, sample_rate=None): return self._send(metric, timing_ms, 'ms', sample_rate) def timing_since(self, metric, orig_time, sample_rate=None): return self.timing(metric, (time.time() - orig_time) * 1000, sample_rate) def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None): if byte_xfer: return self.timing(metric, elapsed_time * 1000 / byte_xfer * 1000, sample_rate) def server_handled_successfully(status_int): """ True for successful responses *or* error codes that are not Swift's fault, False otherwise. For example, 500 is definitely the server's fault, but 412 is an error code (4xx are all errors) that is due to a header the client sent. If one is tracking error rates to monitor server health, one would be advised to use a function like this one, lest a client cause a flurry of 404s or 416s and make a spurious spike in your errors graph. """ return (is_success(status_int) or is_redirection(status_int) or status_int == HTTP_NOT_FOUND or status_int == HTTP_PRECONDITION_FAILED or status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE) def timing_stats(**dec_kwargs): """ Returns a decorator that logs timing events or errors for public methods in swift's wsgi server controllers, based on response code. 
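# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): stand-alone
# use of the StatsdClient defined above. In practice get_logger() (below)
# builds a client from the log_statsd_* options and attaches it to the
# logger; the host, port and prefix here are placeholders.
# ---------------------------------------------------------------------------
def _example_statsd_client():
    client = StatsdClient('127.0.0.1', 8125, tail_prefix='proxy-server')
    client.increment('requests')               # counter: +1
    client.timing('GET.timing', 42.5)          # timer value, in milliseconds
    client.update_stats('bytes.xfer', 1024)    # arbitrary counter delta
    return client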
""" def decorating_func(func): method = func.__name__ @functools.wraps(func) def _timing_stats(ctrl, *args, **kwargs): start_time = time.time() resp = func(ctrl, *args, **kwargs) if server_handled_successfully(resp.status_int): ctrl.logger.timing_since(method + '.timing', start_time, **dec_kwargs) else: ctrl.logger.timing_since(method + '.errors.timing', start_time, **dec_kwargs) return resp return _timing_stats return decorating_func # double inheritance to support property with setter class LogAdapter(logging.LoggerAdapter, object): """ A Logger like object which performs some reformatting on calls to :meth:`exception`. Can be used to store a threadlocal transaction id and client ip. """ _cls_thread_local = threading.local() def __init__(self, logger, server): logging.LoggerAdapter.__init__(self, logger, {}) self.server = server self.warn = self.warning @property def txn_id(self): if hasattr(self._cls_thread_local, 'txn_id'): return self._cls_thread_local.txn_id @txn_id.setter def txn_id(self, value): self._cls_thread_local.txn_id = value @property def client_ip(self): if hasattr(self._cls_thread_local, 'client_ip'): return self._cls_thread_local.client_ip @client_ip.setter def client_ip(self, value): self._cls_thread_local.client_ip = value @property def thread_locals(self): return (self.txn_id, self.client_ip) @thread_locals.setter def thread_locals(self, value): self.txn_id, self.client_ip = value def getEffectiveLevel(self): return self.logger.getEffectiveLevel() def process(self, msg, kwargs): """ Add extra info to message """ kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id, 'client_ip': self.client_ip} return msg, kwargs def notice(self, msg, *args, **kwargs): """ Convenience function for syslog priority LOG_NOTICE. The python logging lvl is set to 25, just above info. SysLogHandler is monkey patched to map this log lvl to the LOG_NOTICE syslog priority. """ self.log(NOTICE, msg, *args, **kwargs) def _exception(self, msg, *args, **kwargs): logging.LoggerAdapter.exception(self, msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): _junk, exc, _junk = sys.exc_info() call = self.error emsg = '' if isinstance(exc, (OSError, socket.error)): if exc.errno in (errno.EIO, errno.ENOSPC): emsg = str(exc) elif exc.errno == errno.ECONNREFUSED: emsg = _('Connection refused') elif exc.errno == errno.EHOSTUNREACH: emsg = _('Host unreachable') elif exc.errno == errno.ETIMEDOUT: emsg = _('Connection timeout') else: call = self._exception elif isinstance(exc, eventlet.Timeout): emsg = exc.__class__.__name__ if hasattr(exc, 'seconds'): emsg += ' (%ss)' % exc.seconds if isinstance(exc, swift.common.exceptions.MessageTimeout): if exc.msg: emsg += ' %s' % exc.msg else: call = self._exception call('%s: %s' % (msg, emsg), *args, **kwargs) def set_statsd_prefix(self, prefix): """ The StatsD client prefix defaults to the "name" of the logger. This method may override that default with a specific value. Currently used in the proxy-server to differentiate the Account, Container, and Object controllers. """ if self.logger.statsd_client: self.logger.statsd_client.set_prefix(prefix) def statsd_delegate(statsd_func_name): """ Factory to create methods which delegate to methods on self.logger.statsd_client (an instance of StatsdClient). The created methods conditionally delegate to a method whose name is given in 'statsd_func_name'. The created delegate methods are a no-op when StatsD logging is not configured. :param statsd_func_name: the name of a method on StatsdClient. 
""" func = getattr(StatsdClient, statsd_func_name) @functools.wraps(func) def wrapped(self, *a, **kw): if getattr(self.logger, 'statsd_client'): return func(self.logger.statsd_client, *a, **kw) return wrapped update_stats = statsd_delegate('update_stats') increment = statsd_delegate('increment') decrement = statsd_delegate('decrement') timing = statsd_delegate('timing') timing_since = statsd_delegate('timing_since') transfer_rate = statsd_delegate('transfer_rate') class SwiftLogFormatter(logging.Formatter): """ Custom logging.Formatter will append txn_id to a log message if the record has one and the message does not. Optionally it can shorten overly long log lines. """ def __init__(self, fmt=None, datefmt=None, max_line_length=0): logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt) self.max_line_length = max_line_length def format(self, record): if not hasattr(record, 'server'): # Catch log messages that were not initiated by swift # (for example, the keystone auth middleware) record.server = record.name # Included from Python's logging.Formatter and then altered slightly to # replace \n with #012 record.message = record.getMessage() if self._fmt.find('%(asctime)') >= 0: record.asctime = self.formatTime(record, self.datefmt) msg = (self._fmt % record.__dict__).replace('\n', '#012') if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException( record.exc_info).replace('\n', '#012') if record.exc_text: if not msg.endswith('#012'): msg = msg + '#012' msg = msg + record.exc_text if (hasattr(record, 'txn_id') and record.txn_id and record.levelno != logging.INFO and record.txn_id not in msg): msg = "%s (txn: %s)" % (msg, record.txn_id) if (hasattr(record, 'client_ip') and record.client_ip and record.levelno != logging.INFO and record.client_ip not in msg): msg = "%s (client_ip: %s)" % (msg, record.client_ip) if self.max_line_length > 0 and len(msg) > self.max_line_length: if self.max_line_length < 7: msg = msg[:self.max_line_length] else: approxhalf = (self.max_line_length - 5) // 2 msg = msg[:approxhalf] + " ... " + msg[-approxhalf:] return msg def get_logger(conf, name=None, log_to_console=False, log_route=None, fmt="%(server)s: %(message)s"): """ Get the current system logger using config settings. 
**Log config and defaults**:: log_facility = LOG_LOCAL0 log_level = INFO log_name = swift log_max_line_length = 0 log_udp_host = (disabled) log_udp_port = logging.handlers.SYSLOG_UDP_PORT log_address = /dev/log log_statsd_host = (disabled) log_statsd_port = 8125 log_statsd_default_sample_rate = 1.0 log_statsd_sample_rate_factor = 1.0 log_statsd_metric_prefix = (empty-string) :param conf: Configuration dict to read settings from :param name: Name of the logger :param log_to_console: Add handler which writes to console on stderr :param log_route: Route for the logging, not emitted to the log, just used to separate logging configurations :param fmt: Override log format """ if not conf: conf = {} if name is None: name = conf.get('log_name', 'swift') if not log_route: log_route = name logger = logging.getLogger(log_route) logger.propagate = False # all new handlers will get the same formatter formatter = SwiftLogFormatter( fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0))) # get_logger will only ever add one SysLog Handler to a logger if not hasattr(get_logger, 'handler4logger'): get_logger.handler4logger = {} if logger in get_logger.handler4logger: logger.removeHandler(get_logger.handler4logger[logger]) # facility for this logger will be set by last call wins facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'), SysLogHandler.LOG_LOCAL0) udp_host = conf.get('log_udp_host') if udp_host: udp_port = int(conf.get('log_udp_port', logging.handlers.SYSLOG_UDP_PORT)) handler = SysLogHandler(address=(udp_host, udp_port), facility=facility) else: log_address = conf.get('log_address', '/dev/log') try: handler = SysLogHandler(address=log_address, facility=facility) except socket.error as e: # Either /dev/log isn't a UNIX socket or it does not exist at all if e.errno not in [errno.ENOTSOCK, errno.ENOENT]: raise e handler = SysLogHandler(facility=facility) handler.setFormatter(formatter) logger.addHandler(handler) get_logger.handler4logger[logger] = handler # setup console logging if log_to_console or hasattr(get_logger, 'console_handler4logger'): # remove pre-existing console handler for this logger if not hasattr(get_logger, 'console_handler4logger'): get_logger.console_handler4logger = {} if logger in get_logger.console_handler4logger: logger.removeHandler(get_logger.console_handler4logger[logger]) console_handler = logging.StreamHandler(sys.__stderr__) console_handler.setFormatter(formatter) logger.addHandler(console_handler) get_logger.console_handler4logger[logger] = console_handler # set the level for the logger logger.setLevel( getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO)) # Setup logger with a StatsD client if so configured statsd_host = conf.get('log_statsd_host') if statsd_host: statsd_port = int(conf.get('log_statsd_port', 8125)) base_prefix = conf.get('log_statsd_metric_prefix', '') default_sample_rate = float(conf.get( 'log_statsd_default_sample_rate', 1)) sample_rate_factor = float(conf.get( 'log_statsd_sample_rate_factor', 1)) statsd_client = StatsdClient(statsd_host, statsd_port, base_prefix, name, default_sample_rate, sample_rate_factor, logger=logger) logger.statsd_client = statsd_client else: logger.statsd_client = None adapted_logger = LogAdapter(logger, name) other_handlers = conf.get('log_custom_handlers', None) if other_handlers: log_custom_handlers = [s.strip() for s in other_handlers.split(',') if s.strip()] for hook in log_custom_handlers: try: mod, fnc = hook.rsplit('.', 1) logger_hook = getattr(__import__(mod, 
fromlist=[fnc]), fnc) logger_hook(conf, name, log_to_console, log_route, fmt, logger, adapted_logger) except (AttributeError, ImportError): print('Error calling custom handler [%s]' % hook, file=sys.stderr) except ValueError: print('Invalid custom handler format [%s]' % hook, file=sys.stderr) return adapted_logger def get_hub(): """ Checks whether poll is available and falls back on select if it isn't. Note about epoll: Review: https://review.openstack.org/#/c/18806/ There was a problem where once out of every 30 quadrillion connections, a coroutine wouldn't wake up when the client closed its end. Epoll was not reporting the event or it was getting swallowed somewhere. Then when that file descriptor was re-used, eventlet would freak right out because it still thought it was waiting for activity from it in some other coro. """ try: import select if hasattr(select, "poll"): return "poll" return "selects" except ImportError: return None def drop_privileges(user, call_setsid=True): """ Sets the userid/groupid of the current process, get session leader, etc. :param user: User name to change privileges to """ if os.geteuid() == 0: groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem] os.setgroups(groups) user = pwd.getpwnam(user) os.setgid(user[3]) os.setuid(user[2]) os.environ['HOME'] = user[5] if call_setsid: try: os.setsid() except OSError: pass os.chdir('/') # in case you need to rmdir on where you started the daemon os.umask(0o22) # ensure files are created with the correct privileges def capture_stdio(logger, **kwargs): """ Log unhandled exceptions, close stdio, capture stdout and stderr. param logger: Logger object to use """ # log uncaught exceptions sys.excepthook = lambda * exc_info: \ logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info) # collect stdio file desc not in use for logging stdio_files = [sys.stdin, sys.stdout, sys.stderr] console_fds = [h.stream.fileno() for _junk, h in getattr( get_logger, 'console_handler4logger', {}).items()] stdio_files = [f for f in stdio_files if f.fileno() not in console_fds] with open(os.devnull, 'r+b') as nullfile: # close stdio (excludes fds open for logging) for f in stdio_files: # some platforms throw an error when attempting an stdin flush try: f.flush() except IOError: pass try: os.dup2(nullfile.fileno(), f.fileno()) except OSError: pass # redirect stdio if kwargs.pop('capture_stdout', True): sys.stdout = LoggerFileObject(logger) if kwargs.pop('capture_stderr', True): sys.stderr = LoggerFileObject(logger, 'STDERR') def parse_options(parser=None, once=False, test_args=None): """ Parse standard swift server/daemon options with optparse.OptionParser. :param parser: OptionParser to use. If not sent one will be created. :param once: Boolean indicating the "once" option is available :param test_args: Override sys.argv; used in testing :returns : Tuple of (config, options); config is an absolute path to the config file, options is the parser options as a dictionary. 
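# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): building a
# LogAdapter from a conf dict with get_logger(), defined above. Unset
# options fall back to the documented defaults; the values below are
# examples only.
# ---------------------------------------------------------------------------
def _example_get_logger():
    conf = {
        'log_name': 'proxy-server',
        'log_level': 'DEBUG',
        'log_statsd_host': 'localhost',
        'log_statsd_port': '8125',
    }
    logger = get_logger(conf, log_route='proxy-server')
    logger.info('logger configured')
    # Delegates to the StatsD client configured via log_statsd_host above;
    # it is a silent no-op when no StatsD host is configured.
    logger.increment('example.counter')
    return logger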
:raises SystemExit: First arg (CONFIG) is required, file must exist """ if not parser: parser = OptionParser(usage="%prog CONFIG [options]") parser.add_option("-v", "--verbose", default=False, action="store_true", help="log to console") if once: parser.add_option("-o", "--once", default=False, action="store_true", help="only run one pass of daemon") # if test_args is None, optparse will use sys.argv[:1] options, args = parser.parse_args(args=test_args) if not args: parser.print_usage() print(_("Error: missing config path argument")) sys.exit(1) config = os.path.abspath(args.pop(0)) if not os.path.exists(config): parser.print_usage() print(_("Error: unable to locate %s") % config) sys.exit(1) extra_args = [] # if any named options appear in remaining args, set the option to True for arg in args: if arg in options.__dict__: setattr(options, arg, True) else: extra_args.append(arg) options = vars(options) if extra_args: options['extra_args'] = extra_args return config, options def expand_ipv6(address): """ Expand ipv6 address. :param address: a string indicating valid ipv6 address :returns: a string indicating fully expanded ipv6 address """ packed_ip = socket.inet_pton(socket.AF_INET6, address) return socket.inet_ntop(socket.AF_INET6, packed_ip) def whataremyips(bind_ip=None): """ Get "our" IP addresses ("us" being the set of services configured by one `*.conf` file). If our REST listens on a specific address, return it. Otherwise, if listen on '0.0.0.0' or '::' return all addresses, including the loopback. :param str bind_ip: Optional bind_ip from a config file; may be IP address or hostname. :returns: list of Strings of ip addresses """ if bind_ip: # See if bind_ip is '0.0.0.0'/'::' try: _, _, _, _, sockaddr = socket.getaddrinfo( bind_ip, None, 0, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST)[0] if sockaddr[0] not in ('0.0.0.0', '::'): return [bind_ip] except socket.gaierror: pass addresses = [] for interface in netifaces.interfaces(): try: iface_data = netifaces.ifaddresses(interface) for family in iface_data: if family not in (netifaces.AF_INET, netifaces.AF_INET6): continue for address in iface_data[family]: addr = address['addr'] # If we have an ipv6 address remove the # %ether_interface at the end if family == netifaces.AF_INET6: addr = expand_ipv6(addr.split('%')[0]) addresses.append(addr) except ValueError: pass return addresses def parse_socket_string(socket_string, default_port): """ Given a string representing a socket, returns a tuple of (host, port). Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an optional port. If an IPv6 address is specified it **must** be enclosed in [], like *[::1]* or *[::1]:11211*. This follows the accepted prescription for `IPv6 host literals`_. Examples:: server.org server.org:1337 127.0.0.1:1337 [::1]:1337 [::1] .. 
_IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2 """ port = default_port # IPv6 addresses must be between '[]' if socket_string.startswith('['): match = IPV6_RE.match(socket_string) if not match: raise ValueError("Invalid IPv6 address: %s" % socket_string) host = match.group('address') port = match.group('port') or port else: if ':' in socket_string: tokens = socket_string.split(':') if len(tokens) > 2: raise ValueError("IPv6 addresses must be between '[]'") host, port = tokens else: host = socket_string return (host, port) def storage_directory(datadir, partition, name_hash): """ Get the storage directory :param datadir: Base data directory :param partition: Partition :param name_hash: Account, container or object name hash :returns: Storage directory """ return os.path.join(datadir, str(partition), name_hash[-3:], name_hash) def hash_path(account, container=None, object=None, raw_digest=False): """ Get the canonical hash for an account/container/object :param account: Account :param container: Container :param object: Object :param raw_digest: If True, return the raw version rather than a hex digest :returns: hash string """ if object and not container: raise ValueError('container is required if object is provided') paths = [account] if container: paths.append(container) if object: paths.append(object) if raw_digest: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).digest() else: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).hexdigest() @contextmanager def lock_path(directory, timeout=10, timeout_class=None): """ Context manager that acquires a lock on a directory. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). For locking exclusively, file or directory has to be opened in Write mode. Python doesn't allow directories to be opened in Write Mode. So we workaround by locking a hidden file in the directory. :param directory: directory to be locked :param timeout: timeout (in seconds) :param timeout_class: The class of the exception to raise if the lock cannot be granted within the timeout. Will be constructed as timeout_class(timeout, lockpath). Default: LockTimeout """ if timeout_class is None: timeout_class = swift.common.exceptions.LockTimeout mkdirs(directory) lockpath = '%s/.lock' % directory fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT) sleep_time = 0.01 slower_sleep_time = max(timeout * 0.01, sleep_time) slowdown_at = timeout * 0.01 time_slept = 0 try: with timeout_class(timeout, lockpath): while True: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) break except IOError as err: if err.errno != errno.EAGAIN: raise if time_slept > slowdown_at: sleep_time = slower_sleep_time sleep(sleep_time) time_slept += sleep_time yield True finally: os.close(fd) @contextmanager def lock_file(filename, timeout=10, append=False, unlink=True): """ Context manager that acquires a lock on a file. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). 
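# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): mapping an
# account/container/object name to its on-disk directory with hash_path()
# and storage_directory(), defined above. The device mount point and
# partition number are placeholders; real partitions come from the ring.
# ---------------------------------------------------------------------------
def _example_object_disk_path():
    name_hash = hash_path('AUTH_test', 'photos', '2016/cat.jpg')
    # Produces something like objects/<partition>/<last 3 of hash>/<hash>
    return os.path.join('/srv/node/sdb1',
                        storage_directory('objects', 1234, name_hash))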
:param filename: file to be locked :param timeout: timeout (in seconds) :param append: True if file should be opened in append mode :param unlink: True if the file should be unlinked at the end """ flags = os.O_CREAT | os.O_RDWR if append: flags |= os.O_APPEND mode = 'a+' else: mode = 'r+' while True: fd = os.open(filename, flags) file_obj = os.fdopen(fd, mode) try: with swift.common.exceptions.LockTimeout(timeout, filename): while True: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) break except IOError as err: if err.errno != errno.EAGAIN: raise sleep(0.01) try: if os.stat(filename).st_ino != os.fstat(fd).st_ino: continue except OSError as err: if err.errno == errno.ENOENT: continue raise yield file_obj if unlink: os.unlink(filename) break finally: file_obj.close() def lock_parent_directory(filename, timeout=10): """ Context manager that acquires a lock on the parent directory of the given file path. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). :param filename: file path of the parent directory to be locked :param timeout: timeout (in seconds) """ return lock_path(os.path.dirname(filename), timeout=timeout) def get_time_units(time_amount): """ Get a nomralized length of time in the largest unit of time (hours, minutes, or seconds.) :param time_amount: length of time in seconds :returns: A touple of (length of time, unit of time) where unit of time is one of ('h', 'm', 's') """ time_unit = 's' if time_amount > 60: time_amount /= 60 time_unit = 'm' if time_amount > 60: time_amount /= 60 time_unit = 'h' return time_amount, time_unit def compute_eta(start_time, current_value, final_value): """ Compute an ETA. Now only if we could also have a progress bar... :param start_time: Unix timestamp when the operation began :param current_value: Current value :param final_value: Final value :returns: ETA as a tuple of (length of time, unit of time) where unit of time is one of ('h', 'm', 's') """ elapsed = time.time() - start_time completion = (float(current_value) / final_value) or 0.00001 return get_time_units(1.0 / completion * elapsed - elapsed) def unlink_older_than(path, mtime): """ Remove any file in a given path that that was last modified before mtime. :param path: path to remove file from :param mtime: timestamp of oldest file to keep """ filepaths = map(functools.partial(os.path.join, path), listdir(path)) return unlink_paths_older_than(filepaths, mtime) def unlink_paths_older_than(filepaths, mtime): """ Remove any files from the given list that that were last modified before mtime. 
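# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): reporting
# progress with compute_eta() and get_time_units(), defined above.
# ---------------------------------------------------------------------------
def _example_progress_eta(start_time, done, total):
    # For example, 25 of 100 items done after 30 seconds of elapsed time
    # yields 90 more seconds, reported as (1.5, 'm').
    amount, unit = compute_eta(start_time, done, total)
    return '%.1f%s remaining' % (amount, unit)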
:param filepaths: a list of strings, the full paths of files to check :param mtime: timestamp of oldest file to keep """ for fpath in filepaths: try: if os.path.getmtime(fpath) < mtime: os.unlink(fpath) except OSError: pass def item_from_env(env, item_name, allow_none=False): """ Get a value from the wsgi environment :param env: wsgi environment dict :param item_name: name of item to get :returns: the value from the environment """ item = env.get(item_name, None) if item is None and not allow_none: logging.error("ERROR: %s could not be found in env!", item_name) return item def cache_from_env(env, allow_none=False): """ Get memcache connection pool from the environment (which had been previously set by the memcache middleware :param env: wsgi environment dict :returns: swift.common.memcached.MemcacheRing from environment """ return item_from_env(env, 'swift.cache', allow_none) def read_conf_dir(parser, conf_dir): conf_files = [] for f in os.listdir(conf_dir): if f.endswith('.conf') and not f.startswith('.'): conf_files.append(os.path.join(conf_dir, f)) return parser.read(sorted(conf_files)) def readconf(conf_path, section_name=None, log_name=None, defaults=None, raw=False): """ Read config file(s) and return config items as a dict :param conf_path: path to config file/directory, or a file-like object (hasattr readline) :param section_name: config section to read (will return all sections if not defined) :param log_name: name to be used with logging (will use section_name if not defined) :param defaults: dict of default values to pre-populate the config with :returns: dict of config items """ if defaults is None: defaults = {} if raw: c = RawConfigParser(defaults) else: c = ConfigParser(defaults) if hasattr(conf_path, 'readline'): c.readfp(conf_path) else: if os.path.isdir(conf_path): # read all configs in directory success = read_conf_dir(c, conf_path) else: success = c.read(conf_path) if not success: print(_("Unable to read config from %s") % conf_path) sys.exit(1) if section_name: if c.has_section(section_name): conf = dict(c.items(section_name)) else: print(_("Unable to find %s config section in %s") % (section_name, conf_path)) sys.exit(1) if "log_name" not in conf: if log_name is not None: conf['log_name'] = log_name else: conf['log_name'] = section_name else: conf = {} for s in c.sections(): conf.update({s: dict(c.items(s))}) if 'log_name' not in conf: conf['log_name'] = log_name conf['__file__'] = conf_path return conf def write_pickle(obj, dest, tmp=None, pickle_protocol=0): """ Ensure that a pickle file gets written to disk. 
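# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original swift source): loading one
# section of a server config into a dict with readconf(), defined above.
# The config path, section name and option are placeholders.
# ---------------------------------------------------------------------------
def _example_readconf():
    conf = readconf('/etc/swift/object-server.conf',
                    section_name='object-auditor',
                    defaults={'interval': '300'})
    interval = int(conf.get('interval', 300))
    return interval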
The file is first written to a tmp location, ensure it is synced to disk, then perform a move to its final location :param obj: python object to be pickled :param dest: path of final destination file :param tmp: path to tmp to use, defaults to None :param pickle_protocol: protocol to pickle the obj with, defaults to 0 """ if tmp is None: tmp = os.path.dirname(dest) fd, tmppath = mkstemp(dir=tmp, suffix='.tmp') with os.fdopen(fd, 'wb') as fo: pickle.dump(obj, fo, pickle_protocol) fo.flush() os.fsync(fd) renamer(tmppath, dest) def search_tree(root, glob_match, ext='', exts=None, dir_ext=None): """Look in root, for any files/dirs matching glob, recursively traversing any found directories looking for files ending with ext :param root: start of search path :param glob_match: glob to match in root, matching dirs are traversed with os.walk :param ext: only files that end in ext will be returned :param exts: a list of file extensions; only files that end in one of these extensions will be returned; if set this list overrides any extension specified using the 'ext' param. :param dir_ext: if present directories that end with dir_ext will not be traversed and instead will be returned as a matched path :returns: list of full paths to matching files, sorted """ exts = exts or [ext] found_files = [] for path in glob.glob(os.path.join(root, glob_match)): if os.path.isdir(path): for root, dirs, files in os.walk(path): if dir_ext and root.endswith(dir_ext): found_files.append(root) # the root is a config dir, descend no further break for file_ in files: if any(exts) and not any(file_.endswith(e) for e in exts): continue found_files.append(os.path.join(root, file_)) found_dir = False for dir_ in dirs: if dir_ext and dir_.endswith(dir_ext): found_dir = True found_files.append(os.path.join(root, dir_)) if found_dir: # do not descend further into matching directories break else: if ext and not path.endswith(ext): continue found_files.append(path) return sorted(found_files) def write_file(path, contents): """Write contents to file at path :param path: any path, subdirs will be created as needed :param contents: data to write to file, will be converted to string """ dirname, name = os.path.split(path) if not os.path.exists(dirname): try: os.makedirs(dirname) except OSError as err: if err.errno == errno.EACCES: sys.exit('Unable to create %s. Running as ' 'non-root?' % dirname) with open(path, 'w') as f: f.write('%s' % contents) def remove_file(path): """Quiet wrapper for os.unlink, OSErrors are suppressed :param path: first and only argument passed to os.unlink """ try: os.unlink(path) except OSError: pass def audit_location_generator(devices, datadir, suffix='', mount_check=True, logger=None): ''' Given a devices path and a data directory, yield (path, device, partition) for all files in that directory :param devices: parent directory of the devices to be audited :param datadir: a directory located under self.devices. This should be one of the DATADIR constants defined in the account, container, and object servers. 
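# Usage sketch for write_pickle(): the object is staged in a temp file,
# fsync'd, then atomically renamed into place.  Paths here are illustrative.
import os
import pickle
import tempfile

from swift.common.utils import write_pickle

destdir = tempfile.mkdtemp()
dest = os.path.join(destdir, 'async_pending')
write_pickle({'op': 'PUT', 'account': 'AUTH_test'}, dest, tmp=destdir)

with open(dest, 'rb') as fp:
    print(pickle.load(fp))   # {'op': 'PUT', 'account': 'AUTH_test'}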
:param suffix: path name suffix required for all names returned :param mount_check: Flag to check if a mount check should be performed on devices :param logger: a logger object ''' device_dir = listdir(devices) # randomize devices in case of process restart before sweep completed shuffle(device_dir) for device in device_dir: if mount_check and not ismount(os.path.join(devices, device)): if logger: logger.warning( _('Skipping %s as it is not mounted'), device) continue datadir_path = os.path.join(devices, device, datadir) try: partitions = listdir(datadir_path) except OSError as e: if logger: logger.warning('Skipping %s because %s', datadir_path, e) continue for partition in partitions: part_path = os.path.join(datadir_path, partition) try: suffixes = listdir(part_path) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for asuffix in suffixes: suff_path = os.path.join(part_path, asuffix) try: hashes = listdir(suff_path) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for hsh in hashes: hash_path = os.path.join(suff_path, hsh) try: files = sorted(listdir(hash_path), reverse=True) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for fname in files: if suffix and not fname.endswith(suffix): continue path = os.path.join(hash_path, fname) yield path, device, partition def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5): ''' Will eventlet.sleep() for the appropriate time so that the max_rate is never exceeded. If max_rate is 0, will not ratelimit. The maximum recommended rate should not exceed (1000 * incr_by) a second as eventlet.sleep() does involve some overhead. Returns running_time that should be used for subsequent calls. :param running_time: the running time in milliseconds of the next allowable request. Best to start at zero. :param max_rate: The maximum rate per second allowed for the process. :param incr_by: How much to increment the counter. Useful if you want to ratelimit 1024 bytes/sec and have differing sizes of requests. Must be > 0 to engage rate-limiting behavior. :param rate_buffer: Number of seconds the rate counter can drop and be allowed to catch up (at a faster than listed rate). A larger number will result in larger spikes in rate but better average accuracy. Must be > 0 to engage rate-limiting behavior. ''' if max_rate <= 0 or incr_by <= 0: return running_time # 1,000 milliseconds = 1 second clock_accuracy = 1000.0 # Convert seconds to milliseconds now = time.time() * clock_accuracy # Calculate time per request in milliseconds time_per_request = clock_accuracy * (float(incr_by) / max_rate) # Convert rate_buffer to milliseconds and compare if now - running_time > rate_buffer * clock_accuracy: running_time = now elif running_time - now > time_per_request: # Convert diff back to a floating point number of seconds and sleep eventlet.sleep((running_time - now) / clock_accuracy) # Return the absolute time for the next interval in milliseconds; note # that time could have passed well beyond that point, but the next call # will catch that and skip the sleep. return running_time + time_per_request class ContextPool(GreenPool): "GreenPool subclassed to kill its coros when it gets gc'ed" def __enter__(self): return self def __exit__(self, type, value, traceback): for coro in list(self.coroutines_running): coro.kill() class GreenAsyncPileWaitallTimeout(Timeout): pass class GreenAsyncPile(object): """ Runs jobs in a pool of green threads, and the results can be retrieved by using this object as an iterator. 
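# Hedged sketch of the ratelimit_sleep() calling convention: keep feeding the
# returned running_time back in, and the loop settles at ~max_rate per second.
from swift.common.utils import ratelimit_sleep

running_time = 0
for item in range(100):
    # sleeps (via eventlet) just enough so no more than 50 items/sec pass
    running_time = ratelimit_sleep(running_time, max_rate=50)
    # ... do the rate-limited work for `item` here ...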
This is very similar in principle to eventlet.GreenPile, except it returns results as they become available rather than in the order they were launched. Correlating results with jobs (if necessary) is left to the caller. """ def __init__(self, size_or_pool): """ :param size_or_pool: thread pool size or a pool to use """ if isinstance(size_or_pool, GreenPool): self._pool = size_or_pool size = self._pool.size else: self._pool = GreenPool(size_or_pool) size = size_or_pool self._responses = eventlet.queue.LightQueue(size) self._inflight = 0 self._pending = 0 def _run_func(self, func, args, kwargs): try: self._responses.put(func(*args, **kwargs)) finally: self._inflight -= 1 @property def inflight(self): return self._inflight def spawn(self, func, *args, **kwargs): """ Spawn a job in a green thread on the pile. """ self._pending += 1 self._inflight += 1 self._pool.spawn(self._run_func, func, args, kwargs) def waitfirst(self, timeout): """ Wait up to timeout seconds for first result to come in. :param timeout: seconds to wait for results :returns: first item to come back, or None """ for result in self._wait(timeout, first_n=1): return result def waitall(self, timeout): """ Wait timeout seconds for any results to come in. :param timeout: seconds to wait for results :returns: list of results accrued in that time """ return self._wait(timeout) def _wait(self, timeout, first_n=None): results = [] try: with GreenAsyncPileWaitallTimeout(timeout): while True: results.append(next(self)) if first_n and len(results) >= first_n: break except (GreenAsyncPileWaitallTimeout, StopIteration): pass return results def __iter__(self): return self def next(self): try: rv = self._responses.get_nowait() except eventlet.queue.Empty: if self._inflight == 0: raise StopIteration() rv = self._responses.get() self._pending -= 1 return rv __next__ = next class ModifiedParseResult(ParseResult): "Parse results class for urlparse." @property def hostname(self): netloc = self.netloc.split('@', 1)[-1] if netloc.startswith('['): return netloc[1:].split(']')[0] elif ':' in netloc: return netloc.rsplit(':')[0] return netloc @property def port(self): netloc = self.netloc.split('@', 1)[-1] if netloc.startswith('['): netloc = netloc.rsplit(']')[1] if ':' in netloc: return int(netloc.rsplit(':')[1]) return None def urlparse(url): """ urlparse augmentation. This is necessary because urlparse can't handle RFC 2732 URLs. :param url: URL to parse. """ return ModifiedParseResult(*stdlib_urlparse(url)) def validate_sync_to(value, allowed_sync_hosts, realms_conf): """ Validates an X-Container-Sync-To header value, returning the validated endpoint, realm, and realm_key, or an error string. :param value: The X-Container-Sync-To header value to validate. :param allowed_sync_hosts: A list of allowed hosts in endpoints, if realms_conf does not apply. :param realms_conf: A instance of swift.common.container_sync_realms.ContainerSyncRealms to validate against. :returns: A tuple of (error_string, validated_endpoint, realm, realm_key). The error_string will None if the rest of the values have been validated. The validated_endpoint will be the validated endpoint to sync to. The realm and realm_key will be set if validation was done through realms_conf. 
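# Hedged usage sketch for GreenAsyncPile: results come back in completion
# order, not submission order, and waitall() bounds the total wait time.
import eventlet

from swift.common.utils import GreenAsyncPile

def fetch(delay, name):
    eventlet.sleep(delay)
    return name

pile = GreenAsyncPile(3)           # up to three concurrent green threads
pile.spawn(fetch, 0.2, 'slow')
pile.spawn(fetch, 0.0, 'fast')
print(pile.waitfirst(1.0))         # 'fast' -- the first result to arrive
print(pile.waitall(1.0))           # ['slow'] -- whatever else finishes in time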
""" orig_value = value value = value.rstrip('/') if not value: return (None, None, None, None) if value.startswith('//'): if not realms_conf: return (None, None, None, None) data = value[2:].split('/') if len(data) != 4: return ( _('Invalid X-Container-Sync-To format %r') % orig_value, None, None, None) realm, cluster, account, container = data realm_key = realms_conf.key(realm) if not realm_key: return (_('No realm key for %r') % realm, None, None, None) endpoint = realms_conf.endpoint(realm, cluster) if not endpoint: return ( _('No cluster endpoint for %r %r') % (realm, cluster), None, None, None) return ( None, '%s/%s/%s' % (endpoint.rstrip('/'), account, container), realm.upper(), realm_key) p = urlparse(value) if p.scheme not in ('http', 'https'): return ( _('Invalid scheme %r in X-Container-Sync-To, must be "//", ' '"http", or "https".') % p.scheme, None, None, None) if not p.path: return (_('Path required in X-Container-Sync-To'), None, None, None) if p.params or p.query or p.fragment: return ( _('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To'), None, None, None) if p.hostname not in allowed_sync_hosts: return ( _('Invalid host %r in X-Container-Sync-To') % p.hostname, None, None, None) return (None, value, None, None) def affinity_key_function(affinity_str): """Turns an affinity config value into a function suitable for passing to sort(). After doing so, the array will be sorted with respect to the given ordering. For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array will be sorted with all nodes from region 1 (r1=1) first, then all the nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything else. Note that the order of the pieces of affinity_str is irrelevant; the priority values are what comes after the equals sign. If affinity_str is empty or all whitespace, then the resulting function will not alter the ordering of the nodes. :param affinity_str: affinity config value, e.g. "r1z2=3" or "r1=1, r2z1=2, r2z2=2" :returns: single-argument function :raises: ValueError if argument invalid """ affinity_str = affinity_str.strip() if not affinity_str: return lambda x: 0 priority_matchers = [] pieces = [s.strip() for s in affinity_str.split(',')] for piece in pieces: # matches r= or rz= match = re.match("r(\d+)(?:z(\d+))?=(\d+)$", piece) if match: region, zone, priority = match.groups() region = int(region) priority = int(priority) zone = int(zone) if zone else None matcher = {'region': region, 'priority': priority} if zone is not None: matcher['zone'] = zone priority_matchers.append(matcher) else: raise ValueError("Invalid affinity value: %r" % affinity_str) priority_matchers.sort(key=operator.itemgetter('priority')) def keyfn(ring_node): for matcher in priority_matchers: if (matcher['region'] == ring_node['region'] and ('zone' not in matcher or matcher['zone'] == ring_node['zone'])): return matcher['priority'] return 4294967296 # 2^32, i.e. "a big number" return keyfn def affinity_locality_predicate(write_affinity_str): """ Turns a write-affinity config value into a predicate function for nodes. The returned value will be a 1-arg function that takes a node dictionary and returns a true value if it is "local" and a false value otherwise. The definition of "local" comes from the affinity_str argument passed in here. For example, if affinity_str is "r1, r2z2", then only nodes where region=1 or where (region=2 and zone=2) are considered local. 
If affinity_str is empty or all whitespace, then the resulting function will consider everything local :param affinity_str: affinity config value, e.g. "r1z2" or "r1, r2z1, r2z2" :returns: single-argument function, or None if affinity_str is empty :raises: ValueError if argument invalid """ affinity_str = write_affinity_str.strip() if not affinity_str: return None matchers = [] pieces = [s.strip() for s in affinity_str.split(',')] for piece in pieces: # matches r or rz match = re.match("r(\d+)(?:z(\d+))?$", piece) if match: region, zone = match.groups() region = int(region) zone = int(zone) if zone else None matcher = {'region': region} if zone is not None: matcher['zone'] = zone matchers.append(matcher) else: raise ValueError("Invalid write-affinity value: %r" % affinity_str) def is_local(ring_node): for matcher in matchers: if (matcher['region'] == ring_node['region'] and ('zone' not in matcher or matcher['zone'] == ring_node['zone'])): return True return False return is_local def get_remote_client(req): # remote host for zeus client = req.headers.get('x-cluster-client-ip') if not client and 'x-forwarded-for' in req.headers: # remote host for other lbs client = req.headers['x-forwarded-for'].split(',')[0].strip() if not client: client = req.remote_addr return client def human_readable(value): """ Returns the number in a human readable format; for example 1048576 = "1Mi". """ value = float(value) index = -1 suffixes = 'KMGTPEZY' while value >= 1024 and index + 1 < len(suffixes): index += 1 value = round(value / 1024) if index == -1: return '%d' % value return '%d%si' % (round(value), suffixes[index]) def put_recon_cache_entry(cache_entry, key, item): """ Function that will check if item is a dict, and if so put it under cache_entry[key]. We use nested recon cache entries when the object auditor runs in parallel or else in 'once' mode with a specified subset of devices. """ if isinstance(item, dict): if key not in cache_entry or key in cache_entry and not \ isinstance(cache_entry[key], dict): cache_entry[key] = {} elif key in cache_entry and item == {}: cache_entry.pop(key, None) return for k, v in item.items(): if v == {}: cache_entry[key].pop(k, None) else: cache_entry[key][k] = v else: cache_entry[key] = item def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2): """Update recon cache values :param cache_dict: Dictionary of cache key/value pairs to write out :param cache_file: cache file to update :param logger: the logger to use to log an encountered error :param lock_timeout: timeout (in seconds) """ try: with lock_file(cache_file, lock_timeout, unlink=False) as cf: cache_entry = {} try: existing_entry = cf.readline() if existing_entry: cache_entry = json.loads(existing_entry) except ValueError: # file doesn't have a valid entry, we'll recreate it pass for cache_key, cache_value in cache_dict.items(): put_recon_cache_entry(cache_entry, cache_key, cache_value) tf = None try: with NamedTemporaryFile(dir=os.path.dirname(cache_file), delete=False) as tf: tf.write(json.dumps(cache_entry) + '\n') renamer(tf.name, cache_file, fsync=False) finally: if tf is not None: try: os.unlink(tf.name) except OSError as err: if err.errno != errno.ENOENT: raise except (Exception, Timeout): logger.exception(_('Exception dumping recon cache')) def listdir(path): try: return os.listdir(path) except OSError as err: if err.errno != errno.ENOENT: raise return [] def streq_const_time(s1, s2): """Constant-time string comparison. 
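# Worked example for affinity_locality_predicate() and human_readable(): the
# returned predicate answers "is this node local?" for write-affinity checks.
from swift.common.utils import affinity_locality_predicate, human_readable

is_local = affinity_locality_predicate('r1, r2z2')
print(is_local({'region': 1, 'zone': 9}))   # True  (any zone in region 1)
print(is_local({'region': 2, 'zone': 2}))   # True  (region 2 only in zone 2)
print(is_local({'region': 2, 'zone': 3}))   # False
print(human_readable(1048576))              # '1Mi'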
:params s1: the first string :params s2: the second string :return: True if the strings are equal. This function takes two strings and compares them. It is intended to be used when doing a comparison for authentication purposes to help guard against timing attacks. """ if len(s1) != len(s2): return False result = 0 for (a, b) in zip(s1, s2): result |= ord(a) ^ ord(b) return result == 0 def pairs(item_list): """ Returns an iterator of all pairs of elements from item_list. :param items: items (no duplicates allowed) """ for i, item1 in enumerate(item_list): for item2 in item_list[(i + 1):]: yield (item1, item2) def replication(func): """ Decorator to declare which methods are accessible for different type of servers: * If option replication_server is None then this decorator doesn't matter. * If option replication_server is True then ONLY decorated with this decorator methods will be started. * If option replication_server is False then decorated with this decorator methods will NOT be started. :param func: function to mark accessible for replication """ func.replication = True return func def public(func): """ Decorator to declare which methods are publicly accessible as HTTP requests :param func: function to make public """ func.publicly_accessible = True return func def quorum_size(n): """ quorum size as it applies to services that use 'replication' for data integrity (Account/Container services). Object quorum_size is defined on a storage policy basis. Number of successful backend requests needed for the proxy to consider the client request successful. """ return (n // 2) + 1 def rsync_ip(ip): """ Transform ip string to an rsync-compatible form Will return ipv4 addresses unchanged, but will nest ipv6 addresses inside square brackets. :param ip: an ip string (ipv4 or ipv6) :returns: a string ip address """ try: socket.inet_pton(socket.AF_INET6, ip) except socket.error: # it's IPv4 return ip else: return '[%s]' % ip def rsync_module_interpolation(template, device): """ Interpolate devices variables inside a rsync module template :param template: rsync module template as a string :param device: a device from a ring :returns: a string with all variables replaced by device attributes """ replacements = { 'ip': rsync_ip(device.get('ip', '')), 'port': device.get('port', ''), 'replication_ip': rsync_ip(device.get('replication_ip', '')), 'replication_port': device.get('replication_port', ''), 'region': device.get('region', ''), 'zone': device.get('zone', ''), 'device': device.get('device', ''), 'meta': device.get('meta', ''), } try: module = template.format(**replacements) except KeyError as e: raise ValueError('Cannot interpolate rsync_module, invalid variable: ' '%s' % e) return module def get_valid_utf8_str(str_or_unicode): """ Get valid parts of utf-8 str from str, unicode and even invalid utf-8 str :param str_or_unicode: a string or an unicode which can be invalid utf-8 """ if isinstance(str_or_unicode, six.text_type): (str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace') (valid_utf8_str, _len) = utf8_decoder(str_or_unicode, 'replace') return valid_utf8_str.encode('utf-8') def list_from_csv(comma_separated_str): """ Splits the str given and returns a properly stripped list of the comma separated values. """ if comma_separated_str: return [v.strip() for v in comma_separated_str.split(',') if v.strip()] return [] def csv_append(csv_string, item): """ Appends an item to a comma-separated string. If the comma-separated string is empty/None, just returns item. 
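# Usage sketch for rsync_module_interpolation() and quorum_size(); the device
# dict below mimics a ring entry but is entirely illustrative.
from swift.common.utils import quorum_size, rsync_ip, rsync_module_interpolation

device = {'ip': '::1', 'port': 6200, 'replication_ip': '10.0.0.2',
          'replication_port': 6202, 'region': 1, 'zone': 2,
          'device': 'sdb1', 'meta': ''}
print(rsync_ip('::1'))                                    # '[::1]'
print(rsync_module_interpolation('{replication_ip}::object_{device}', device))
# '10.0.0.2::object_sdb1'
print(quorum_size(3))                                     # 2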
""" if csv_string: return ",".join((csv_string, item)) else: return item class CloseableChain(object): """ Like itertools.chain, but with a close method that will attempt to invoke its sub-iterators' close methods, if any. """ def __init__(self, *iterables): self.iterables = iterables def __iter__(self): return iter(itertools.chain(*(self.iterables))) def close(self): for it in self.iterables: close_method = getattr(it, 'close', None) if close_method: close_method() def reiterate(iterable): """ Consume the first item from an iterator, then re-chain it to the rest of the iterator. This is useful when you want to make sure the prologue to downstream generators have been executed before continuing. :param iterable: an iterable object """ if isinstance(iterable, (list, tuple)): return iterable else: iterator = iter(iterable) try: chunk = '' while not chunk: chunk = next(iterator) return CloseableChain([chunk], iterator) except StopIteration: return [] class InputProxy(object): """ File-like object that counts bytes read. To be swapped in for wsgi.input for accounting purposes. """ def __init__(self, wsgi_input): """ :param wsgi_input: file-like object to wrap the functionality of """ self.wsgi_input = wsgi_input self.bytes_received = 0 self.client_disconnect = False def read(self, *args, **kwargs): """ Pass read request to the underlying file-like object and add bytes read to total. """ try: chunk = self.wsgi_input.read(*args, **kwargs) except Exception: self.client_disconnect = True raise self.bytes_received += len(chunk) return chunk def readline(self, *args, **kwargs): """ Pass readline request to the underlying file-like object and add bytes read to total. """ try: line = self.wsgi_input.readline(*args, **kwargs) except Exception: self.client_disconnect = True raise self.bytes_received += len(line) return line class LRUCache(object): """ Decorator for size/time bound memoization that evicts the least recently used members. 
""" PREV, NEXT, KEY, CACHED_AT, VALUE = 0, 1, 2, 3, 4 # link fields def __init__(self, maxsize=1000, maxtime=3600): self.maxsize = maxsize self.maxtime = maxtime self.reset() def reset(self): self.mapping = {} self.head = [None, None, None, None, None] # oldest self.tail = [self.head, None, None, None, None] # newest self.head[self.NEXT] = self.tail def set_cache(self, value, *key): while len(self.mapping) >= self.maxsize: old_next, old_key = self.head[self.NEXT][self.NEXT:self.NEXT + 2] self.head[self.NEXT], old_next[self.PREV] = old_next, self.head del self.mapping[old_key] last = self.tail[self.PREV] link = [last, self.tail, key, time.time(), value] self.mapping[key] = last[self.NEXT] = self.tail[self.PREV] = link return value def get_cached(self, link, *key): link_prev, link_next, key, cached_at, value = link if cached_at + self.maxtime < time.time(): raise KeyError('%r has timed out' % (key,)) link_prev[self.NEXT] = link_next link_next[self.PREV] = link_prev last = self.tail[self.PREV] last[self.NEXT] = self.tail[self.PREV] = link link[self.PREV] = last link[self.NEXT] = self.tail return value def __call__(self, f): class LRUCacheWrapped(object): @functools.wraps(f) def __call__(im_self, *key): link = self.mapping.get(key, self.head) if link is not self.head: try: return self.get_cached(link, *key) except KeyError: pass value = f(*key) self.set_cache(value, *key) return value def size(im_self): """ Return the size of the cache """ return len(self.mapping) def reset(im_self): return self.reset() def get_maxsize(im_self): return self.maxsize def set_maxsize(im_self, i): self.maxsize = i def get_maxtime(im_self): return self.maxtime def set_maxtime(im_self, i): self.maxtime = i maxsize = property(get_maxsize, set_maxsize) maxtime = property(get_maxtime, set_maxtime) def __repr__(im_self): return '<%s %r>' % (im_self.__class__.__name__, f) return LRUCacheWrapped() def tpool_reraise(func, *args, **kwargs): """ Hack to work around Eventlet's tpool not catching and reraising Timeouts. """ def inner(): try: return func(*args, **kwargs) except BaseException as err: return err resp = tpool.execute(inner) if isinstance(resp, BaseException): raise resp return resp class ThreadPool(object): """ Perform blocking operations in background threads. Call its methods from within greenlets to green-wait for results without blocking the eventlet reactor (hopefully). """ BYTE = 'a'.encode('utf-8') def __init__(self, nthreads=2): self.nthreads = nthreads self._run_queue = stdlib_queue.Queue() self._result_queue = stdlib_queue.Queue() self._threads = [] self._alive = True if nthreads <= 0: return # We spawn a greenthread whose job it is to pull results from the # worker threads via a real Queue and send them to eventlet Events so # that the calling greenthreads can be awoken. # # Since each OS thread has its own collection of greenthreads, it # doesn't work to have the worker thread send stuff to the event, as # it then notifies its own thread-local eventlet hub to wake up, which # doesn't do anything to help out the actual calling greenthread over # in the main thread. # # Thus, each worker sticks its results into a result queue and then # writes a byte to a pipe, signaling the result-consuming greenlet (in # the main thread) to wake up and consume results. # # This is all stuff that eventlet.tpool does, but that code can't have # multiple instances instantiated. Since the object server uses one # pool per disk, we have to reimplement this stuff. 
_raw_rpipe, self.wpipe = os.pipe() self.rpipe = greenio.GreenPipe(_raw_rpipe, 'rb') for _junk in range(nthreads): thr = stdlib_threading.Thread( target=self._worker, args=(self._run_queue, self._result_queue)) thr.daemon = True thr.start() self._threads.append(thr) # This is the result-consuming greenthread that runs in the main OS # thread, as described above. self._consumer_coro = greenthread.spawn_n(self._consume_results, self._result_queue) def _worker(self, work_queue, result_queue): """ Pulls an item from the queue and runs it, then puts the result into the result queue. Repeats forever. :param work_queue: queue from which to pull work :param result_queue: queue into which to place results """ while True: item = work_queue.get() if item is None: break ev, func, args, kwargs = item try: result = func(*args, **kwargs) result_queue.put((ev, True, result)) except BaseException: result_queue.put((ev, False, sys.exc_info())) finally: work_queue.task_done() os.write(self.wpipe, self.BYTE) def _consume_results(self, queue): """ Runs as a greenthread in the same OS thread as callers of run_in_thread(). Takes results from the worker OS threads and sends them to the waiting greenthreads. """ while True: try: self.rpipe.read(1) except ValueError: # can happen at process shutdown when pipe is closed break while True: try: ev, success, result = queue.get(block=False) except stdlib_queue.Empty: break try: if success: ev.send(result) else: ev.send_exception(*result) finally: queue.task_done() def run_in_thread(self, func, *args, **kwargs): """ Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet until results are available. Exceptions thrown will be reraised in the calling thread. If the threadpool was initialized with nthreads=0, it invokes ``func(*args, **kwargs)`` directly, followed by eventlet.sleep() to ensure the eventlet hub has a chance to execute. It is more likely the hub will be invoked when queuing operations to an external thread. :returns: result of calling func :raises: whatever func raises """ if not self._alive: raise swift.common.exceptions.ThreadPoolDead() if self.nthreads <= 0: result = func(*args, **kwargs) sleep() return result ev = event.Event() self._run_queue.put((ev, func, args, kwargs), block=False) # blocks this greenlet (and only *this* greenlet) until the real # thread calls ev.send(). result = ev.wait() return result def _run_in_eventlet_tpool(self, func, *args, **kwargs): """ Really run something in an external thread, even if we haven't got any threads of our own. """ def inner(): try: return (True, func(*args, **kwargs)) except (Timeout, BaseException) as err: return (False, err) success, result = tpool.execute(inner) if success: return result else: raise result def force_run_in_thread(self, func, *args, **kwargs): """ Runs ``func(*args, **kwargs)`` in a thread. Blocks the current greenlet until results are available. Exceptions thrown will be reraised in the calling thread. If the threadpool was initialized with nthreads=0, uses eventlet.tpool to run the function. This is in contrast to run_in_thread(), which will (in that case) simply execute func in the calling thread. :returns: result of calling func :raises: whatever func raises """ if not self._alive: raise swift.common.exceptions.ThreadPoolDead() if self.nthreads <= 0: return self._run_in_eventlet_tpool(func, *args, **kwargs) else: return self.run_in_thread(func, *args, **kwargs) def terminate(self): """ Releases the threadpool's resources (OS threads, greenthreads, pipes, etc.) 
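# Hedged usage sketch for ThreadPool: run a blocking call in a real OS thread
# so the eventlet hub keeps servicing other greenthreads in the meantime.
import os

from swift.common.utils import ThreadPool

pool = ThreadPool(nthreads=2)
try:
    # a blocking syscall; this greenthread waits, the hub does not
    st = pool.run_in_thread(os.stat, '/tmp')
    print(st.st_mode)
finally:
    pool.terminate()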
and renders it unusable. Don't call run_in_thread() or force_run_in_thread() after calling terminate(). """ self._alive = False if self.nthreads <= 0: return for _junk in range(self.nthreads): self._run_queue.put(None) for thr in self._threads: thr.join() self._threads = [] self.nthreads = 0 greenthread.kill(self._consumer_coro) self.rpipe.close() os.close(self.wpipe) def ismount(path): """ Test whether a path is a mount point. This will catch any exceptions and translate them into a False return value Use ismount_raw to have the exceptions raised instead. """ try: return ismount_raw(path) except OSError: return False def ismount_raw(path): """ Test whether a path is a mount point. Whereas ismount will catch any exceptions and just return False, this raw version will not catch exceptions. This is code hijacked from C Python 2.6.8, adapted to remove the extra lstat() system call. """ try: s1 = os.lstat(path) except os.error as err: if err.errno == errno.ENOENT: # It doesn't exist -- so not a mount point :-) return False raise if stat.S_ISLNK(s1.st_mode): # A symlink can never be a mount point return False s2 = os.lstat(os.path.join(path, '..')) dev1 = s1.st_dev dev2 = s2.st_dev if dev1 != dev2: # path/.. on a different device as path return True ino1 = s1.st_ino ino2 = s2.st_ino if ino1 == ino2: # path/.. is the same i-node as path return True return False def close_if_possible(maybe_closable): close_method = getattr(maybe_closable, 'close', None) if callable(close_method): return close_method() @contextmanager def closing_if_possible(maybe_closable): """ Like contextlib.closing(), but doesn't crash if the object lacks a close() method. PEP 333 (WSGI) says: "If the iterable returned by the application has a close() method, the server or gateway must call that method upon completion of the current request[.]" This function makes that easier. """ try: yield maybe_closable finally: close_if_possible(maybe_closable) _rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' _rfc_extension_pattern = re.compile( r'(?:\s*;\s*(' + _rfc_token + r")\s*(?:=\s*(" + _rfc_token + r'|"(?:[^"\\]|\\.)*"))?)') _content_range_pattern = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$') def parse_content_range(content_range): """ Parse a content-range header into (first_byte, last_byte, total_size). See RFC 7233 section 4.2 for details on the header format, but it's basically "Content-Range: bytes ${start}-${end}/${total}". :param content_range: Content-Range header value to parse, e.g. "bytes 100-1249/49004" :returns: 3-tuple (start, end, total) :raises: ValueError if malformed """ found = re.search(_content_range_pattern, content_range) if not found: raise ValueError("malformed Content-Range %r" % (content_range,)) return tuple(int(x) for x in found.groups()) def parse_content_type(content_type): """ Parse a content-type and its parameters into values. RFC 2616 sec 14.17 and 3.7 are pertinent. 
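# Worked example for parse_content_range(); malformed values raise ValueError.
from swift.common.utils import parse_content_range

print(parse_content_range('bytes 100-1249/49004'))   # (100, 1249, 49004)
try:
    parse_content_range('bytes 100-1249/*')          # '*' totals are rejected
except ValueError as err:
    print(err)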
**Examples**:: 'text/plain; charset=UTF-8' -> ('text/plain', [('charset, 'UTF-8')]) 'text/plain; charset=UTF-8; level=1' -> ('text/plain', [('charset, 'UTF-8'), ('level', '1')]) :param content_type: content_type to parse :returns: a tuple containing (content type, list of k, v parameter tuples) """ parm_list = [] if ';' in content_type: content_type, parms = content_type.split(';', 1) parms = ';' + parms for m in _rfc_extension_pattern.findall(parms): key = m[0].strip() value = m[1].strip() parm_list.append((key, value)) return content_type, parm_list def extract_swift_bytes(content_type): """ Parse a content-type and return a tuple containing: - the content_type string minus any swift_bytes param, - the swift_bytes value or None if the param was not found :param content_type: a content-type string :return: a tuple of (content-type, swift_bytes or None) """ content_type, params = parse_content_type(content_type) swift_bytes = None for k, v in params: if k == 'swift_bytes': swift_bytes = v else: content_type += ';%s=%s' % (k, v) return content_type, swift_bytes def override_bytes_from_content_type(listing_dict, logger=None): """ Takes a dict from a container listing and overrides the content_type, bytes fields if swift_bytes is set. """ content_type, params = parse_content_type(listing_dict['content_type']) for key, value in params: if key == 'swift_bytes': try: listing_dict['bytes'] = int(value) except ValueError: if logger: logger.exception("Invalid swift_bytes") else: content_type += ';%s=%s' % (key, value) listing_dict['content_type'] = content_type def clean_content_type(value): if ';' in value: left, right = value.rsplit(';', 1) if right.lstrip().startswith('swift_bytes='): return left return value def quote(value, safe='/'): """ Patched version of urllib.quote that encodes utf-8 strings before quoting """ return _quote(get_valid_utf8_str(value), safe) def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj): """ Returns a expiring object container name for given X-Delete-At and a/c/o. 
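# Worked example for parse_content_type() and extract_swift_bytes(): the
# swift_bytes parameter is peeled off while other parameters are preserved.
from swift.common.utils import extract_swift_bytes, parse_content_type

print(parse_content_type('text/plain; charset=UTF-8; level=1'))
# ('text/plain', [('charset', 'UTF-8'), ('level', '1')])
print(extract_swift_bytes('text/plain; charset=UTF-8; swift_bytes=2048'))
# ('text/plain;charset=UTF-8', '2048')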
""" shard_int = int(hash_path(acc, cont, obj), 16) % 100 return normalize_delete_at_timestamp( int(x_delete_at) / expirer_divisor * expirer_divisor - shard_int) class _MultipartMimeFileLikeObject(object): def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size): self.no_more_data_for_this_file = False self.no_more_files = False self.wsgi_input = wsgi_input self.boundary = boundary self.input_buffer = input_buffer self.read_chunk_size = read_chunk_size def read(self, length=None): if not length: length = self.read_chunk_size if self.no_more_data_for_this_file: return b'' # read enough data to know whether we're going to run # into a boundary in next [length] bytes if len(self.input_buffer) < length + len(self.boundary) + 2: to_read = length + len(self.boundary) + 2 while to_read > 0: try: chunk = self.wsgi_input.read(to_read) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) to_read -= len(chunk) self.input_buffer += chunk if not chunk: self.no_more_files = True break boundary_pos = self.input_buffer.find(self.boundary) # boundary does not exist in the next (length) bytes if boundary_pos == -1 or boundary_pos > length: ret = self.input_buffer[:length] self.input_buffer = self.input_buffer[length:] # if it does, just return data up to the boundary else: ret, self.input_buffer = self.input_buffer.split(self.boundary, 1) self.no_more_files = self.input_buffer.startswith(b'--') self.no_more_data_for_this_file = True self.input_buffer = self.input_buffer[2:] return ret def readline(self): if self.no_more_data_for_this_file: return b'' boundary_pos = newline_pos = -1 while newline_pos < 0 and boundary_pos < 0: try: chunk = self.wsgi_input.read(self.read_chunk_size) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) self.input_buffer += chunk newline_pos = self.input_buffer.find(b'\r\n') boundary_pos = self.input_buffer.find(self.boundary) if not chunk: self.no_more_files = True break # found a newline if newline_pos >= 0 and \ (boundary_pos < 0 or newline_pos < boundary_pos): # Use self.read to ensure any logic there happens... ret = b'' to_read = newline_pos + 2 while to_read > 0: chunk = self.read(to_read) # Should never happen since we're reading from input_buffer, # but just for completeness... if not chunk: break to_read -= len(chunk) ret += chunk return ret else: # no newlines, just return up to next boundary return self.read(len(self.input_buffer)) def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096): """ Given a multi-part-mime-encoded input file object and boundary, yield file-like objects for each part. Note that this does not split each part into headers and body; the caller is responsible for doing that if necessary. :param wsgi_input: The file-like object to read from. :param boundary: The mime boundary to separate new file-like objects on. :returns: A generator of file-like objects for each part. 
:raises: MimeInvalid if the document is malformed """ boundary = '--' + boundary blen = len(boundary) + 2 # \r\n try: got = wsgi_input.readline(blen) while got == '\r\n': got = wsgi_input.readline(blen) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) if got.strip() != boundary: raise swift.common.exceptions.MimeInvalid( 'invalid starting boundary: wanted %r, got %r', (boundary, got)) boundary = '\r\n' + boundary input_buffer = '' done = False while not done: it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer, read_chunk_size) yield it done = it.no_more_files input_buffer = it.input_buffer def parse_mime_headers(doc_file): """ Takes a file-like object containing a MIME document and returns a HeaderKeyDict containing the headers. The body of the message is not consumed: the position in doc_file is left at the beginning of the body. This function was inspired by the Python standard library's http.client.parse_headers. :param doc_file: binary file-like object containing a MIME document :returns: a swift.common.swob.HeaderKeyDict containing the headers """ headers = [] while True: line = doc_file.readline() done = line in (b'\r\n', b'\n', b'') if six.PY3: try: line = line.decode('utf-8') except UnicodeDecodeError: line = line.decode('latin1') headers.append(line) if done: break if six.PY3: header_string = ''.join(headers) else: header_string = b''.join(headers) headers = email.parser.Parser().parsestr(header_string) return HeaderKeyDict(headers) def mime_to_document_iters(input_file, boundary, read_chunk_size=4096): """ Takes a file-like object containing a multipart MIME document and returns an iterator of (headers, body-file) tuples. :param input_file: file-like object with the MIME doc in it :param boundary: MIME boundary, sans dashes (e.g. "divider", not "--divider") :param read_chunk_size: size of strings read via input_file.read() """ doc_files = iter_multipart_mime_documents(input_file, boundary, read_chunk_size) for i, doc_file in enumerate(doc_files): # this consumes the headers and leaves just the body in doc_file headers = parse_mime_headers(doc_file) yield (headers, doc_file) def maybe_multipart_byteranges_to_document_iters(app_iter, content_type): """ Takes an iterator that may or may not contain a multipart MIME document as well as content type and returns an iterator of body iterators. :param app_iter: iterator that may contain a multipart MIME document :param content_type: content type of the app_iter, used to determine whether it conains a multipart document and, if so, what the boundary is between documents """ content_type, params_list = parse_content_type(content_type) if content_type != 'multipart/byteranges': yield app_iter return body_file = FileLikeIter(app_iter) boundary = dict(params_list)['boundary'] for _headers, body in mime_to_document_iters(body_file, boundary): yield (chunk for chunk in iter(lambda: body.read(65536), '')) def document_iters_to_multipart_byteranges(ranges_iter, boundary): """ Takes an iterator of range iters and yields a multipart/byteranges MIME document suitable for sending as the body of a multi-range 206 response. See document_iters_to_http_response_body for parameter descriptions. 
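# Worked example for parse_mime_headers(): the headers are consumed and the
# file position is left at the start of the body.
from io import BytesIO

from swift.common.utils import parse_mime_headers

doc = BytesIO(b'Content-Type: text/plain\r\nX-Object-Meta-Mood: calm\r\n\r\nhello')
headers = parse_mime_headers(doc)
print(headers['Content-Type'])           # 'text/plain'
print(doc.read())                        # b'hello' -- the body is left unread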
""" divider = "--" + boundary + "\r\n" terminator = "--" + boundary + "--" for range_spec in ranges_iter: start_byte = range_spec["start_byte"] end_byte = range_spec["end_byte"] entity_length = range_spec.get("entity_length", "*") content_type = range_spec["content_type"] part_iter = range_spec["part_iter"] part_header = ''.join(( divider, "Content-Type: ", str(content_type), "\r\n", "Content-Range: ", "bytes %d-%d/%s\r\n" % ( start_byte, end_byte, entity_length), "\r\n" )) yield part_header for chunk in part_iter: yield chunk yield "\r\n" yield terminator def document_iters_to_http_response_body(ranges_iter, boundary, multipart, logger): """ Takes an iterator of range iters and turns it into an appropriate HTTP response body, whether that's multipart/byteranges or not. This is almost, but not quite, the inverse of request_helpers.http_response_to_document_iters(). This function only yields chunks of the body, not any headers. :param ranges_iter: an iterator of dictionaries, one per range. Each dictionary must contain at least the following key: "part_iter": iterator yielding the bytes in the range Additionally, if multipart is True, then the following other keys are required: "start_byte": index of the first byte in the range "end_byte": index of the last byte in the range "content_type": value for the range's Content-Type header Finally, there is one optional key that is used in the multipart/byteranges case: "entity_length": length of the requested entity (not necessarily equal to the response length). If omitted, "*" will be used. Each part_iter will be exhausted prior to calling next(ranges_iter). :param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not "--boundary"). :param multipart: True if the response should be multipart/byteranges, False otherwise. This should be True if and only if you have 2 or more ranges. :param logger: a logger """ if multipart: return document_iters_to_multipart_byteranges(ranges_iter, boundary) else: try: response_body_iter = next(ranges_iter)['part_iter'] except StopIteration: return '' # We need to make sure ranges_iter does not get garbage-collected # before response_body_iter is exhausted. The reason is that # ranges_iter has a finally block that calls close_swift_conn, and # so if that finally block fires before we read response_body_iter, # there's nothing there. def string_along(useful_iter, useless_iter_iter, logger): for x in useful_iter: yield x try: next(useless_iter_iter) except StopIteration: pass else: logger.warning( "More than one part in a single-part response?") return string_along(response_body_iter, ranges_iter, logger) def multipart_byteranges_to_document_iters(input_file, boundary, read_chunk_size=4096): """ Takes a file-like object containing a multipart/byteranges MIME document (see RFC 7233, Appendix A) and returns an iterator of (first-byte, last-byte, length, document-headers, body-file) 5-tuples. :param input_file: file-like object with the MIME doc in it :param boundary: MIME boundary, sans dashes (e.g. "divider", not "--divider") :param read_chunk_size: size of strings read via input_file.read() """ for headers, body in mime_to_document_iters(input_file, boundary, read_chunk_size): first_byte, last_byte, length = parse_content_range( headers.get('content-range')) yield (first_byte, last_byte, length, headers.items(), body) #: Regular expression to match form attributes. 
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)') def parse_content_disposition(header): """ Given the value of a header like: Content-Disposition: form-data; name="somefile"; filename="test.html" Return data like ("form-data", {"name": "somefile", "filename": "test.html"}) :param header: Value of a header (the part after the ': '). :returns: (value name, dict) of the attribute data parsed (see above). """ attributes = {} attrs = '' if ';' in header: header, attrs = [x.strip() for x in header.split(';', 1)] m = True while m: m = ATTRIBUTES_RE.match(attrs) if m: attrs = attrs[len(m.group(0)):] attributes[m.group(1)] = m.group(2).strip('"') return header, attributes class sockaddr_alg(ctypes.Structure): _fields_ = [("salg_family", ctypes.c_ushort), ("salg_type", ctypes.c_ubyte * 14), ("salg_feat", ctypes.c_uint), ("salg_mask", ctypes.c_uint), ("salg_name", ctypes.c_ubyte * 64)] _bound_md5_sockfd = None def get_md5_socket(): """ Get an MD5 socket file descriptor. One can MD5 data with it by writing it to the socket with os.write, then os.read the 16 bytes of the checksum out later. NOTE: It is the caller's responsibility to ensure that os.close() is called on the returned file descriptor. This is a bare file descriptor, not a Python object. It doesn't close itself. """ # Linux's AF_ALG sockets work like this: # # First, initialize a socket with socket() and bind(). This tells the # socket what algorithm to use, as well as setting up any necessary bits # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the # algorithm name. # # Second, to hash some data, get a second socket by calling accept() on # the first socket. Write data to the socket, then when finished, read the # checksum from the socket and close it. This lets you checksum multiple # things without repeating all the setup code each time. # # Since we only need to bind() one socket, we do that here and save it for # future re-use. That way, we only use one file descriptor to get an MD5 # socket instead of two, and we also get to save some syscalls. global _bound_md5_sockfd global _libc_socket global _libc_bind global _libc_accept if _libc_accept is None: _libc_accept = load_libc_function('accept', fail_if_missing=True) if _libc_socket is None: _libc_socket = load_libc_function('socket', fail_if_missing=True) if _libc_bind is None: _libc_bind = load_libc_function('bind', fail_if_missing=True) # Do this at first call rather than at import time so that we don't use a # file descriptor on systems that aren't using any MD5 sockets. 
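# Worked example for parse_content_disposition():
from swift.common.utils import parse_content_disposition

header = 'form-data; name="somefile"; filename="test.html"'
print(parse_content_disposition(header))
# ('form-data', {'name': 'somefile', 'filename': 'test.html'})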
if _bound_md5_sockfd is None: sockaddr_setup = sockaddr_alg( AF_ALG, (ord('h'), ord('a'), ord('s'), ord('h'), 0), 0, 0, (ord('m'), ord('d'), ord('5'), 0)) hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG), ctypes.c_int(socket.SOCK_SEQPACKET), ctypes.c_int(0)) if hash_sockfd < 0: raise IOError(ctypes.get_errno(), "Failed to initialize MD5 socket") bind_result = _libc_bind(ctypes.c_int(hash_sockfd), ctypes.pointer(sockaddr_setup), ctypes.c_int(ctypes.sizeof(sockaddr_alg))) if bind_result < 0: os.close(hash_sockfd) raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket") _bound_md5_sockfd = hash_sockfd md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0) if md5_sockfd < 0: raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket") return md5_sockfd swift-2.7.0/swift/common/db.py0000664000567000056710000010256412675204037017416 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Database code for Swift """ from contextlib import contextmanager, closing import hashlib import json import logging import os from uuid import uuid4 import sys import time import errno import six import six.moves.cPickle as pickle from swift import gettext_ as _ from tempfile import mkstemp from eventlet import sleep, Timeout import sqlite3 from swift.common.constraints import MAX_META_COUNT, MAX_META_OVERALL_SIZE from swift.common.utils import Timestamp, renamer, \ mkdirs, lock_parent_directory, fallocate from swift.common.exceptions import LockTimeout from swift.common.swob import HTTPBadRequest #: Whether calls will be made to preallocate disk space for database files. DB_PREALLOCATION = False #: Timeout for trying to connect to a DB BROKER_TIMEOUT = 25 #: Pickle protocol to use PICKLE_PROTOCOL = 2 #: Max size of .pending file in bytes. When this is exceeded, the pending # records will be merged. 
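# Hedged, Linux-only sketch of using the AF_ALG descriptor returned by
# get_md5_socket(): write the data in, read the 16-byte digest back out,
# and close the raw file descriptor yourself, as the docstring requires.
import binascii
import os

from swift.common.utils import get_md5_socket

fd = get_md5_socket()
try:
    os.write(fd, b'some object data')
    digest = os.read(fd, 16)
    print(binascii.hexlify(digest))
finally:
    os.close(fd)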
PENDING_CAP = 131072 def utf8encode(*args): return [(s.encode('utf8') if isinstance(s, six.text_type) else s) for s in args] def utf8encodekeys(metadata): uni_keys = [k for k in metadata if isinstance(k, six.text_type)] for k in uni_keys: sv = metadata[k] del metadata[k] metadata[k.encode('utf-8')] = sv def _db_timeout(timeout, db_file, call): with LockTimeout(timeout, db_file): retry_wait = 0.001 while True: try: return call() except sqlite3.OperationalError as e: if 'locked' not in str(e): raise sleep(retry_wait) retry_wait = min(retry_wait * 2, 0.05) class DatabaseConnectionError(sqlite3.DatabaseError): """More friendly error messages for DB Errors.""" def __init__(self, path, msg, timeout=0): self.path = path self.timeout = timeout self.msg = msg def __str__(self): return 'DB connection error (%s, %s):\n%s' % ( self.path, self.timeout, self.msg) class DatabaseAlreadyExists(sqlite3.DatabaseError): """More friendly error messages for DB Errors.""" def __init__(self, path): self.path = path def __str__(self): return 'DB %s already exists' % self.path class GreenDBConnection(sqlite3.Connection): """SQLite DB Connection handler that plays well with eventlet.""" def __init__(self, database, timeout=None, *args, **kwargs): if timeout is None: timeout = BROKER_TIMEOUT self.timeout = timeout self.db_file = database super(GreenDBConnection, self).__init__(database, 0, *args, **kwargs) def cursor(self, cls=None): if cls is None: cls = GreenDBCursor return sqlite3.Connection.cursor(self, cls) def commit(self): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Connection.commit(self)) class GreenDBCursor(sqlite3.Cursor): """SQLite Cursor handler that plays well with eventlet.""" def __init__(self, *args, **kwargs): self.timeout = args[0].timeout self.db_file = args[0].db_file super(GreenDBCursor, self).__init__(*args, **kwargs) def execute(self, *args, **kwargs): return _db_timeout( self.timeout, self.db_file, lambda: sqlite3.Cursor.execute( self, *args, **kwargs)) def dict_factory(crs, row): """ This should only be used when you need a real dict, i.e. when you're going to serialize the results. """ return dict( ((col[0], row[idx]) for idx, col in enumerate(crs.description))) def chexor(old, name, timestamp): """ Each entry in the account and container databases is XORed by the 128-bit hash on insert or delete. This serves as a rolling, order-independent hash of the contents. (check + XOR) :param old: hex representation of the current DB hash :param name: name of the object or container being inserted :param timestamp: internalized timestamp of the new record :returns: a hex representation of the new hash value """ if name is None: raise Exception('name is None!') new = hashlib.md5(('%s-%s' % (name, timestamp)).encode('utf8')).hexdigest() return '%032x' % (int(old, 16) ^ int(new, 16)) def get_db_connection(path, timeout=30, okay_to_create=False): """ Returns a properly configured SQLite database connection. 
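# Worked example for chexor(): the rolling hash is order-independent, so
# replaying the same inserts in a different order yields the same value.
from swift.common.db import chexor

start = '0' * 32
a = chexor(chexor(start, 'o1', '0000001000.00000'), 'o2', '0000002000.00000')
b = chexor(chexor(start, 'o2', '0000002000.00000'), 'o1', '0000001000.00000')
print(a == b)   # True -- XOR commutes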
:param path: path to DB :param timeout: timeout for connection :param okay_to_create: if True, create the DB if it doesn't exist :returns: DB connection object """ try: connect_time = time.time() conn = sqlite3.connect(path, check_same_thread=False, factory=GreenDBConnection, timeout=timeout) if path != ':memory:' and not okay_to_create: # attempt to detect and fail when connect creates the db file stat = os.stat(path) if stat.st_size == 0 and stat.st_ctime >= connect_time: os.unlink(path) raise DatabaseConnectionError(path, 'DB file created by connect?') conn.row_factory = sqlite3.Row conn.text_factory = str with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = NORMAL') cur.execute('PRAGMA count_changes = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = DELETE') conn.create_function('chexor', 3, chexor) except sqlite3.DatabaseError: import traceback raise DatabaseConnectionError(path, traceback.format_exc(), timeout=timeout) return conn class DatabaseBroker(object): """Encapsulates working with a database.""" def __init__(self, db_file, timeout=BROKER_TIMEOUT, logger=None, account=None, container=None, pending_timeout=None, stale_reads_ok=False): """Encapsulates working with a database.""" self.conn = None self.db_file = db_file self.pending_file = self.db_file + '.pending' self.pending_timeout = pending_timeout or 10 self.stale_reads_ok = stale_reads_ok self.db_dir = os.path.dirname(db_file) self.timeout = timeout self.logger = logger or logging.getLogger() self.account = account self.container = container self._db_version = -1 def __str__(self): """ Returns a string identifying the entity under broker to a human. The baseline implementation returns a full pathname to a database. This is vital for useful diagnostics. """ return self.db_file def initialize(self, put_timestamp=None, storage_policy_index=None): """ Create the DB The storage_policy_index is passed through to the subclass's ``_initialize`` method. It is ignored by ``AccountBroker``. :param put_timestamp: internalized timestamp of initial PUT request :param storage_policy_index: only required for containers """ if self.db_file == ':memory:': tmp_db_file = None conn = get_db_connection(self.db_file, self.timeout) else: mkdirs(self.db_dir) fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir) os.close(fd) conn = sqlite3.connect(tmp_db_file, check_same_thread=False, factory=GreenDBConnection, timeout=0) # creating dbs implicitly does a lot of transactions, so we # pick fast, unsafe options here and do a big fsync at the end. 
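# Hedged sketch for get_db_connection() with an in-memory database; the
# eventlet-friendly connection and cursor classes are used transparently.
from swift.common.db import get_db_connection

conn = get_db_connection(':memory:', timeout=5)
conn.execute('CREATE TABLE demo (name TEXT)')
conn.execute('INSERT INTO demo VALUES (?)', ('obj',))
conn.commit()
print(conn.execute('SELECT name FROM demo').fetchone()['name'])   # 'obj'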
with closing(conn.cursor()) as cur: cur.execute('PRAGMA synchronous = OFF') cur.execute('PRAGMA temp_store = MEMORY') cur.execute('PRAGMA journal_mode = MEMORY') conn.create_function('chexor', 3, chexor) conn.row_factory = sqlite3.Row conn.text_factory = str conn.executescript(""" CREATE TABLE outgoing_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TABLE incoming_sync ( remote_id TEXT UNIQUE, sync_point INTEGER, updated_at TEXT DEFAULT 0 ); CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync BEGIN UPDATE outgoing_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync BEGIN UPDATE incoming_sync SET updated_at = STRFTIME('%s', 'NOW') WHERE ROWID = new.ROWID; END; """) if not put_timestamp: put_timestamp = Timestamp(0).internal self._initialize(conn, put_timestamp, storage_policy_index=storage_policy_index) conn.commit() if tmp_db_file: conn.close() with open(tmp_db_file, 'r+b') as fp: os.fsync(fp.fileno()) with lock_parent_directory(self.db_file, self.pending_timeout): if os.path.exists(self.db_file): # It's as if there was a "condition" where different parts # of the system were "racing" each other. raise DatabaseAlreadyExists(self.db_file) renamer(tmp_db_file, self.db_file) self.conn = get_db_connection(self.db_file, self.timeout) else: self.conn = conn def delete_db(self, timestamp): """ Mark the DB as deleted :param timestamp: internalized delete timestamp """ # first, clear the metadata cleared_meta = {} for k in self.metadata: cleared_meta[k] = ('', timestamp) self.update_metadata(cleared_meta) # then mark the db as deleted with self.get() as conn: self._delete_db(conn, timestamp) conn.commit() def possibly_quarantine(self, exc_type, exc_value, exc_traceback): """ Checks the exception info to see if it indicates a quarantine situation (malformed or corrupted database). If not, the original exception will be reraised. If so, the database will be quarantined and a new sqlite3.DatabaseError will be raised indicating the action taken. 
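# A standalone sketch of the path arithmetic in possibly_quarantine(): the
# suspect database directory is moved under <device>/quarantined/<type>s/.
# The paths and the 'accounts' data dir below are illustrative only.
import os

db_dir = '/srv/node/sdb1/accounts/1234/fff/ffffffffffffffffffffffffffffffff'
device_path = os.path.dirname(os.path.dirname(os.path.dirname(
    os.path.dirname(db_dir))))
quar_path = os.path.join(device_path, 'quarantined', 'accounts',
                         os.path.basename(db_dir))
print(quar_path)
# /srv/node/sdb1/quarantined/accounts/ffffffffffffffffffffffffffffffff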
""" if 'database disk image is malformed' in str(exc_value): exc_hint = 'malformed' elif 'file is encrypted or is not a database' in str(exc_value): exc_hint = 'corrupted' elif 'disk I/O error' in str(exc_value): exc_hint = 'disk error while accessing' else: six.reraise(exc_type, exc_value, exc_traceback) prefix_path = os.path.dirname(self.db_dir) partition_path = os.path.dirname(prefix_path) dbs_path = os.path.dirname(partition_path) device_path = os.path.dirname(dbs_path) quar_path = os.path.join(device_path, 'quarantined', self.db_type + 's', os.path.basename(self.db_dir)) try: renamer(self.db_dir, quar_path, fsync=False) except OSError as e: if e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise quar_path = "%s-%s" % (quar_path, uuid4().hex) renamer(self.db_dir, quar_path, fsync=False) detail = _('Quarantined %s to %s due to %s database') % \ (self.db_dir, quar_path, exc_hint) self.logger.error(detail) raise sqlite3.DatabaseError(detail) @contextmanager def get(self): """Use with the "with" statement; returns a database connection.""" if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): try: self.conn = get_db_connection(self.db_file, self.timeout) except (sqlite3.DatabaseError, DatabaseConnectionError): self.possibly_quarantine(*sys.exc_info()) else: raise DatabaseConnectionError(self.db_file, "DB doesn't exist") conn = self.conn self.conn = None try: yield conn conn.rollback() self.conn = conn except sqlite3.DatabaseError: try: conn.close() except Exception: pass self.possibly_quarantine(*sys.exc_info()) except (Exception, Timeout): conn.close() raise @contextmanager def lock(self): """Use with the "with" statement; locks a database.""" if not self.conn: if self.db_file != ':memory:' and os.path.exists(self.db_file): self.conn = get_db_connection(self.db_file, self.timeout) else: raise DatabaseConnectionError(self.db_file, "DB doesn't exist") conn = self.conn self.conn = None orig_isolation_level = conn.isolation_level conn.isolation_level = None conn.execute('BEGIN IMMEDIATE') try: yield True except (Exception, Timeout): pass try: conn.execute('ROLLBACK') conn.isolation_level = orig_isolation_level self.conn = conn except (Exception, Timeout): logging.exception( _('Broker error trying to rollback locked connection')) conn.close() def newid(self, remote_id): """ Re-id the database. This should be called after an rsync. :param remote_id: the ID of the remote database being rsynced in """ with self.get() as conn: row = conn.execute(''' UPDATE %s_stat SET id=? ''' % self.db_type, (str(uuid4()),)) row = conn.execute(''' SELECT ROWID FROM %s ORDER BY ROWID DESC LIMIT 1 ''' % self.db_contains_type).fetchone() sync_point = row['ROWID'] if row else -1 conn.execute(''' INSERT OR REPLACE INTO incoming_sync (sync_point, remote_id) VALUES (?, ?) ''', (sync_point, remote_id)) self._newid(conn) conn.commit() def _newid(self, conn): # Override for additional work when receiving an rsynced db. pass def _is_deleted(self, conn): """ Check if the database is considered deleted :param conn: database conn :returns: True if the DB is considered to be deleted, False otherwise """ raise NotImplementedError() def is_deleted(self): """ Check if the DB is considered to be deleted. 
:returns: True if the DB is considered to be deleted, False otherwise """ if self.db_file != ':memory:' and not os.path.exists(self.db_file): return True self._commit_puts_stale_ok() with self.get() as conn: return self._is_deleted(conn) def merge_timestamps(self, created_at, put_timestamp, delete_timestamp): """ Used in replication to handle updating timestamps. :param created_at: create timestamp :param put_timestamp: put timestamp :param delete_timestamp: delete timestamp """ with self.get() as conn: old_status = self._is_deleted(conn) conn.execute(''' UPDATE %s_stat SET created_at=MIN(?, created_at), put_timestamp=MAX(?, put_timestamp), delete_timestamp=MAX(?, delete_timestamp) ''' % self.db_type, (created_at, put_timestamp, delete_timestamp)) if old_status != self._is_deleted(conn): timestamp = Timestamp(time.time()) self._update_status_changed_at(conn, timestamp.internal) conn.commit() def get_items_since(self, start, count): """ Get a list of objects in the database between start and end. :param start: start ROWID :param count: number to get :returns: list of objects between start and end """ self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute(''' SELECT * FROM %s WHERE ROWID > ? ORDER BY ROWID ASC LIMIT ? ''' % self.db_contains_type, (start, count)) curs.row_factory = dict_factory return [r for r in curs] def get_sync(self, id, incoming=True): """ Gets the most recent sync point for a server from the sync table. :param id: remote ID to get the sync_point for :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync :returns: the sync point, or -1 if the id doesn't exist. """ with self.get() as conn: row = conn.execute( "SELECT sync_point FROM %s_sync WHERE remote_id=?" % ('incoming' if incoming else 'outgoing'), (id,)).fetchone() if not row: return -1 return row['sync_point'] def get_syncs(self, incoming=True): """ Get a serialized copy of the sync table. :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync :returns: list of {'remote_id', 'sync_point'} """ with self.get() as conn: curs = conn.execute(''' SELECT remote_id, sync_point FROM %s_sync ''' % ('incoming' if incoming else 'outgoing')) result = [] for row in curs: result.append({'remote_id': row[0], 'sync_point': row[1]}) return result def get_max_row(self): query = ''' SELECT SQLITE_SEQUENCE.seq FROM SQLITE_SEQUENCE WHERE SQLITE_SEQUENCE.name == '%s' LIMIT 1 ''' % (self.db_contains_type) with self.get() as conn: row = conn.execute(query).fetchone() return row[0] if row else -1 def get_replication_info(self): """ Get information about the DB required for replication. :returns: dict containing keys from get_info plus max_row and metadata Note:: get_info's _count is translated to just "count" and metadata is the raw string. 
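        For example, a container broker might return something like the
        following (illustrative values; every other key is passed through
        from get_info unchanged)::

            {'count': 42,        # translated from the raw 'object_count'
             'max_row': 42,
             'metadata': '{"X-Container-Meta-Color": ["blue", "0000012345.00000"]}',
             ...}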
""" info = self.get_info() info['count'] = info.pop('%s_count' % self.db_contains_type) info['metadata'] = self.get_raw_metadata() info['max_row'] = self.get_max_row() return info def get_info(self): self._commit_puts_stale_ok() with self.get() as conn: curs = conn.execute('SELECT * from %s_stat' % self.db_type) curs.row_factory = dict_factory return curs.fetchone() def put_record(self, record): if self.db_file == ':memory:': self.merge_items([record]) return if not os.path.exists(self.db_file): raise DatabaseConnectionError(self.db_file, "DB doesn't exist") with lock_parent_directory(self.pending_file, self.pending_timeout): pending_size = 0 try: pending_size = os.path.getsize(self.pending_file) except OSError as err: if err.errno != errno.ENOENT: raise if pending_size > PENDING_CAP: self._commit_puts([record]) else: with open(self.pending_file, 'a+b') as fp: # Colons aren't used in base64 encoding; so they are our # delimiter fp.write(':') fp.write(pickle.dumps( self.make_tuple_for_pickle(record), protocol=PICKLE_PROTOCOL).encode('base64')) fp.flush() def _commit_puts(self, item_list=None): """ Scan for .pending files and commit the found records by feeding them to merge_items(). Assume that lock_parent_directory has already been called. :param item_list: A list of items to commit in addition to .pending """ if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return if item_list is None: item_list = [] self._preallocate() if not os.path.getsize(self.pending_file): if item_list: self.merge_items(item_list) return with open(self.pending_file, 'r+b') as fp: for entry in fp.read().split(':'): if entry: try: self._commit_puts_load(item_list, entry) except Exception: self.logger.exception( _('Invalid pending entry %(file)s: %(entry)s'), {'file': self.pending_file, 'entry': entry}) if item_list: self.merge_items(item_list) try: os.ftruncate(fp.fileno(), 0) except OSError as err: if err.errno != errno.ENOENT: raise def _commit_puts_stale_ok(self): """ Catch failures of _commit_puts() if broker is intended for reading of stats, and thus does not care for pending updates. """ if self.db_file == ':memory:' or not os.path.exists(self.pending_file): return try: with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() except LockTimeout: if not self.stale_reads_ok: raise def _commit_puts_load(self, item_list, entry): """ Unmarshall the :param:entry and append it to :param:item_list. This is implemented by a particular broker to be compatible with its :func:`merge_items`. """ raise NotImplementedError def make_tuple_for_pickle(self, record): """ Turn this db record dict into the format this service uses for pending pickles. """ raise NotImplementedError def merge_syncs(self, sync_points, incoming=True): """ Merge a list of sync points with the incoming sync table. :param sync_points: list of sync points where a sync point is a dict of {'sync_point', 'remote_id'} :param incoming: if True, get the last incoming sync, otherwise get the last outgoing sync """ with self.get() as conn: for rec in sync_points: try: conn.execute(''' INSERT INTO %s_sync (sync_point, remote_id) VALUES (?, ?) ''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) except sqlite3.IntegrityError: conn.execute(''' UPDATE %s_sync SET sync_point=max(?, sync_point) WHERE remote_id=? 
''' % ('incoming' if incoming else 'outgoing'), (rec['sync_point'], rec['remote_id'])) conn.commit() def _preallocate(self): """ The idea is to allocate space in front of an expanding db. If it gets within 512k of a boundary, it allocates to the next boundary. Boundaries are 2m, 5m, 10m, 25m, 50m, then every 50m after. """ if not DB_PREALLOCATION or self.db_file == ':memory:': return MB = (1024 * 1024) def prealloc_points(): for pm in (1, 2, 5, 10, 25, 50): yield pm * MB while True: pm += 50 yield pm * MB stat = os.stat(self.db_file) file_size = stat.st_size allocated_size = stat.st_blocks * 512 for point in prealloc_points(): if file_size <= point - MB / 2: prealloc_size = point break if allocated_size < prealloc_size: with open(self.db_file, 'rb+') as fp: fallocate(fp.fileno(), int(prealloc_size)) def get_raw_metadata(self): with self.get() as conn: try: metadata = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise metadata = '' return metadata @property def metadata(self): """ Returns the metadata dict for the database. The metadata dict values are tuples of (value, timestamp) where the timestamp indicates when that key was set to that value. """ metadata = self.get_raw_metadata() if metadata: metadata = json.loads(metadata) utf8encodekeys(metadata) else: metadata = {} return metadata @staticmethod def validate_metadata(metadata): """ Validates that metadata_falls within acceptable limits. :param metadata: to be validated :raises: HTTPBadRequest if MAX_META_COUNT or MAX_META_OVERALL_SIZE is exceeded """ meta_count = 0 meta_size = 0 for key, (value, timestamp) in metadata.items(): key = key.lower() if value != '' and (key.startswith('x-account-meta') or key.startswith('x-container-meta')): prefix = 'x-account-meta-' if key.startswith('x-container-meta-'): prefix = 'x-container-meta-' key = key[len(prefix):] meta_count = meta_count + 1 meta_size = meta_size + len(key) + len(value) if meta_count > MAX_META_COUNT: raise HTTPBadRequest('Too many metadata items; max %d' % MAX_META_COUNT) if meta_size > MAX_META_OVERALL_SIZE: raise HTTPBadRequest('Total metadata too large; max %d' % MAX_META_OVERALL_SIZE) def update_metadata(self, metadata_updates, validate_metadata=False): """ Updates the metadata dict for the database. The metadata dict values are tuples of (value, timestamp) where the timestamp indicates when that key was set to that value. Key/values will only be overwritten if the timestamp is newer. To delete a key, set its value to ('', timestamp). These empty keys will eventually be removed by :func:`reclaim` """ old_metadata = self.metadata if set(metadata_updates).issubset(set(old_metadata)): for key, (value, timestamp) in metadata_updates.items(): if timestamp > old_metadata[key][1]: break else: return with self.get() as conn: try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] md = json.loads(md) if md else {} utf8encodekeys(md) except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise conn.execute(""" ALTER TABLE %s_stat ADD COLUMN metadata TEXT DEFAULT '' """ % self.db_type) md = {} for key, value_timestamp in metadata_updates.items(): value, timestamp = value_timestamp if key not in md or timestamp > md[key][1]: md[key] = value_timestamp if validate_metadata: DatabaseBroker.validate_metadata(md) conn.execute('UPDATE %s_stat SET metadata = ?' 
% self.db_type, (json.dumps(md),)) conn.commit() def reclaim(self, age_timestamp, sync_timestamp): """ Delete rows from the db_contains_type table that are marked deleted and whose created_at timestamp is < age_timestamp. Also deletes rows from incoming_sync and outgoing_sync where the updated_at timestamp is < sync_timestamp. In addition, this calls the DatabaseBroker's :func:`_reclaim` method. :param age_timestamp: max created_at timestamp of object rows to delete :param sync_timestamp: max update_at timestamp of sync rows to delete """ if self.db_file != ':memory:' and os.path.exists(self.pending_file): with lock_parent_directory(self.pending_file, self.pending_timeout): self._commit_puts() with self.get() as conn: conn.execute(''' DELETE FROM %s WHERE deleted = 1 AND %s < ? ''' % (self.db_contains_type, self.db_reclaim_timestamp), (age_timestamp,)) try: conn.execute(''' DELETE FROM outgoing_sync WHERE updated_at < ? ''', (sync_timestamp,)) conn.execute(''' DELETE FROM incoming_sync WHERE updated_at < ? ''', (sync_timestamp,)) except sqlite3.OperationalError as err: # Old dbs didn't have updated_at in the _sync tables. if 'no such column: updated_at' not in str(err): raise DatabaseBroker._reclaim(self, conn, age_timestamp) conn.commit() def _reclaim(self, conn, timestamp): """ Removes any empty metadata values older than the timestamp using the given database connection. This function will not call commit on the conn, but will instead return True if the database needs committing. This function was created as a worker to limit transactions and commits from other related functions. :param conn: Database connection to reclaim metadata within. :param timestamp: Empty metadata items last updated before this timestamp will be removed. :returns: True if conn.commit() should be called """ try: md = conn.execute('SELECT metadata FROM %s_stat' % self.db_type).fetchone()[0] if md: md = json.loads(md) keys_to_delete = [] for key, (value, value_timestamp) in md.items(): if value == '' and value_timestamp < timestamp: keys_to_delete.append(key) if keys_to_delete: for key in keys_to_delete: del md[key] conn.execute('UPDATE %s_stat SET metadata = ?' % self.db_type, (json.dumps(md),)) return True except sqlite3.OperationalError as err: if 'no such column: metadata' not in str(err): raise return False def update_put_timestamp(self, timestamp): """ Update the put_timestamp. Only modifies it if it is greater than the current timestamp. :param timestamp: internalized put timestamp """ with self.get() as conn: conn.execute( 'UPDATE %s_stat SET put_timestamp = ?' ' WHERE put_timestamp < ?' % self.db_type, (timestamp, timestamp)) conn.commit() def update_status_changed_at(self, timestamp): """ Update the status_changed_at field in the stat table. Only modifies status_changed_at if the timestamp is greater than the current status_changed_at timestamp. :param timestamp: internalized timestamp """ with self.get() as conn: self._update_status_changed_at(conn, timestamp) conn.commit() def _update_status_changed_at(self, conn, timestamp): conn.execute( 'UPDATE %s_stat SET status_changed_at = ?' ' WHERE status_changed_at < ?' 
% self.db_type, (timestamp, timestamp)) swift-2.7.0/swift/common/ring/0000775000567000056710000000000012675204211017400 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/common/ring/utils.py0000664000567000056710000006107712675204037021133 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import defaultdict import optparse import re import socket from swift.common.utils import expand_ipv6 def tiers_for_dev(dev): """ Returns a tuple of tiers for a given device in ascending order by length. :returns: tuple of tiers """ t1 = dev['region'] t2 = dev['zone'] t3 = dev['ip'] t4 = dev['id'] return ((t1,), (t1, t2), (t1, t2, t3), (t1, t2, t3, t4)) def build_tier_tree(devices): """ Construct the tier tree from the zone layout. The tier tree is a dictionary that maps tiers to their child tiers. A synthetic root node of () is generated so that there's one tree, not a forest. Example: region 1 -+---- zone 1 -+---- 192.168.101.1 -+---- device id 0 | | | | | +---- device id 1 | | | | | +---- device id 2 | | | +---- 192.168.101.2 -+---- device id 3 | | | +---- device id 4 | | | +---- device id 5 | +---- zone 2 -+---- 192.168.102.1 -+---- device id 6 | | | +---- device id 7 | | | +---- device id 8 | +---- 192.168.102.2 -+---- device id 9 | +---- device id 10 region 2 -+---- zone 1 -+---- 192.168.201.1 -+---- device id 12 | | | +---- device id 13 | | | +---- device id 14 | +---- 192.168.201.2 -+---- device id 15 | +---- device id 16 | +---- device id 17 The tier tree would look like: { (): [(1,), (2,)], (1,): [(1, 1), (1, 2)], (2,): [(2, 1)], (1, 1): [(1, 1, 192.168.101.1), (1, 1, 192.168.101.2)], (1, 2): [(1, 2, 192.168.102.1), (1, 2, 192.168.102.2)], (2, 1): [(2, 1, 192.168.201.1), (2, 1, 192.168.201.2)], (1, 1, 192.168.101.1): [(1, 1, 192.168.101.1, 0), (1, 1, 192.168.101.1, 1), (1, 1, 192.168.101.1, 2)], (1, 1, 192.168.101.2): [(1, 1, 192.168.101.2, 3), (1, 1, 192.168.101.2, 4), (1, 1, 192.168.101.2, 5)], (1, 2, 192.168.102.1): [(1, 2, 192.168.102.1, 6), (1, 2, 192.168.102.1, 7), (1, 2, 192.168.102.1, 8)], (1, 2, 192.168.102.2): [(1, 2, 192.168.102.2, 9), (1, 2, 192.168.102.2, 10)], (2, 1, 192.168.201.1): [(2, 1, 192.168.201.1, 12), (2, 1, 192.168.201.1, 13), (2, 1, 192.168.201.1, 14)], (2, 1, 192.168.201.2): [(2, 1, 192.168.201.2, 15), (2, 1, 192.168.201.2, 16), (2, 1, 192.168.201.2, 17)], } :devices: device dicts from which to generate the tree :returns: tier tree """ tier2children = defaultdict(set) for dev in devices: for tier in tiers_for_dev(dev): if len(tier) > 1: tier2children[tier[0:-1]].add(tier) else: tier2children[()].add(tier) return tier2children def validate_and_normalize_ip(ip): """ Return normalized ip if the ip is a valid ip. Otherwise raise ValueError Exception. The hostname is normalized to all lower case. IPv6-addresses are converted to lowercase and fully expanded. 
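    For example (illustrative)::

        validate_and_normalize_ip('10.0.0.1')  # -> '10.0.0.1'
        validate_and_normalize_ip('FE80::1')   # -> lowercased, expanded via expand_ipv6()
        validate_and_normalize_ip('bogus')     # raises ValueError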
""" # first convert to lower case new_ip = ip.lower() if is_valid_ipv4(new_ip): return new_ip elif is_valid_ipv6(new_ip): return expand_ipv6(new_ip) else: raise ValueError('Invalid ip %s' % ip) def validate_and_normalize_address(address): """ Return normalized address if the address is a valid ip or hostname. Otherwise raise ValueError Exception. The hostname is normalized to all lower case. IPv6-addresses are converted to lowercase and fully expanded. RFC1123 2.1 Host Names and Nubmers DISCUSSION This last requirement is not intended to specify the complete syntactic form for entering a dotted-decimal host number; that is considered to be a user-interface issue. For example, a dotted-decimal number must be enclosed within "[ ]" brackets for SMTP mail (see Section 5.2.17). This notation could be made universal within a host system, simplifying the syntactic checking for a dotted-decimal number. If a dotted-decimal number can be entered without such identifying delimiters, then a full syntactic check must be made, because a segment of a host domain name is now allowed to begin with a digit and could legally be entirely numeric (see Section 6.1.2.4). However, a valid host name can never have the dotted-decimal form #.#.#.#, since at least the highest-level component label will be alphabetic. """ new_address = address.lstrip('[').rstrip(']') if address.startswith('[') and address.endswith(']'): return validate_and_normalize_ip(new_address) new_address = new_address.lower() if is_valid_ipv4(new_address): return new_address elif is_valid_ipv6(new_address): return expand_ipv6(new_address) elif is_valid_hostname(new_address): return new_address else: raise ValueError('Invalid address %s' % address) def is_valid_ip(ip): """ Return True if the provided ip is a valid IP-address """ return is_valid_ipv4(ip) or is_valid_ipv6(ip) def is_valid_ipv4(ip): """ Return True if the provided ip is a valid IPv4-address """ try: socket.inet_pton(socket.AF_INET, ip) except socket.error: return False return True def is_valid_ipv6(ip): """ Return True if the provided ip is a valid IPv6-address """ try: socket.inet_pton(socket.AF_INET6, ip) except socket.error: # not a valid address return False return True def is_valid_hostname(hostname): """ Return True if the provided hostname is a valid hostname """ if len(hostname) < 1 or len(hostname) > 255: return False if hostname.endswith('.'): # strip exactly one dot from the right, if present hostname = hostname[:-1] allowed = re.compile("(?!-)[A-Z\d-]{1,63}(? can be of the form:: drz-:R:/ _ Where and are replication ip and port. Any part is optional, but you must include at least one part. 
Examples:: d74 Matches the device id 74 r4 Matches devices in region 4 z1 Matches devices in zone 1 z1-1.2.3.4 Matches devices in zone 1 with the ip 1.2.3.4 1.2.3.4 Matches devices in any zone with the ip 1.2.3.4 z1:5678 Matches devices in zone 1 using port 5678 :5678 Matches devices that use port 5678 R5.6.7.8 Matches devices that use replication ip 5.6.7.8 R:5678 Matches devices that use replication port 5678 1.2.3.4R5.6.7.8 Matches devices that use ip 1.2.3.4 and replication ip 5.6.7.8 /sdb1 Matches devices with the device name sdb1 _shiny Matches devices with shiny in the meta data _"snet: 5.6.7.8" Matches devices with snet: 5.6.7.8 in the meta data [::1] Matches devices in any zone with the ip ::1 z1-[::1]:5678 Matches devices in zone 1 with ip ::1 and port 5678 Most specific example:: d74r4z1-1.2.3.4:5678/sdb1_"snet: 5.6.7.8" Nerd explanation: All items require their single character prefix except the ip, in which case the - is optional unless the device id or zone is also included. """ orig_search_value = search_value match = {} if search_value.startswith('d'): i = 1 while i < len(search_value) and search_value[i].isdigit(): i += 1 match['id'] = int(search_value[1:i]) search_value = search_value[i:] if search_value.startswith('r'): i = 1 while i < len(search_value) and search_value[i].isdigit(): i += 1 match['region'] = int(search_value[1:i]) search_value = search_value[i:] if search_value.startswith('z'): i = 1 while i < len(search_value) and search_value[i].isdigit(): i += 1 match['zone'] = int(search_value[1:i]) search_value = search_value[i:] if search_value.startswith('-'): search_value = search_value[1:] if search_value and search_value[0].isdigit(): i = 1 while i < len(search_value) and search_value[i] in '0123456789.': i += 1 match['ip'] = search_value[:i] search_value = search_value[i:] elif search_value and search_value.startswith('['): i = 1 while i < len(search_value) and search_value[i] != ']': i += 1 i += 1 match['ip'] = search_value[:i].lstrip('[').rstrip(']') search_value = search_value[i:] if 'ip' in match: # ipv6 addresses are converted to all lowercase # and use the fully expanded representation match['ip'] = validate_and_normalize_ip(match['ip']) if search_value.startswith(':'): i = 1 while i < len(search_value) and search_value[i].isdigit(): i += 1 match['port'] = int(search_value[1:i]) search_value = search_value[i:] # replication parameters if search_value.startswith('R'): search_value = search_value[1:] if search_value and search_value[0].isdigit(): i = 1 while (i < len(search_value) and search_value[i] in '0123456789.'): i += 1 match['replication_ip'] = search_value[:i] search_value = search_value[i:] elif search_value and search_value.startswith('['): i = 1 while i < len(search_value) and search_value[i] != ']': i += 1 i += 1 match['replication_ip'] = search_value[:i].lstrip('[').rstrip(']') search_value = search_value[i:] if 'replication_ip' in match: # ipv6 addresses are converted to all lowercase # and use the fully expanded representation match['replication_ip'] = \ validate_and_normalize_ip(match['replication_ip']) if search_value.startswith(':'): i = 1 while i < len(search_value) and search_value[i].isdigit(): i += 1 match['replication_port'] = int(search_value[1:i]) search_value = search_value[i:] if search_value.startswith('/'): i = 1 while i < len(search_value) and search_value[i] != '_': i += 1 match['device'] = search_value[1:i] search_value = search_value[i:] if search_value.startswith('_'): match['meta'] = search_value[1:] search_value = '' if 
search_value: raise ValueError('Invalid : %s' % repr(orig_search_value)) return match def parse_search_values_from_opts(opts): """ Convert optparse style options into a dictionary for searching. :param opts: optparse style options :returns: a dictionary with search values to filter devices, supported parameters are id, region, zone, ip, port, replication_ip, replication_port, device, weight, meta """ search_values = {} for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip', 'replication_port', 'device', 'weight', 'meta'): value = getattr(opts, key, None) if value: if key == 'ip' or key == 'replication_ip': value = validate_and_normalize_address(value) search_values[key] = value return search_values def parse_change_values_from_opts(opts): """ Convert optparse style options into a dictionary for changing. :param opts: optparse style options :returns: a dictonary with change values to filter devices, supported parameters are ip, port, replication_ip, replication_port """ change_values = {} for key in ('change_ip', 'change_port', 'change_replication_ip', 'change_replication_port', 'change_device', 'change_meta'): value = getattr(opts, key, None) if value: if key == 'change_ip' or key == 'change_replication_ip': value = validate_and_normalize_address(value) change_values[key.replace('change_', '')] = value return change_values def parse_add_value(add_value): """ Convert an add value, like 'r1z2-10.1.2.3:7878/sdf', to a dictionary. If the string does not start with 'r', then the value of 'region' in the returned dictionary will be None. Callers should check for this and set a reasonable default. This is done so callers can emit errors or warnings if desired. Similarly, 'replication_ip' and 'replication_port' will be None if not specified. :returns: dictionary with keys 'region', 'zone', 'ip', 'port', 'device', 'replication_ip', 'replication_port', 'meta' :raises: ValueError if add_value is malformed """ region = None rest = add_value if add_value.startswith('r'): i = 1 while i < len(add_value) and add_value[i].isdigit(): i += 1 region = int(add_value[1:i]) rest = add_value[i:] if not rest.startswith('z'): raise ValueError('Invalid add value: %s' % add_value) i = 1 while i < len(rest) and rest[i].isdigit(): i += 1 zone = int(rest[1:i]) rest = rest[i:] if not rest.startswith('-'): raise ValueError('Invalid add value: %s' % add_value) ip, port, rest = parse_address(rest[1:]) replication_ip = replication_port = None if rest.startswith('R'): replication_ip, replication_port, rest = \ parse_address(rest[1:]) if not rest.startswith('/'): raise ValueError( 'Invalid add value: %s' % add_value) i = 1 while i < len(rest) and rest[i] != '_': i += 1 device_name = rest[1:i] if not validate_device_name(device_name): raise ValueError('Invalid device name') rest = rest[i:] meta = '' if rest.startswith('_'): meta = rest[1:] return {'region': region, 'zone': zone, 'ip': ip, 'port': port, 'device': device_name, 'replication_ip': replication_ip, 'replication_port': replication_port, 'meta': meta} def parse_address(rest): if rest.startswith('['): # remove first [] for ip rest = rest.replace('[', '', 1).replace(']', '', 1) pos = 0 while (pos < len(rest) and not (rest[pos] == 'R' or rest[pos] == '/')): pos += 1 address = rest[:pos] rest = rest[pos:] port_start = address.rfind(':') if port_start == -1: raise ValueError('Invalid port in add value') ip = address[:port_start] try: port = int(address[(port_start + 1):]) except (TypeError, ValueError): raise ValueError( 'Invalid port %s in add value' % 
address[port_start:]) # if this is an ipv6 address then we want to convert it # to all lowercase and use its fully expanded representation # to make searches easier ip = validate_and_normalize_ip(ip) return (ip, port, rest) def validate_args(argvish): """ Build OptionParse and validate it whether the format is new command-line format or not. """ opts, args = parse_args(argvish) # id can be 0 (swift starts generating id from 0), # also zone, region and weight can be set to zero. new_cmd_format = opts.id is not None or opts.region is not None or \ opts.zone is not None or opts.ip or opts.port or \ opts.replication_ip or opts.replication_port or \ opts.device or opts.weight is not None or opts.meta return (new_cmd_format, opts, args) def parse_args(argvish): """ Build OptionParser and evaluate command line arguments. """ parser = optparse.OptionParser() parser.add_option('-u', '--id', type="int", help="Device ID") parser.add_option('-r', '--region', type="int", help="Region") parser.add_option('-z', '--zone', type="int", help="Zone") parser.add_option('-i', '--ip', type="string", help="IP address") parser.add_option('-p', '--port', type="int", help="Port number") parser.add_option('-j', '--replication-ip', type="string", help="Replication IP address") parser.add_option('-q', '--replication-port', type="int", help="Replication port number") parser.add_option('-d', '--device', type="string", help="Device name (e.g. md0, sdb1)") parser.add_option('-w', '--weight', type="float", help="Device weight") parser.add_option('-m', '--meta', type="string", default="", help="Extra device info (just a string)") parser.add_option('-I', '--change-ip', type="string", help="IP address for change") parser.add_option('-P', '--change-port', type="int", help="Port number for change") parser.add_option('-J', '--change-replication-ip', type="string", help="Replication IP address for change") parser.add_option('-Q', '--change-replication-port', type="int", help="Replication port number for change") parser.add_option('-D', '--change-device', type="string", help="Device name (e.g. md0, sdb1) for change") parser.add_option('-M', '--change-meta', type="string", default="", help="Extra device info (just a string) for change") return parser.parse_args(argvish) def parse_builder_ring_filename_args(argvish): first_arg = argvish[1] if first_arg.endswith('.ring.gz'): ring_file = first_arg builder_file = first_arg[:-len('.ring.gz')] + '.builder' else: builder_file = first_arg if not builder_file.endswith('.builder'): ring_file = first_arg else: ring_file = builder_file[:-len('.builder')] ring_file += '.ring.gz' return builder_file, ring_file def build_dev_from_opts(opts): """ Convert optparse stype options into a device dictionary. """ for attribute, shortopt, longopt in (['region', '-r', '--region'], ['zone', '-z', '--zone'], ['ip', '-i', '--ip'], ['port', '-p', '--port'], ['device', '-d', '--device'], ['weight', '-w', '--weight']): if not getattr(opts, attribute, None): raise ValueError('Required argument %s/%s not specified.' 
% (shortopt, longopt)) ip = validate_and_normalize_address(opts.ip) replication_ip = validate_and_normalize_address( (opts.replication_ip or opts.ip)) replication_port = opts.replication_port or opts.port if not validate_device_name(opts.device): raise ValueError('Invalid device name') return {'region': opts.region, 'zone': opts.zone, 'ip': ip, 'port': opts.port, 'device': opts.device, 'meta': opts.meta, 'replication_ip': replication_ip, 'replication_port': replication_port, 'weight': opts.weight} def dispersion_report(builder, search_filter=None, verbose=False): if not builder._dispersion_graph: builder._build_dispersion_graph() max_allowed_replicas = builder._build_max_replicas_by_tier() worst_tier = None max_dispersion = 0.0 sorted_graph = [] for tier, replica_counts in sorted(builder._dispersion_graph.items()): tier_name = get_tier_name(tier, builder) if search_filter and not re.match(search_filter, tier_name): continue max_replicas = int(max_allowed_replicas[tier]) at_risk_parts = sum(replica_counts[max_replicas + 1:]) placed_parts = sum(replica_counts[1:]) tier_dispersion = 100.0 * at_risk_parts / placed_parts if tier_dispersion > max_dispersion: max_dispersion = tier_dispersion worst_tier = tier_name max_dispersion = max(max_dispersion, tier_dispersion) if not verbose: continue tier_report = { 'max_replicas': max_replicas, 'placed_parts': placed_parts, 'dispersion': tier_dispersion, 'replicas': replica_counts, } sorted_graph.append((tier_name, tier_report)) return { 'max_dispersion': max_dispersion, 'worst_tier': worst_tier, 'graph': sorted_graph, } def get_tier_name(tier, builder): if len(tier) == 1: return "r%s" % (tier[0], ) if len(tier) == 2: return "r%sz%s" % (tier[0], tier[1]) if len(tier) == 3: return "r%sz%s-%s" % (tier[0], tier[1], tier[2]) if len(tier) == 4: device = builder.devs[tier[3]] or {} return "r%sz%s-%s/%s" % (tier[0], tier[1], tier[2], device.get('device', 'IDd%s' % tier[3])) def validate_device_name(device_name): return not ( device_name.startswith(' ') or device_name.endswith(' ') or len(device_name) == 0) swift-2.7.0/swift/common/ring/builder.py0000664000567000056710000021741712675204037021422 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import errno import itertools import logging import math import random import six.moves.cPickle as pickle from copy import deepcopy from contextlib import contextmanager from array import array from collections import defaultdict import six from six.moves import range from time import time from swift.common import exceptions from swift.common.ring import RingData from swift.common.ring.utils import tiers_for_dev, build_tier_tree, \ validate_and_normalize_address # we can't store None's in the replica2part2dev array, so we high-jack # the max value for magic to represent the part is not currently # assigned to any device. 
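# (2 ** 16 - 1 == 65535, the maximum of a 16-bit unsigned short, the type
# used for the array('H') part-to-device tables below, hence that value is
# the sentinel.)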
NONE_DEV = 2 ** 16 - 1 MAX_BALANCE = 999.99 MAX_BALANCE_GATHER_COUNT = 3 class RingValidationWarning(Warning): pass try: # python 2.7+ from logging import NullHandler except ImportError: # python 2.6 class NullHandler(logging.Handler): def emit(self, *a, **kw): pass class RingBuilder(object): """ Used to build swift.common.ring.RingData instances to be written to disk and used with swift.common.ring.Ring instances. See bin/swift-ring-builder for example usage. The instance variable devs_changed indicates if the device information has changed since the last balancing. This can be used by tools to know whether a rebalance request is an isolated request or due to added, changed, or removed devices. :param part_power: number of partitions = 2**part_power. :param replicas: number of replicas for each partition :param min_part_hours: minimum number of hours between partition changes """ def __init__(self, part_power, replicas, min_part_hours): if part_power > 32: raise ValueError("part_power must be at most 32 (was %d)" % (part_power,)) if replicas < 1: raise ValueError("replicas must be at least 1 (was %.6f)" % (replicas,)) if min_part_hours < 0: raise ValueError("min_part_hours must be non-negative (was %d)" % (min_part_hours,)) self.part_power = part_power self.replicas = replicas self.min_part_hours = min_part_hours self.parts = 2 ** self.part_power self.devs = [] self.devs_changed = False self.version = 0 self.overload = 0.0 # _replica2part2dev maps from replica number to partition number to # device id. So, for a three replica, 2**23 ring, it's an array of # three 2**23 arrays of device ids (unsigned shorts). This can work a # bit faster than the 2**23 array of triplet arrays of device ids in # many circumstances. Making one big 2**23 * 3 array didn't seem to # have any speed change; though you're welcome to try it again (it was # a while ago, code-wise, when I last tried it). self._replica2part2dev = None # _last_part_moves is an array of unsigned bytes representing # the number of hours since a given partition was last moved. # This is used to guarantee we don't move a partition twice # within a given number of hours (24 is my usual test). Removing # a device overrides this behavior as it's assumed that's only # done because of device failure. self._last_part_moves = None # _last_part_moves_epoch indicates the time the offsets in # _last_part_moves is based on. self._last_part_moves_epoch = 0 self._last_part_gather_start = 0 self._dispersion_graph = {} self.dispersion = 0.0 self._remove_devs = [] self._ring = None self.logger = logging.getLogger("swift.ring.builder") if not self.logger.handlers: self.logger.disabled = True # silence "no handler for X" error messages self.logger.addHandler(NullHandler()) @contextmanager def debug(self): """ Temporarily enables debug logging, useful in tests, e.g. with rb.debug(): rb.rebalance() """ self.logger.disabled = False try: yield finally: self.logger.disabled = True @property def min_part_seconds_left(self): """Get the total seconds until a rebalance can be performed""" elapsed_seconds = int(time() - self._last_part_moves_epoch) return max((self.min_part_hours * 3600) - elapsed_seconds, 0) def weight_of_one_part(self): """ Returns the weight of each partition as calculated from the total weight of all the devices. 
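        For example (illustrative numbers): with part_power 10 (1024
        partitions), 3 replicas and a total device weight of 300, this
        returns 1024 * 3 / 300 = 10.24, so a device of weight 100 would
        want roughly 100 * 10.24 = 1024 partition replicas.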
""" try: return self.parts * self.replicas / \ sum(d['weight'] for d in self._iter_devs()) except ZeroDivisionError: raise exceptions.EmptyRingError('There are no devices in this ' 'ring, or all devices have been ' 'deleted') @classmethod def from_dict(cls, builder_data): b = cls(1, 1, 1) # Dummy values b.copy_from(builder_data) return b def copy_from(self, builder): """ Reinitializes this RingBuilder instance from data obtained from the builder dict given. Code example:: b = RingBuilder(1, 1, 1) # Dummy values b.copy_from(builder) This is to restore a RingBuilder that has had its b.to_dict() previously saved. """ if hasattr(builder, 'devs'): self.part_power = builder.part_power self.replicas = builder.replicas self.min_part_hours = builder.min_part_hours self.parts = builder.parts self.devs = builder.devs self.devs_changed = builder.devs_changed self.overload = builder.overload self.version = builder.version self._replica2part2dev = builder._replica2part2dev self._last_part_moves_epoch = builder._last_part_moves_epoch self._last_part_moves = builder._last_part_moves self._last_part_gather_start = builder._last_part_gather_start self._remove_devs = builder._remove_devs else: self.part_power = builder['part_power'] self.replicas = builder['replicas'] self.min_part_hours = builder['min_part_hours'] self.parts = builder['parts'] self.devs = builder['devs'] self.devs_changed = builder['devs_changed'] self.overload = builder.get('overload', 0.0) self.version = builder['version'] self._replica2part2dev = builder['_replica2part2dev'] self._last_part_moves_epoch = builder['_last_part_moves_epoch'] self._last_part_moves = builder['_last_part_moves'] self._last_part_gather_start = builder['_last_part_gather_start'] self._dispersion_graph = builder.get('_dispersion_graph', {}) self.dispersion = builder.get('dispersion') self._remove_devs = builder['_remove_devs'] self._ring = None # Old builders may not have a region defined for their devices, in # which case we default it to 1. for dev in self._iter_devs(): dev.setdefault("region", 1) if not self._last_part_moves_epoch: self._last_part_moves_epoch = 0 def __deepcopy__(self, memo): return type(self).from_dict(deepcopy(self.to_dict(), memo)) def to_dict(self): """ Returns a dict that can be used later with copy_from to restore a RingBuilder. swift-ring-builder uses this to pickle.dump the dict to a file and later load that dict into copy_from. """ return {'part_power': self.part_power, 'replicas': self.replicas, 'min_part_hours': self.min_part_hours, 'parts': self.parts, 'devs': self.devs, 'devs_changed': self.devs_changed, 'version': self.version, 'overload': self.overload, '_replica2part2dev': self._replica2part2dev, '_last_part_moves_epoch': self._last_part_moves_epoch, '_last_part_moves': self._last_part_moves, '_last_part_gather_start': self._last_part_gather_start, '_dispersion_graph': self._dispersion_graph, 'dispersion': self.dispersion, '_remove_devs': self._remove_devs} def change_min_part_hours(self, min_part_hours): """ Changes the value used to decide if a given partition can be moved again. This restriction is to give the overall system enough time to settle a partition to its new location before moving it to yet another location. While no data would be lost if a partition is moved several times quickly, it could make that data unreachable for a short period of time. This should be set to at least the average full partition replication time. 
Starting it at 24 hours and then lowering it to what the replicator reports as the longest partition cycle is best. :param min_part_hours: new value for min_part_hours """ self.min_part_hours = min_part_hours def set_replicas(self, new_replica_count): """ Changes the number of replicas in this ring. If the new replica count is sufficiently different that self._replica2part2dev will change size, sets self.devs_changed. This is so tools like bin/swift-ring-builder can know to write out the new ring rather than bailing out due to lack of balance change. """ old_slots_used = int(self.parts * self.replicas) new_slots_used = int(self.parts * new_replica_count) if old_slots_used != new_slots_used: self.devs_changed = True self.replicas = new_replica_count def set_overload(self, overload): self.overload = overload def get_ring(self): """ Get the ring, or more specifically, the swift.common.ring.RingData. This ring data is the minimum required for use of the ring. The ring builder itself keeps additional data such as when partitions were last moved. """ # We cache the self._ring value so multiple requests for it don't build # it multiple times. Be sure to set self._ring = None whenever the ring # will need to be rebuilt. if not self._ring: # Make devs list (with holes for deleted devices) and not including # builder-specific extra attributes. devs = [None] * len(self.devs) for dev in self._iter_devs(): devs[dev['id']] = dict((k, v) for k, v in dev.items() if k not in ('parts', 'parts_wanted')) # Copy over the replica+partition->device assignments, the device # information, and the part_shift value (the number of bits to # shift an unsigned int >I right to obtain the partition for the # int). if not self._replica2part2dev: self._ring = RingData([], devs, 32 - self.part_power) else: self._ring = \ RingData([array('H', p2d) for p2d in self._replica2part2dev], devs, 32 - self.part_power) return self._ring def add_dev(self, dev): """ Add a device to the ring. This device dict should have a minimum of the following keys: ====== =============================================================== id unique integer identifier amongst devices. Defaults to the next id if the 'id' key is not provided in the dict weight a float of the relative weight of this device as compared to others; this indicates how many partitions the builder will try to assign to this device region integer indicating which region the device is in zone integer indicating which zone the device is in; a given partition will not be assigned to multiple devices within the same (region, zone) pair if there is any alternative ip the ip address of the device port the tcp port of the device device the device's name on disk (sdb1, for example) meta general use 'extra' field; for example: the online date, the hardware description ====== =============================================================== .. note:: This will not rebalance the ring immediately as you may want to make multiple changes for a single rebalance. 
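        Example (illustrative values)::

            builder.add_dev({'id': 0, 'region': 1, 'zone': 1,
                             'ip': '10.0.0.1', 'port': 6000,
                             'device': 'sdb1', 'weight': 100.0,
                             'meta': 'rack 2, installed 2016-01'})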
:param dev: device dict :returns: id of device (not used in the tree anymore, but unknown users may depend on it) """ if 'id' not in dev: dev['id'] = 0 if self.devs: try: dev['id'] = self.devs.index(None) except ValueError: dev['id'] = len(self.devs) if dev['id'] < len(self.devs) and self.devs[dev['id']] is not None: raise exceptions.DuplicateDeviceError( 'Duplicate device id: %d' % dev['id']) # Add holes to self.devs to ensure self.devs[dev['id']] will be the dev while dev['id'] >= len(self.devs): self.devs.append(None) dev['weight'] = float(dev['weight']) dev['parts'] = 0 self.devs[dev['id']] = dev self.devs_changed = True self.version += 1 return dev['id'] def set_dev_weight(self, dev_id, weight): """ Set the weight of a device. This should be called rather than just altering the weight key in the device dict directly, as the builder will need to rebuild some internal state to reflect the change. .. note:: This will not rebalance the ring immediately as you may want to make multiple changes for a single rebalance. :param dev_id: device id :param weight: new weight for device """ if any(dev_id == d['id'] for d in self._remove_devs): raise ValueError("Can not set weight of dev_id %s because it " "is marked for removal" % (dev_id,)) self.devs[dev_id]['weight'] = weight self.devs_changed = True self.version += 1 def remove_dev(self, dev_id): """ Remove a device from the ring. .. note:: This will not rebalance the ring immediately as you may want to make multiple changes for a single rebalance. :param dev_id: device id """ dev = self.devs[dev_id] dev['weight'] = 0 self._remove_devs.append(dev) self.devs_changed = True self.version += 1 def rebalance(self, seed=None): """ Rebalance the ring. This is the main work function of the builder, as it will assign and reassign partitions to devices in the ring based on weights, distinct zones, recent reassignments, etc. The process doesn't always perfectly assign partitions (that'd take a lot more analysis and therefore a lot more time -- I had code that did that before). Because of this, it keeps rebalancing until the device skew (number of partitions a device wants compared to what it has) gets below 1% or doesn't change by more than 1% (only happens with ring that can't be balanced no matter what). :returns: (number_of_partitions_altered, resulting_balance, number_of_removed_devices) """ # count up the devs, and cache some stuff num_devices = 0 for dev in self._iter_devs(): dev['tiers'] = tiers_for_dev(dev) if dev['weight'] > 0: num_devices += 1 if num_devices < self.replicas: raise exceptions.RingValidationError( "Replica count of %(replicas)s requires more " "than %(num_devices)s devices" % { 'replicas': self.replicas, 'num_devices': num_devices, }) if seed is not None: random.seed(seed) self._ring = None old_replica2part2dev = copy.deepcopy(self._replica2part2dev) if self._last_part_moves is None: self.logger.debug("New builder; performing initial balance") self._last_part_moves = array('B', itertools.repeat(0, self.parts)) self._update_last_part_moves() replica_plan = self._build_replica_plan() self._set_parts_wanted(replica_plan) assign_parts = defaultdict(list) # gather parts from replica count adjustment self._adjust_replica2part2dev_size(assign_parts) # gather parts from failed devices removed_devs = self._gather_parts_from_failed_devices(assign_parts) # gather parts for dispersion (N.B. 
this only picks up parts that # *must* disperse according to the replica plan) self._gather_parts_for_dispersion(assign_parts, replica_plan) # we'll gather a few times, or until we archive the plan for gather_count in range(MAX_BALANCE_GATHER_COUNT): self._gather_parts_for_balance(assign_parts, replica_plan) if not assign_parts: # most likely min part hours finish_status = 'Unable to finish' break assign_parts_list = list(assign_parts.items()) # shuffle the parts to be reassigned, we have no preference on the # order in which the replica plan is fulfilled. random.shuffle(assign_parts_list) # reset assign_parts map for next iteration assign_parts = defaultdict(list) num_part_replicas = sum(len(r) for p, r in assign_parts_list) self.logger.debug("Gathered %d parts", num_part_replicas) self._reassign_parts(assign_parts_list, replica_plan) self.logger.debug("Assigned %d parts", num_part_replicas) if not sum(d['parts_wanted'] < 0 for d in self._iter_devs()): finish_status = 'Finished' break else: finish_status = 'Unable to finish' self.logger.debug('%s rebalance plan after %s attempts' % ( finish_status, gather_count + 1)) self.devs_changed = False self.version += 1 changed_parts = self._build_dispersion_graph(old_replica2part2dev) # clean up the cache for dev in self._iter_devs(): dev.pop('tiers', None) return changed_parts, self.get_balance(), removed_devs def _build_dispersion_graph(self, old_replica2part2dev=None): """ Build a dict of all tiers in the cluster to a list of the number of parts with a replica count at each index. The values of the dict will be lists of length the maximum whole replica + 1 so that the graph[tier][3] is the number of parts with in the tier with 3 replicas and graph [tier][0] is the number of parts not assigned in this tier. i.e. { : [ , , ... , ], ... } :param old_replica2part2dev: if called from rebalance, the old_replica2part2dev can be used to count moved moved parts. :returns: number of parts with different assignments than old_replica2part2dev if provided """ # Since we're going to loop over every replica of every part we'll # also count up changed_parts if old_replica2part2dev is passed in old_replica2part2dev = old_replica2part2dev or [] # Compare the partition allocation before and after the rebalance # Only changed device ids are taken into account; devices might be # "touched" during the rebalance, but actually not really moved changed_parts = 0 int_replicas = int(math.ceil(self.replicas)) max_allowed_replicas = self._build_max_replicas_by_tier() parts_at_risk = 0 dispersion_graph = {} # go over all the devices holding each replica part by part for part_id, dev_ids in enumerate( six.moves.zip(*self._replica2part2dev)): # count the number of replicas of this part for each tier of each # device, some devices may have overlapping tiers! 
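            # (e.g. two replicas that land on different devices of the same
            # server both count toward that server's (region, zone, ip) tier)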
replicas_at_tier = defaultdict(int) for rep_id, dev in enumerate(iter( self.devs[dev_id] for dev_id in dev_ids)): for tier in (dev.get('tiers') or tiers_for_dev(dev)): replicas_at_tier[tier] += 1 # IndexErrors will be raised if the replicas are increased or # decreased, and that actually means the partition has changed try: old_device = old_replica2part2dev[rep_id][part_id] except IndexError: changed_parts += 1 continue if old_device != dev['id']: changed_parts += 1 part_at_risk = False # update running totals for each tiers' number of parts with a # given replica count for tier, replicas in replicas_at_tier.items(): if tier not in dispersion_graph: dispersion_graph[tier] = [self.parts] + [0] * int_replicas dispersion_graph[tier][0] -= 1 dispersion_graph[tier][replicas] += 1 if replicas > max_allowed_replicas[tier]: part_at_risk = True # this part may be at risk in multiple tiers, but we only count it # as at_risk once if part_at_risk: parts_at_risk += 1 self._dispersion_graph = dispersion_graph self.dispersion = 100.0 * parts_at_risk / self.parts return changed_parts def validate(self, stats=False): """ Validate the ring. This is a safety function to try to catch any bugs in the building process. It ensures partitions have been assigned to real devices, aren't doubly assigned, etc. It can also optionally check the even distribution of partitions across devices. :param stats: if True, check distribution of partitions across devices :returns: if stats is True, a tuple of (device_usage, worst_stat), else (None, None). device_usage[dev_id] will equal the number of partitions assigned to that device. worst_stat will equal the number of partitions the worst device is skewed from the number it should have. :raises RingValidationError: problem was found with the ring. """ # "len" showed up in profiling, so it's just computed once. dev_len = len(self.devs) parts_on_devs = sum(d['parts'] for d in self._iter_devs()) if not self._replica2part2dev: raise exceptions.RingValidationError( '_replica2part2dev empty; did you forget to rebalance?') parts_in_map = sum(len(p2d) for p2d in self._replica2part2dev) if parts_on_devs != parts_in_map: raise exceptions.RingValidationError( 'All partitions are not double accounted for: %d != %d' % (parts_on_devs, parts_in_map)) if stats: # dev_usage[dev_id] will equal the number of partitions assigned to # that device. dev_usage = array('I', (0 for _junk in range(dev_len))) for part2dev in self._replica2part2dev: for dev_id in part2dev: dev_usage[dev_id] += 1 for dev in self._iter_devs(): if not isinstance(dev['port'], int): raise exceptions.RingValidationError( "Device %d has port %r, which is not an integer." % (dev['id'], dev['port'])) int_replicas = int(math.ceil(self.replicas)) rep2part_len = map(len, self._replica2part2dev) # check the assignments of each part's replicas for part in range(self.parts): devs_for_part = [] for replica, part_len in enumerate(rep2part_len): if part_len <= part: # last replica may be short on parts because of floating # replica count if replica + 1 < int_replicas: raise exceptions.RingValidationError( "The partition assignments of replica %r were " "shorter than expected (%s < %s) - this should " "only happen for the last replica" % ( replica, len(self._replica2part2dev[replica]), self.parts, )) break dev_id = self._replica2part2dev[replica][part] if dev_id >= dev_len or not self.devs[dev_id]: raise exceptions.RingValidationError( "Partition %d, replica %d was not allocated " "to a device." 
% (part, replica)) devs_for_part.append(dev_id) if len(devs_for_part) != len(set(devs_for_part)): raise exceptions.RingValidationError( "The partition %s has been assigned to " "duplicate devices %r" % ( part, devs_for_part)) if stats: weight_of_one_part = self.weight_of_one_part() worst = 0 for dev in self._iter_devs(): if not dev['weight']: if dev_usage[dev['id']]: # If a device has no weight, but has partitions, then # its overage is considered "infinity" and therefore # always the worst possible. We show MAX_BALANCE for # convenience. worst = MAX_BALANCE break continue skew = abs(100.0 * dev_usage[dev['id']] / (dev['weight'] * weight_of_one_part) - 100.0) if skew > worst: worst = skew return dev_usage, worst return None, None def _build_balance_per_dev(self): """ Build a map of => where is a float representing the percentage difference from the desired amount of partitions a given device wants and the amount it has. N.B. this method only considers a device's weight and the parts assigned, not the parts wanted according to the replica plan. """ weight_of_one_part = self.weight_of_one_part() balance_per_dev = {} for dev in self._iter_devs(): if not dev['weight']: if dev['parts']: # If a device has no weight, but has partitions, then its # overage is considered "infinity" and therefore always the # worst possible. We show MAX_BALANCE for convenience. balance = MAX_BALANCE else: balance = 0 else: balance = 100.0 * dev['parts'] / ( dev['weight'] * weight_of_one_part) - 100.0 balance_per_dev[dev['id']] = balance return balance_per_dev def get_balance(self): """ Get the balance of the ring. The balance value is the highest percentage off the desired amount of partitions a given device wants. For instance, if the "worst" device wants (based on its weight relative to the sum of all the devices' weights) 123 partitions and it has 124 partitions, the balance value would be 0.83 (1 extra / 123 wanted * 100 for percentage). :returns: balance of the ring """ balance_per_dev = self._build_balance_per_dev() return max(abs(b) for b in balance_per_dev.values()) def get_required_overload(self, weighted=None, wanted=None): """ Returns the minimum overload value required to make the ring maximally dispersed. The required overload is the largest percentage change of any single device from its weighted replicanth to its wanted replicanth (note under weighted devices have a negative percentage change) to archive dispersion - that is to say a single device that must be overloaded by 5% is worse than 5 devices in a single tier overloaded by 1%. """ weighted = weighted or self._build_weighted_replicas_by_tier() wanted = wanted or self._build_wanted_replicas_by_tier() max_overload = 0.0 for dev in self._iter_devs(): tier = (dev['region'], dev['zone'], dev['ip'], dev['id']) if not dev['weight']: if tier not in wanted or not wanted[tier]: continue raise exceptions.RingValidationError( 'Device %s has zero weight and ' 'should not want any replicas' % (tier,)) required = (wanted[tier] - weighted[tier]) / weighted[tier] self.logger.debug('%s wants %s and is weighted for %s so ' 'therefore requires %s overload' % ( tier, wanted[tier], weighted[tier], required)) if required > max_overload: max_overload = required return max_overload def pretend_min_part_hours_passed(self): """ Override min_part_hours by marking all partitions as having been moved 255 hours ago and last move epoch to 'the beginning of time'. This can be used to force a full rebalance on the next call to rebalance. 
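        Typical use (illustrative)::

            builder.pretend_min_part_hours_passed()
            builder.rebalance()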
""" for part in range(self.parts): self._last_part_moves[part] = 0xff self._last_part_moves_epoch = 0 def get_part_devices(self, part): """ Get the devices that are responsible for the partition, filtering out duplicates. :param part: partition to get devices for :returns: list of device dicts """ devices = [] for dev in self._devs_for_part(part): if dev not in devices: devices.append(dev) return devices def _iter_devs(self): """ Returns an iterator all the non-None devices in the ring. Note that this means list(b._iter_devs())[some_id] may not equal b.devs[some_id]; you will have to check the 'id' key of each device to obtain its dev_id. """ for dev in self.devs: if dev is not None: yield dev def _build_tier2children(self): """ Wrap helper build_tier_tree so exclude zero-weight devices. """ return build_tier_tree(d for d in self._iter_devs() if d['weight']) def _set_parts_wanted(self, replica_plan): """ Sets the parts_wanted key for each of the devices to the number of partitions the device wants based on its relative weight. This key is used to sort the devices according to "most wanted" during rebalancing to best distribute partitions. A negative parts_wanted indicates the device is "overweight" and wishes to give partitions away if possible. :param replica_plan: a dict of dicts, as returned from _build_replica_plan, that that maps each tier to it's target replicanths. """ tier2children = self._build_tier2children() parts_by_tier = defaultdict(int) def place_parts(tier, parts): parts_by_tier[tier] = parts sub_tiers = sorted(tier2children[tier]) if not sub_tiers: return to_place = defaultdict(int) for t in sub_tiers: to_place[t] = int(math.floor( replica_plan[t]['target'] * self.parts)) parts -= to_place[t] # if there's some parts left over, just throw 'em about sub_tier_gen = itertools.cycle(sorted( sub_tiers, key=lambda t: replica_plan[t]['target'])) while parts: t = next(sub_tier_gen) to_place[t] += 1 parts -= 1 for t, p in to_place.items(): place_parts(t, p) total_parts = int(self.replicas * self.parts) place_parts((), total_parts) # belts & suspenders/paranoia - at every level, the sum of # parts_by_tier should be total_parts for the ring tiers = ['cluster', 'regions', 'zones', 'servers', 'devices'] for i, tier_name in enumerate(tiers): parts_at_tier = sum(parts_by_tier[t] for t in parts_by_tier if len(t) == i) if parts_at_tier != total_parts: raise exceptions.RingValidationError( '%s != %s at tier %s' % ( parts_at_tier, total_parts, tier_name)) for dev in self._iter_devs(): if not dev['weight']: # With no weight, that means we wish to "drain" the device. So # we set the parts_wanted to a really large negative number to # indicate its strong desire to give up everything it has. dev['parts_wanted'] = -self.parts * self.replicas else: tier = (dev['region'], dev['zone'], dev['ip'], dev['id']) dev['parts_wanted'] = parts_by_tier[tier] - dev['parts'] def _update_last_part_moves(self): """ Updates how many hours ago each partition was moved based on the current time. The builder won't move a partition that has been moved more recently than min_part_hours. """ elapsed_hours = int(time() - self._last_part_moves_epoch) / 3600 if elapsed_hours <= 0: return for part in range(self.parts): # The "min(self._last_part_moves[part] + elapsed_hours, 0xff)" # which was here showed up in profiling, so it got inlined. 
last_plus_elapsed = self._last_part_moves[part] + elapsed_hours if last_plus_elapsed < 0xff: self._last_part_moves[part] = last_plus_elapsed else: self._last_part_moves[part] = 0xff self._last_part_moves_epoch = int(time()) def _gather_parts_from_failed_devices(self, assign_parts): """ Update the map of partition => [replicas] to be reassigned from removed devices. """ # First we gather partitions from removed devices. Since removed # devices usually indicate device failures, we have no choice but to # reassign these partitions. However, we mark them as moved so later # choices will skip other replicas of the same partition if possible. if self._remove_devs: dev_ids = [d['id'] for d in self._remove_devs if d['parts']] if dev_ids: for part, replica in self._each_part_replica(): dev_id = self._replica2part2dev[replica][part] if dev_id in dev_ids: self._replica2part2dev[replica][part] = NONE_DEV self._last_part_moves[part] = 0 assign_parts[part].append(replica) self.logger.debug( "Gathered %d/%d from dev %d [dev removed]", part, replica, dev_id) removed_devs = 0 while self._remove_devs: remove_dev_id = self._remove_devs.pop()['id'] self.logger.debug("Removing dev %d", remove_dev_id) self.devs[remove_dev_id] = None removed_devs += 1 return removed_devs def _adjust_replica2part2dev_size(self, to_assign): """ Make sure that the lengths of the arrays in _replica2part2dev are correct for the current value of self.replicas. Example: self.part_power = 8 self.replicas = 2.25 self._replica2part2dev will contain 3 arrays: the first 2 of length 256 (2**8), and the last of length 64 (0.25 * 2**8). Update the mapping of partition => [replicas] that need assignment. """ fractional_replicas, whole_replicas = math.modf(self.replicas) whole_replicas = int(whole_replicas) removed_parts = 0 new_parts = 0 desired_lengths = [self.parts] * whole_replicas if fractional_replicas: desired_lengths.append(int(self.parts * fractional_replicas)) if self._replica2part2dev is not None: # If we crossed an integer threshold (say, 4.1 --> 4), # we'll have a partial extra replica clinging on here. Clean # up any such extra stuff. for part2dev in self._replica2part2dev[len(desired_lengths):]: for dev_id in part2dev: dev_losing_part = self.devs[dev_id] dev_losing_part['parts'] -= 1 removed_parts -= 1 self._replica2part2dev = \ self._replica2part2dev[:len(desired_lengths)] else: self._replica2part2dev = [] for replica, desired_length in enumerate(desired_lengths): if replica < len(self._replica2part2dev): part2dev = self._replica2part2dev[replica] if len(part2dev) < desired_length: # Not long enough: needs to be extended and the # newly-added pieces assigned to devices. for part in range(len(part2dev), desired_length): to_assign[part].append(replica) part2dev.append(NONE_DEV) new_parts += 1 elif len(part2dev) > desired_length: # Too long: truncate this mapping. for part in range(desired_length, len(part2dev)): dev_losing_part = self.devs[part2dev[part]] dev_losing_part['parts'] -= 1 removed_parts -= 1 self._replica2part2dev[replica] = part2dev[:desired_length] else: # Mapping not present at all: make one up and assign # all of it. 
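# Illustrative sketch (not from the Swift source): how a fractional replica
# count maps onto the per-replica array lengths that
# _adjust_replica2part2dev_size() above maintains. Values are invented.
import math

def desired_lengths(parts, replicas):
    fractional, whole = math.modf(replicas)
    lengths = [parts] * int(whole)
    if fractional:
        lengths.append(int(parts * fractional))
    return lengths

print(desired_lengths(256, 2.25))   # [256, 256, 64]
print(desired_lengths(256, 3.0))    # [256, 256, 256]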
for part in range(desired_length): to_assign[part].append(replica) new_parts += 1 self._replica2part2dev.append( array('H', itertools.repeat(NONE_DEV, desired_length))) self.logger.debug( "%d new parts and %d removed parts from replica-count change", new_parts, removed_parts) def _gather_parts_for_dispersion(self, assign_parts, replica_plan): """ Update the map of partition => [replicas] to be reassigned from insufficiently-far-apart replicas. """ # Now we gather partitions that are "at risk" because they aren't # currently sufficient spread out across the cluster. for part in range(self.parts): if self._last_part_moves[part] < self.min_part_hours: continue # First, add up the count of replicas at each tier for each # partition. replicas_at_tier = defaultdict(int) for dev in self._devs_for_part(part): for tier in dev['tiers']: replicas_at_tier[tier] += 1 # Now, look for partitions not yet spread out enough. undispersed_dev_replicas = [] for replica in self._replicas_for_part(part): dev_id = self._replica2part2dev[replica][part] if dev_id == NONE_DEV: continue dev = self.devs[dev_id] if all(replicas_at_tier[tier] <= replica_plan[tier]['max'] for tier in dev['tiers']): continue undispersed_dev_replicas.append((dev, replica)) if not undispersed_dev_replicas: continue undispersed_dev_replicas.sort( key=lambda dr: dr[0]['parts_wanted']) for dev, replica in undispersed_dev_replicas: # the min part hour check is ignored iff a device has more # than one replica of a part assigned to it - which would have # only been possible on rings built with older version of code if (self._last_part_moves[part] < self.min_part_hours and not replicas_at_tier[dev['tiers'][-1]] > 1): continue dev['parts_wanted'] += 1 dev['parts'] -= 1 assign_parts[part].append(replica) self.logger.debug( "Gathered %d/%d from dev %d [dispersion]", part, replica, dev['id']) self._replica2part2dev[replica][part] = NONE_DEV for tier in dev['tiers']: replicas_at_tier[tier] -= 1 self._last_part_moves[part] = 0 def _gather_parts_for_balance_can_disperse(self, assign_parts, start, replica_plan): """ Update the map of partition => [replicas] to be reassigned from overweight drives where the replicas can be better dispersed to another failure domain. :param assign_parts: the map of partition => [replica] to update :param start: offset into self.parts to begin search :param replica_plan: replicanth targets for tiers """ # Last, we gather partitions from devices that are "overweight" because # they have more partitions than their parts_wanted. 
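# Illustrative sketch (not from the Swift source): the replicas_at_tier
# bookkeeping used above while gathering undispersed parts. The tier tuples
# below are hypothetical; real ones come from tiers_for_dev().
from collections import defaultdict

def count_replicas_at_tier(devs_for_part):
    replicas_at_tier = defaultdict(int)
    for dev in devs_for_part:
        for tier in dev['tiers']:
            replicas_at_tier[tier] += 1
    return replicas_at_tier

devs = [
    {'tiers': ((1,), (1, 1), (1, 1, '10.0.0.1'), (1, 1, '10.0.0.1', 0))},
    {'tiers': ((1,), (1, 1), (1, 1, '10.0.0.1'), (1, 1, '10.0.0.1', 1))},
    {'tiers': ((1,), (1, 2), (1, 2, '10.0.0.2'), (1, 2, '10.0.0.2', 2))},
]
# two replicas sharing a server show up as a count of 2 at the ip tier,
# which is what marks the partition as insufficiently dispersed
print(count_replicas_at_tier(devs)[(1, 1, '10.0.0.1')])   # 2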
for offset in range(self.parts): part = (start + offset) % self.parts if self._last_part_moves[part] < self.min_part_hours: continue # For each part we'll look at the devices holding those parts and # see if any are overweight, keeping track of replicas_at_tier as # we go overweight_dev_replica = [] replicas_at_tier = defaultdict(int) for replica in self._replicas_for_part(part): dev_id = self._replica2part2dev[replica][part] if dev_id == NONE_DEV: continue dev = self.devs[dev_id] for tier in dev['tiers']: replicas_at_tier[tier] += 1 if dev['parts_wanted'] < 0: overweight_dev_replica.append((dev, replica)) if not overweight_dev_replica: continue overweight_dev_replica.sort( key=lambda dr: dr[0]['parts_wanted']) for dev, replica in overweight_dev_replica: if self._last_part_moves[part] < self.min_part_hours: break if any(replica_plan[tier]['min'] <= replicas_at_tier[tier] < replica_plan[tier]['max'] for tier in dev['tiers']): continue # this is the most overweight_device holding a replica # of this part that can shed it according to the plan dev['parts_wanted'] += 1 dev['parts'] -= 1 assign_parts[part].append(replica) self.logger.debug( "Gathered %d/%d from dev %d [weight disperse]", part, replica, dev['id']) self._replica2part2dev[replica][part] = NONE_DEV for tier in dev['tiers']: replicas_at_tier[tier] -= 1 self._last_part_moves[part] = 0 def _gather_parts_for_balance(self, assign_parts, replica_plan): """ Gather parts that look like they should move for balance reasons. A simple gather of parts that looks dispersible normally works out, we'll switch strategies if things don't be seem to moving... """ # pick a random starting point on the other side of the ring quarter_turn = (self.parts // 4) random_half = random.randint(0, self.parts / 2) start = (self._last_part_gather_start + quarter_turn + random_half) % self.parts self.logger.debug('Gather start is %s ' '(Last start was %s)' % ( start, self._last_part_gather_start)) self._last_part_gather_start = start self._gather_parts_for_balance_can_disperse( assign_parts, start, replica_plan) if not assign_parts: self._gather_parts_for_balance_forced(assign_parts, start) def _gather_parts_for_balance_forced(self, assign_parts, start, **kwargs): """ Update the map of partition => [replicas] to be reassigned from overweight drives without restriction, parts gathered from this method may be placed back onto devices that are no better (or worse) than the device from which they are gathered. This method allows devices to flop around enough to unlock replicas that would have otherwise potentially been locked because of dispersion - it should be used as a last resort. :param assign_parts: the map of partition => [replica] to update :param start: offset into self.parts to begin search """ for offset in range(self.parts): part = (start + offset) % self.parts if self._last_part_moves[part] < self.min_part_hours: continue overweight_dev_replica = [] for replica in self._replicas_for_part(part): dev_id = self._replica2part2dev[replica][part] if dev_id == NONE_DEV: continue dev = self.devs[dev_id] if dev['parts_wanted'] < 0: overweight_dev_replica.append((dev, replica)) if not overweight_dev_replica: continue overweight_dev_replica.sort( key=lambda dr: dr[0]['parts_wanted']) for dev, replica in overweight_dev_replica: if self._last_part_moves[part] < self.min_part_hours: break # this is the most overweight_device holding a replica of this # part we don't know where it's going to end up - but we'll # pick it up and hope for the best. 
dev['parts_wanted'] += 1 dev['parts'] -= 1 assign_parts[part].append(replica) self.logger.debug( "Gathered %d/%d from dev %d [weight forced]", part, replica, dev['id']) self._replica2part2dev[replica][part] = NONE_DEV self._last_part_moves[part] = 0 def _reassign_parts(self, reassign_parts, replica_plan): """ For an existing ring data set, partitions are reassigned similarly to the initial assignment. The devices are ordered by how many partitions they still want and kept in that order throughout the process. The gathered partitions are iterated through, assigning them to devices according to the "most wanted" while keeping the replicas as "far apart" as possible. Two different regions are considered the farthest-apart things, followed by zones, then different ip within a zone; the least-far-apart things are different devices with the same ip in the same zone. :param reassign_parts: An iterable of (part, replicas_to_replace) pairs. replicas_to_replace is an iterable of the replica (an int) to replace for that partition. replicas_to_replace may be shared for multiple partitions, so be sure you do not modify it. """ parts_available_in_tier = defaultdict(int) for dev in self._iter_devs(): dev['sort_key'] = self._sort_key_for(dev) # Note: this represents how many partitions may be assigned to a # given tier (region/zone/server/disk). It does not take into # account how many partitions a given tier wants to shed. # # If we did not do this, we could have a zone where, at some # point during assignment, number-of-parts-to-gain equals # number-of-parts-to-shed. At that point, no further placement # into that zone would occur since its parts_available_in_tier # would be 0. This would happen any time a zone had any device # with partitions to shed, which is any time a device is being # removed, which is a pretty frequent operation. wanted = max(dev['parts_wanted'], 0) for tier in dev['tiers']: parts_available_in_tier[tier] += wanted available_devs = \ sorted((d for d in self._iter_devs() if d['weight']), key=lambda x: x['sort_key']) tier2devs = defaultdict(list) tier2sort_key = defaultdict(tuple) tier2dev_sort_key = defaultdict(list) max_tier_depth = 0 for dev in available_devs: for tier in dev['tiers']: tier2devs[tier].append(dev) # <-- starts out sorted! tier2dev_sort_key[tier].append(dev['sort_key']) tier2sort_key[tier] = dev['sort_key'] if len(tier) > max_tier_depth: max_tier_depth = len(tier) tier2children_sets = build_tier_tree(available_devs) tier2children = defaultdict(list) tier2children_sort_key = {} tiers_list = [()] depth = 1 while depth <= max_tier_depth: new_tiers_list = [] for tier in tiers_list: child_tiers = list(tier2children_sets[tier]) child_tiers.sort(key=tier2sort_key.__getitem__) tier2children[tier] = child_tiers tier2children_sort_key[tier] = map( tier2sort_key.__getitem__, child_tiers) new_tiers_list.extend(child_tiers) tiers_list = new_tiers_list depth += 1 for part, replace_replicas in reassign_parts: # always update part_moves for min_part_hours self._last_part_moves[part] = 0 # count up where these replicas be replicas_at_tier = defaultdict(int) for dev in self._devs_for_part(part): for tier in dev['tiers']: replicas_at_tier[tier] += 1 for replica in replace_replicas: # Find a new home for this replica tier = () # This used to be a cute, recursive function, but it's been # unrolled for performance. depth = 1 while depth <= max_tier_depth: # Choose the roomiest tier among those that don't # already have their max replicas assigned according # to the replica_plan. 
candidates = [t for t in tier2children[tier] if replicas_at_tier[t] < replica_plan[t]['max']] if not candidates: raise Exception('no home for %s/%s %s' % ( part, replica, {t: ( replicas_at_tier[t], replica_plan[t]['max'], ) for t in tier2children[tier]})) tier = max(candidates, key=lambda t: parts_available_in_tier[t]) depth += 1 dev = tier2devs[tier][-1] dev['parts_wanted'] -= 1 dev['parts'] += 1 for tier in dev['tiers']: parts_available_in_tier[tier] -= 1 replicas_at_tier[tier] += 1 self._replica2part2dev[replica][part] = dev['id'] self.logger.debug( "Placed %d/%d onto dev %d", part, replica, dev['id']) # Just to save memory and keep from accidental reuse. for dev in self._iter_devs(): del dev['sort_key'] @staticmethod def _sort_key_for(dev): return (dev['parts_wanted'], random.randint(0, 0xFFFF), dev['id']) def _build_max_replicas_by_tier(self, bound=math.ceil): """ Returns a defaultdict of (tier: replica_count) for all tiers in the ring excluding zero weight devices. There will always be a () entry as the root of the structure, whose replica_count will equal the ring's replica_count. Then there will be (region,) entries for each region, indicating the maximum number of replicas the region might have for any given partition. Next there will be (region, zone) entries for each zone, indicating the maximum number of replicas in a given region and zone. Anything greater than 1 indicates a partition at slightly elevated risk, as if that zone were to fail multiple replicas of that partition would be unreachable. Next there will be (region, zone, ip_port) entries for each node, indicating the maximum number of replicas stored on a node in a given region and zone. Anything greater than 1 indicates a partition at elevated risk, as if that ip_port were to fail multiple replicas of that partition would be unreachable. Last there will be (region, zone, ip_port, device) entries for each device, indicating the maximum number of replicas the device shares with other devices on the same node for any given partition. Anything greater than 1 indicates a partition at serious risk, as the data on that partition will not be stored distinctly at the ring's replica_count. Example return dict for the common SAIO setup:: {(): 3.0, (1,): 3.0, (1, 1): 1.0, (1, 1, '127.0.0.1:6010'): 1.0, (1, 1, '127.0.0.1:6010', 0): 1.0, (1, 2): 1.0, (1, 2, '127.0.0.1:6020'): 1.0, (1, 2, '127.0.0.1:6020', 1): 1.0, (1, 3): 1.0, (1, 3, '127.0.0.1:6030'): 1.0, (1, 3, '127.0.0.1:6030', 2): 1.0, (1, 4): 1.0, (1, 4, '127.0.0.1:6040'): 1.0, (1, 4, '127.0.0.1:6040', 3): 1.0} """ # Used by walk_tree to know what entries to create for each recursive # call. tier2children = self._build_tier2children() def walk_tree(tier, replica_count): if len(tier) == 4: # special case for device, it's not recursive replica_count = min(1, replica_count) mr = {tier: replica_count} if tier in tier2children: subtiers = tier2children[tier] for subtier in subtiers: submax = bound(float(replica_count) / len(subtiers)) mr.update(walk_tree(subtier, submax)) return mr mr = defaultdict(float) mr.update(walk_tree((), self.replicas)) return mr def _build_weighted_replicas_by_tier(self): """ Returns a dict mapping => replicanths for all tiers in the ring based on their weights. 
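# Illustrative sketch (not from the Swift source): the ceil-divide recursion
# behind _build_max_replicas_by_tier() above -- each child tier can hold at
# most ceil(parent / children) replicas of any one partition. The real
# method also caps the device tier at 1; the topology below is invented.
import math

def max_replicas(tier2children, tier=(), replicas=3.0):
    mr = {tier: replicas}
    children = tier2children.get(tier, ())
    for child in children:
        mr.update(max_replicas(tier2children, child,
                               math.ceil(float(replicas) / len(children))))
    return mr

# one region, two zones, three replicas: some partition may have to keep
# two replicas in a single zone (slightly elevated risk)
topology = {(): [(1,)], (1,): [(1, 1), (1, 2)]}
print(max_replicas(topology)[(1, 1)])   # 2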
""" weight_of_one_part = self.weight_of_one_part() # assign each device some replicanths by weight (can't be > 1) weighted_replicas_for_dev = {} devices_with_room = [] for dev in self._iter_devs(): if not dev['weight']: continue weighted_replicas = ( dev['weight'] * weight_of_one_part / self.parts) if weighted_replicas < 1: devices_with_room.append(dev['id']) else: weighted_replicas = 1 weighted_replicas_for_dev[dev['id']] = weighted_replicas while True: remaining = self.replicas - sum(weighted_replicas_for_dev.values()) if remaining < 1e-10: break devices_with_room = [d for d in devices_with_room if weighted_replicas_for_dev[d] < 1] rel_weight = remaining / sum( weighted_replicas_for_dev[d] for d in devices_with_room) for d in devices_with_room: weighted_replicas_for_dev[d] = min( 1, weighted_replicas_for_dev[d] * (rel_weight + 1)) weighted_replicas_by_tier = defaultdict(float) for dev in self._iter_devs(): if not dev['weight']: continue assigned_replicanths = weighted_replicas_for_dev[dev['id']] dev_tier = (dev['region'], dev['zone'], dev['ip'], dev['id']) for i in range(len(dev_tier) + 1): tier = dev_tier[:i] weighted_replicas_by_tier[tier] += assigned_replicanths # belts & suspenders/paranoia - at every level, the sum of # weighted_replicas should be very close to the total number of # replicas for the ring tiers = ['cluster', 'regions', 'zones', 'servers', 'devices'] for i, tier_name in enumerate(tiers): replicas_at_tier = sum(weighted_replicas_by_tier[t] for t in weighted_replicas_by_tier if len(t) == i) if abs(self.replicas - replicas_at_tier) > 1e-10: raise exceptions.RingValidationError( '%s != %s at tier %s' % ( replicas_at_tier, self.replicas, tier_name)) return weighted_replicas_by_tier def _build_wanted_replicas_by_tier(self): """ Returns a defaultdict of (tier: replicanths) for all tiers in the ring based on unique-as-possible (full dispersion) with respect to their weights and device counts. N.B. _build_max_replicas_by_tier calculates the upper bound on the replicanths each tier may hold irrespective of the weights of the tier; this method will calculate the minimum replicanth <= max_replicas[tier] that will still solve dispersion. However it is not guaranteed to return a fully dispersed solution if failure domains are over-weighted for their device count. 
""" weighted_replicas = self._build_weighted_replicas_by_tier() dispersed_replicas = { t: { 'min': math.floor(r), 'max': math.ceil(r), } for (t, r) in self._build_max_replicas_by_tier(bound=float).items() } # watch out for device limited tiers num_devices = defaultdict(int) for d in self._iter_devs(): if d['weight'] <= 0: continue for t in (d.get('tiers') or tiers_for_dev(d)): num_devices[t] += 1 num_devices[()] += 1 tier2children = self._build_tier2children() wanted_replicas = defaultdict(float) def place_replicas(tier, replicanths): if replicanths > num_devices[tier]: raise exceptions.RingValidationError( 'More than replicanths (%s) than devices (%s) ' 'in tier (%s)' % (replicanths, num_devices[tier], tier)) wanted_replicas[tier] = replicanths sub_tiers = sorted(tier2children[tier]) if not sub_tiers: return to_place = defaultdict(float) remaining = replicanths tiers_to_spread = sub_tiers device_limited = False while True: rel_weight = remaining / sum(weighted_replicas[t] for t in tiers_to_spread) for t in tiers_to_spread: replicas = to_place[t] + ( weighted_replicas[t] * rel_weight) if replicas < dispersed_replicas[t]['min']: replicas = dispersed_replicas[t]['min'] elif (replicas > dispersed_replicas[t]['max'] and not device_limited): replicas = dispersed_replicas[t]['max'] if replicas > num_devices[t]: replicas = num_devices[t] to_place[t] = replicas remaining = replicanths - sum(to_place.values()) if remaining < -1e-10: tiers_to_spread = [ t for t in sub_tiers if to_place[t] > dispersed_replicas[t]['min'] ] elif remaining > 1e-10: tiers_to_spread = [ t for t in sub_tiers if (num_devices[t] > to_place[t] < dispersed_replicas[t]['max']) ] if not tiers_to_spread: device_limited = True tiers_to_spread = [ t for t in sub_tiers if to_place[t] < num_devices[t] ] else: # remaining is "empty" break for t in sub_tiers: self.logger.debug('Planning %s on %s', to_place[t], t) place_replicas(t, to_place[t]) # place all replicas in the cluster tier place_replicas((), self.replicas) # belts & suspenders/paranoia - at every level, the sum of # wanted_replicas should be very close to the total number of # replicas for the ring tiers = ['cluster', 'regions', 'zones', 'servers', 'devices'] for i, tier_name in enumerate(tiers): replicas_at_tier = sum(wanted_replicas[t] for t in wanted_replicas if len(t) == i) if abs(self.replicas - replicas_at_tier) > 1e-10: raise exceptions.RingValidationError( '%s != %s at tier %s' % ( replicas_at_tier, self.replicas, tier_name)) return wanted_replicas def _build_target_replicas_by_tier(self): """ Build a map of => accounting for device weights, unique-as-possible dispersion and overload. 
- a tuple, describing each tier in the ring topology - a float, the target replicanths at the tier """ weighted_replicas = self._build_weighted_replicas_by_tier() wanted_replicas = self._build_wanted_replicas_by_tier() max_overload = self.get_required_overload(weighted=weighted_replicas, wanted=wanted_replicas) if max_overload <= 0.0: return wanted_replicas else: overload = min(self.overload, max_overload) self.logger.debug("Using effective overload of %f", overload) target_replicas = defaultdict(float) for tier, weighted in weighted_replicas.items(): m = (wanted_replicas[tier] - weighted) / max_overload target_replicas[tier] = m * overload + weighted # belts & suspenders/paranoia - at every level, the sum of # target_replicas should be very close to the total number # of replicas for the ring tiers = ['cluster', 'regions', 'zones', 'servers', 'devices'] for i, tier_name in enumerate(tiers): replicas_at_tier = sum(target_replicas[t] for t in target_replicas if len(t) == i) if abs(self.replicas - replicas_at_tier) > 1e-10: raise exceptions.RingValidationError( '%s != %s at tier %s' % ( replicas_at_tier, self.replicas, tier_name)) return target_replicas def _build_replica_plan(self): """ Wraps return value of _build_target_replicas_by_tier to include pre-calculated min and max values for each tier. :returns: a dict, mapping => , where is itself a dict include at least the following keys: min - the minimum number of replicas at the tier target - the target replicanths at the tier max - the maximum number of replicas at the tier """ # replica part-y planner! target_replicas = self._build_target_replicas_by_tier() replica_plan = defaultdict( lambda: {'min': 0, 'target': 0, 'max': 0}) replica_plan.update({ t: { 'min': math.floor(r + 1e-10), 'target': r, 'max': math.ceil(r - 1e-10), } for (t, r) in target_replicas.items() }) return replica_plan def _devs_for_part(self, part): """ Returns a list of devices for a specified partition. Deliberately includes duplicates. """ if self._replica2part2dev is None: return [] devs = [] for part2dev in self._replica2part2dev: if part >= len(part2dev): continue dev_id = part2dev[part] if dev_id == NONE_DEV: continue devs.append(self.devs[dev_id]) return devs def _replicas_for_part(self, part): """ Returns a list of replicas for a specified partition. These can be used as indices into self._replica2part2dev without worrying about IndexErrors. """ return [replica for replica, part2dev in enumerate(self._replica2part2dev) if part < len(part2dev)] def _each_part_replica(self): """ Generator yielding every (partition, replica) pair in the ring. 
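# Illustrative sketch (not from the Swift source): the interpolation done by
# _build_target_replicas_by_tier() above. With overload 0 a tier is targeted
# at its weighted replicanths; at (or beyond) the required overload it is
# targeted at its fully dispersed "wanted" replicanths. Numbers are invented.
def target_replicanths(weighted, wanted, overload, max_overload):
    overload = min(overload, max_overload)
    return dict(
        (tier,
         (wanted[tier] - weighted[tier]) / max_overload * overload +
         weighted[tier])
        for tier in weighted)

weighted = {(1, 1): 0.5, (1, 2): 2.5}
wanted = {(1, 1): 1.0, (1, 2): 2.0}   # dispersion wants more in zone (1, 1)
# required overload for zone (1, 1) is (1.0 - 0.5) / 0.5 = 1.0 (100%)
print(target_replicanths(weighted, wanted, overload=0.1, max_overload=1.0))
# -> {(1, 1): 0.55, (1, 2): 2.45}: only 10% of the way toward dispersion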
""" for replica, part2dev in enumerate(self._replica2part2dev): for part in range(len(part2dev)): yield (part, replica) @classmethod def load(cls, builder_file, open=open): """ Obtain RingBuilder instance of the provided builder file :param builder_file: path to builder file to load :return: RingBuilder instance """ try: fp = open(builder_file, 'rb') except IOError as e: if e.errno == errno.ENOENT: raise exceptions.FileNotFoundError( 'Ring Builder file does not exist: %s' % builder_file) elif e.errno in [errno.EPERM, errno.EACCES]: raise exceptions.PermissionError( 'Ring Builder file cannot be accessed: %s' % builder_file) else: raise else: with fp: try: builder = pickle.load(fp) except Exception: # raise error during unpickling as UnPicklingError raise exceptions.UnPicklingError( 'Ring Builder file is invalid: %s' % builder_file) if not hasattr(builder, 'devs'): builder_dict = builder builder = RingBuilder(1, 1, 1) builder.copy_from(builder_dict) for dev in builder.devs: # really old rings didn't have meta keys if dev and 'meta' not in dev: dev['meta'] = '' # NOTE(akscram): An old ring builder file don't contain # replication parameters. if dev: if 'ip' in dev: dev.setdefault('replication_ip', dev['ip']) if 'port' in dev: dev.setdefault('replication_port', dev['port']) return builder def save(self, builder_file): """Serialize this RingBuilder instance to disk. :param builder_file: path to builder file to save """ with open(builder_file, 'wb') as f: pickle.dump(self.to_dict(), f, protocol=2) def search_devs(self, search_values): """Search devices by parameters. :param search_values: a dictionary with search values to filter devices, supported parameters are id, region, zone, ip, port, replication_ip, replication_port, device, weight, meta :returns: list of device dicts """ matched_devs = [] for dev in self.devs: if not dev: continue matched = True for key in ('id', 'region', 'zone', 'ip', 'port', 'replication_ip', 'replication_port', 'device', 'weight', 'meta'): if key in search_values: value = search_values.get(key) if value is not None: if key == 'meta': if value not in dev.get(key): matched = False elif key == 'ip' or key == 'replication_ip': cdev = '' try: cdev = validate_and_normalize_address( dev.get(key, '')) except ValueError: pass if cdev != value: matched = False elif dev.get(key) != value: matched = False if matched: matched_devs.append(dev) return matched_devs def increase_partition_power(self): """ Increases ring partition power by one. Devices will be assigned to partitions like this: OLD: 0, 3, 7, 5, 2, 1, ... NEW: 0, 0, 3, 3, 7, 7, 5, 5, 2, 2, 1, 1, ... """ new_replica2part2dev = [] for replica in self._replica2part2dev: new_replica = array('H') for device in replica: new_replica.append(device) new_replica.append(device) # append device a second time new_replica2part2dev.append(new_replica) self._replica2part2dev = new_replica2part2dev for device in self._iter_devs(): device['parts'] *= 2 # We need to update the time when a partition has been moved the last # time. 
Since this is an array of all partitions, we need to double it # two new_last_part_moves = [] for partition in self._last_part_moves: new_last_part_moves.append(partition) new_last_part_moves.append(partition) self._last_part_moves = new_last_part_moves self.part_power += 1 self.parts *= 2 self.version += 1 swift-2.7.0/swift/common/ring/ring.py0000664000567000056710000004405012675204037020722 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import array import six.moves.cPickle as pickle import json from collections import defaultdict from gzip import GzipFile from os.path import getmtime import struct from time import time import os from io import BufferedReader from hashlib import md5 from itertools import chain from tempfile import NamedTemporaryFile from six.moves import range from swift.common.utils import hash_path, validate_configuration from swift.common.ring.utils import tiers_for_dev class RingData(object): """Partitioned consistent hashing ring data (used for serialization).""" def __init__(self, replica2part2dev_id, devs, part_shift): self.devs = devs self._replica2part2dev_id = replica2part2dev_id self._part_shift = part_shift for dev in self.devs: if dev is not None: dev.setdefault("region", 1) @classmethod def deserialize_v1(cls, gz_file, metadata_only=False): """ Deserialize a v1 ring file into a dictionary with `devs`, `part_shift`, and `replica2part2dev_id` keys. If the optional kwarg `metadata_only` is True, then the `replica2part2dev_id` is not loaded and that key in the returned dictionary just has the value `[]`. :param file gz_file: An opened file-like object which has already consumed the 6 bytes of magic and version. :param bool metadata_only: If True, only load `devs` and `part_shift` :returns: A dict containing `devs`, `part_shift`, and `replica2part2dev_id` """ json_len, = struct.unpack('!I', gz_file.read(4)) ring_dict = json.loads(gz_file.read(json_len)) ring_dict['replica2part2dev_id'] = [] if metadata_only: return ring_dict partition_count = 1 << (32 - ring_dict['part_shift']) for x in range(ring_dict['replica_count']): ring_dict['replica2part2dev_id'].append( array.array('H', gz_file.read(2 * partition_count))) return ring_dict @classmethod def load(cls, filename, metadata_only=False): """ Load ring data from a file. :param filename: Path to a file serialized by the save() method. :param bool metadata_only: If True, only load `devs` and `part_shift`. :returns: A RingData instance containing the loaded data. 
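# Illustrative sketch (not from the Swift source): the v1 ring layout that
# deserialize_v1() above parses -- 'R1NG' magic, a 2-byte format version, a
# 4-byte JSON length, the JSON metadata, then one array('H') of
# 2 * partition_count bytes per replica. The metadata below is fabricated.
import json
import struct

meta = json.dumps({'devs': [], 'part_shift': 30, 'replica_count': 3})
header = struct.pack('!4sH', b'R1NG', 1) + struct.pack('!I', len(meta))

magic, version = struct.unpack('!4sH', header[:6])
json_len, = struct.unpack('!I', header[6:10])
print(magic, version, json_len)   # R1NG, format 1, length of the metadata
print(1 << (32 - 30))             # part_shift 30 -> 4 partitions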
""" gz_file = GzipFile(filename, 'rb') # Python 2.6 GzipFile doesn't support BufferedIO if hasattr(gz_file, '_checkReadable'): gz_file = BufferedReader(gz_file) # See if the file is in the new format magic = gz_file.read(4) if magic == 'R1NG': format_version, = struct.unpack('!H', gz_file.read(2)) if format_version == 1: ring_data = cls.deserialize_v1( gz_file, metadata_only=metadata_only) else: raise Exception('Unknown ring format version %d' % format_version) else: # Assume old-style pickled ring gz_file.seek(0) ring_data = pickle.load(gz_file) if not hasattr(ring_data, 'devs'): ring_data = RingData(ring_data['replica2part2dev_id'], ring_data['devs'], ring_data['part_shift']) return ring_data def serialize_v1(self, file_obj): # Write out new-style serialization magic and version: file_obj.write(struct.pack('!4sH', 'R1NG', 1)) ring = self.to_dict() json_encoder = json.JSONEncoder(sort_keys=True) json_text = json_encoder.encode( {'devs': ring['devs'], 'part_shift': ring['part_shift'], 'replica_count': len(ring['replica2part2dev_id'])}) json_len = len(json_text) file_obj.write(struct.pack('!I', json_len)) file_obj.write(json_text) for part2dev_id in ring['replica2part2dev_id']: file_obj.write(part2dev_id.tostring()) def save(self, filename, mtime=1300507380.0): """ Serialize this RingData instance to disk. :param filename: File into which this instance should be serialized. :param mtime: time used to override mtime for gzip, default or None if the caller wants to include time """ # Override the timestamp so that the same ring data creates # the same bytes on disk. This makes a checksum comparison a # good way to see if two rings are identical. tempf = NamedTemporaryFile(dir=".", prefix=filename, delete=False) gz_file = GzipFile(filename, mode='wb', fileobj=tempf, mtime=mtime) self.serialize_v1(gz_file) gz_file.close() tempf.flush() os.fsync(tempf.fileno()) tempf.close() os.chmod(tempf.name, 0o644) os.rename(tempf.name, filename) def to_dict(self): return {'devs': self.devs, 'replica2part2dev_id': self._replica2part2dev_id, 'part_shift': self._part_shift} class Ring(object): """ Partitioned consistent hashing ring. :param serialized_path: path to serialized RingData instance :param reload_time: time interval in seconds to check for a ring change """ def __init__(self, serialized_path, reload_time=15, ring_name=None): # can't use the ring unless HASH_PATH_SUFFIX is set validate_configuration() if ring_name: self.serialized_path = os.path.join(serialized_path, ring_name + '.ring.gz') else: self.serialized_path = os.path.join(serialized_path) self.reload_time = reload_time self._reload(force=True) def _reload(self, force=False): self._rtime = time() + self.reload_time if force or self.has_changed(): ring_data = RingData.load(self.serialized_path) self._mtime = getmtime(self.serialized_path) self._devs = ring_data.devs # NOTE(akscram): Replication parameters like replication_ip # and replication_port are required for # replication process. An old replication # ring doesn't contain this parameters into # device. Old-style pickled rings won't have # region information. for dev in self._devs: if dev: dev.setdefault('region', 1) if 'ip' in dev: dev.setdefault('replication_ip', dev['ip']) if 'port' in dev: dev.setdefault('replication_port', dev['port']) self._replica2part2dev_id = ring_data._replica2part2dev_id self._part_shift = ring_data._part_shift self._rebuild_tier_data() # Do this now, when we know the data has changed, rather than # doing it on every call to get_more_nodes(). 
# # Since this is to speed up the finding of handoffs, we only # consider devices with at least one partition assigned. This # way, a region, zone, or server with no partitions assigned # does not count toward our totals, thereby keeping the early # bailouts in get_more_nodes() working. dev_ids_with_parts = set() for part2dev_id in self._replica2part2dev_id: for dev_id in part2dev_id: dev_ids_with_parts.add(dev_id) regions = set() zones = set() ips = set() self._num_devs = 0 for dev in self._devs: if dev and dev['id'] in dev_ids_with_parts: regions.add(dev['region']) zones.add((dev['region'], dev['zone'])) ips.add((dev['region'], dev['zone'], dev['ip'])) self._num_devs += 1 self._num_regions = len(regions) self._num_zones = len(zones) self._num_ips = len(ips) def _rebuild_tier_data(self): self.tier2devs = defaultdict(list) for dev in self._devs: if not dev: continue for tier in tiers_for_dev(dev): self.tier2devs[tier].append(dev) tiers_by_length = defaultdict(list) for tier in self.tier2devs: tiers_by_length[len(tier)].append(tier) self.tiers_by_length = sorted(tiers_by_length.values(), key=lambda x: len(x[0])) for tiers in self.tiers_by_length: tiers.sort() @property def replica_count(self): """Number of replicas (full or partial) used in the ring.""" return len(self._replica2part2dev_id) @property def partition_count(self): """Number of partitions in the ring.""" return len(self._replica2part2dev_id[0]) @property def devs(self): """devices in the ring""" if time() > self._rtime: self._reload() return self._devs def has_changed(self): """ Check to see if the ring on disk is different than the current one in memory. :returns: True if the ring on disk has changed, False otherwise """ return getmtime(self.serialized_path) != self._mtime def _get_part_nodes(self, part): part_nodes = [] seen_ids = set() for r2p2d in self._replica2part2dev_id: if part < len(r2p2d): dev_id = r2p2d[part] if dev_id not in seen_ids: part_nodes.append(self.devs[dev_id]) seen_ids.add(dev_id) return [dict(node, index=i) for i, node in enumerate(part_nodes)] def get_part(self, account, container=None, obj=None): """ Get the partition for an account/container/object. :param account: account name :param container: container name :param obj: object name :returns: the partition number """ key = hash_path(account, container, obj, raw_digest=True) if time() > self._rtime: self._reload() part = struct.unpack_from('>I', key)[0] >> self._part_shift return part def get_part_nodes(self, part): """ Get the nodes that are responsible for the partition. If one node is responsible for more than one replica of the same partition, it will only appear in the output once. :param part: partition to get nodes for :returns: list of node dicts See :func:`get_nodes` for a description of the node dicts. """ if time() > self._rtime: self._reload() return self._get_part_nodes(part) def get_nodes(self, account, container=None, obj=None): """ Get the partition and nodes for an account/container/object. If a node is responsible for more than one replica, it will only appear in the output once. 
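# Illustrative sketch (not from the Swift source): the partition lookup
# arithmetic in get_part() above -- the first four bytes of an MD5 digest of
# the object path, shifted right by part_shift. The real hash_path() also
# mixes in the cluster's hash path prefix/suffix; the path and part power
# here are hypothetical.
import struct
from hashlib import md5

def toy_get_part(path, part_power):
    part_shift = 32 - part_power
    digest = md5(path.encode('utf-8')).digest()
    return struct.unpack_from('>I', digest)[0] >> part_shift

print(toy_get_part('/AUTH_test/cont/obj', part_power=10))   # 0 .. 1023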
:param account: account name :param container: container name :param obj: object name :returns: a tuple of (partition, list of node dicts) Each node dict will have at least the following keys: ====== =============================================================== id unique integer identifier amongst devices index offset into the primary node list for the partition weight a float of the relative weight of this device as compared to others; this indicates how many partitions the builder will try to assign to this device zone integer indicating which zone the device is in; a given partition will not be assigned to multiple devices within the same zone ip the ip address of the device port the tcp port of the device device the device's name on disk (sdb1, for example) meta general use 'extra' field; for example: the online date, the hardware description ====== =============================================================== """ part = self.get_part(account, container, obj) return part, self._get_part_nodes(part) def get_more_nodes(self, part): """ Generator to get extra nodes for a partition for hinted handoff. The handoff nodes will try to be in zones other than the primary zones, will take into account the device weights, and will usually keep the same sequences of handoffs even with ring changes. :param part: partition to get handoff nodes for :returns: generator of node dicts See :func:`get_nodes` for a description of the node dicts. """ if time() > self._rtime: self._reload() primary_nodes = self._get_part_nodes(part) used = set(d['id'] for d in primary_nodes) same_regions = set(d['region'] for d in primary_nodes) same_zones = set((d['region'], d['zone']) for d in primary_nodes) same_ips = set( (d['region'], d['zone'], d['ip']) for d in primary_nodes) parts = len(self._replica2part2dev_id[0]) start = struct.unpack_from( '>I', md5(str(part)).digest())[0] >> self._part_shift inc = int(parts / 65536) or 1 # Multiple loops for execution speed; the checks and bookkeeping get # simpler as you go along hit_all_regions = len(same_regions) == self._num_regions for handoff_part in chain(range(start, parts, inc), range(inc - ((parts - start) % inc), start, inc)): if hit_all_regions: # At this point, there are no regions left untouched, so we # can stop looking. break for part2dev_id in self._replica2part2dev_id: if handoff_part < len(part2dev_id): dev_id = part2dev_id[handoff_part] dev = self._devs[dev_id] region = dev['region'] if dev_id not in used and region not in same_regions: yield dev used.add(dev_id) same_regions.add(region) zone = dev['zone'] ip = (region, zone, dev['ip']) same_zones.add((region, zone)) same_ips.add(ip) if len(same_regions) == self._num_regions: hit_all_regions = True break hit_all_zones = len(same_zones) == self._num_zones for handoff_part in chain(range(start, parts, inc), range(inc - ((parts - start) % inc), start, inc)): if hit_all_zones: # Much like we stopped looking for fresh regions before, we # can now stop looking for fresh zones; there are no more. 
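# Illustrative sketch (not from the Swift source): the scan order
# get_more_nodes() above walks when hunting for handoffs -- a start offset
# derived from hashing the partition number plus a stride, so very large
# rings are sampled rather than walked sequentially. Values are invented.
import struct
from hashlib import md5
from itertools import chain

def handoff_scan_order(part, parts, part_shift):
    start = struct.unpack_from(
        '>I', md5(str(part).encode('ascii')).digest())[0] >> part_shift
    inc = parts // 65536 or 1
    return list(chain(range(start, parts, inc),
                      range(inc - ((parts - start) % inc), start, inc)))

order = handoff_scan_order(part=123, parts=1024, part_shift=22)
print(order[:3])   # begins at a hash-derived offset, not at partition 0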
break for part2dev_id in self._replica2part2dev_id: if handoff_part < len(part2dev_id): dev_id = part2dev_id[handoff_part] dev = self._devs[dev_id] zone = (dev['region'], dev['zone']) if dev_id not in used and zone not in same_zones: yield dev used.add(dev_id) same_zones.add(zone) ip = zone + (dev['ip'],) same_ips.add(ip) if len(same_zones) == self._num_zones: hit_all_zones = True break hit_all_ips = len(same_ips) == self._num_ips for handoff_part in chain(range(start, parts, inc), range(inc - ((parts - start) % inc), start, inc)): if hit_all_ips: # We've exhausted the pool of unused backends, so stop # looking. break for part2dev_id in self._replica2part2dev_id: if handoff_part < len(part2dev_id): dev_id = part2dev_id[handoff_part] dev = self._devs[dev_id] ip = (dev['region'], dev['zone'], dev['ip']) if dev_id not in used and ip not in same_ips: yield dev used.add(dev_id) same_ips.add(ip) if len(same_ips) == self._num_ips: hit_all_ips = True break hit_all_devs = len(used) == self._num_devs for handoff_part in chain(range(start, parts, inc), range(inc - ((parts - start) % inc), start, inc)): if hit_all_devs: # We've used every device we have, so let's stop looking for # unused devices now. break for part2dev_id in self._replica2part2dev_id: if handoff_part < len(part2dev_id): dev_id = part2dev_id[handoff_part] if dev_id not in used: yield self._devs[dev_id] used.add(dev_id) if len(used) == self._num_devs: hit_all_devs = True break swift-2.7.0/swift/common/ring/__init__.py0000664000567000056710000000024212675204037021515 0ustar jenkinsjenkins00000000000000from swift.common.ring.ring import RingData, Ring from swift.common.ring.builder import RingBuilder __all__ = [ 'RingData', 'Ring', 'RingBuilder', ] swift-2.7.0/swift/common/manager.py0000664000567000056710000006301412675204037020437 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import print_function import functools import errno import os import resource import signal import time import subprocess import re from swift import gettext_ as _ from swift.common.utils import search_tree, remove_file, write_file from swift.common.exceptions import InvalidPidFileException SWIFT_DIR = '/etc/swift' RUN_DIR = '/var/run/swift' PROC_DIR = '/proc' # auth-server has been removed from ALL_SERVERS, start it explicitly ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor', 'container-replicator', 'container-reconciler', 'container-server', 'container-sync', 'container-updater', 'object-auditor', 'object-server', 'object-expirer', 'object-replicator', 'object-reconstructor', 'object-updater', 'proxy-server', 'account-replicator', 'account-reaper'] MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server', 'object-server'] REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS] # aliases mapping ALIASES = {'all': ALL_SERVERS, 'main': MAIN_SERVERS, 'rest': REST_SERVERS} GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server'] START_ONCE_SERVERS = REST_SERVERS # These are servers that match a type (account-*, container-*, object-*) but # don't use that type-server.conf file and instead use their own. STANDALONE_SERVERS = ['object-expirer', 'container-reconciler'] KILL_WAIT = 15 # seconds to wait for servers to die (by default) WARNING_WAIT = 3 # seconds to wait after message that may just be a warning MAX_DESCRIPTORS = 32768 MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB MAX_PROCS = 8192 # workers * disks * threads_per_disk, can get high def setup_env(): """Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp """ try: resource.setrlimit(resource.RLIMIT_NOFILE, (MAX_DESCRIPTORS, MAX_DESCRIPTORS)) except ValueError: print(_("WARNING: Unable to modify file descriptor limit. " "Running as non-root?")) try: resource.setrlimit(resource.RLIMIT_DATA, (MAX_MEMORY, MAX_MEMORY)) except ValueError: print(_("WARNING: Unable to modify memory limit. " "Running as non-root?")) try: resource.setrlimit(resource.RLIMIT_NPROC, (MAX_PROCS, MAX_PROCS)) except ValueError: print(_("WARNING: Unable to modify max process limit. " "Running as non-root?")) # Set PYTHON_EGG_CACHE if it isn't already set os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp') def command(func): """ Decorator to declare which methods are accessible as commands, commands always return 1 or 0, where 0 should indicate success. :param func: function to make public """ func.publicly_accessible = True @functools.wraps(func) def wrapped(*a, **kw): rv = func(*a, **kw) return 1 if rv else 0 return wrapped def watch_server_pids(server_pids, interval=1, **kwargs): """Monitor a collection of server pids yielding back those pids that aren't responding to signals. :param server_pids: a dict, lists of pids [int,...] keyed on Server objects """ status = {} start = time.time() end = start + interval server_pids = dict(server_pids) # make a copy while True: for server, pids in server_pids.items(): for pid in pids: try: # let pid stop if it wants to os.waitpid(pid, os.WNOHANG) except OSError as e: if e.errno not in (errno.ECHILD, errno.ESRCH): raise # else no such child/process # check running pids for server status[server] = server.get_running_pids(**kwargs) for pid in pids: # original pids no longer in running pids! 
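# Illustrative sketch (not from the Swift source): the contract of the
# command() decorator above -- whatever a command method returns is
# collapsed to a shell-style exit status, and the method is flagged as
# publicly accessible so list_commands() can discover it. fake_status is a
# hypothetical command.
import functools

def command(func):   # condensed restatement of the decorator above
    func.publicly_accessible = True
    @functools.wraps(func)
    def wrapped(*a, **kw):
        return 1 if func(*a, **kw) else 0
    return wrapped

@command
def fake_status(**kwargs):
    """pretend three servers reported problems"""
    return 3

print(fake_status())                     # 1 -- any non-zero result becomes 1
print(fake_status.publicly_accessible)   # True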
if pid not in status[server]: yield server, pid # update active pids list using running_pids server_pids[server] = status[server] if not [p for server, pids in status.items() for p in pids]: # no more running pids break if time.time() > end: break else: time.sleep(0.1) def safe_kill(pid, sig, name): """Send signal to process and check process name : param pid: process id : param sig: signal to send : param name: name to ensure target process """ # check process name for SIG_DFL if sig == signal.SIG_DFL: try: proc_file = '%s/%d/cmdline' % (PROC_DIR, pid) if os.path.exists(proc_file): with open(proc_file, 'r') as fd: if name not in fd.read(): # unknown process is using the pid raise InvalidPidFileException() except IOError: pass os.kill(pid, sig) def kill_group(pid, sig): """Send signal to process group : param pid: process id : param sig: signal to send """ # Negative PID means process group os.kill(-pid, sig) class UnknownCommandError(Exception): pass class Manager(object): """Main class for performing commands on groups of servers. :param servers: list of server names as strings """ def __init__(self, servers, run_dir=RUN_DIR): self.server_names = set() self._default_strict = True for server in servers: if server in ALIASES: self.server_names.update(ALIASES[server]) self._default_strict = False elif '*' in server: # convert glob to regex self.server_names.update([ s for s in ALL_SERVERS if re.match(server.replace('*', '.*'), s)]) self._default_strict = False else: self.server_names.add(server) self.servers = set() for name in self.server_names: self.servers.add(Server(name, run_dir)) def __iter__(self): return iter(self.servers) @command def status(self, **kwargs): """display status of tracked pids for server """ status = 0 for server in self.servers: status += server.status(**kwargs) return status @command def start(self, **kwargs): """starts a server """ setup_env() status = 0 strict = kwargs.get('strict') # if strict not set explicitly if strict is None: strict = self._default_strict for server in self.servers: status += 0 if server.launch(**kwargs) else 1 if not strict: status = 0 if not kwargs.get('daemon', True): for server in self.servers: try: status += server.interact(**kwargs) except KeyboardInterrupt: print(_('\nuser quit')) self.stop(**kwargs) break elif kwargs.get('wait', True): for server in self.servers: status += server.wait(**kwargs) return status @command def no_wait(self, **kwargs): """spawn server and return immediately """ kwargs['wait'] = False return self.start(**kwargs) @command def no_daemon(self, **kwargs): """start a server interactively """ kwargs['daemon'] = False return self.start(**kwargs) @command def once(self, **kwargs): """start server and run one pass on supporting daemons """ kwargs['once'] = True return self.start(**kwargs) @command def stop(self, **kwargs): """stops a server """ server_pids = {} for server in self.servers: signaled_pids = server.stop(**kwargs) if not signaled_pids: print(_('No %s running') % server) else: server_pids[server] = signaled_pids # all signaled_pids, i.e. 
list(itertools.chain(*server_pids.values())) signaled_pids = [p for server, pids in server_pids.items() for p in pids] # keep track of the pids yeiled back as killed for all servers killed_pids = set() kill_wait = kwargs.get('kill_wait', KILL_WAIT) for server, killed_pid in watch_server_pids(server_pids, interval=kill_wait, **kwargs): print(_("%s (%s) appears to have stopped") % (server, killed_pid)) killed_pids.add(killed_pid) if not killed_pids.symmetric_difference(signaled_pids): # all processes have been stopped return 0 # reached interval n watch_pids w/o killing all servers kill_after_timeout = kwargs.get('kill_after_timeout', False) for server, pids in server_pids.items(): if not killed_pids.issuperset(pids): # some pids of this server were not killed if kill_after_timeout: print(_('Waited %s seconds for %s to die; killing') % ( kill_wait, server)) # Send SIGKILL to all remaining pids for pid in set(pids.keys()) - killed_pids: print(_('Signal %s pid: %s signal: %s') % ( server, pid, signal.SIGKILL)) # Send SIGKILL to process group try: kill_group(pid, signal.SIGKILL) except OSError as e: # PID died before kill_group can take action? if e.errno != errno.ESRCH: raise e else: print(_('Waited %s seconds for %s to die; giving up') % ( kill_wait, server)) return 1 @command def kill(self, **kwargs): """stop a server (no error if not running) """ status = self.stop(**kwargs) kwargs['quiet'] = True if status and not self.status(**kwargs): # only exit error if the server is still running return status return 0 @command def shutdown(self, **kwargs): """allow current requests to finish on supporting servers """ kwargs['graceful'] = True status = 0 status += self.stop(**kwargs) return status @command def restart(self, **kwargs): """stops then restarts server """ status = 0 status += self.stop(**kwargs) status += self.start(**kwargs) return status @command def reload(self, **kwargs): """graceful shutdown then restart on supporting servers """ kwargs['graceful'] = True status = 0 for server in self.server_names: m = Manager([server]) status += m.stop(**kwargs) status += m.start(**kwargs) return status @command def force_reload(self, **kwargs): """alias for reload """ return self.reload(**kwargs) def get_command(self, cmd): """Find and return the decorated method named like cmd :param cmd: the command to get, a string, if not found raises UnknownCommandError """ cmd = cmd.lower().replace('-', '_') try: f = getattr(self, cmd) except AttributeError: raise UnknownCommandError(cmd) if not hasattr(f, 'publicly_accessible'): raise UnknownCommandError(cmd) return f @classmethod def list_commands(cls): """Get all publicly accessible commands :returns: a list of string tuples (cmd, help), the method names who are decorated as commands """ get_method = lambda cmd: getattr(cls, cmd) return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip()) for x in dir(cls) if getattr(get_method(x), 'publicly_accessible', False)]) def run_command(self, cmd, **kwargs): """Find the named command and run it :param cmd: the command name to run """ f = self.get_command(cmd) return f(**kwargs) class Server(object): """Manage operations on a server or group of servers of similar type :param server: name of server """ def __init__(self, server, run_dir=RUN_DIR): self.server = server.lower() if '.' 
in self.server: self.server, self.conf = self.server.rsplit('.', 1) else: self.conf = None if '-' not in self.server: self.server = '%s-server' % self.server self.type = self.server.rsplit('-', 1)[0] self.cmd = 'swift-%s' % self.server self.procs = [] self.run_dir = run_dir def __str__(self): return self.server def __repr__(self): return "%s(%s)" % (self.__class__.__name__, repr(str(self))) def __hash__(self): return hash(str(self)) def __eq__(self, other): try: return self.server == other.server except AttributeError: return False def get_pid_file_name(self, conf_file): """Translate conf_file to a corresponding pid_file :param conf_file: an conf_file for this server, a string :returns: the pid_file for this conf_file """ return conf_file.replace( os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace( '%s-server' % self.type, self.server, 1).replace( '.conf', '.pid', 1) def get_conf_file_name(self, pid_file): """Translate pid_file to a corresponding conf_file :param pid_file: a pid_file for this server, a string :returns: the conf_file for this pid_file """ if self.server in STANDALONE_SERVERS: return pid_file.replace( os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace( '.pid', '.conf', 1) else: return pid_file.replace( os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace( self.server, '%s-server' % self.type, 1).replace( '.pid', '.conf', 1) def conf_files(self, **kwargs): """Get conf files for this server :param: number, if supplied will only lookup the nth server :returns: list of conf files """ if self.server in STANDALONE_SERVERS: server_search = self.server else: server_search = "%s-server" % self.type if self.conf is not None: found_conf_files = search_tree(SWIFT_DIR, server_search, self.conf + '.conf', dir_ext=self.conf + '.conf.d') else: found_conf_files = search_tree(SWIFT_DIR, server_search + '*', '.conf', dir_ext='.conf.d') number = kwargs.get('number') if number: try: conf_files = [found_conf_files[number - 1]] except IndexError: conf_files = [] else: conf_files = found_conf_files if not conf_files: # maybe there's a config file(s) out there, but I couldn't find it! 
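# Illustrative sketch (not from the Swift source): the path translation that
# get_pid_file_name() above performs -- SWIFT_DIR becomes the run dir, the
# '<type>-server' segment becomes the concrete server name, and '.conf'
# becomes '.pid'. The example server and paths are hypothetical.
conf_file = '/etc/swift/object-server/1.conf'
pid_file = (conf_file
            .replace('/etc/swift', '/var/run/swift', 1)
            .replace('object-server', 'object-replicator', 1)
            .replace('.conf', '.pid', 1))
print(pid_file)   # /var/run/swift/object-replicator/1.pid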
if not kwargs.get('quiet'): if number: print(_('Unable to locate config number %s for %s') % (number, self.server)) else: print(_('Unable to locate config for %s') % self.server) if kwargs.get('verbose') and not kwargs.get('quiet'): if found_conf_files: print(_('Found configs:')) for i, conf_file in enumerate(found_conf_files): print(' %d) %s' % (i + 1, conf_file)) return conf_files def pid_files(self, **kwargs): """Get pid files for this server :param: number, if supplied will only lookup the nth server :returns: list of pid files """ if self.conf is not None: pid_files = search_tree(self.run_dir, '%s*' % self.server, exts=[self.conf + '.pid', self.conf + '.pid.d']) else: pid_files = search_tree(self.run_dir, '%s*' % self.server) if kwargs.get('number', 0): conf_files = self.conf_files(**kwargs) # filter pid_files to match the index of numbered conf_file pid_files = [pid_file for pid_file in pid_files if self.get_conf_file_name(pid_file) in conf_files] return pid_files def iter_pid_files(self, **kwargs): """Generator, yields (pid_file, pids) """ for pid_file in self.pid_files(**kwargs): try: pid = int(open(pid_file).read().strip()) except ValueError: pid = None yield pid_file, pid def signal_pids(self, sig, **kwargs): """Send a signal to pids for this server :param sig: signal to send :returns: a dict mapping pids (ints) to pid_files (paths) """ pids = {} for pid_file, pid in self.iter_pid_files(**kwargs): if not pid: # Catches None and 0 print (_('Removing pid file %s with invalid pid') % pid_file) remove_file(pid_file) continue try: if sig != signal.SIG_DFL: print(_('Signal %s pid: %s signal: %s') % (self.server, pid, sig)) safe_kill(pid, sig, 'swift-%s' % self.server) except InvalidPidFileException as e: if kwargs.get('verbose'): print(_('Removing pid file %(pid_file)s with wrong pid ' '%(pid)d'), {'pid_file': pid_file, 'pid': pid}) remove_file(pid_file) except OSError as e: if e.errno == errno.ESRCH: # pid does not exist if kwargs.get('verbose'): print(_("Removing stale pid file %s") % pid_file) remove_file(pid_file) elif e.errno == errno.EPERM: print(_("No permission to signal PID %d") % pid) else: # process exists pids[pid] = pid_file return pids def get_running_pids(self, **kwargs): """Get running pids :returns: a dict mapping pids (ints) to pid_files (paths) """ return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop def kill_running_pids(self, **kwargs): """Kill running pids :param graceful: if True, attempt SIGHUP on supporting servers :returns: a dict mapping pids (ints) to pid_files (paths) """ graceful = kwargs.get('graceful') if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS: sig = signal.SIGHUP else: sig = signal.SIGTERM return self.signal_pids(sig, **kwargs) def status(self, pids=None, **kwargs): """Display status of server :param: pids, if not supplied pids will be populated automatically :param: number, if supplied will only lookup the nth server :returns: 1 if server is not running, 0 otherwise """ if pids is None: pids = self.get_running_pids(**kwargs) if not pids: number = kwargs.get('number', 0) if number: kwargs['quiet'] = True conf_files = self.conf_files(**kwargs) if conf_files: print(_("%s #%d not running (%s)") % (self.server, number, conf_files[0])) else: print(_("No %s running") % self.server) return 1 for pid, pid_file in pids.items(): conf_file = self.get_conf_file_name(pid_file) print(_("%s running (%s - %s)") % (self.server, pid, conf_file)) return 0 def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs): """Launch a 
subprocess for this server. :param conf_file: path to conf_file to use as first arg :param once: boolean, add once argument to command :param wait: boolean, if true capture stdout with a pipe :param daemon: boolean, if false ask server to log to console :returns : the pid of the spawned process """ args = [self.cmd, conf_file] if once: args.append('once') if not daemon: # ask the server to log to console args.append('verbose') # figure out what we're going to do with stdio if not daemon: # do nothing, this process is open until the spawns close anyway re_out = None re_err = None else: re_err = subprocess.STDOUT if wait: # we're going to need to block on this... re_out = subprocess.PIPE else: re_out = open(os.devnull, 'w+b') proc = subprocess.Popen(args, stdout=re_out, stderr=re_err) pid_file = self.get_pid_file_name(conf_file) write_file(pid_file, proc.pid) self.procs.append(proc) return proc.pid def wait(self, **kwargs): """ wait on spawned procs to start """ status = 0 for proc in self.procs: # wait for process to close its stdout output = proc.stdout.read() if kwargs.get('once', False): # if you don't want once to wait you can send it to the # background on the command line, I generally just run with # no-daemon anyway, but this is quieter proc.wait() if output: print(output) start = time.time() # wait for process to die (output may just be a warning) while time.time() - start < WARNING_WAIT: time.sleep(0.1) if proc.poll() is not None: status += proc.returncode break return status def interact(self, **kwargs): """ wait on spawned procs to terminate """ status = 0 for proc in self.procs: # wait for process to terminate proc.communicate() if proc.returncode: status += 1 return status def launch(self, **kwargs): """ Collect conf files and attempt to spawn the processes for this server """ conf_files = self.conf_files(**kwargs) if not conf_files: return {} pids = self.get_running_pids(**kwargs) already_started = False for pid, pid_file in pids.items(): conf_file = self.get_conf_file_name(pid_file) # for legacy compat you can't start other servers if one server is # already running (unless -n specifies which one you want), this # restriction could potentially be lifted, and launch could start # any unstarted instances if conf_file in conf_files: already_started = True print(_("%s running (%s - %s)") % (self.server, pid, conf_file)) elif not kwargs.get('number', 0): already_started = True print(_("%s running (%s - %s)") % (self.server, pid, pid_file)) if already_started: print(_("%s already started...") % self.server) return {} if self.server not in START_ONCE_SERVERS: kwargs['once'] = False pids = {} for conf_file in conf_files: if kwargs.get('once'): msg = _('Running %s once') % self.server else: msg = _('Starting %s') % self.server print('%s...(%s)' % (msg, conf_file)) try: pid = self.spawn(conf_file, **kwargs) except OSError as e: if e.errno == errno.ENOENT: # TODO(clayg): should I check if self.cmd exists earlier? print(_("%s does not exist") % self.cmd) break else: raise pids[pid] = conf_file return pids def stop(self, **kwargs): """Send stop signals to pids for this server :returns: a dict mapping pids (ints) to pid_files (paths) """ return self.kill_running_pids(**kwargs) swift-2.7.0/swift/common/storage_policy.py0000775000567000056710000010054712675204037022056 0ustar jenkinsjenkins00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import string import textwrap import six from six.moves.configparser import ConfigParser from swift.common.utils import ( config_true_value, SWIFT_CONF_FILE, whataremyips, list_from_csv) from swift.common.ring import Ring, RingData from swift.common.utils import quorum_size from swift.common.exceptions import RingValidationError from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES LEGACY_POLICY_NAME = 'Policy-0' VALID_CHARS = '-' + string.ascii_letters + string.digits DEFAULT_POLICY_TYPE = REPL_POLICY = 'replication' EC_POLICY = 'erasure_coding' DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576 class BindPortsCache(object): def __init__(self, swift_dir, bind_ip): self.swift_dir = swift_dir self.mtimes_by_ring_path = {} self.portsets_by_ring_path = {} self.my_ips = set(whataremyips(bind_ip)) def all_bind_ports_for_node(self): """ Given an iterable of IP addresses identifying a storage backend server, return a set of all bind ports defined in all rings for this storage backend server. The caller is responsible for not calling this method (which performs at least a stat on all ring files) too frequently. """ # NOTE: we don't worry about disappearing rings here because you can't # ever delete a storage policy. for policy in POLICIES: # NOTE: we must NOT use policy.load_ring to load the ring. Users # of this utility function will not need the actual ring data, just # the bind ports. # # This is duplicated with Ring.__init__ just a bit... serialized_path = os.path.join(self.swift_dir, policy.ring_name + '.ring.gz') try: new_mtime = os.path.getmtime(serialized_path) except OSError: continue old_mtime = self.mtimes_by_ring_path.get(serialized_path) if not old_mtime or old_mtime != new_mtime: self.portsets_by_ring_path[serialized_path] = set( dev['port'] for dev in RingData.load(serialized_path, metadata_only=True).devs if dev and dev['ip'] in self.my_ips) self.mtimes_by_ring_path[serialized_path] = new_mtime # No "break" here so that the above line will update the # mtimes_by_ring_path entry for any ring that changes, not just # the first one we notice. # Return the requested set of ports from our (now-freshened) cache return six.moves.reduce(set.union, self.portsets_by_ring_path.values(), set()) class PolicyError(ValueError): def __init__(self, msg, index=None): if index is not None: msg += ', for index %r' % index super(PolicyError, self).__init__(msg) def _get_policy_string(base, policy_index): if policy_index == 0 or policy_index is None: return_string = base else: return_string = base + "-%d" % int(policy_index) return return_string def get_policy_string(base, policy_or_index): """ Helper function to construct a string from a base and the policy. Used to encode the policy index into either a file name or a directory name by various modules. :param base: the base string :param policy_or_index: StoragePolicy instance, or an index (string or int), if None the legacy storage Policy-0 is assumed. 
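# Illustrative sketch: how policy indexes are folded into on-disk names by
# _get_policy_string() above.  Index 0 (or None) maps to the bare base name
# so legacy, pre-policy directory layouts keep working; any other index
# gains a '-<index>' suffix.  The base name 'objects' is only an example.
def _example_policy_string():
    assert _get_policy_string('objects', 0) == 'objects'
    assert _get_policy_string('objects', None) == 'objects'
    assert _get_policy_string('objects', 1) == 'objects-1'
    assert _get_policy_string('objects', 2) == 'objects-2'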
:returns: base name with policy index added :raises: PolicyError if no policy exists with the given policy_index """ if isinstance(policy_or_index, BaseStoragePolicy): policy = policy_or_index else: policy = POLICIES.get_by_index(policy_or_index) if policy is None: raise PolicyError("Unknown policy", index=policy_or_index) return _get_policy_string(base, int(policy)) def split_policy_string(policy_string): """ Helper function to convert a string representing a base and a policy. Used to decode the policy from either a file name or a directory name by various modules. :param policy_string: base name with policy index added :raises: PolicyError if given index does not map to a valid policy :returns: a tuple, in the form (base, policy) where base is the base string and policy is the StoragePolicy instance for the index encoded in the policy_string. """ if '-' in policy_string: base, policy_index = policy_string.rsplit('-', 1) else: base, policy_index = policy_string, None policy = POLICIES.get_by_index(policy_index) if get_policy_string(base, policy) != policy_string: raise PolicyError("Unknown policy", index=policy_index) return base, policy class BaseStoragePolicy(object): """ Represents a storage policy. Not meant to be instantiated directly; implement a derived subclasses (e.g. StoragePolicy, ECStoragePolicy, etc) or use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. The object_ring property is lazy loaded once the service's ``swift_dir`` is known via :meth:`~StoragePolicyCollection.get_object_ring`, but it may be over-ridden via object_ring kwarg at create time for testing or actively loaded with :meth:`~StoragePolicy.load_ring`. """ policy_type_to_policy_cls = {} def __init__(self, idx, name='', is_default=False, is_deprecated=False, object_ring=None, aliases=''): # do not allow BaseStoragePolicy class to be instantiated directly if type(self) == BaseStoragePolicy: raise TypeError("Can't instantiate BaseStoragePolicy directly") # policy parameter validation try: self.idx = int(idx) except ValueError: raise PolicyError('Invalid index', idx) if self.idx < 0: raise PolicyError('Invalid index', idx) self.alias_list = [] if not name or not self._validate_policy_name(name): raise PolicyError('Invalid name %r' % name, idx) self.alias_list.append(name) if aliases: names_list = list_from_csv(aliases) for alias in names_list: if alias == name: continue self._validate_policy_name(alias) self.alias_list.append(alias) self.is_deprecated = config_true_value(is_deprecated) self.is_default = config_true_value(is_default) if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls: raise PolicyError('Invalid type', self.policy_type) if self.is_deprecated and self.is_default: raise PolicyError('Deprecated policy can not be default. 
' 'Invalid config', self.idx) self.ring_name = _get_policy_string('object', self.idx) self.object_ring = object_ring @property def name(self): return self.alias_list[0] @name.setter def name_setter(self, name): self._validate_policy_name(name) self.alias_list[0] = name @property def aliases(self): return ", ".join(self.alias_list) def __int__(self): return self.idx def __cmp__(self, other): return cmp(self.idx, int(other)) def __repr__(self): return ("%s(%d, %r, is_default=%s, " "is_deprecated=%s, policy_type=%r)") % \ (self.__class__.__name__, self.idx, self.alias_list, self.is_default, self.is_deprecated, self.policy_type) @classmethod def register(cls, policy_type): """ Decorator for Storage Policy implementations to register their StoragePolicy class. This will also set the policy_type attribute on the registered implementation. """ def register_wrapper(policy_cls): if policy_type in cls.policy_type_to_policy_cls: raise PolicyError( '%r is already registered for the policy_type %r' % ( cls.policy_type_to_policy_cls[policy_type], policy_type)) cls.policy_type_to_policy_cls[policy_type] = policy_cls policy_cls.policy_type = policy_type return policy_cls return register_wrapper @classmethod def _config_options_map(cls): """ Map config option name to StoragePolicy parameter name. """ return { 'name': 'name', 'aliases': 'aliases', 'policy_type': 'policy_type', 'default': 'is_default', 'deprecated': 'is_deprecated', } @classmethod def from_config(cls, policy_index, options): config_to_policy_option_map = cls._config_options_map() policy_options = {} for config_option, value in options.items(): try: policy_option = config_to_policy_option_map[config_option] except KeyError: raise PolicyError('Invalid option %r in ' 'storage-policy section' % config_option, index=policy_index) policy_options[policy_option] = value return cls(policy_index, **policy_options) def get_info(self, config=False): """ Return the info dict and conf file options for this policy. :param config: boolean, if True all config options are returned """ info = {} for config_option, policy_attribute in \ self._config_options_map().items(): info[config_option] = getattr(self, policy_attribute) if not config: # remove some options for public consumption if not self.is_default: info.pop('default') if not self.is_deprecated: info.pop('deprecated') info.pop('policy_type') return info def _validate_policy_name(self, name): """ Helper function to determine the validity of a policy name. Used to check policy names before setting them. :param name: a name string for a single policy name. :returns: true if the name is valid. :raises: PolicyError if the policy name is invalid. """ # this is defensively restrictive, but could be expanded in the future if not all(c in VALID_CHARS for c in name): raise PolicyError('Names are used as HTTP headers, and can not ' 'reliably contain any characters not in %r. ' 'Invalid name %r' % (VALID_CHARS, name)) if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0: msg = 'The name %s is reserved for policy index 0. ' \ 'Invalid name %r' % (LEGACY_POLICY_NAME, name) raise PolicyError(msg, self.idx) if name.upper() in (existing_name.upper() for existing_name in self.alias_list): msg = 'The name %s is already assigned to this policy.' % name raise PolicyError(msg, self.idx) return True def add_name(self, name): """ Adds an alias name to the storage policy. Shouldn't be called directly from the storage policy but instead through the storage policy collection class, so lookups by name resolve correctly. 
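# Illustrative sketch: the shape of a hypothetical policy-type plugin using
# the register() decorator described above.  'example_hybrid' is not a real
# Swift policy type; it is kept commented out and only shows how
# parse_storage_policies() maps a swift.conf policy_type option to a
# registered class.
#
#     @BaseStoragePolicy.register('example_hybrid')
#     class ExampleHybridStoragePolicy(BaseStoragePolicy):
#         @property
#         def quorum(self):
#             if not self.object_ring:
#                 raise PolicyError('Ring is not loaded')
#             return quorum_size(self.object_ring.replica_count)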
:param name: a new alias for the storage policy """ if self._validate_policy_name(name): self.alias_list.append(name) def remove_name(self, name): """ Removes an alias name from the storage policy. Shouldn't be called directly from the storage policy but instead through the storage policy collection class, so lookups by name resolve correctly. If the name removed is the primary name then the next available alias will be adopted as the new primary name. :param name: a name assigned to the storage policy """ if name not in self.alias_list: raise PolicyError("%s is not a name assigned to policy %s" % (name, self.idx)) if len(self.alias_list) == 1: raise PolicyError("Cannot remove only name %s from policy %s. " "Policies must have at least one name." % (name, self.idx)) else: self.alias_list.remove(name) def change_primary_name(self, name): """ Changes the primary/default name of the policy to a specified name. :param name: a string name to replace the current primary name. """ if name == self.name: return elif name in self.alias_list: self.remove_name(name) else: self._validate_policy_name(name) self.alias_list.insert(0, name) def _validate_ring(self): """ Hook, called when the ring is loaded. Can be used to validate the ring against the StoragePolicy configuration. """ pass def load_ring(self, swift_dir): """ Load the ring for this policy immediately. :param swift_dir: path to rings """ if self.object_ring: return self.object_ring = Ring(swift_dir, ring_name=self.ring_name) # Validate ring to make sure it conforms to policy requirements self._validate_ring() @property def quorum(self): """ Number of successful backend requests needed for the proxy to consider the client request successful. """ raise NotImplementedError() @BaseStoragePolicy.register(REPL_POLICY) class StoragePolicy(BaseStoragePolicy): """ Represents a storage policy of type 'replication'. Default storage policy class unless otherwise overridden from swift.conf. Not meant to be instantiated directly; use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. """ @property def quorum(self): """ Quorum concept in the replication case: floor(number of replica / 2) + 1 """ if not self.object_ring: raise PolicyError('Ring is not loaded') return quorum_size(self.object_ring.replica_count) @BaseStoragePolicy.register(EC_POLICY) class ECStoragePolicy(BaseStoragePolicy): """ Represents a storage policy of type 'erasure_coding'. Not meant to be instantiated directly; use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. 
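# Illustrative sketch: a hypothetical swift.conf storage-policy section that
# parse_storage_policies() further below would turn into an ECStoragePolicy.
# The index, name and 10+4 scheme are made up; ec_type must be one of the
# backends PyECLib reports as available on the system.
#
#     [storage-policy:2]
#     name = deepfreeze10-4
#     policy_type = erasure_coding
#     ec_type = jerasure_rs_vand
#     ec_num_data_fragments = 10
#     ec_num_parity_fragments = 4
#     ec_object_segment_size = 1048576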
""" def __init__(self, idx, name='', aliases='', is_default=False, is_deprecated=False, object_ring=None, ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, ec_type=None, ec_ndata=None, ec_nparity=None): super(ECStoragePolicy, self).__init__( idx=idx, name=name, aliases=aliases, is_default=is_default, is_deprecated=is_deprecated, object_ring=object_ring) # Validate erasure_coding policy specific members # ec_type is one of the EC implementations supported by PyEClib if ec_type is None: raise PolicyError('Missing ec_type') if ec_type not in VALID_EC_TYPES: raise PolicyError('Wrong ec_type %s for policy %s, should be one' ' of "%s"' % (ec_type, self.name, ', '.join(VALID_EC_TYPES))) self._ec_type = ec_type # Define _ec_ndata as the number of EC data fragments # Accessible as the property "ec_ndata" try: value = int(ec_ndata) if value <= 0: raise ValueError self._ec_ndata = value except (TypeError, ValueError): raise PolicyError('Invalid ec_num_data_fragments %r' % ec_ndata, index=self.idx) # Define _ec_nparity as the number of EC parity fragments # Accessible as the property "ec_nparity" try: value = int(ec_nparity) if value <= 0: raise ValueError self._ec_nparity = value except (TypeError, ValueError): raise PolicyError('Invalid ec_num_parity_fragments %r' % ec_nparity, index=self.idx) # Define _ec_segment_size as the encode segment unit size # Accessible as the property "ec_segment_size" try: value = int(ec_segment_size) if value <= 0: raise ValueError self._ec_segment_size = value except (TypeError, ValueError): raise PolicyError('Invalid ec_object_segment_size %r' % ec_segment_size, index=self.idx) # Initialize PyECLib EC backend try: self.pyeclib_driver = \ ECDriver(k=self._ec_ndata, m=self._ec_nparity, ec_type=self._ec_type) except ECDriverError as e: raise PolicyError("Error creating EC policy (%s)" % e, index=self.idx) # quorum size in the EC case depends on the choice of EC scheme. self._ec_quorum_size = \ self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed() @property def ec_type(self): return self._ec_type @property def ec_ndata(self): return self._ec_ndata @property def ec_nparity(self): return self._ec_nparity @property def ec_segment_size(self): return self._ec_segment_size @property def fragment_size(self): """ Maximum length of a fragment, including header. NB: a fragment archive is a sequence of 0 or more max-length fragments followed by one possibly-shorter fragment. """ # Technically pyeclib's get_segment_info signature calls for # (data_len, segment_size) but on a ranged GET we don't know the # ec-content-length header before we need to compute where in the # object we should request to align with the fragment size. So we # tell pyeclib a lie - from it's perspective, as long as data_len >= # segment_size it'll give us the answer we want. From our # perspective, because we only use this answer to calculate the # *minimum* size we should read from an object body even if data_len < # segment_size we'll still only read *the whole one and only last # fragment* and pass than into pyeclib who will know what to do with # it just as it always does when the last fragment is < fragment_size. return self.pyeclib_driver.get_segment_info( self.ec_segment_size, self.ec_segment_size)['fragment_size'] @property def ec_scheme_description(self): """ This short hand form of the important parts of the ec schema is stored in Object System Metadata on the EC Fragment Archives for debugging. 
""" return "%s %d+%d" % (self._ec_type, self._ec_ndata, self._ec_nparity) def __repr__(self): return ("%s, EC config(ec_type=%s, ec_segment_size=%d, " "ec_ndata=%d, ec_nparity=%d)") % \ (super(ECStoragePolicy, self).__repr__(), self.ec_type, self.ec_segment_size, self.ec_ndata, self.ec_nparity) @classmethod def _config_options_map(cls): options = super(ECStoragePolicy, cls)._config_options_map() options.update({ 'ec_type': 'ec_type', 'ec_object_segment_size': 'ec_segment_size', 'ec_num_data_fragments': 'ec_ndata', 'ec_num_parity_fragments': 'ec_nparity', }) return options def get_info(self, config=False): info = super(ECStoragePolicy, self).get_info(config=config) if not config: info.pop('ec_object_segment_size') info.pop('ec_num_data_fragments') info.pop('ec_num_parity_fragments') info.pop('ec_type') return info def _validate_ring(self): """ EC specific validation Replica count check - we need _at_least_ (#data + #parity) replicas configured. Also if the replica count is larger than exactly that number there's a non-zero risk of error for code that is considering the number of nodes in the primary list from the ring. """ if not self.object_ring: raise PolicyError('Ring is not loaded') nodes_configured = self.object_ring.replica_count if nodes_configured != (self.ec_ndata + self.ec_nparity): raise RingValidationError( 'EC ring for policy %s needs to be configured with ' 'exactly %d nodes. Got %d.' % ( self.name, self.ec_ndata + self.ec_nparity, nodes_configured)) @property def quorum(self): """ Number of successful backend requests needed for the proxy to consider the client request successful. The quorum size for EC policies defines the minimum number of data + parity elements required to be able to guarantee the desired fault tolerance, which is the number of data elements supplemented by the minimum number of parity elements required by the chosen erasure coding scheme. For example, for Reed-Solomon, the minimum number parity elements required is 1, and thus the quorum_size requirement is ec_ndata + 1. Given the number of parity elements required is not the same for every erasure coding scheme, consult PyECLib for min_parity_fragments_needed() """ return self._ec_quorum_size class StoragePolicyCollection(object): """ This class represents the collection of valid storage policies for the cluster and is instantiated as :class:`StoragePolicy` objects are added to the collection when ``swift.conf`` is parsed by :func:`parse_storage_policies`. When a StoragePolicyCollection is created, the following validation is enforced: * If a policy with index 0 is not declared and no other policies defined, Swift will create one * The policy index must be a non-negative integer * If no policy is declared as the default and no other policies are defined, the policy with index 0 is set as the default * Policy indexes must be unique * Policy names are required * Policy names are case insensitive * Policy names must contain only letters, digits or a dash * Policy names must be unique * The policy name 'Policy-0' can only be used for the policy with index 0 * If any policies are defined, exactly one policy must be declared default * Deprecated policies can not be declared the default """ def __init__(self, pols): self.default = [] self.by_name = {} self.by_index = {} self._validate_policies(pols) def _add_policy(self, policy): """ Add pre-validated policies to internal indexes. 
""" for name in policy.alias_list: self.by_name[name.upper()] = policy self.by_index[int(policy)] = policy def __repr__(self): return (textwrap.dedent(""" StoragePolicyCollection([ %s ]) """) % ',\n '.join(repr(p) for p in self)).strip() def __len__(self): return len(self.by_index) def __getitem__(self, key): return self.by_index[key] def __iter__(self): return iter(self.by_index.values()) def _validate_policies(self, policies): """ :param policies: list of policies """ for policy in policies: if int(policy) in self.by_index: raise PolicyError('Duplicate index %s conflicts with %s' % ( policy, self.get_by_index(int(policy)))) for name in policy.alias_list: if name.upper() in self.by_name: raise PolicyError('Duplicate name %s conflicts with %s' % ( policy, self.get_by_name(name))) if policy.is_default: if not self.default: self.default = policy else: raise PolicyError( 'Duplicate default %s conflicts with %s' % ( policy, self.default)) self._add_policy(policy) # If a 0 policy wasn't explicitly given, or nothing was # provided, create the 0 policy now if 0 not in self.by_index: if len(self) != 0: raise PolicyError('You must specify a storage policy ' 'section for policy index 0 in order ' 'to define multiple policies') self._add_policy(StoragePolicy(0, name=LEGACY_POLICY_NAME)) # at least one policy must be enabled enabled_policies = [p for p in self if not p.is_deprecated] if not enabled_policies: raise PolicyError("Unable to find policy that's not deprecated!") # if needed, specify default if not self.default: if len(self) > 1: raise PolicyError("Unable to find default policy") self.default = self[0] self.default.is_default = True def get_by_name(self, name): """ Find a storage policy by its name. :param name: name of the policy :returns: storage policy, or None """ return self.by_name.get(name.upper()) def get_by_index(self, index): """ Find a storage policy by its index. An index of None will be treated as 0. :param index: numeric index of the storage policy :returns: storage policy, or None if no such policy """ # makes it easier for callers to just pass in a header value if index in ('', None): index = 0 else: try: index = int(index) except ValueError: return None return self.by_index.get(index) @property def legacy(self): return self.get_by_index(None) def get_object_ring(self, policy_idx, swift_dir): """ Get the ring object to use to handle a request based on its policy. An index of None will be treated as 0. :param policy_idx: policy index as defined in swift.conf :param swift_dir: swift_dir used by the caller :returns: appropriate ring object """ policy = self.get_by_index(policy_idx) if not policy: raise PolicyError("No policy with index %s" % policy_idx) if not policy.object_ring: policy.load_ring(swift_dir) return policy.object_ring def get_policy_info(self): """ Build info about policies for the /info endpoint :returns: list of dicts containing relevant policy information """ policy_info = [] for pol in self: # delete from /info if deprecated if pol.is_deprecated: continue policy_entry = pol.get_info() policy_info.append(policy_entry) return policy_info def add_policy_alias(self, policy_index, *aliases): """ Adds a new name or names to a policy :param policy_index: index of a policy in this policy collection. :param aliases: arbitrary number of string policy names to add. 
""" policy = self.get_by_index(policy_index) for alias in aliases: if alias.upper() in self.by_name: raise PolicyError('Duplicate name %s in use ' 'by policy %s' % (alias, self.get_by_name(alias))) else: policy.add_name(alias) self.by_name[alias.upper()] = policy def remove_policy_alias(self, *aliases): """ Removes a name or names from a policy. If the name removed is the primary name then the next available alias will be adopted as the new primary name. :param aliases: arbitrary number of existing policy names to remove. """ for alias in aliases: policy = self.get_by_name(alias) if not policy: raise PolicyError('No policy with name %s exists.' % alias) if len(policy.alias_list) == 1: raise PolicyError('Policy %s with name %s has only one name. ' 'Policies must have at least one name.' % ( policy, alias)) else: policy.remove_name(alias) del self.by_name[alias.upper()] def change_policy_primary_name(self, policy_index, new_name): """ Changes the primary or default name of a policy. The new primary name can be an alias that already belongs to the policy or a completely new name. :param policy_index: index of a policy in this policy collection. :param new_name: a string name to set as the new default name. """ policy = self.get_by_index(policy_index) name_taken = self.get_by_name(new_name) # if the name belongs to some other policy in the collection if name_taken and name_taken != policy: raise PolicyError('Other policy %s with name %s exists.' % (self.get_by_name(new_name).idx, new_name)) else: policy.change_primary_name(new_name) self.by_name[new_name.upper()] = policy def parse_storage_policies(conf): """ Parse storage policies in ``swift.conf`` - note that validation is done when the :class:`StoragePolicyCollection` is instantiated. :param conf: ConfigParser parser object for swift.conf """ policies = [] for section in conf.sections(): if not section.startswith('storage-policy:'): continue policy_index = section.split(':', 1)[1] config_options = dict(conf.items(section)) policy_type = config_options.pop('policy_type', DEFAULT_POLICY_TYPE) policy_cls = BaseStoragePolicy.policy_type_to_policy_cls[policy_type] policy = policy_cls.from_config(policy_index, config_options) policies.append(policy) return StoragePolicyCollection(policies) class StoragePolicySingleton(object): """ An instance of this class is the primary interface to storage policies exposed as a module level global named ``POLICIES``. This global reference wraps ``_POLICIES`` which is normally instantiated by parsing ``swift.conf`` and will result in an instance of :class:`StoragePolicyCollection`. You should never patch this instance directly, instead patch the module level ``_POLICIES`` instance so that swift code which imported ``POLICIES`` directly will reference the patched :class:`StoragePolicyCollection`. """ def __iter__(self): return iter(_POLICIES) def __len__(self): return len(_POLICIES) def __getitem__(self, key): return _POLICIES[key] def __getattribute__(self, name): return getattr(_POLICIES, name) def __repr__(self): return repr(_POLICIES) def reload_storage_policies(): """ Reload POLICIES from ``swift.conf``. 
""" global _POLICIES policy_conf = ConfigParser() policy_conf.read(SWIFT_CONF_FILE) try: _POLICIES = parse_storage_policies(policy_conf) except PolicyError as e: raise SystemExit('ERROR: Invalid Storage Policy Configuration ' 'in %s (%s)' % (SWIFT_CONF_FILE, e)) # parse configuration and setup singleton _POLICIES = None reload_storage_policies() POLICIES = StoragePolicySingleton() swift-2.7.0/swift/common/db_replicator.py0000664000567000056710000010753112675204037021641 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import random import math import time import shutil import uuid import errno import re from contextlib import contextmanager from swift import gettext_ as _ from eventlet import GreenPool, sleep, Timeout from eventlet.green import subprocess import swift.common.db from swift.common.direct_client import quote from swift.common.utils import get_logger, whataremyips, storage_directory, \ renamer, mkdirs, lock_parent_directory, config_true_value, \ unlink_older_than, dump_recon_cache, rsync_module_interpolation, ismount, \ json, Timestamp from swift.common import ring from swift.common.ring.utils import is_local_device from swift.common.http import HTTP_NOT_FOUND, HTTP_INSUFFICIENT_STORAGE from swift.common.bufferedhttp import BufferedHTTPConnection from swift.common.exceptions import DriveNotMounted from swift.common.daemon import Daemon from swift.common.swob import Response, HTTPNotFound, HTTPNoContent, \ HTTPAccepted, HTTPBadRequest DEBUG_TIMINGS_THRESHOLD = 10 def quarantine_db(object_file, server_type): """ In the case that a corrupt file is found, move it to a quarantined area to allow replication to fix it. :param object_file: path to corrupt file :param server_type: type of file that is corrupt ('container' or 'account') """ object_dir = os.path.dirname(object_file) quarantine_dir = os.path.abspath( os.path.join(object_dir, '..', '..', '..', '..', 'quarantined', server_type + 's', os.path.basename(object_dir))) try: renamer(object_dir, quarantine_dir, fsync=False) except OSError as e: if e.errno not in (errno.EEXIST, errno.ENOTEMPTY): raise quarantine_dir = "%s-%s" % (quarantine_dir, uuid.uuid4().hex) renamer(object_dir, quarantine_dir, fsync=False) def roundrobin_datadirs(datadirs): """ Generator to walk the data dirs in a round robin manner, evenly hitting each device on the system, and yielding any .db files found (in their proper places). The partitions within each data dir are walked randomly, however. 
:param datadirs: a list of (path, node_id) to walk :returns: A generator of (partition, path_to_db_file, node_id) """ def walk_datadir(datadir, node_id): partitions = os.listdir(datadir) random.shuffle(partitions) for partition in partitions: part_dir = os.path.join(datadir, partition) if not os.path.isdir(part_dir): continue suffixes = os.listdir(part_dir) if not suffixes: os.rmdir(part_dir) for suffix in suffixes: suff_dir = os.path.join(part_dir, suffix) if not os.path.isdir(suff_dir): continue hashes = os.listdir(suff_dir) for hsh in hashes: hash_dir = os.path.join(suff_dir, hsh) if not os.path.isdir(hash_dir): continue object_file = os.path.join(hash_dir, hsh + '.db') if os.path.exists(object_file): yield (partition, object_file, node_id) its = [walk_datadir(datadir, node_id) for datadir, node_id in datadirs] while its: for it in its: try: yield next(it) except StopIteration: its.remove(it) class ReplConnection(BufferedHTTPConnection): """ Helper to simplify REPLICATEing to a remote server. """ def __init__(self, node, partition, hash_, logger): "" self.logger = logger self.node = node host = "%s:%s" % (node['replication_ip'], node['replication_port']) BufferedHTTPConnection.__init__(self, host) self.path = '/%s/%s/%s' % (node['device'], partition, hash_) def replicate(self, *args): """ Make an HTTP REPLICATE request :param args: list of json-encodable objects :returns: bufferedhttp response object """ try: body = json.dumps(args) self.request('REPLICATE', self.path, body, {'Content-Type': 'application/json'}) response = self.getresponse() response.data = response.read() return response except (Exception, Timeout): self.logger.exception( _('ERROR reading HTTP response from %s'), self.node) return None class Replicator(Daemon): """ Implements the logic for directing db replication. """ def __init__(self, conf, logger=None): self.conf = conf self.logger = logger or get_logger(conf, log_route='replicator') self.root = conf.get('devices', '/srv/node') self.mount_check = config_true_value(conf.get('mount_check', 'true')) self.bind_ip = conf.get('bind_ip', '0.0.0.0') self.port = int(conf.get('bind_port', self.default_port)) concurrency = int(conf.get('concurrency', 8)) self.cpool = GreenPool(size=concurrency) swift_dir = conf.get('swift_dir', '/etc/swift') self.ring = ring.Ring(swift_dir, ring_name=self.server_type) self._local_device_ids = set() self.per_diff = int(conf.get('per_diff', 1000)) self.max_diffs = int(conf.get('max_diffs') or 100) self.interval = int(conf.get('interval') or conf.get('run_pause') or 30) self.node_timeout = float(conf.get('node_timeout', 10)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.rsync_compress = config_true_value( conf.get('rsync_compress', 'no')) self.rsync_module = conf.get('rsync_module', '').rstrip('/') if not self.rsync_module: self.rsync_module = '{replication_ip}::%s' % self.server_type if config_true_value(conf.get('vm_test_mode', 'no')): self.logger.warning('Option %(type)s-replicator/vm_test_mode ' 'is deprecated and will be removed in a ' 'future version. Update your configuration' ' to use option %(type)s-replicator/' 'rsync_module.' 
% {'type': self.server_type}) self.rsync_module += '{replication_port}' self.reclaim_age = float(conf.get('reclaim_age', 86400 * 7)) swift.common.db.DB_PREALLOCATION = \ config_true_value(conf.get('db_preallocation', 'f')) self._zero_stats() self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.recon_replicator = '%s.recon' % self.server_type self.rcache = os.path.join(self.recon_cache_path, self.recon_replicator) self.extract_device_re = re.compile('%s%s([^%s]+)' % ( self.root, os.path.sep, os.path.sep)) def _zero_stats(self): """Zero out the stats.""" self.stats = {'attempted': 0, 'success': 0, 'failure': 0, 'ts_repl': 0, 'no_change': 0, 'hashmatch': 0, 'rsync': 0, 'diff': 0, 'remove': 0, 'empty': 0, 'remote_merge': 0, 'start': time.time(), 'diff_capped': 0, 'failure_nodes': {}} def _report_stats(self): """Report the current stats to the logs.""" now = time.time() self.logger.info( _('Attempted to replicate %(count)d dbs in %(time).5f seconds ' '(%(rate).5f/s)'), {'count': self.stats['attempted'], 'time': now - self.stats['start'], 'rate': self.stats['attempted'] / (now - self.stats['start'] + 0.0000001)}) self.logger.info(_('Removed %(remove)d dbs') % self.stats) self.logger.info(_('%(success)s successes, %(failure)s failures') % self.stats) dump_recon_cache( {'replication_stats': self.stats, 'replication_time': now - self.stats['start'], 'replication_last': now}, self.rcache, self.logger) self.logger.info(' '.join(['%s:%s' % item for item in self.stats.items() if item[0] in ('no_change', 'hashmatch', 'rsync', 'diff', 'ts_repl', 'empty', 'diff_capped')])) def _add_failure_stats(self, failure_devs_info): for node, dev in failure_devs_info: self.stats['failure'] += 1 failure_devs = self.stats['failure_nodes'].setdefault(node, {}) failure_devs.setdefault(dev, 0) failure_devs[dev] += 1 def _rsync_file(self, db_file, remote_file, whole_file=True, different_region=False): """ Sync a single file using rsync. Used by _rsync_db to handle syncing. :param db_file: file to be synced :param remote_file: remote location to sync the DB file to :param whole-file: if True, uses rsync's --whole-file flag :param different_region: if True, the destination node is in a different region :returns: True if the sync was successful, False otherwise """ popen_args = ['rsync', '--quiet', '--no-motd', '--timeout=%s' % int(math.ceil(self.node_timeout)), '--contimeout=%s' % int(math.ceil(self.conn_timeout))] if whole_file: popen_args.append('--whole-file') if self.rsync_compress and different_region: # Allow for compression, but only if the remote node is in # a different region than the local one. popen_args.append('--compress') popen_args.extend([db_file, remote_file]) proc = subprocess.Popen(popen_args) proc.communicate() if proc.returncode != 0: self.logger.error(_('ERROR rsync failed with %(code)s: %(args)s'), {'code': proc.returncode, 'args': popen_args}) return proc.returncode == 0 def _rsync_db(self, broker, device, http, local_id, replicate_method='complete_rsync', replicate_timeout=None, different_region=False): """ Sync a whole db using rsync. 
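# Illustrative sketch: how the rsync destination used by _rsync_db() below
# is assembled, assuming the default rsync_module of
# '{replication_ip}::<server_type>' and a hypothetical ring device entry.
def _example_rsync_destination():
    from swift.common.utils import rsync_module_interpolation
    device = {'replication_ip': '10.0.0.2', 'replication_port': 6001,
              'device': 'sdb1', 'ip': '10.0.0.2', 'port': 6001}
    rsync_module = rsync_module_interpolation(
        '{replication_ip}::container', device)
    local_id = 'hypothetical-db-id'
    rsync_path = '%s/tmp/%s' % (device['device'], local_id)
    remote_file = '%s/%s' % (rsync_module, rsync_path)
    # remote_file == '10.0.0.2::container/sdb1/tmp/hypothetical-db-id'
    return remote_file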
:param broker: DB broker object of DB to be synced :param device: device to sync to :param http: ReplConnection object :param local_id: unique ID of the local database replica :param replicate_method: remote operation to perform after rsync :param replicate_timeout: timeout to wait in seconds :param different_region: if True, the destination node is in a different region """ rsync_module = rsync_module_interpolation(self.rsync_module, device) rsync_path = '%s/tmp/%s' % (device['device'], local_id) remote_file = '%s/%s' % (rsync_module, rsync_path) mtime = os.path.getmtime(broker.db_file) if not self._rsync_file(broker.db_file, remote_file, different_region=different_region): return False # perform block-level sync if the db was modified during the first sync if os.path.exists(broker.db_file + '-journal') or \ os.path.getmtime(broker.db_file) > mtime: # grab a lock so nobody else can modify it with broker.lock(): if not self._rsync_file(broker.db_file, remote_file, whole_file=False, different_region=different_region): return False with Timeout(replicate_timeout or self.node_timeout): response = http.replicate(replicate_method, local_id) return response and response.status >= 200 and response.status < 300 def _usync_db(self, point, broker, http, remote_id, local_id): """ Sync a db by sending all records since the last sync. :param point: synchronization high water mark between the replicas :param broker: database broker object :param http: ReplConnection object for the remote server :param remote_id: database id for the remote replica :param local_id: database id for the local replica :returns: boolean indicating completion and success """ self.stats['diff'] += 1 self.logger.increment('diffs') self.logger.debug('Syncing chunks with %s, starting at %s', http.host, point) sync_table = broker.get_syncs() objects = broker.get_items_since(point, self.per_diff) diffs = 0 while len(objects) and diffs < self.max_diffs: diffs += 1 with Timeout(self.node_timeout): response = http.replicate('merge_items', objects, local_id) if not response or response.status >= 300 or response.status < 200: if response: self.logger.error(_('ERROR Bad response %(status)s from ' '%(host)s'), {'status': response.status, 'host': http.host}) return False # replication relies on db order to send the next merge batch in # order with no gaps point = objects[-1]['ROWID'] objects = broker.get_items_since(point, self.per_diff) if objects: self.logger.debug( 'Synchronization for %s has fallen more than ' '%s rows behind; moving on and will try again next pass.', broker, self.max_diffs * self.per_diff) self.stats['diff_capped'] += 1 self.logger.increment('diff_caps') else: with Timeout(self.node_timeout): response = http.replicate('merge_syncs', sync_table) if response and response.status >= 200 and response.status < 300: broker.merge_syncs([{'remote_id': remote_id, 'sync_point': point}], incoming=False) return True return False def _in_sync(self, rinfo, info, broker, local_sync): """ Determine whether or not two replicas of a databases are considered to be in sync. 
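# Illustrative sketch: the two "already in sync" checks made by _in_sync()
# further below, with hypothetical replication info.  Either the remote sync
# point (or our cached one) has already reached the local replica's newest
# row, or the content hashes match and only the sync bookkeeping needs to be
# merged.
def _example_in_sync_check():
    info = {'max_row': 120, 'hash': 'abc'}       # local replica
    rinfo = {'point': 120, 'hash': 'abc'}        # remote replica
    local_sync = 80                              # cached last sync point
    no_new_rows = max(rinfo['point'], local_sync) >= info['max_row']
    same_content = rinfo['hash'] == info['hash']
    return no_new_rows or same_content           # True -> nothing to push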
:param rinfo: remote database info :param info: local database info :param broker: database broker object :param local_sync: cached last sync point between replicas :returns: boolean indicating whether or not the replicas are in sync """ if max(rinfo['point'], local_sync) >= info['max_row']: self.stats['no_change'] += 1 self.logger.increment('no_changes') return True if rinfo['hash'] == info['hash']: self.stats['hashmatch'] += 1 self.logger.increment('hashmatches') broker.merge_syncs([{'remote_id': rinfo['id'], 'sync_point': rinfo['point']}], incoming=False) return True def _http_connect(self, node, partition, db_file): """ Make an http_connection using ReplConnection :param node: node dictionary from the ring :param partition: partition partition to send in the url :param db_file: DB file :returns: ReplConnection object """ return ReplConnection(node, partition, os.path.basename(db_file).split('.', 1)[0], self.logger) def _gather_sync_args(self, info): """ Convert local replication_info to sync args tuple. """ sync_args_order = ('max_row', 'hash', 'id', 'created_at', 'put_timestamp', 'delete_timestamp', 'metadata') return tuple(info[key] for key in sync_args_order) def _repl_to_node(self, node, broker, partition, info, different_region=False): """ Replicate a database to a node. :param node: node dictionary from the ring to be replicated to :param broker: DB broker for the DB to be replication :param partition: partition on the node to replicate to :param info: DB info as a dictionary of {'max_row', 'hash', 'id', 'created_at', 'put_timestamp', 'delete_timestamp', 'metadata'} :param different_region: if True, the destination node is in a different region :returns: True if successful, False otherwise """ http = self._http_connect(node, partition, broker.db_file) sync_args = self._gather_sync_args(info) with Timeout(self.node_timeout): response = http.replicate('sync', *sync_args) if not response: return False return self._handle_sync_response(node, response, info, broker, http, different_region=different_region) def _handle_sync_response(self, node, response, info, broker, http, different_region=False): if response.status == HTTP_NOT_FOUND: # completely missing, rsync self.stats['rsync'] += 1 self.logger.increment('rsyncs') return self._rsync_db(broker, node, http, info['id'], different_region=different_region) elif response.status == HTTP_INSUFFICIENT_STORAGE: raise DriveNotMounted() elif response.status >= 200 and response.status < 300: rinfo = json.loads(response.data) local_sync = broker.get_sync(rinfo['id'], incoming=False) if self._in_sync(rinfo, info, broker, local_sync): return True # if the difference in rowids between the two differs by # more than 50% and the difference is greater than per_diff, # rsync then do a remote merge. # NOTE: difference > per_diff stops us from dropping to rsync # on smaller containers, who have only a few rows to sync. 
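# Illustrative sketch: the rsync-vs-usync decision made just below, with
# hypothetical row counts.  The remote replica holds well under half of the
# local rows and the gap exceeds per_diff, so a full rsync followed by a
# remote merge is preferred over shipping row diffs.
def _example_remote_merge_decision():
    per_diff = 1000
    info = {'max_row': 10000}        # local replica
    rinfo = {'max_row': 3000}        # remote replica
    use_rsync_then_merge = (
        rinfo['max_row'] / float(info['max_row']) < 0.5 and
        info['max_row'] - rinfo['max_row'] > per_diff)
    return use_rsync_then_merge      # True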
if rinfo['max_row'] / float(info['max_row']) < 0.5 and \ info['max_row'] - rinfo['max_row'] > self.per_diff: self.stats['remote_merge'] += 1 self.logger.increment('remote_merges') return self._rsync_db(broker, node, http, info['id'], replicate_method='rsync_then_merge', replicate_timeout=(info['count'] / 2000), different_region=different_region) # else send diffs over to the remote server return self._usync_db(max(rinfo['point'], local_sync), broker, http, rinfo['id'], info['id']) def _post_replicate_hook(self, broker, info, responses): """ :param broker: the container that just replicated :param info: pre-replication full info dict :param responses: a list of bools indicating success from nodes """ pass def _replicate_object(self, partition, object_file, node_id): """ Replicate the db, choosing method based on whether or not it already exists on peers. :param partition: partition to be replicated to :param object_file: DB file name to be replicated :param node_id: node id of the node to be replicated to """ start_time = now = time.time() self.logger.debug('Replicating db %s', object_file) self.stats['attempted'] += 1 self.logger.increment('attempts') shouldbehere = True try: broker = self.brokerclass(object_file, pending_timeout=30) broker.reclaim(now - self.reclaim_age, now - (self.reclaim_age * 2)) info = broker.get_replication_info() bpart = self.ring.get_part( info['account'], info.get('container')) if bpart != int(partition): partition = bpart # Important to set this false here since the later check only # checks if it's on the proper device, not partition. shouldbehere = False name = '/' + quote(info['account']) if 'container' in info: name += '/' + quote(info['container']) self.logger.error( 'Found %s for %s when it should be on partition %s; will ' 'replicate out and remove.' % (object_file, name, bpart)) except (Exception, Timeout) as e: if 'no such table' in str(e): self.logger.error(_('Quarantining DB %s'), object_file) quarantine_db(broker.db_file, broker.db_type) else: self.logger.exception(_('ERROR reading db %s'), object_file) nodes = self.ring.get_part_nodes(int(partition)) self._add_failure_stats([(failure_dev['replication_ip'], failure_dev['device']) for failure_dev in nodes]) self.logger.increment('failures') return # The db is considered deleted if the delete_timestamp value is greater # than the put_timestamp, and there are no objects. delete_timestamp = Timestamp(info.get('delete_timestamp') or 0) put_timestamp = Timestamp(info.get('put_timestamp') or 0) if delete_timestamp < (now - self.reclaim_age) and \ delete_timestamp > put_timestamp and \ info['count'] in (None, '', 0, '0'): if self.report_up_to_date(info): self.delete_db(broker) self.logger.timing_since('timing', start_time) return responses = [] failure_devs_info = set() nodes = self.ring.get_part_nodes(int(partition)) local_dev = None for node in nodes: if node['id'] == node_id: local_dev = node break if shouldbehere: shouldbehere = bool([n for n in nodes if n['id'] == node_id]) # See Footnote [1] for an explanation of the repl_nodes assignment. 
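# Illustrative sketch: the rotation of the primary node list performed just
# below (and motivated in Footnote [1] at the end of this module).  With
# hypothetical nodes a, b, c and b as the local node, b replicates to c
# first and then to a.
def _example_repl_node_order():
    nodes = [{'id': 'a'}, {'id': 'b'}, {'id': 'c'}]
    node_id = 'b'                    # the local node
    i = 0
    while i < len(nodes) and nodes[i]['id'] != node_id:
        i += 1
    repl_nodes = nodes[i + 1:] + nodes[:i]
    return [n['id'] for n in repl_nodes]      # ['c', 'a']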
i = 0 while i < len(nodes) and nodes[i]['id'] != node_id: i += 1 repl_nodes = nodes[i + 1:] + nodes[:i] more_nodes = self.ring.get_more_nodes(int(partition)) if not local_dev: # Check further if local device is a handoff node for node in more_nodes: if node['id'] == node_id: local_dev = node break for node in repl_nodes: different_region = False if local_dev and local_dev['region'] != node['region']: # This additional information will help later if we # want to handle syncing to a node in different # region with some optimizations. different_region = True success = False try: success = self._repl_to_node(node, broker, partition, info, different_region) except DriveNotMounted: repl_nodes.append(next(more_nodes)) self.logger.error(_('ERROR Remote drive not mounted %s'), node) except (Exception, Timeout): self.logger.exception(_('ERROR syncing %(file)s with node' ' %(node)s'), {'file': object_file, 'node': node}) if not success: failure_devs_info.add((node['replication_ip'], node['device'])) self.logger.increment('successes' if success else 'failures') responses.append(success) try: self._post_replicate_hook(broker, info, responses) except (Exception, Timeout): self.logger.exception('UNHANDLED EXCEPTION: in post replicate ' 'hook for %s', broker.db_file) if not shouldbehere and all(responses): # If the db shouldn't be on this node and has been successfully # synced to all of its peers, it can be removed. if not self.delete_db(broker): failure_devs_info.update( [(failure_dev['replication_ip'], failure_dev['device']) for failure_dev in repl_nodes]) target_devs_info = set([(target_dev['replication_ip'], target_dev['device']) for target_dev in repl_nodes]) self.stats['success'] += len(target_devs_info - failure_devs_info) self._add_failure_stats(failure_devs_info) self.logger.timing_since('timing', start_time) def delete_db(self, broker): object_file = broker.db_file hash_dir = os.path.dirname(object_file) suf_dir = os.path.dirname(hash_dir) with lock_parent_directory(object_file): shutil.rmtree(hash_dir, True) try: os.rmdir(suf_dir) except OSError as err: if err.errno not in (errno.ENOENT, errno.ENOTEMPTY): self.logger.exception( _('ERROR while trying to clean up %s') % suf_dir) return False self.stats['remove'] += 1 device_name = self.extract_device(object_file) self.logger.increment('removes.' + device_name) return True def extract_device(self, object_file): """ Extract the device name from an object path. Returns "UNKNOWN" if the path could not be extracted successfully for some reason. :param object_file: the path to a database file. 
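# Illustrative sketch: the device-name extraction performed by
# extract_device(), assuming the default devices root of /srv/node.  The
# regular expression mirrors the one compiled in Replicator.__init__ above;
# the database path is hypothetical.
def _example_extract_device():
    import os
    import re
    root = '/srv/node'
    extract_device_re = re.compile('%s%s([^%s]+)' % (
        root, os.path.sep, os.path.sep))
    object_file = '/srv/node/sdb1/containers/1/abc/ffff/ffff.db'
    match = extract_device_re.match(object_file)
    return match.groups()[0] if match else 'UNKNOWN'      # 'sdb1'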
""" match = self.extract_device_re.match(object_file) if match: return match.groups()[0] return "UNKNOWN" def report_up_to_date(self, full_info): return True def run_once(self, *args, **kwargs): """Run a replication pass once.""" self._zero_stats() dirs = [] ips = whataremyips(self.bind_ip) if not ips: self.logger.error(_('ERROR Failed to get my own IPs?')) return self._local_device_ids = set() found_local = False for node in self.ring.devs: if node and is_local_device(ips, self.port, node['replication_ip'], node['replication_port']): found_local = True if self.mount_check and not ismount( os.path.join(self.root, node['device'])): self._add_failure_stats( [(failure_dev['replication_ip'], failure_dev['device']) for failure_dev in self.ring.devs if failure_dev]) self.logger.warning( _('Skipping %(device)s as it is not mounted') % node) continue unlink_older_than( os.path.join(self.root, node['device'], 'tmp'), time.time() - self.reclaim_age) datadir = os.path.join(self.root, node['device'], self.datadir) if os.path.isdir(datadir): self._local_device_ids.add(node['id']) dirs.append((datadir, node['id'])) if not found_local: self.logger.error("Can't find itself %s with port %s in ring " "file, not replicating", ", ".join(ips), self.port) self.logger.info(_('Beginning replication run')) for part, object_file, node_id in roundrobin_datadirs(dirs): self.cpool.spawn_n( self._replicate_object, part, object_file, node_id) self.cpool.waitall() self.logger.info(_('Replication run OVER')) self._report_stats() def run_forever(self, *args, **kwargs): """ Replicate dbs under the given root in an infinite loop. """ sleep(random.random() * self.interval) while True: begin = time.time() try: self.run_once() except (Exception, Timeout): self.logger.exception(_('ERROR trying to replicate')) elapsed = time.time() - begin if elapsed < self.interval: sleep(self.interval - elapsed) class ReplicatorRpc(object): """Handle Replication RPC calls. TODO(redbo): document please :)""" def __init__(self, root, datadir, broker_class, mount_check=True, logger=None): self.root = root self.datadir = datadir self.broker_class = broker_class self.mount_check = mount_check self.logger = logger or get_logger({}, log_route='replicator-rpc') def dispatch(self, replicate_args, args): if not hasattr(args, 'pop'): return HTTPBadRequest(body='Invalid object type') op = args.pop(0) drive, partition, hsh = replicate_args if self.mount_check and not ismount(os.path.join(self.root, drive)): return Response(status='507 %s is not mounted' % drive) db_file = os.path.join(self.root, drive, storage_directory(self.datadir, partition, hsh), hsh + '.db') if op == 'rsync_then_merge': return self.rsync_then_merge(drive, db_file, args) if op == 'complete_rsync': return self.complete_rsync(drive, db_file, args) else: # someone might be about to rsync a db to us, # make sure there's a tmp dir to receive it. mkdirs(os.path.join(self.root, drive, 'tmp')) if not os.path.exists(db_file): return HTTPNotFound() return getattr(self, op)(self.broker_class(db_file), args) @contextmanager def debug_timing(self, name): timemark = time.time() yield timespan = time.time() - timemark if timespan > DEBUG_TIMINGS_THRESHOLD: self.logger.debug( 'replicator-rpc-sync time for %s: %.02fs' % ( name, timespan)) def _parse_sync_args(self, args): """ Convert remote sync args to remote_info dictionary. 
""" (remote_sync, hash_, id_, created_at, put_timestamp, delete_timestamp, metadata) = args[:7] remote_metadata = {} if metadata: try: remote_metadata = json.loads(metadata) except ValueError: self.logger.error("Unable to decode remote metadata %r", metadata) remote_info = { 'point': remote_sync, 'hash': hash_, 'id': id_, 'created_at': created_at, 'put_timestamp': put_timestamp, 'delete_timestamp': delete_timestamp, 'metadata': remote_metadata, } return remote_info def sync(self, broker, args): remote_info = self._parse_sync_args(args) return self._handle_sync_request(broker, remote_info) def _get_synced_replication_info(self, broker, remote_info): """ Apply any changes to the broker based on remote_info and return the current replication info. :param broker: the database broker :param remote_info: the remote replication info :returns: local broker replication info """ return broker.get_replication_info() def _handle_sync_request(self, broker, remote_info): """ Update metadata, timestamps, sync points. """ with self.debug_timing('info'): try: info = self._get_synced_replication_info(broker, remote_info) except (Exception, Timeout) as e: if 'no such table' in str(e): self.logger.error(_("Quarantining DB %s"), broker) quarantine_db(broker.db_file, broker.db_type) return HTTPNotFound() raise if remote_info['metadata']: with self.debug_timing('update_metadata'): broker.update_metadata(remote_info['metadata']) sync_timestamps = ('created_at', 'put_timestamp', 'delete_timestamp') if any(info[ts] != remote_info[ts] for ts in sync_timestamps): with self.debug_timing('merge_timestamps'): broker.merge_timestamps(*(remote_info[ts] for ts in sync_timestamps)) with self.debug_timing('get_sync'): info['point'] = broker.get_sync(remote_info['id']) if remote_info['hash'] == info['hash'] and \ info['point'] < remote_info['point']: with self.debug_timing('merge_syncs'): translate = { 'remote_id': 'id', 'sync_point': 'point', } data = dict((k, remote_info[v]) for k, v in translate.items()) broker.merge_syncs([data]) info['point'] = remote_info['point'] return Response(json.dumps(info)) def merge_syncs(self, broker, args): broker.merge_syncs(args[0]) return HTTPAccepted() def merge_items(self, broker, args): broker.merge_items(args[0], args[1]) return HTTPAccepted() def complete_rsync(self, drive, db_file, args): old_filename = os.path.join(self.root, drive, 'tmp', args[0]) if os.path.exists(db_file): return HTTPNotFound() if not os.path.exists(old_filename): return HTTPNotFound() broker = self.broker_class(old_filename) broker.newid(args[0]) renamer(old_filename, db_file) return HTTPNoContent() def rsync_then_merge(self, drive, db_file, args): old_filename = os.path.join(self.root, drive, 'tmp', args[0]) if not os.path.exists(db_file) or not os.path.exists(old_filename): return HTTPNotFound() new_broker = self.broker_class(old_filename) existing_broker = self.broker_class(db_file) point = -1 objects = existing_broker.get_items_since(point, 1000) while len(objects): new_broker.merge_items(objects) point = objects[-1]['ROWID'] objects = existing_broker.get_items_since(point, 1000) sleep() new_broker.newid(args[0]) renamer(old_filename, db_file) return HTTPNoContent() # Footnote [1]: # This orders the nodes so that, given nodes a b c, a will contact b then c, # b will contact c then a, and c will contact a then b -- in other words, each # node will always contact the next node in the list first. 
# This helps in the case where databases are all way out of sync, so each # node is likely to be sending to a different node than it's receiving from, # rather than two nodes talking to each other, starving out the third. # If the third didn't even have a copy and the first two nodes were way out # of sync, such starvation would mean the third node wouldn't get any copy # until the first two nodes finally got in sync, which could take a while. # This new ordering ensures such starvation doesn't occur, making the data # more durable. swift-2.7.0/swift/common/container_sync_realms.py0000664000567000056710000001364312675204037023411 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import hashlib import hmac import os import time from six.moves import configparser from swift import gettext_ as _ from swift.common.utils import get_valid_utf8_str class ContainerSyncRealms(object): """ Loads and parses the container-sync-realms.conf, occasionally checking the file's mtime to see if it needs to be reloaded. """ def __init__(self, conf_path, logger): self.conf_path = conf_path self.logger = logger self.next_mtime_check = 0 self.mtime_check_interval = 300 self.conf_path_mtime = 0 self.data = {} self.reload() def reload(self): """Forces a reload of the conf file.""" self.next_mtime_check = 0 self.conf_path_mtime = 0 self._reload() def _reload(self): now = time.time() if now >= self.next_mtime_check: self.next_mtime_check = now + self.mtime_check_interval try: mtime = os.path.getmtime(self.conf_path) except OSError as err: if err.errno == errno.ENOENT: log_func = self.logger.debug else: log_func = self.logger.error log_func(_('Could not load %r: %s'), self.conf_path, err) else: if mtime != self.conf_path_mtime: self.conf_path_mtime = mtime try: conf = configparser.SafeConfigParser() conf.read(self.conf_path) except configparser.ParsingError as err: self.logger.error( _('Could not load %r: %s'), self.conf_path, err) else: try: self.mtime_check_interval = conf.getint( 'DEFAULT', 'mtime_check_interval') self.next_mtime_check = \ now + self.mtime_check_interval except configparser.NoOptionError: self.mtime_check_interval = 300 self.next_mtime_check = \ now + self.mtime_check_interval except (configparser.ParsingError, ValueError) as err: self.logger.error( _('Error in %r with mtime_check_interval: %s'), self.conf_path, err) realms = {} for section in conf.sections(): realm = {} clusters = {} for option, value in conf.items(section): if option in ('key', 'key2'): realm[option] = value elif option.startswith('cluster_'): clusters[option[8:].upper()] = value realm['clusters'] = clusters realms[section.upper()] = realm self.data = realms def realms(self): """Returns a list of realms.""" self._reload() return self.data.keys() def key(self, realm): """Returns the key for the realm.""" self._reload() result = self.data.get(realm.upper()) if result: result = result.get('key') return result def key2(self, realm): """Returns the key2 for the 
realm.""" self._reload() result = self.data.get(realm.upper()) if result: result = result.get('key2') return result def clusters(self, realm): """Returns a list of clusters for the realm.""" self._reload() result = self.data.get(realm.upper()) if result: result = result.get('clusters') if result: result = result.keys() return result or [] def endpoint(self, realm, cluster): """Returns the endpoint for the cluster in the realm.""" self._reload() result = None realm_data = self.data.get(realm.upper()) if realm_data: cluster_data = realm_data.get('clusters') if cluster_data: result = cluster_data.get(cluster.upper()) return result def get_sig(self, request_method, path, x_timestamp, nonce, realm_key, user_key): """ Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for the information given. :param request_method: HTTP method of the request. :param path: The path to the resource. :param x_timestamp: The X-Timestamp header value for the request. :param nonce: A unique value for the request. :param realm_key: Shared secret at the cluster operator level. :param user_key: Shared secret at the user's container level. :returns: hexdigest str of the HMAC-SHA1 for the request. """ nonce = get_valid_utf8_str(nonce) realm_key = get_valid_utf8_str(realm_key) user_key = get_valid_utf8_str(user_key) return hmac.new( realm_key, '%s\n%s\n%s\n%s\n%s' % ( request_method, path, x_timestamp, nonce, user_key), hashlib.sha1).hexdigest() swift-2.7.0/swift/common/memcached.py0000664000567000056710000004457312675204037020744 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Why our own memcache client? By Michael Barton python-memcached doesn't use consistent hashing, so adding or removing a memcache server from the pool invalidates a huge percentage of cached items. If you keep a pool of python-memcached client objects, each client object has its own connection to every memcached server, only one of which is ever in use. So you wind up with n * m open sockets and almost all of them idle. This client effectively has a pool for each server, so the number of backend connections is hopefully greatly reduced. python-memcache uses pickle to store things, and there was already a huge stink about Swift using pickles in memcache (http://osvdb.org/show/osvdb/86581). That seemed sort of unfair, since nova and keystone and everyone else use pickles for memcache too, but it's hidden behind a "standard" library. But changing would be a security regression at this point. Also, pylibmc wouldn't work for us because it needs to use python sockets in order to play nice with eventlet. Lucid comes with memcached: v1.4.2. 
Protocol documentation for that version is at: http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt """ import six.moves.cPickle as pickle import json import logging import time from bisect import bisect from swift import gettext_ as _ from hashlib import md5 from eventlet.green import socket from eventlet.pools import Pool from eventlet import Timeout from six.moves import range from swift.common import utils DEFAULT_MEMCACHED_PORT = 11211 CONN_TIMEOUT = 0.3 POOL_TIMEOUT = 1.0 # WAG IO_TIMEOUT = 2.0 PICKLE_FLAG = 1 JSON_FLAG = 2 NODE_WEIGHT = 50 PICKLE_PROTOCOL = 2 TRY_COUNT = 3 # if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server # will be considered failed for ERROR_LIMIT_DURATION seconds. ERROR_LIMIT_COUNT = 10 ERROR_LIMIT_TIME = 60 ERROR_LIMIT_DURATION = 60 def md5hash(key): return md5(key).hexdigest() def sanitize_timeout(timeout): """ Sanitize a timeout value to use an absolute expiration time if the delta is greater than 30 days (in seconds). Note that the memcached server translates negative values to mean a delta of 30 days in seconds (and 1 additional second), client beware. """ if timeout > (30 * 24 * 60 * 60): timeout += time.time() return timeout class MemcacheConnectionError(Exception): pass class MemcachePoolTimeout(Timeout): pass class MemcacheConnPool(Pool): """ Connection pool for Memcache Connections The *server* parameter can be a hostname, an IPv4 address, or an IPv6 address with an optional port. See :func:`swift.common.utils.parse_socket_string` for details. """ def __init__(self, server, size, connect_timeout): Pool.__init__(self, max_size=size) self.host, self.port = utils.parse_socket_string( server, DEFAULT_MEMCACHED_PORT) self._connect_timeout = connect_timeout def create(self): addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) family, socktype, proto, canonname, sockaddr = addrs[0] sock = socket.socket(family, socket.SOCK_STREAM) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) with Timeout(self._connect_timeout): sock.connect(sockaddr) return (sock.makefile(), sock) def get(self): fp, sock = super(MemcacheConnPool, self).get() if fp is None: # An error happened previously, so we need a new connection fp, sock = self.create() return fp, sock class MemcacheRing(object): """ Simple, consistent-hashed memcache client. 
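# A minimal, standalone sketch of the consistent-hashing lookup idea this
# class relies on (illustrative only; the real lookup in _get_conns also
# steps past error-limited servers and retries across several candidates):
from bisect import bisect
from hashlib import md5

def pick_server(servers, key, weight=50):
    # place each server on the ring "weight" times, keyed by md5 points
    ring = {}
    for server in sorted(servers):
        for i in range(weight):
            ring[md5('%s-%s' % (server, i)).hexdigest()] = server
    sorted_hashes = sorted(ring)
    # find the first ring point at or after the hashed key, wrapping around
    pos = bisect(sorted_hashes, md5(key).hexdigest()) % len(sorted_hashes)
    return ring[sorted_hashes[pos]]

# e.g. pick_server(['10.0.0.1:11211', '10.0.0.2:11211'], 'some-cache-key')
# keeps returning the same server for the same key, and adding or removing
# a server only remaps roughly 1/N of the keys.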
""" def __init__(self, servers, connect_timeout=CONN_TIMEOUT, io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT, tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False, max_conns=2): self._ring = {} self._errors = dict(((serv, []) for serv in servers)) self._error_limited = dict(((serv, 0) for serv in servers)) for server in sorted(servers): for i in range(NODE_WEIGHT): self._ring[md5hash('%s-%s' % (server, i))] = server self._tries = tries if tries <= len(servers) else len(servers) self._sorted = sorted(self._ring) self._client_cache = dict(((server, MemcacheConnPool(server, max_conns, connect_timeout)) for server in servers)) self._connect_timeout = connect_timeout self._io_timeout = io_timeout self._pool_timeout = pool_timeout self._allow_pickle = allow_pickle self._allow_unpickle = allow_unpickle or allow_pickle def _exception_occurred(self, server, e, action='talking', sock=None, fp=None, got_connection=True): if isinstance(e, Timeout): logging.error(_("Timeout %(action)s to memcached: %(server)s"), {'action': action, 'server': server}) else: logging.exception(_("Error %(action)s to memcached: %(server)s"), {'action': action, 'server': server}) try: if fp: fp.close() del fp except Exception: pass try: if sock: sock.close() del sock except Exception: pass if got_connection: # We need to return something to the pool # A new connection will be created the next time it is retrieved self._return_conn(server, None, None) now = time.time() self._errors[server].append(time.time()) if len(self._errors[server]) > ERROR_LIMIT_COUNT: self._errors[server] = [err for err in self._errors[server] if err > now - ERROR_LIMIT_TIME] if len(self._errors[server]) > ERROR_LIMIT_COUNT: self._error_limited[server] = now + ERROR_LIMIT_DURATION logging.error(_('Error limiting server %s'), server) def _get_conns(self, key): """ Retrieves a server conn from the pool, or connects a new one. Chooses the server based on a consistent hash of "key". """ pos = bisect(self._sorted, key) served = [] while len(served) < self._tries: pos = (pos + 1) % len(self._sorted) server = self._ring[self._sorted[pos]] if server in served: continue served.append(server) if self._error_limited[server] > time.time(): continue sock = None try: with MemcachePoolTimeout(self._pool_timeout): fp, sock = self._client_cache[server].get() yield server, fp, sock except MemcachePoolTimeout as e: self._exception_occurred( server, e, action='getting a connection', got_connection=False) except (Exception, Timeout) as e: # Typically a Timeout exception caught here is the one raised # by the create() method of this server's MemcacheConnPool # object. self._exception_occurred( server, e, action='connecting', sock=sock) def _return_conn(self, server, fp, sock): """Returns a server connection to the pool.""" self._client_cache[server].put((fp, sock)) def set(self, key, value, serialize=True, time=0, min_compress_len=0): """ Set a key/value pair in memcache :param key: key :param value: value :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) :param time: the time to live :min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. This implementation ignores it. 
""" key = md5hash(key) timeout = sanitize_timeout(time) flags = 0 if serialize and self._allow_pickle: value = pickle.dumps(value, PICKLE_PROTOCOL) flags |= PICKLE_FLAG elif serialize: value = json.dumps(value) flags |= JSON_FLAG for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('set %s %d %d %s\r\n%s\r\n' % (key, flags, timeout, len(value), value)) # Wait for the set to complete fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def get(self, key): """ Gets the object specified by key. It will also unserialize the object before returning if it is serialized in memcache with JSON, or if it is pickled and unpickling is allowed. :param key: key :returns: value of the key in memcache """ key = md5hash(key) value = None for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('get %s\r\n' % key) line = fp.readline().strip().split() while line[0].upper() != 'END': if line[0].upper() == 'VALUE' and line[1] == key: size = int(line[3]) value = fp.read(size) if int(line[2]) & PICKLE_FLAG: if self._allow_unpickle: value = pickle.loads(value) else: value = None elif int(line[2]) & JSON_FLAG: value = json.loads(value) fp.readline() line = fp.readline().strip().split() self._return_conn(server, fp, sock) return value except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def incr(self, key, delta=1, time=0): """ Increments a key which has a numeric value by delta. If the key can't be found, it's added as delta or 0 if delta < 0. If passed a negative number, will use memcached's decr. Returns the int stored in memcached Note: The data memcached stores as the result of incr/decr is an unsigned int. decr's that result in a number below 0 are stored as 0. :param key: key :param delta: amount to add to the value of key (or set as the value if the key is not found) will be cast to an int :param time: the time to live :returns: result of incrementing :raises MemcacheConnectionError: """ key = md5hash(key) command = 'incr' if delta < 0: command = 'decr' delta = str(abs(int(delta))) timeout = sanitize_timeout(time) for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('%s %s %s\r\n' % (command, key, delta)) line = fp.readline().strip().split() if line[0].upper() == 'NOT_FOUND': add_val = delta if command == 'decr': add_val = '0' sock.sendall('add %s %d %d %s\r\n%s\r\n' % (key, 0, timeout, len(add_val), add_val)) line = fp.readline().strip().split() if line[0].upper() == 'NOT_STORED': sock.sendall('%s %s %s\r\n' % (command, key, delta)) line = fp.readline().strip().split() ret = int(line[0].strip()) else: ret = int(add_val) else: ret = int(line[0].strip()) self._return_conn(server, fp, sock) return ret except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) raise MemcacheConnectionError("No Memcached connections succeeded.") def decr(self, key, delta=1, time=0): """ Decrements a key which has a numeric value by delta. Calls incr with -delta. :param key: key :param delta: amount to subtract to the value of key (or set the value to 0 if the key is not found) will be cast to an int :param time: the time to live :returns: result of decrementing :raises MemcacheConnectionError: """ return self.incr(key, delta=-delta, time=time) def delete(self, key): """ Deletes a key/value pair from memcache. 
:param key: key to be deleted """ key = md5hash(key) for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('delete %s\r\n' % key) # Wait for the delete to complete fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def set_multi(self, mapping, server_key, serialize=True, time=0, min_compress_len=0): """ Sets multiple key/value pairs in memcache. :param mapping: dictionary of keys and values to be set in memcache :param servery_key: key to use in determining which server in the ring is used :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) :param time: the time to live :min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. This implementation ignores it """ server_key = md5hash(server_key) timeout = sanitize_timeout(time) msg = '' for key, value in mapping.items(): key = md5hash(key) flags = 0 if serialize and self._allow_pickle: value = pickle.dumps(value, PICKLE_PROTOCOL) flags |= PICKLE_FLAG elif serialize: value = json.dumps(value) flags |= JSON_FLAG msg += ('set %s %d %d %s\r\n%s\r\n' % (key, flags, timeout, len(value), value)) for (server, fp, sock) in self._get_conns(server_key): try: with Timeout(self._io_timeout): sock.sendall(msg) # Wait for the set to complete for line in range(len(mapping)): fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def get_multi(self, keys, server_key): """ Gets multiple values from memcache for the given keys. :param keys: keys for values to be retrieved from memcache :param servery_key: key to use in determining which server in the ring is used :returns: list of values """ server_key = md5hash(server_key) keys = [md5hash(key) for key in keys] for (server, fp, sock) in self._get_conns(server_key): try: with Timeout(self._io_timeout): sock.sendall('get %s\r\n' % ' '.join(keys)) line = fp.readline().strip().split() responses = {} while line[0].upper() != 'END': if line[0].upper() == 'VALUE': size = int(line[3]) value = fp.read(size) if int(line[2]) & PICKLE_FLAG: if self._allow_unpickle: value = pickle.loads(value) else: value = None elif int(line[2]) & JSON_FLAG: value = json.loads(value) responses[line[1]] = value fp.readline() line = fp.readline().strip().split() values = [] for key in keys: if key in responses: values.append(responses[key]) else: values.append(None) self._return_conn(server, fp, sock) return values except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) swift-2.7.0/swift/common/request_helpers.py0000664000567000056710000005305412675204037022242 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
""" Miscellaneous utility functions for use in generating responses. Why not swift.common.utils, you ask? Because this way we can import things from swob in here without creating circular imports. """ import hashlib import itertools import sys import time import six from six.moves.urllib.parse import unquote from swift import gettext_ as _ from swift.common.storage_policy import POLICIES from swift.common.constraints import FORMAT2CONTENT_TYPE from swift.common.exceptions import ListingIterError, SegmentError from swift.common.http import is_success from swift.common.swob import HTTPBadRequest, HTTPNotAcceptable, \ HTTPServiceUnavailable, Range, is_chunked from swift.common.utils import split_path, validate_device_partition, \ close_if_possible, maybe_multipart_byteranges_to_document_iters, \ multipart_byteranges_to_document_iters, parse_content_type, \ parse_content_range from swift.common.wsgi import make_subrequest def get_param(req, name, default=None): """ Get parameters from an HTTP request ensuring proper handling UTF-8 encoding. :param req: request object :param name: parameter name :param default: result to return if the parameter is not found :returns: HTTP request parameter value (as UTF-8 encoded str, not unicode object) :raises: HTTPBadRequest if param not valid UTF-8 byte sequence """ value = req.params.get(name, default) if value and not isinstance(value, six.text_type): try: value.decode('utf8') # Ensure UTF8ness except UnicodeDecodeError: raise HTTPBadRequest( request=req, content_type='text/plain', body='"%s" parameter not valid UTF-8' % name) return value def get_listing_content_type(req): """ Determine the content type to use for an account or container listing response. :param req: request object :returns: content type as a string (e.g. text/plain, application/json) :raises: HTTPNotAcceptable if the requested content type is not acceptable :raises: HTTPBadRequest if the 'format' query param is provided and not valid UTF-8 """ query_format = get_param(req, 'format') if query_format: req.accept = FORMAT2CONTENT_TYPE.get( query_format.lower(), FORMAT2CONTENT_TYPE['plain']) out_content_type = req.accept.best_match( ['text/plain', 'application/json', 'application/xml', 'text/xml']) if not out_content_type: raise HTTPNotAcceptable(request=req) return out_content_type def get_name_and_placement(request, minsegs=1, maxsegs=None, rest_with_last=False): """ Utility function to split and validate the request path and storage policy. The storage policy index is extracted from the headers of the request and converted to a StoragePolicy instance. The remaining args are passed through to :meth:`split_and_validate_path`. :returns: a list, result of :meth:`split_and_validate_path` with the BaseStoragePolicy instance appended on the end :raises: HTTPServiceUnavailable if the path is invalid or no policy exists with the extracted policy_index. """ policy_index = request.headers.get('X-Backend-Storage-Policy-Index') policy = POLICIES.get_by_index(policy_index) if not policy: raise HTTPServiceUnavailable( body=_("No policy with index %s") % policy_index, request=request, content_type='text/plain') results = split_and_validate_path(request, minsegs=minsegs, maxsegs=maxsegs, rest_with_last=rest_with_last) results.append(policy) return results def split_and_validate_path(request, minsegs=1, maxsegs=None, rest_with_last=False): """ Utility function to split and validate the request path. 
:returns: result of :meth:`~swift.common.utils.split_path` if everything's okay :raises: HTTPBadRequest if something's not okay """ try: segs = split_path(unquote(request.path), minsegs, maxsegs, rest_with_last) validate_device_partition(segs[0], segs[1]) return segs except ValueError as err: raise HTTPBadRequest(body=str(err), request=request, content_type='text/plain') def is_user_meta(server_type, key): """ Tests if a header key starts with and is longer than the user metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= 8 + len(server_type): return False return key.lower().startswith(get_user_meta_prefix(server_type)) def is_sys_meta(server_type, key): """ Tests if a header key starts with and is longer than the system metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= 11 + len(server_type): return False return key.lower().startswith(get_sys_meta_prefix(server_type)) def is_sys_or_user_meta(server_type, key): """ Tests if a header key starts with and is longer than the user or system metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ return is_user_meta(server_type, key) or is_sys_meta(server_type, key) def strip_user_meta_prefix(server_type, key): """ Removes the user metadata prefix for a given server type from the start of a header key. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: stripped header key """ return key[len(get_user_meta_prefix(server_type)):] def strip_sys_meta_prefix(server_type, key): """ Removes the system metadata prefix for a given server type from the start of a header key. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: stripped header key """ return key[len(get_sys_meta_prefix(server_type)):] def get_user_meta_prefix(server_type): """ Returns the prefix for user metadata headers for given server type. This prefix defines the namespace for headers that will be persisted by backend servers. :param server_type: type of backend server i.e. [account|container|object] :returns: prefix string for server type's user metadata headers """ return 'x-%s-%s-' % (server_type.lower(), 'meta') def get_sys_meta_prefix(server_type): """ Returns the prefix for system metadata headers for given server type. This prefix defines the namespace for headers that will be persisted by backend servers. :param server_type: type of backend server i.e. [account|container|object] :returns: prefix string for server type's system metadata headers """ return 'x-%s-%s-' % (server_type.lower(), 'sysmeta') def remove_items(headers, condition): """ Removes items from a dict whose keys satisfy the given condition. :param headers: a dict of headers :param condition: a function that will be passed the header key as a single argument and should return True if the header is to be removed. 
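# Hedged illustration of the metadata prefix helpers above (header names
# hypothetical):
#
#     get_user_meta_prefix('object')    -> 'x-object-meta-'
#     get_sys_meta_prefix('container')  -> 'x-container-sysmeta-'
#     is_user_meta('object', 'X-Object-Meta-Color')            -> True
#     strip_user_meta_prefix('object', 'x-object-meta-color')  -> 'color'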
:returns: a dict, possibly empty, of headers that have been removed """ removed = {} keys = filter(condition, headers) removed.update((key, headers.pop(key)) for key in keys) return removed def copy_header_subset(from_r, to_r, condition): """ Will copy desired subset of headers from from_r to to_r. :param from_r: a swob Request or Response :param to_r: a swob Request or Response :param condition: a function that will be passed the header key as a single argument and should return True if the header is to be copied. """ for k, v in from_r.headers.items(): if condition(k): to_r.headers[k] = v class SegmentedIterable(object): """ Iterable that returns the object contents for a large object. :param req: original request object :param app: WSGI application from which segments will come :param listing_iter: iterable yielding the object segments to fetch, along with the byte subranges to fetch, in the form of a tuple (object-path, first-byte, last-byte) or (object-path, None, None) to fetch the whole thing. :param max_get_time: maximum permitted duration of a GET request (seconds) :param logger: logger object :param swift_source: value of swift.source in subrequest environ (just for logging) :param ua_suffix: string to append to user-agent. :param name: name of manifest (used in logging only) :param response_body_length: optional response body length for the response being sent to the client. """ def __init__(self, req, app, listing_iter, max_get_time, logger, ua_suffix, swift_source, name='', response_body_length=None): self.req = req self.app = app self.listing_iter = listing_iter self.max_get_time = max_get_time self.logger = logger self.ua_suffix = " " + ua_suffix self.swift_source = swift_source self.name = name self.response_body_length = response_body_length self.peeked_chunk = None self.app_iter = self._internal_iter() self.validated_first_segment = False self.current_resp = None def _coalesce_requests(self): start_time = time.time() pending_req = None pending_etag = None pending_size = None try: for seg_path, seg_etag, seg_size, first_byte, last_byte \ in self.listing_iter: first_byte = first_byte or 0 go_to_end = last_byte is None or ( seg_size is not None and last_byte == seg_size - 1) if time.time() - start_time > self.max_get_time: raise SegmentError( 'ERROR: While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) # The "multipart-manifest=get" query param ensures that the # segment is a plain old object, not some flavor of large # object; therefore, its etag is its MD5sum and hence we can # check it. path = seg_path + '?multipart-manifest=get' seg_req = make_subrequest( self.req.environ, path=path, method='GET', headers={'x-auth-token': self.req.headers.get( 'x-auth-token')}, agent=('%(orig)s ' + self.ua_suffix), swift_source=self.swift_source) seg_req_rangeval = None if first_byte != 0 or not go_to_end: seg_req_rangeval = "%s-%s" % ( first_byte, '' if go_to_end else last_byte) seg_req.headers['Range'] = "bytes=" + seg_req_rangeval # We can only coalesce if paths match and we know the segment # size (so we can check that the ranges will be allowed) if pending_req and pending_req.path == seg_req.path and \ seg_size is not None: # Make a new Range object so that we don't goof up the # existing one in case of invalid ranges. Note that a # range set with too many individual byteranges is # invalid, so we can combine N valid byteranges and 1 # valid byterange and get an invalid range set. 
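# Hedged example of the coalescing below (values hypothetical): if the
# pending subrequest already carries "Range: bytes=0-99" and the next
# listing entry references the same segment path with first_byte=100 and
# last_byte=199, the candidate header becomes "bytes=0-99,100-199"; it is
# only kept if Range(...).ranges_for_length(seg_size) still accepts the
# combined set, otherwise the pending request is yielded as-is and the new
# subrequest becomes the pending one.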
if pending_req.range: new_range_str = str(pending_req.range) else: new_range_str = "bytes=0-%d" % (seg_size - 1) if seg_req.range: new_range_str += "," + seg_req_rangeval else: new_range_str += ",0-%d" % (seg_size - 1) if Range(new_range_str).ranges_for_length(seg_size): # Good news! We can coalesce the requests pending_req.headers['Range'] = new_range_str continue # else, Too many ranges, or too much backtracking, or ... if pending_req: yield pending_req, pending_etag, pending_size pending_req = seg_req pending_etag = seg_etag pending_size = seg_size except ListingIterError: e_type, e_value, e_traceback = sys.exc_info() if time.time() - start_time > self.max_get_time: raise SegmentError( 'ERROR: While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) if pending_req: yield pending_req, pending_etag, pending_size six.reraise(e_type, e_value, e_traceback) if time.time() - start_time > self.max_get_time: raise SegmentError( 'ERROR: While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) if pending_req: yield pending_req, pending_etag, pending_size def _internal_iter(self): bytes_left = self.response_body_length try: for seg_req, seg_etag, seg_size in self._coalesce_requests(): seg_resp = seg_req.get_response(self.app) if not is_success(seg_resp.status_int): close_if_possible(seg_resp.app_iter) raise SegmentError( 'ERROR: While processing manifest %s, ' 'got %d while retrieving %s' % (self.name, seg_resp.status_int, seg_req.path)) elif ((seg_etag and (seg_resp.etag != seg_etag)) or (seg_size and (seg_resp.content_length != seg_size) and not seg_req.range)): # The content-length check is for security reasons. Seems # possible that an attacker could upload a >1mb object and # then replace it with a much smaller object with same # etag. Then create a big nested SLO that calls that # object many times which would hammer our obj servers. If # this is a range request, don't check content-length # because it won't match. close_if_possible(seg_resp.app_iter) raise SegmentError( 'Object segment no longer valid: ' '%(path)s etag: %(r_etag)s != %(s_etag)s or ' '%(r_size)s != %(s_size)s.' 
% {'path': seg_req.path, 'r_etag': seg_resp.etag, 'r_size': seg_resp.content_length, 's_etag': seg_etag, 's_size': seg_size}) else: self.current_resp = seg_resp seg_hash = None if seg_resp.etag and not seg_req.headers.get('Range'): # Only calculate the MD5 if it we can use it to validate seg_hash = hashlib.md5() document_iters = maybe_multipart_byteranges_to_document_iters( seg_resp.app_iter, seg_resp.headers['Content-Type']) for chunk in itertools.chain.from_iterable(document_iters): if seg_hash: seg_hash.update(chunk) if bytes_left is None: yield chunk elif bytes_left >= len(chunk): yield chunk bytes_left -= len(chunk) else: yield chunk[:bytes_left] bytes_left -= len(chunk) close_if_possible(seg_resp.app_iter) raise SegmentError( 'Too many bytes for %(name)s; truncating in ' '%(seg)s with %(left)d bytes left' % {'name': self.name, 'seg': seg_req.path, 'left': bytes_left}) close_if_possible(seg_resp.app_iter) if seg_hash and seg_hash.hexdigest() != seg_resp.etag: raise SegmentError( "Bad MD5 checksum in %(name)s for %(seg)s: headers had" " %(etag)s, but object MD5 was actually %(actual)s" % {'seg': seg_req.path, 'etag': seg_resp.etag, 'name': self.name, 'actual': seg_hash.hexdigest()}) if bytes_left: raise SegmentError( 'Not enough bytes for %s; closing connection' % self.name) except (ListingIterError, SegmentError): self.logger.exception(_('ERROR: An error occurred ' 'while retrieving segments')) raise finally: if self.current_resp: close_if_possible(self.current_resp.app_iter) def app_iter_range(self, *a, **kw): """ swob.Response will only respond with a 206 status in certain cases; one of those is if the body iterator responds to .app_iter_range(). However, this object (or really, its listing iter) is smart enough to handle the range stuff internally, so we just no-op this out for swob. """ return self def validate_first_segment(self): """ Start fetching object data to ensure that the first segment (if any) is valid. This is to catch cases like "first segment is missing" or "first segment's etag doesn't match manifest". Note: this does not validate that you have any segments. A zero-segment large object is not erroneous; it is just empty. """ if self.validated_first_segment: return self.validated_first_segment = True try: self.peeked_chunk = next(self.app_iter) except StopIteration: pass def __iter__(self): if self.peeked_chunk is not None: pc = self.peeked_chunk self.peeked_chunk = None return itertools.chain([pc], self.app_iter) else: return self.app_iter def close(self): """ Called when the client disconnect. Ensure that the connection to the backend server is closed. """ close_if_possible(self.app_iter) def http_response_to_document_iters(response, read_chunk_size=4096): """ Takes a successful object-GET HTTP response and turns it into an iterator of (first-byte, last-byte, length, headers, body-file) 5-tuples. The response must either be a 200 or a 206; if you feed in a 204 or something similar, this probably won't work. :param response: HTTP response, like from bufferedhttp.http_connect(), not a swob.Response. 
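# Hedged illustration (values hypothetical): a plain 200 response with
# Content-Length: 100 becomes a single tuple (0, 99, 100, headers, body);
# a 206 with "Content-Range: bytes 5-9/100" becomes (5, 9, 100, headers,
# body); a multipart/byteranges 206 yields one such tuple per MIME part.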
""" chunked = is_chunked(dict(response.getheaders())) if response.status == 200: if chunked: # Single "range" that's the whole object with an unknown length return iter([(0, None, None, response.getheaders(), response)]) # Single "range" that's the whole object content_length = int(response.getheader('Content-Length')) return iter([(0, content_length - 1, content_length, response.getheaders(), response)]) content_type, params_list = parse_content_type( response.getheader('Content-Type')) if content_type != 'multipart/byteranges': # Single range; no MIME framing, just the bytes. The start and end # byte indices are in the Content-Range header. start, end, length = parse_content_range( response.getheader('Content-Range')) return iter([(start, end, length, response.getheaders(), response)]) else: # Multiple ranges; the response body is a multipart/byteranges MIME # document, and we have to parse it using the MIME boundary # extracted from the Content-Type header. params = dict(params_list) return multipart_byteranges_to_document_iters( response, params['boundary'], read_chunk_size) swift-2.7.0/swift/common/swob.py0000664000567000056710000015100112675204037017771 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Implementation of WSGI Request and Response objects. This library has a very similar API to Webob. It wraps WSGI request environments and response values into objects that are more friendly to interact with. Why Swob and not just use WebOb? By Michael Barton We used webob for years. The main problem was that the interface wasn't stable. For a while, each of our several test suites required a slightly different version of webob to run, and none of them worked with the then-current version. It was a huge headache, so we just scrapped it. This is kind of a ton of code, but it's also been a huge relief to not have to scramble to add a bunch of code branches all over the place to keep Swift working every time webob decides some interface needs to change. 
""" from collections import defaultdict, MutableMapping import time from functools import partial from datetime import datetime, timedelta, tzinfo from email.utils import parsedate import re import random import functools import inspect import six from six import BytesIO from six import StringIO from six.moves import urllib from swift.common.header_key_dict import HeaderKeyDict from swift.common.utils import reiterate, split_path, Timestamp, pairs, \ close_if_possible from swift.common.exceptions import InvalidTimestamp RESPONSE_REASONS = { 100: ('Continue', ''), 200: ('OK', ''), 201: ('Created', ''), 202: ('Accepted', 'The request is accepted for processing.'), 204: ('No Content', ''), 206: ('Partial Content', ''), 301: ('Moved Permanently', 'The resource has moved permanently.'), 302: ('Found', 'The resource has moved temporarily.'), 303: ('See Other', 'The response to the request can be found under a ' 'different URI.'), 304: ('Not Modified', ''), 307: ('Temporary Redirect', 'The resource has moved temporarily.'), 400: ('Bad Request', 'The server could not comply with the request since ' 'it is either malformed or otherwise incorrect.'), 401: ('Unauthorized', 'This server could not verify that you are ' 'authorized to access the document you requested.'), 402: ('Payment Required', 'Access was denied for financial reasons.'), 403: ('Forbidden', 'Access was denied to this resource.'), 404: ('Not Found', 'The resource could not be found.'), 405: ('Method Not Allowed', 'The method is not allowed for this ' 'resource.'), 406: ('Not Acceptable', 'The resource is not available in a format ' 'acceptable to your browser.'), 408: ('Request Timeout', 'The server has waited too long for the request ' 'to be sent by the client.'), 409: ('Conflict', 'There was a conflict when trying to complete ' 'your request.'), 410: ('Gone', 'This resource is no longer available.'), 411: ('Length Required', 'Content-Length header required.'), 412: ('Precondition Failed', 'A precondition for this request was not ' 'met.'), 413: ('Request Entity Too Large', 'The body of your request was too ' 'large for this server.'), 414: ('Request URI Too Long', 'The request URI was too long for this ' 'server.'), 415: ('Unsupported Media Type', 'The request media type is not ' 'supported by this server.'), 416: ('Requested Range Not Satisfiable', 'The Range requested is not ' 'available.'), 417: ('Expectation Failed', 'Expectation failed.'), 422: ('Unprocessable Entity', 'Unable to process the contained ' 'instructions'), 499: ('Client Disconnect', 'The client was disconnected during request.'), 500: ('Internal Error', 'The server has either erred or is incapable of ' 'performing the requested operation.'), 501: ('Not Implemented', 'The requested method is not implemented by ' 'this server.'), 502: ('Bad Gateway', 'Bad gateway.'), 503: ('Service Unavailable', 'The server is currently unavailable. ' 'Please try again at a later time.'), 504: ('Gateway Timeout', 'A timeout has occurred speaking to a ' 'backend server.'), 507: ('Insufficient Storage', 'There was not enough space to save the ' 'resource. 
Drive: %(drive)s'), } MAX_RANGE_OVERLAPS = 2 MAX_NONASCENDING_RANGES = 8 MAX_RANGES = 50 class _UTC(tzinfo): """ A tzinfo class for datetime objects that returns a 0 timedelta (UTC time) """ def dst(self, dt): return timedelta(0) utcoffset = dst def tzname(self, dt): return 'UTC' UTC = _UTC() class WsgiBytesIO(BytesIO): """ This class adds support for the additional wsgi.input methods defined on eventlet.wsgi.Input to the BytesIO class which would otherwise be a fine stand-in for the file-like object in the WSGI environment. """ def set_hundred_continue_response_headers(self, headers): pass def send_hundred_continue_response(self): pass def _datetime_property(header): """ Set and retrieve the datetime value of self.headers[header] (Used by both request and response) The header is parsed on retrieval and a datetime object is returned. The header can be set using a datetime, numeric value, or str. If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Content-Length" """ def getter(self): value = self.headers.get(header, None) if value is not None: try: parts = parsedate(self.headers[header])[:7] return datetime(*(parts + (UTC,))) except Exception: return None def setter(self, value): if isinstance(value, (float,) + six.integer_types): self.headers[header] = time.strftime( "%a, %d %b %Y %H:%M:%S GMT", time.gmtime(value)) elif isinstance(value, datetime): self.headers[header] = value.strftime("%a, %d %b %Y %H:%M:%S GMT") else: self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s header as a datetime, " "set it with a datetime, int, or str") % header) def _header_property(header): """ Set and retrieve the value of self.headers[header] (Used by both request and response) If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Transfer-Encoding" """ def getter(self): return self.headers.get(header, None) def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header" % header) def _header_int_property(header): """ Set and retrieve the value of self.headers[header] (Used by both request and response) On retrieval, it converts values to integers. If a value of None is given, the header is deleted. :param header: name of the header, e.g. "Content-Length" """ def getter(self): val = self.headers.get(header, None) if val is not None: val = int(val) return val def setter(self, value): self.headers[header] = value return property(getter, setter, doc="Retrieve and set the %s header as an int" % header) def header_to_environ_key(header_name): header_name = 'HTTP_' + header_name.replace('-', '_').upper() if header_name == 'HTTP_CONTENT_LENGTH': return 'CONTENT_LENGTH' if header_name == 'HTTP_CONTENT_TYPE': return 'CONTENT_TYPE' return header_name class HeaderEnvironProxy(MutableMapping): """ A dict-like object that proxies requests to a wsgi environ, rewriting header keys to environ keys. 
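# The key rewriting is done by header_to_environ_key() above; a hedged
# illustration (header names hypothetical):
#
#     header_to_environ_key('X-Object-Meta-Color') -> 'HTTP_X_OBJECT_META_COLOR'
#     header_to_environ_key('Content-Length')      -> 'CONTENT_LENGTH'
#     header_to_environ_key('Content-Type')        -> 'CONTENT_TYPE'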
For example, headers['Content-Range'] sets and gets the value of headers.environ['HTTP_CONTENT_RANGE'] """ def __init__(self, environ): self.environ = environ def __iter__(self): for k in self.keys(): yield k def __len__(self): return len(self.keys()) def __getitem__(self, key): return self.environ[header_to_environ_key(key)] def __setitem__(self, key, value): if value is None: self.environ.pop(header_to_environ_key(key), None) elif isinstance(value, six.text_type): self.environ[header_to_environ_key(key)] = value.encode('utf-8') else: self.environ[header_to_environ_key(key)] = str(value) def __contains__(self, key): return header_to_environ_key(key) in self.environ def __delitem__(self, key): del self.environ[header_to_environ_key(key)] def keys(self): keys = [key[5:].replace('_', '-').title() for key in self.environ if key.startswith('HTTP_')] if 'CONTENT_LENGTH' in self.environ: keys.append('Content-Length') if 'CONTENT_TYPE' in self.environ: keys.append('Content-Type') return keys def _resp_status_property(): """ Set and retrieve the value of Response.status On retrieval, it concatenates status_int and title. When set to a str, it splits status_int and title apart. When set to an integer, retrieves the correct title for that response code from the RESPONSE_REASONS dict. """ def getter(self): return '%s %s' % (self.status_int, self.title) def setter(self, value): if isinstance(value, (int, long)): self.status_int = value self.explanation = self.title = RESPONSE_REASONS[value][0] else: if isinstance(value, six.text_type): value = value.encode('utf-8') self.status_int = int(value.split(' ', 1)[0]) self.explanation = self.title = value.split(' ', 1)[1] return property(getter, setter, doc="Retrieve and set the Response status, e.g. '200 OK'") def _resp_body_property(): """ Set and retrieve the value of Response.body If necessary, it will consume Response.app_iter to create a body. On assignment, encodes unicode values to utf-8, and sets the content-length to the length of the str. """ def getter(self): if not self._body: if not self._app_iter: return '' self._body = ''.join(self._app_iter) self._app_iter = None return self._body def setter(self, value): if isinstance(value, six.text_type): value = value.encode('utf-8') if isinstance(value, str): self.content_length = len(value) self._app_iter = None self._body = value return property(getter, setter, doc="Retrieve and set the Response body str") def _resp_etag_property(): """ Set and retrieve Response.etag This may be broken for etag use cases other than Swift's. Quotes strings when assigned and unquotes when read, for compatibility with webob. """ def getter(self): etag = self.headers.get('etag', None) if etag: etag = etag.replace('"', '') return etag def setter(self, value): if value is None: self.headers['etag'] = None else: self.headers['etag'] = '"%s"' % value return property(getter, setter, doc="Retrieve and set the response Etag header") def _resp_content_type_property(): """ Set and retrieve Response.content_type Strips off any charset when retrieved -- that is accessible via Response.charset. """ def getter(self): if 'content-type' in self.headers: return self.headers.get('content-type').split(';')[0] def setter(self, value): self.headers['content-type'] = value return property(getter, setter, doc="Retrieve and set the response Content-Type header") def _resp_charset_property(): """ Set and retrieve Response.charset On retrieval, separates the charset from the content-type. 
On assignment, removes any existing charset from the content-type and appends the new one. """ def getter(self): if '; charset=' in self.headers['content-type']: return self.headers['content-type'].split('; charset=')[1] def setter(self, value): if 'content-type' in self.headers: self.headers['content-type'] = self.headers['content-type'].split( ';')[0] if value: self.headers['content-type'] += '; charset=' + value return property(getter, setter, doc="Retrieve and set the response charset") def _resp_app_iter_property(): """ Set and retrieve Response.app_iter Mostly a pass-through to Response._app_iter; it's a property so it can zero out an existing content-length on assignment. """ def getter(self): return self._app_iter def setter(self, value): if isinstance(value, (list, tuple)): self.content_length = sum(map(len, value)) elif value is not None: self.content_length = None self._body = None self._app_iter = value return property(getter, setter, doc="Retrieve and set the response app_iter") def _req_fancy_property(cls, header, even_if_nonexistent=False): """ Set and retrieve "fancy" properties. On retrieval, these properties return a class that takes the value of the header as the only argument to their constructor. For assignment, those classes should implement a __str__ that converts them back to their header values. :param header: name of the header, e.g. "Accept" :param even_if_nonexistent: Return a value even if the header does not exist. Classes using this should be prepared to accept None as a parameter. """ def getter(self): try: if header in self.headers or even_if_nonexistent: return cls(self.headers.get(header)) except ValueError: return None def setter(self, value): self.headers[header] = value return property(getter, setter, doc=("Retrieve and set the %s " "property in the WSGI environ, as a %s object") % (header, cls.__name__)) class Range(object): """ Wraps a Request's Range header as a friendly object. After initialization, "range.ranges" is populated with a list of (start, end) tuples denoting the requested ranges. If there were any syntactically-invalid byte-range-spec values, the constructor will raise a ValueError, per the relevant RFC: "The recipient of a byte-range-set that includes one or more syntactically invalid byte-range-spec values MUST ignore the header field that includes that byte-range-set." According to the RFC 2616 specification, the following cases will be all considered as syntactically invalid, thus, a ValueError is thrown so that the range header will be ignored. If the range value contains at least one of the following cases, the entire range is considered invalid, ValueError will be thrown so that the header will be ignored. 1. value not starts with bytes= 2. range value start is greater than the end, eg. bytes=5-3 3. range does not have start or end, eg. bytes=- 4. range does not have hyphen, eg. bytes=45 5. range value is non numeric 6. any combination of the above Every syntactically valid range will be added into the ranges list even when some of the ranges may not be satisfied by underlying content. :param headerval: value of the header as a str """ def __init__(self, headerval): headerval = headerval.replace(' ', '') if not headerval.lower().startswith('bytes='): raise ValueError('Invalid Range header: %s' % headerval) self.ranges = [] for rng in headerval[6:].split(','): # Check if the range has required hyphen. 
if rng.find('-') == -1: raise ValueError('Invalid Range header: %s' % headerval) start, end = rng.split('-', 1) if start: # when start contains non numeric value, this also causes # ValueError start = int(start) else: start = None if end: # when end contains non numeric value, this also causes # ValueError end = int(end) if start is not None and end < start: raise ValueError('Invalid Range header: %s' % headerval) else: end = None if start is None: raise ValueError('Invalid Range header: %s' % headerval) self.ranges.append((start, end)) def __str__(self): string = 'bytes=' for i, (start, end) in enumerate(self.ranges): if start is not None: string += str(start) string += '-' if end is not None: string += str(end) if i < len(self.ranges) - 1: string += ',' return string def ranges_for_length(self, length): """ This method is used to return multiple ranges for a given length which should represent the length of the underlying content. The constructor method __init__ made sure that any range in ranges list is syntactically valid. So if length is None or size of the ranges is zero, then the Range header should be ignored which will eventually make the response to be 200. If an empty list is returned by this method, it indicates that there are unsatisfiable ranges found in the Range header, 416 will be returned. if a returned list has at least one element, the list indicates that there is at least one range valid and the server should serve the request with a 206 status code. The start value of each range represents the starting position in the content, the end value represents the ending position. This method purposely adds 1 to the end number because the spec defines the Range to be inclusive. The Range spec can be found at the following link: http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35.1 :param length: length of the underlying content """ # not syntactically valid ranges, must ignore if length is None or not self.ranges or self.ranges == []: return None all_ranges = [] for single_range in self.ranges: begin, end = single_range # The possible values for begin and end are # None, 0, or a positive numeric number if begin is None: if end == 0: # this is the bytes=-0 case continue elif end > length: # This is the case where the end is greater than the # content length, as the RFC 2616 stated, the entire # content should be returned. all_ranges.append((0, length)) else: all_ranges.append((length - end, length)) continue # begin can only be 0 and numeric value from this point on if end is None: if begin < length: all_ranges.append((begin, length)) else: # the begin position is greater than or equal to the # content length; skip and move on to the next range continue # end can only be 0 or numeric value elif begin < length: # the begin position is valid, take the min of end + 1 or # the total length of the content all_ranges.append((begin, min(end + 1, length))) # RFC 7233 section 6.1 ("Denial-of-Service Attacks Using Range") says: # # Unconstrained multiple range requests are susceptible to denial-of- # service attacks because the effort required to request many # overlapping ranges of the same data is tiny compared to the time, # memory, and bandwidth consumed by attempting to serve the requested # data in many parts. Servers ought to ignore, coalesce, or reject # egregious range requests, such as requests for more than two # overlapping ranges or for many small ranges in a single set, # particularly when the ranges are requested out of order for no # apparent reason. 
Multipart range requests are not designed to # support random access. # # We're defining "egregious" here as: # # * more than 100 requested ranges OR # * more than 2 overlapping ranges OR # * more than 8 non-ascending-order ranges if len(all_ranges) > MAX_RANGES: return [] overlaps = 0 for ((start1, end1), (start2, end2)) in pairs(all_ranges): if ((start1 < start2 < end1) or (start1 < end2 < end1) or (start2 < start1 < end2) or (start2 < end1 < end2)): overlaps += 1 if overlaps > MAX_RANGE_OVERLAPS: return [] ascending = True for start1, start2 in zip(all_ranges, all_ranges[1:]): if start1 > start2: ascending = False break if not ascending and len(all_ranges) >= MAX_NONASCENDING_RANGES: return [] return all_ranges class Match(object): """ Wraps a Request's If-[None-]Match header as a friendly object. :param headerval: value of the header as a str """ def __init__(self, headerval): self.tags = set() for tag in headerval.split(', '): if tag.startswith('"') and tag.endswith('"'): self.tags.add(tag[1:-1]) else: self.tags.add(tag) def __contains__(self, val): return '*' in self.tags or val in self.tags class Accept(object): """ Wraps a Request's Accept header as a friendly object. :param headerval: value of the header as a str """ # RFC 2616 section 2.2 token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' qdtext = r'[^"]' quoted_pair = r'(?:\\.)' quoted_string = r'"(?:' + qdtext + r'|' + quoted_pair + r')*"' extension = (r'(?:\s*;\s*(?:' + token + r")\s*=\s*" + r'(?:' + token + r'|' + quoted_string + r'))') acc = (r'^\s*(' + token + r')/(' + token + r')(' + extension + r'*?\s*)$') acc_pattern = re.compile(acc) def __init__(self, headerval): self.headerval = headerval def _get_types(self): types = [] if not self.headerval: return [] for typ in self.headerval.split(','): type_parms = self.acc_pattern.findall(typ) if not type_parms: raise ValueError('Invalid accept header') typ, subtype, parms = type_parms[0] parms = [p.strip() for p in parms.split(';') if p.strip()] seen_q_already = False quality = 1.0 for parm in parms: name, value = parm.split('=') name = name.strip() value = value.strip() if name == 'q': if seen_q_already: raise ValueError('Multiple "q" params') seen_q_already = True quality = float(value) pattern = '^' + \ (self.token if typ == '*' else re.escape(typ)) + '/' + \ (self.token if subtype == '*' else re.escape(subtype)) + '$' types.append((pattern, quality, '*' not in (typ, subtype))) # sort candidates by quality, then whether or not there were globs types.sort(reverse=True, key=lambda t: (t[1], t[2])) return [t[0] for t in types] def best_match(self, options): """ Returns the item from "options" that best matches the accept header. Returns None if no available options are acceptable to the client. :param options: a list of content-types the server can respond with """ try: types = self._get_types() except ValueError: return None if not types and options: return options[0] for pattern in types: for option in options: if re.match(pattern, option): return option return None def __repr__(self): return self.headerval def _req_environ_property(environ_field): """ Set and retrieve value of the environ_field entry in self.environ. 
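# Hedged illustration of Range.ranges_for_length() above (values
# hypothetical): Range('bytes=0-99,-10').ranges_for_length(1000) returns
# [(0, 100), (990, 1000)] -- end positions are exclusive -- while a fully
# unsatisfiable set such as Range('bytes=1000-').ranges_for_length(100)
# returns [], which callers turn into a 416 response.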
(Used by both request and response) """ def getter(self): return self.environ.get(environ_field, None) def setter(self, value): if isinstance(value, six.text_type): self.environ[environ_field] = value.encode('utf-8') else: self.environ[environ_field] = value return property(getter, setter, doc=("Get and set the %s property " "in the WSGI environment") % environ_field) def _req_body_property(): """ Set and retrieve the Request.body parameter. It consumes wsgi.input and returns the results. On assignment, uses a WsgiBytesIO to create a new wsgi.input. """ def getter(self): body = self.environ['wsgi.input'].read() self.environ['wsgi.input'] = WsgiBytesIO(body) return body def setter(self, value): self.environ['wsgi.input'] = WsgiBytesIO(value) self.environ['CONTENT_LENGTH'] = str(len(value)) return property(getter, setter, doc="Get and set the request body str") def _host_url_property(): """ Retrieves the best guess that can be made for an absolute location up to the path, for example: https://host.com:1234 """ def getter(self): if 'HTTP_HOST' in self.environ: host = self.environ['HTTP_HOST'] else: host = '%s:%s' % (self.environ['SERVER_NAME'], self.environ['SERVER_PORT']) scheme = self.environ.get('wsgi.url_scheme', 'http') if scheme == 'http' and host.endswith(':80'): host, port = host.rsplit(':', 1) elif scheme == 'https' and host.endswith(':443'): host, port = host.rsplit(':', 1) return '%s://%s' % (scheme, host) return property(getter, doc="Get url for request/response up to path") def is_chunked(headers): te = None for key in headers: if key.lower() == 'transfer-encoding': te = headers.get(key) if te: encodings = te.split(',') if len(encodings) > 1: raise AttributeError('Unsupported Transfer-Coding header' ' value specified in Transfer-Encoding' ' header') # If there are more than one transfer encoding value, the last # one must be chunked, see RFC 2616 Sec. 3.6 if encodings[-1].lower() == 'chunked': return True else: raise ValueError('Invalid Transfer-Encoding header value') else: return False class Request(object): """ WSGI Request object. """ range = _req_fancy_property(Range, 'range') if_none_match = _req_fancy_property(Match, 'if-none-match') accept = _req_fancy_property(Accept, 'accept', True) method = _req_environ_property('REQUEST_METHOD') referrer = referer = _req_environ_property('HTTP_REFERER') script_name = _req_environ_property('SCRIPT_NAME') path_info = _req_environ_property('PATH_INFO') host = _req_environ_property('HTTP_HOST') host_url = _host_url_property() remote_addr = _req_environ_property('REMOTE_ADDR') remote_user = _req_environ_property('REMOTE_USER') user_agent = _req_environ_property('HTTP_USER_AGENT') query_string = _req_environ_property('QUERY_STRING') if_match = _req_fancy_property(Match, 'if-match') body_file = _req_environ_property('wsgi.input') content_length = _header_int_property('content-length') if_modified_since = _datetime_property('if-modified-since') if_unmodified_since = _datetime_property('if-unmodified-since') body = _req_body_property() charset = None _params_cache = None _timestamp = None acl = _req_environ_property('swob.ACL') def __init__(self, environ): self.environ = environ self.headers = HeaderEnvironProxy(self.environ) @classmethod def blank(cls, path, environ=None, headers=None, body=None, **kwargs): """ Create a new request object with the given parameters, and an environment otherwise filled in with non-surprising default values. 
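# ---------------------------------------------------------------------------
# Editor's illustrative example (not part of the original swob.py):
# is_chunked() takes any dict-like headers mapping, matches the header name
# case-insensitively, and only accepts a single "chunked" transfer coding.
from swift.common.swob import is_chunked

print(is_chunked({}))                                # False
print(is_chunked({'Transfer-Encoding': 'chunked'}))  # True
try:
    is_chunked({'transfer-encoding': 'gzip, chunked'})
except AttributeError:
    print('multiple transfer codings are rejected')
# ---------------------------------------------------------------------------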
:param path: encoded, parsed, and unquoted into PATH_INFO :param environ: WSGI environ dictionary :param headers: HTTP headers :param body: stuffed in a WsgiBytesIO and hung on wsgi.input :param kwargs: any environ key with an property setter """ headers = headers or {} environ = environ or {} if isinstance(path, six.text_type): path = path.encode('utf-8') parsed_path = urllib.parse.urlparse(path) server_name = 'localhost' if parsed_path.netloc: server_name = parsed_path.netloc.split(':', 1)[0] server_port = parsed_path.port if server_port is None: server_port = {'http': 80, 'https': 443}.get(parsed_path.scheme, 80) if parsed_path.scheme and parsed_path.scheme not in ['http', 'https']: raise TypeError('Invalid scheme: %s' % parsed_path.scheme) env = { 'REQUEST_METHOD': 'GET', 'SCRIPT_NAME': '', 'QUERY_STRING': parsed_path.query, 'PATH_INFO': urllib.parse.unquote(parsed_path.path), 'SERVER_NAME': server_name, 'SERVER_PORT': str(server_port), 'HTTP_HOST': '%s:%d' % (server_name, server_port), 'SERVER_PROTOCOL': 'HTTP/1.0', 'wsgi.version': (1, 0), 'wsgi.url_scheme': parsed_path.scheme or 'http', 'wsgi.errors': StringIO(), 'wsgi.multithread': False, 'wsgi.multiprocess': False } env.update(environ) if body is not None: env['wsgi.input'] = WsgiBytesIO(body) env['CONTENT_LENGTH'] = str(len(body)) elif 'wsgi.input' not in env: env['wsgi.input'] = WsgiBytesIO() req = Request(env) for key, val in headers.items(): req.headers[key] = val for key, val in kwargs.items(): prop = getattr(Request, key, None) if prop and isinstance(prop, property): try: setattr(req, key, val) except AttributeError: pass else: continue raise TypeError("got unexpected keyword argument %r" % key) return req @property def params(self): "Provides QUERY_STRING parameters as a dictionary" if self._params_cache is None: if 'QUERY_STRING' in self.environ: self._params_cache = dict( urllib.parse.parse_qsl(self.environ['QUERY_STRING'], True)) else: self._params_cache = {} return self._params_cache str_params = params @property def timestamp(self): """ Provides HTTP_X_TIMESTAMP as a :class:`~swift.common.utils.Timestamp` """ if self._timestamp is None: try: raw_timestamp = self.environ['HTTP_X_TIMESTAMP'] except KeyError: raise InvalidTimestamp('Missing X-Timestamp header') try: self._timestamp = Timestamp(raw_timestamp) except ValueError: raise InvalidTimestamp('Invalid X-Timestamp header') return self._timestamp @property def path_qs(self): """The path of the request, without host but with query string.""" path = self.path if self.query_string: path += '?' + self.query_string return path @property def path(self): "Provides the full path of the request, excluding the QUERY_STRING" return urllib.parse.quote(self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO']) @property def swift_entity_path(self): """ Provides the account/container/object path, sans API version. This can be useful when constructing a path to send to a backend server, as that path will need everything after the "/v1". """ _ver, entity_path = self.split_path(1, 2, rest_with_last=True) if entity_path is not None: return '/' + entity_path @property def is_chunked(self): return is_chunked(self.headers) @property def url(self): "Provides the full url of the request" return self.host_url + self.path_qs def as_referer(self): return self.method + ' ' + self.url def path_info_pop(self): """ Takes one path portion (delineated by slashes) from the path_info, and appends it to the script_name. Returns the path segment. 
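# ---------------------------------------------------------------------------
# Editor's illustrative example (not part of the original swob.py): building
# a request entirely in memory with Request.blank() and reading back the
# derived properties defined above.
from swift.common.swob import Request

req = Request.blank('/v1/AUTH_test/cont/obj?limit=5&format=json',
                    method='PUT', body='some data',
                    headers={'X-Object-Meta-Color': 'blue'})
print(req.method)              # 'PUT'
print(req.path)                # '/v1/AUTH_test/cont/obj'
print(req.path_qs)             # '/v1/AUTH_test/cont/obj?limit=5&format=json'
print(req.params['limit'])     # '5'
print(req.content_length)      # 9
print(req.swift_entity_path)   # '/AUTH_test/cont/obj'
# ---------------------------------------------------------------------------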
""" path_info = self.path_info if not path_info or not path_info.startswith('/'): return None try: slash_loc = path_info.index('/', 1) except ValueError: slash_loc = len(path_info) self.script_name += path_info[:slash_loc] self.path_info = path_info[slash_loc:] return path_info[1:slash_loc] def copy_get(self): """ Makes a copy of the request, converting it to a GET. """ env = self.environ.copy() env.update({ 'REQUEST_METHOD': 'GET', 'CONTENT_LENGTH': '0', 'wsgi.input': WsgiBytesIO(), }) return Request(env) def call_application(self, application): """ Calls the application with this request's environment. Returns the status, headers, and app_iter for the response as a tuple. :param application: the WSGI application to call """ output = [] captured = [] def start_response(status, headers, exc_info=None): captured[:] = [status, headers, exc_info] return output.append app_iter = application(self.environ, start_response) if not app_iter: app_iter = output if not captured: app_iter = reiterate(app_iter) return (captured[0], captured[1], app_iter) def get_response(self, application): """ Calls the application with this request's environment. Returns a Response object that wraps up the application's result. :param application: the WSGI application to call """ status, headers, app_iter = self.call_application(application) return Response(status=status, headers=dict(headers), app_iter=app_iter, request=self) def split_path(self, minsegs=1, maxsegs=None, rest_with_last=False): """ Validate and split the Request's path. **Examples**:: ['a'] = split_path('/a') ['a', None] = split_path('/a', 1, 2) ['a', 'c'] = split_path('/a/c', 1, 2) ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True) :param minsegs: Minimum number of segments to be extracted :param maxsegs: Maximum number of segments to be extracted :param rest_with_last: If True, trailing data will be returned as part of last segment. If False, and there is trailing data, raises ValueError. :returns: list of segments with a length of maxsegs (non-existent segments will return as None) :raises: ValueError if given an invalid path """ return split_path( self.environ.get('SCRIPT_NAME', '') + self.environ['PATH_INFO'], minsegs, maxsegs, rest_with_last) def message_length(self): """ Properly determine the message length for this request. It will return an integer if the headers explicitly contain the message length, or None if the headers don't contain a length. The ValueError exception will be raised if the headers are invalid. :raises ValueError: if either transfer-encoding or content-length headers have bad values :raises AttributeError: if the last value of the transfer-encoding header is not "chunked" """ if not is_chunked(self.headers): # Because we are not using chunked transfer encoding we can pay # attention to the content-length header. 
fsize = self.headers.get('content-length', None) if fsize is not None: try: fsize = int(fsize) except ValueError: raise ValueError('Invalid Content-Length header value') else: fsize = None return fsize def content_range_header_value(start, stop, size): return 'bytes %s-%s/%s' % (start, (stop - 1), size) def content_range_header(start, stop, size): return "Content-Range: " + content_range_header_value(start, stop, size) def multi_range_iterator(ranges, content_type, boundary, size, sub_iter_gen): for start, stop in ranges: yield ''.join(['--', boundary, '\r\n', 'Content-Type: ', content_type, '\r\n']) yield content_range_header(start, stop, size) + '\r\n\r\n' sub_iter = sub_iter_gen(start, stop) for chunk in sub_iter: yield chunk yield '\r\n' yield '--' + boundary + '--' class Response(object): """ WSGI Response object. """ content_length = _header_int_property('content-length') content_type = _resp_content_type_property() content_range = _header_property('content-range') etag = _resp_etag_property() status = _resp_status_property() body = _resp_body_property() host_url = _host_url_property() last_modified = _datetime_property('last-modified') location = _header_property('location') accept_ranges = _header_property('accept-ranges') charset = _resp_charset_property() app_iter = _resp_app_iter_property() def __init__(self, body=None, status=200, headers=None, app_iter=None, request=None, conditional_response=False, conditional_etag=None, **kw): self.headers = HeaderKeyDict( [('Content-Type', 'text/html; charset=UTF-8')]) self.conditional_response = conditional_response self._conditional_etag = conditional_etag self.request = request self.body = body self.app_iter = app_iter self.response_iter = None self.status = status self.boundary = "%.32x" % random.randint(0, 256 ** 16) if request: self.environ = request.environ else: self.environ = {} if headers: if self._body and 'Content-Length' in headers: # If body is not empty, prioritize actual body length over # content_length in headers del headers['Content-Length'] self.headers.update(headers) if self.status_int == 401 and 'www-authenticate' not in self.headers: self.headers.update({'www-authenticate': self.www_authenticate()}) for key, value in kw.items(): setattr(self, key, value) # When specifying both 'content_type' and 'charset' in the kwargs, # charset needs to be applied *after* content_type, otherwise charset # can get wiped out when content_type sorts later in dict order. if 'charset' in kw and 'content_type' in kw: self.charset = kw['charset'] @property def conditional_etag(self): """ The conditional_etag keyword argument for Response will allow the conditional match value of a If-Match request to be compared to a non-standard value. This is available for Storage Policies that do not store the client object data verbatim on the storage nodes, but still need support conditional requests. It's most effectively used with X-Backend-Etag-Is-At which would define the additional Metadata key where the original ETag of the clear-form client request data. """ if self._conditional_etag is not None: return self._conditional_etag else: return self.etag def _prepare_for_ranges(self, ranges): """ Prepare the Response for multiple ranges. """ content_size = self.content_length content_type = self.headers.get('content-type') self.content_type = ''.join(['multipart/byteranges;', 'boundary=', self.boundary]) # This section calculates the total size of the response. 
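# ---------------------------------------------------------------------------
# Editor's illustrative example (not part of the original swob.py): the
# module-level Content-Range helpers above take half-open (start, stop)
# pairs, which is why "stop - 1" appears in the emitted inclusive header.
from swift.common.swob import content_range_header_value

print(content_range_header_value(0, 100, 1000))     # 'bytes 0-99/1000'
print(content_range_header_value(950, 1000, 1000))  # 'bytes 950-999/1000'
# ---------------------------------------------------------------------------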
section_header_fixed_len = ( # --boundary\r\n len(self.boundary) + 4 # Content-Type: \r\n + len('Content-Type: ') + len(content_type) + 2 # Content-Range: \r\n; accounted for later + len('Content-Range: ') + 2 # \r\n at end of headers + 2) body_size = 0 for start, end in ranges: body_size += section_header_fixed_len # length of the value of Content-Range, not including the \r\n # since that's already accounted for cr = content_range_header_value(start, end, content_size) body_size += len(cr) # the actual bytes (note: this range is half-open, i.e. begins # with byte and ends with byte , so there's no # fencepost error here) body_size += (end - start) # \r\n prior to --boundary body_size += 2 # --boundary-- terminates the message body_size += len(self.boundary) + 4 self.content_length = body_size self.content_range = None return content_size, content_type def _response_iter(self, app_iter, body): etag = self.conditional_etag if self.conditional_response and self.request: if etag and self.request.if_none_match and \ etag in self.request.if_none_match: self.status = 304 self.content_length = 0 close_if_possible(app_iter) return [''] if etag and self.request.if_match and \ etag not in self.request.if_match: self.status = 412 self.content_length = 0 close_if_possible(app_iter) return [''] if self.status_int == 404 and self.request.if_match \ and '*' in self.request.if_match: # If none of the entity tags match, or if "*" is given and no # current entity exists, the server MUST NOT perform the # requested method, and MUST return a 412 (Precondition # Failed) response. [RFC 2616 section 14.24] self.status = 412 self.content_length = 0 close_if_possible(app_iter) return [''] if self.last_modified and self.request.if_modified_since \ and self.last_modified <= self.request.if_modified_since: self.status = 304 self.content_length = 0 close_if_possible(app_iter) return [''] if self.last_modified and self.request.if_unmodified_since \ and self.last_modified > self.request.if_unmodified_since: self.status = 412 self.content_length = 0 close_if_possible(app_iter) return [''] if self.request and self.request.method == 'HEAD': # We explicitly do NOT want to set self.content_length to 0 here return [''] if self.conditional_response and self.request and \ self.request.range and self.request.range.ranges and \ not self.content_range: ranges = self.request.range.ranges_for_length(self.content_length) if ranges == []: self.status = 416 self.content_length = 0 close_if_possible(app_iter) return [''] elif ranges: range_size = len(ranges) if range_size > 0: # There is at least one valid range in the request, so try # to satisfy the request if range_size == 1: start, end = ranges[0] if app_iter and hasattr(app_iter, 'app_iter_range'): self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return app_iter.app_iter_range(start, end) elif body: self.status = 206 self.content_range = content_range_header_value( start, end, self.content_length) self.content_length = (end - start) return [body[start:end]] elif range_size > 1: if app_iter and hasattr(app_iter, 'app_iter_ranges'): self.status = 206 content_size, content_type = \ self._prepare_for_ranges(ranges) return app_iter.app_iter_ranges(ranges, content_type, self.boundary, content_size) elif body: self.status = 206 content_size, content_type, = \ self._prepare_for_ranges(ranges) def _body_slicer(start, stop): yield body[start:stop] return multi_range_iterator(ranges, content_type, 
self.boundary, content_size, _body_slicer) if app_iter: return app_iter if body is not None: return [body] if self.status_int in RESPONSE_REASONS: title, exp = RESPONSE_REASONS[self.status_int] if exp: body = '

<html><h1>%s</h1><p>%s</p></html>
' % ( title, exp % defaultdict(lambda: 'unknown', self.__dict__)) self.content_length = len(body) return [body] return [''] def fix_conditional_response(self): """ You may call this once you have set the content_length to the whole object length and body or app_iter to reset the content_length properties on the request. It is ok to not call this method, the conditional response will be maintained for you when you __call__ the response. """ self.response_iter = self._response_iter(self.app_iter, self._body) def absolute_location(self): """ Attempt to construct an absolute location. """ if not self.location.startswith('/'): return self.location return self.host_url + self.location def www_authenticate(self): """ Construct a suitable value for WWW-Authenticate response header If we have a request and a valid-looking path, the realm is the account; otherwise we set it to 'unknown'. """ try: vrs, realm, rest = self.request.split_path(2, 3, True) if realm in ('v1.0', 'auth'): realm = 'unknown' except (AttributeError, ValueError): realm = 'unknown' return 'Swift realm="%s"' % urllib.parse.quote(realm) @property def is_success(self): return self.status_int // 100 == 2 def __call__(self, env, start_response): """ Respond to the WSGI request. .. warning:: This will translate any relative Location header value to an absolute URL using the WSGI environment's HOST_URL as a prefix, as RFC 2616 specifies. However, it is quite common to use relative redirects, especially when it is difficult to know the exact HOST_URL the browser would have used when behind several CNAMEs, CDN services, etc. All modern browsers support relative redirects. To skip over RFC enforcement of the Location header value, you may set ``env['swift.leave_relative_location'] = True`` in the WSGI environment. """ if not self.request: self.request = Request(env) self.environ = env if not self.response_iter: self.response_iter = self._response_iter(self.app_iter, self._body) if 'location' in self.headers and \ not env.get('swift.leave_relative_location'): self.location = self.absolute_location() start_response(self.status, self.headers.items()) return self.response_iter class HTTPException(Response, Exception): def __init__(self, *args, **kwargs): Response.__init__(self, *args, **kwargs) Exception.__init__(self, self.status) def wsgify(func): """ A decorator for translating functions which take a swob Request object and return a Response object into WSGI callables. Also catches any raised HTTPExceptions and treats them as a returned Response. """ argspec = inspect.getargspec(func) if argspec.args and argspec.args[0] == 'self': @functools.wraps(func) def _wsgify_self(self, env, start_response): try: return func(self, Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_self else: @functools.wraps(func) def _wsgify_bare(env, start_response): try: return func(Request(env))(env, start_response) except HTTPException as err_resp: return err_resp(env, start_response) return _wsgify_bare class StatusMap(object): """ A dict-like object that returns HTTPException subclasses/factory functions where the given key is the status code. 
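# ---------------------------------------------------------------------------
# Editor's illustrative example (not part of the original swob.py): a tiny
# wsgify-decorated app returning a conditional Response.  Calling the
# Response as a WSGI app (its __call__ above) runs _response_iter(), which
# turns the request's Range header into a 206; a raised HTTPException is
# caught by wsgify and rendered like any other Response.
from swift.common.swob import HTTPNotFound, Request, Response, wsgify

OBJECTS = {'/v1/a/c/o': '0123456789'}

@wsgify
def tiny_object_app(req):
    try:
        body = OBJECTS[req.path]
    except KeyError:
        raise HTTPNotFound(request=req)
    return Response(body=body, request=req, conditional_response=True)

resp = Request.blank('/v1/a/c/o',
                     headers={'Range': 'bytes=0-4'}).get_response(
                         tiny_object_app)
print(resp.status_int)    # 206
print(resp.body)          # '01234'
print(Request.blank('/v1/a/c/missing').get_response(
    tiny_object_app).status_int)  # 404
# ---------------------------------------------------------------------------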
""" def __getitem__(self, key): return partial(HTTPException, status=key) status_map = StatusMap() HTTPOk = status_map[200] HTTPCreated = status_map[201] HTTPAccepted = status_map[202] HTTPNoContent = status_map[204] HTTPMovedPermanently = status_map[301] HTTPFound = status_map[302] HTTPSeeOther = status_map[303] HTTPNotModified = status_map[304] HTTPTemporaryRedirect = status_map[307] HTTPBadRequest = status_map[400] HTTPUnauthorized = status_map[401] HTTPForbidden = status_map[403] HTTPMethodNotAllowed = status_map[405] HTTPNotFound = status_map[404] HTTPNotAcceptable = status_map[406] HTTPRequestTimeout = status_map[408] HTTPConflict = status_map[409] HTTPLengthRequired = status_map[411] HTTPPreconditionFailed = status_map[412] HTTPRequestEntityTooLarge = status_map[413] HTTPRequestedRangeNotSatisfiable = status_map[416] HTTPUnprocessableEntity = status_map[422] HTTPClientDisconnect = status_map[499] HTTPServerError = status_map[500] HTTPInternalServerError = status_map[500] HTTPNotImplemented = status_map[501] HTTPBadGateway = status_map[502] HTTPServiceUnavailable = status_map[503] HTTPInsufficientStorage = status_map[507] swift-2.7.0/swift/common/base_storage_server.py0000664000567000056710000000550712675204037023054 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from swift import __version__ as swift_version from swift.common.utils import public, timing_stats, config_true_value from swift.common.swob import Response class BaseStorageServer(object): """ Implements common OPTIONS method for object, account, container servers. 
""" def __init__(self, conf, **kwargs): self._allowed_methods = None replication_server = conf.get('replication_server', None) if replication_server is not None: replication_server = config_true_value(replication_server) self.replication_server = replication_server @property def server_type(self): raise NotImplementedError( 'Storage nodes have not implemented the Server type.') @property def allowed_methods(self): if self._allowed_methods is None: self._allowed_methods = [] all_methods = inspect.getmembers(self, predicate=callable) if self.replication_server is True: for name, m in all_methods: if (getattr(m, 'publicly_accessible', False) and getattr(m, 'replication', False)): self._allowed_methods.append(name) elif self.replication_server is False: for name, m in all_methods: if (getattr(m, 'publicly_accessible', False) and not getattr(m, 'replication', False)): self._allowed_methods.append(name) elif self.replication_server is None: for name, m in all_methods: if getattr(m, 'publicly_accessible', False): self._allowed_methods.append(name) self._allowed_methods.sort() return self._allowed_methods @public @timing_stats() def OPTIONS(self, req): """ Base handler for OPTIONS requests :param req: swob.Request object :returns: swob.Response object """ # Prepare the default response headers = {'Allow': ', '.join(self.allowed_methods), 'Server': '%s/%s' % (self.server_type, swift_version)} resp = Response(status=200, request=req, headers=headers) return resp swift-2.7.0/swift/common/http.py0000664000567000056710000001076712675204037020013 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. def is_informational(status): """ Check if HTTP status code is informational. :param status: http status code :returns: True if status is successful, else False """ return 100 <= status <= 199 def is_success(status): """ Check if HTTP status code is successful. :param status: http status code :returns: True if status is successful, else False """ return 200 <= status <= 299 def is_redirection(status): """ Check if HTTP status code is redirection. :param status: http status code :returns: True if status is redirection, else False """ return 300 <= status <= 399 def is_client_error(status): """ Check if HTTP status code is client error. :param status: http status code :returns: True if status is client error, else False """ return 400 <= status <= 499 def is_server_error(status): """ Check if HTTP status code is server error. 
:param status: http status code :returns: True if status is server error, else False """ return 500 <= status <= 599 # List of HTTP status codes ############################################################################### # 1xx Informational ############################################################################### HTTP_CONTINUE = 100 HTTP_SWITCHING_PROTOCOLS = 101 HTTP_PROCESSING = 102 # WebDAV HTTP_CHECKPOINT = 103 HTTP_REQUEST_URI_TOO_LONG = 122 ############################################################################### # 2xx Success ############################################################################### HTTP_OK = 200 HTTP_CREATED = 201 HTTP_ACCEPTED = 202 HTTP_NON_AUTHORITATIVE_INFORMATION = 203 HTTP_NO_CONTENT = 204 HTTP_RESET_CONTENT = 205 HTTP_PARTIAL_CONTENT = 206 HTTP_MULTI_STATUS = 207 # WebDAV HTTP_IM_USED = 226 ############################################################################### # 3xx Redirection ############################################################################### HTTP_MULTIPLE_CHOICES = 300 HTTP_MOVED_PERMANENTLY = 301 HTTP_FOUND = 302 HTTP_SEE_OTHER = 303 HTTP_NOT_MODIFIED = 304 HTTP_USE_PROXY = 305 HTTP_SWITCH_PROXY = 306 HTTP_TEMPORARY_REDIRECT = 307 HTTP_RESUME_INCOMPLETE = 308 ############################################################################### # 4xx Client Error ############################################################################### HTTP_BAD_REQUEST = 400 HTTP_UNAUTHORIZED = 401 HTTP_PAYMENT_REQUIRED = 402 HTTP_FORBIDDEN = 403 HTTP_NOT_FOUND = 404 HTTP_METHOD_NOT_ALLOWED = 405 HTTP_NOT_ACCEPTABLE = 406 HTTP_PROXY_AUTHENTICATION_REQUIRED = 407 HTTP_REQUEST_TIMEOUT = 408 HTTP_CONFLICT = 409 HTTP_GONE = 410 HTTP_LENGTH_REQUIRED = 411 HTTP_PRECONDITION_FAILED = 412 HTTP_REQUEST_ENTITY_TOO_LARGE = 413 HTTP_REQUEST_URI_TOO_LONG = 414 HTTP_UNSUPPORTED_MEDIA_TYPE = 415 HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = 416 HTTP_EXPECTATION_FAILED = 417 HTTP_IM_A_TEAPOT = 418 HTTP_UNPROCESSABLE_ENTITY = 422 # WebDAV HTTP_LOCKED = 423 # WebDAV HTTP_FAILED_DEPENDENCY = 424 # WebDAV HTTP_UNORDERED_COLLECTION = 425 HTTP_UPGRADE_REQUIED = 426 HTTP_PRECONDITION_REQUIRED = 428 HTTP_TOO_MANY_REQUESTS = 429 HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = 431 HTTP_NO_RESPONSE = 444 HTTP_RETRY_WITH = 449 HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450 HTTP_CLIENT_CLOSED_REQUEST = 499 ############################################################################### # 5xx Server Error ############################################################################### HTTP_INTERNAL_SERVER_ERROR = 500 HTTP_NOT_IMPLEMENTED = 501 HTTP_BAD_GATEWAY = 502 HTTP_SERVICE_UNAVAILABLE = 503 HTTP_GATEWAY_TIMEOUT = 504 HTTP_VERSION_NOT_SUPPORTED = 505 HTTP_VARIANT_ALSO_NEGOTIATES = 506 HTTP_INSUFFICIENT_STORAGE = 507 # WebDAV HTTP_BANDWIDTH_LIMIT_EXCEEDED = 509 HTTP_NOT_EXTENDED = 510 HTTP_NETWORK_AUTHENTICATION_REQUIRED = 511 HTTP_NETWORK_READ_TIMEOUT_ERROR = 598 # not used in RFC HTTP_NETWORK_CONNECT_TIMEOUT_ERROR = 599 # not used in RFC swift-2.7.0/swift/common/daemon.py0000664000567000056710000000777712675204037020306 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import sys import time import signal from re import sub import eventlet.debug from swift.common import utils class Daemon(object): """Daemon base class""" def __init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): """Override this to run the script once""" raise NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs): """Override this to run forever""" raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs): """Run the daemon""" utils.validate_configuration() utils.drop_privileges(self.conf.get('user', 'swift')) utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) sys.exit() signal.signal(signal.SIGTERM, kill_children) if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def run_daemon(klass, conf_file, section_name='', once=False, **kwargs): """ Loads settings from conf, then instantiates daemon "klass" and runs the daemon with the specified once kwarg. The section_name will be derived from the daemon "klass" if not provided (e.g. ObjectReplicator => object-replicator). :param klass: Class to instantiate, subclass of common.daemon.Daemon :param conf_file: Path to configuration file :param section_name: Section name from conf file to load config from :param once: Passed to daemon run method """ # very often the config section_name is based on the class name # the None singleton will be passed through to readconf as is if section_name is '': section_name = sub(r'([a-z])([A-Z])', r'\1-\2', klass.__name__).lower() conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) # once on command line (i.e. daemonize=false) will over-ride config once = once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired reserve = int(conf.get('fallocate_reserve', 0)) if reserve > 0: utils.FALLOCATE_RESERVE = reserve # By default, disable eventlet printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on # some platforms. This locks in reported times to the timezone in which # the server first starts running in locations that periodically change # timezones. 
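# ---------------------------------------------------------------------------
# Editor's illustrative sketch (not part of the original daemon.py): the
# minimal surface a concrete daemon provides.  HelloDaemon and the config
# path below are hypothetical; real daemons such as ObjectReplicator follow
# the same pattern and are launched through run_daemon().
import time

from swift.common.daemon import Daemon, run_daemon

class HelloDaemon(Daemon):
    def run_once(self, *args, **kwargs):
        self.logger.info('one pass of work')

    def run_forever(self, *args, **kwargs):
        while True:
            self.run_once()
            time.sleep(int(self.conf.get('interval', 30)))

# A bin/ script would hand control to run_daemon(), which derives the config
# section name "hello-daemon" from the class name, sets up logging, drops
# privileges, and calls run():
#
#     run_daemon(HelloDaemon, '/etc/swift/hello-daemon.conf', once=True)
# ---------------------------------------------------------------------------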
os.environ['TZ'] = time.strftime("%z", time.gmtime()) try: klass(conf).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit') logger.info('Exited') swift-2.7.0/swift/common/wsgi.py0000664000567000056710000012373512675204037020005 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """WSGI tools for use with swift.""" from __future__ import print_function import errno import inspect import os import signal import time import mimetools from swift import gettext_ as _ from textwrap import dedent import eventlet import eventlet.debug from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout from paste.deploy import loadwsgi from eventlet.green import socket, ssl, os as green_os from six import BytesIO from six import StringIO from six.moves.urllib.parse import unquote from swift.common import utils, constraints from swift.common.storage_policy import BindPortsCache from swift.common.swob import Request from swift.common.utils import capture_stdio, disable_fallocate, \ drop_privileges, get_logger, NullLogger, config_true_value, \ validate_configuration, get_hub, config_auto_int_value, \ CloseableChain # Set maximum line size of message headers to be accepted. wsgi.MAX_HEADER_LINE = constraints.MAX_HEADER_SIZE try: import multiprocessing CPU_COUNT = multiprocessing.cpu_count() or 1 except (ImportError, NotImplementedError): CPU_COUNT = 1 class NamedConfigLoader(loadwsgi.ConfigLoader): """ Patch paste.deploy's ConfigLoader so each context object will know what config section it came from. """ def get_context(self, object_type, name=None, global_conf=None): context = super(NamedConfigLoader, self).get_context( object_type, name=name, global_conf=global_conf) context.name = name return context loadwsgi.ConfigLoader = NamedConfigLoader class ConfigDirLoader(NamedConfigLoader): """ Read configuration from multiple files under the given path. """ def __init__(self, conf_dir): # parent class uses filename attribute when building error messages self.filename = conf_dir = conf_dir.strip() defaults = { 'here': os.path.normpath(os.path.abspath(conf_dir)), '__file__': os.path.abspath(conf_dir) } self.parser = loadwsgi.NicerConfigParser(conf_dir, defaults=defaults) self.parser.optionxform = str # Don't lower-case keys utils.read_conf_dir(self.parser, conf_dir) def _loadconfigdir(object_type, uri, path, name, relative_to, global_conf): if relative_to: path = os.path.normpath(os.path.join(relative_to, path)) loader = ConfigDirLoader(path) if global_conf: loader.update_defaults(global_conf, overwrite=False) return loader.get_context(object_type, name, global_conf) # add config_dir parsing to paste.deploy loadwsgi._loaders['config_dir'] = _loadconfigdir class ConfigString(NamedConfigLoader): """ Wrap a raw config string up for paste.deploy. If you give one of these to our loadcontext (e.g. give it to our appconfig) we'll intercept it and get it routed to the right loader. 
""" def __init__(self, config_string): self.contents = StringIO(dedent(config_string)) self.filename = "string" defaults = { 'here': "string", '__file__': "string", } self.parser = loadwsgi.NicerConfigParser("string", defaults=defaults) self.parser.optionxform = str # Don't lower-case keys self.parser.readfp(self.contents) def wrap_conf_type(f): """ Wrap a function whos first argument is a paste.deploy style config uri, such that you can pass it an un-adorned raw filesystem path (or config string) and the config directive (either config:, config_dir:, or config_str:) will be added automatically based on the type of entity (either a file or directory, or if no such entity on the file system - just a string) before passing it through to the paste.deploy function. """ def wrapper(conf_path, *args, **kwargs): if os.path.isdir(conf_path): conf_type = 'config_dir' else: conf_type = 'config' conf_uri = '%s:%s' % (conf_type, conf_path) return f(conf_uri, *args, **kwargs) return wrapper appconfig = wrap_conf_type(loadwsgi.appconfig) def monkey_patch_mimetools(): """ mimetools.Message defaults content-type to "text/plain" This changes it to default to None, so we can detect missing headers. """ orig_parsetype = mimetools.Message.parsetype def parsetype(self): if not self.typeheader: self.type = None self.maintype = None self.subtype = None self.plisttext = '' else: orig_parsetype(self) parsetype.patched = True if not getattr(mimetools.Message.parsetype, 'patched', None): mimetools.Message.parsetype = parsetype def get_socket(conf): """Bind socket to bind ip:port in conf :param conf: Configuration dict to read settings from :returns : a socket object as returned from socket.listen or ssl.wrap_socket if conf specifies cert_file """ try: bind_port = int(conf['bind_port']) except (ValueError, KeyError, TypeError): raise ConfigFilePortError() bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port) address_family = [addr[0] for addr in socket.getaddrinfo( bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) if addr[0] in (socket.AF_INET, socket.AF_INET6)][0] sock = None bind_timeout = int(conf.get('bind_timeout', 30)) retry_until = time.time() + bind_timeout warn_ssl = False while not sock and time.time() < retry_until: try: sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)), family=address_family) if 'cert_file' in conf: warn_ssl = True sock = ssl.wrap_socket(sock, certfile=conf['cert_file'], keyfile=conf['key_file']) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise sleep(0.1) if not sock: raise Exception(_('Could not bind to %s:%s ' 'after trying for %s seconds') % ( bind_addr[0], bind_addr[1], bind_timeout)) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # in my experience, sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600) if warn_ssl: ssl_warning_message = _('WARNING: SSL should only be enabled for ' 'testing purposes. Use external SSL ' 'termination for a production deployment.') get_logger(conf).warning(ssl_warning_message) print(ssl_warning_message) return sock class RestrictedGreenPool(GreenPool): """ Works the same as GreenPool, but if the size is specified as one, then the spawn_n() method will invoke waitall() before returning to prevent the caller from doing any other work (like calling accept()). 
""" def __init__(self, size=1024): super(RestrictedGreenPool, self).__init__(size=size) self._rgp_do_wait = (size == 1) def spawn_n(self, *args, **kwargs): super(RestrictedGreenPool, self).spawn_n(*args, **kwargs) if self._rgp_do_wait: self.waitall() def pipeline_property(name, **kwargs): """ Create a property accessor for the given name. The property will dig through the bound instance on which it was accessed for an attribute "app" and check that object for an attribute of the given name. If the "app" object does not have such an attribute, it will look for an attribute "app" on THAT object and continue it's search from there. If the named attribute cannot be found accessing the property will raise AttributeError. If a default kwarg is provided you get that instead of the AttributeError. When found the attribute will be cached on instance with the property accessor using the same name as the attribute prefixed with a leading underscore. """ cache_attr_name = '_%s' % name def getter(self): cached_value = getattr(self, cache_attr_name, None) if cached_value: return cached_value app = self # first app is on self while True: app = getattr(app, 'app', None) if not app: break try: value = getattr(app, name) except AttributeError: continue setattr(self, cache_attr_name, value) return value if 'default' in kwargs: return kwargs['default'] raise AttributeError('No apps in pipeline have a ' '%s attribute' % name) return property(getter) class PipelineWrapper(object): """ This class provides a number of utility methods for modifying the composition of a wsgi pipeline. """ def __init__(self, context): self.context = context def __contains__(self, entry_point_name): try: self.index(entry_point_name) return True except ValueError: return False def startswith(self, entry_point_name): """ Tests if the pipeline starts with the given entry point name. :param entry_point_name: entry point of middleware or app (Swift only) :returns: True if entry_point_name is first in pipeline, False otherwise """ try: first_ctx = self.context.filter_contexts[0] except IndexError: first_ctx = self.context.app_context return first_ctx.entry_point_name == entry_point_name def _format_for_display(self, ctx): # Contexts specified by pipeline= have .name set in NamedConfigLoader. if hasattr(ctx, 'name'): return ctx.name # This should not happen: a foreign context. Let's not crash. return "" def __str__(self): parts = [self._format_for_display(ctx) for ctx in self.context.filter_contexts] parts.append(self._format_for_display(self.context.app_context)) return " ".join(parts) def create_filter(self, entry_point_name): """ Creates a context for a filter that can subsequently be added to a pipeline context. :param entry_point_name: entry point of the middleware (Swift only) :returns: a filter context """ spec = 'egg:swift#' + entry_point_name ctx = loadwsgi.loadcontext(loadwsgi.FILTER, spec, global_conf=self.context.global_conf) ctx.protocol = 'paste.filter_factory' ctx.name = entry_point_name return ctx def index(self, entry_point_name): """ Returns the first index of the given entry point name in the pipeline. Raises ValueError if the given module is not in the pipeline. """ for i, ctx in enumerate(self.context.filter_contexts): if ctx.entry_point_name == entry_point_name: return i raise ValueError("%s is not in pipeline" % (entry_point_name,)) def insert_filter(self, ctx, index=0): """ Inserts a filter module into the pipeline context. 
:param ctx: the context to be inserted :param index: (optional) index at which filter should be inserted in the list of pipeline filters. Default is 0, which means the start of the pipeline. """ self.context.filter_contexts.insert(index, ctx) def loadcontext(object_type, uri, name=None, relative_to=None, global_conf=None): if isinstance(uri, loadwsgi.ConfigLoader): # bypass loadcontext's uri parsing and loader routing and # just directly return the context if global_conf: uri.update_defaults(global_conf, overwrite=False) return uri.get_context(object_type, name, global_conf) add_conf_type = wrap_conf_type(lambda x: x) return loadwsgi.loadcontext(object_type, add_conf_type(uri), name=name, relative_to=relative_to, global_conf=global_conf) def _add_pipeline_properties(app, *names): for property_name in names: if not hasattr(app, property_name): setattr(app.__class__, property_name, pipeline_property(property_name)) def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True): """ Loads a context from a config file, and if the context is a pipeline then presents the app with the opportunity to modify the pipeline. """ global_conf = global_conf or {} ctx = loadcontext(loadwsgi.APP, conf_file, global_conf=global_conf) if ctx.object_type.name == 'pipeline': # give app the opportunity to modify the pipeline context app = ctx.app_context.create() func = getattr(app, 'modify_wsgi_pipeline', None) if func and allow_modify_pipeline: func(PipelineWrapper(ctx)) return ctx.create() def run_server(conf, logger, sock, global_conf=None): # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on # some platforms. This locks in reported times to the timezone in which # the server first starts running in locations that periodically change # timezones. os.environ['TZ'] = time.strftime("%z", time.gmtime()) wsgi.HttpProtocol.default_request_version = "HTTP/1.0" # Turn off logging requests by the underlying WSGI software. wsgi.HttpProtocol.log_request = lambda *a: None # Redirect logging other messages by the underlying WSGI software. wsgi.HttpProtocol.log_message = \ lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a) wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60) eventlet.hubs.use_hub(get_hub()) # NOTE(sileht): monkey-patching thread is required by python-keystoneclient eventlet.patcher.monkey_patch(all=False, socket=True, thread=True) eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) wsgi_logger = NullLogger() if eventlet_debug: # let eventlet.wsgi.server log to stderr wsgi_logger = None # utils.LogAdapter stashes name in server; fallback on unadapted loggers if not global_conf: if hasattr(logger, 'server'): log_name = logger.server else: log_name = logger.name global_conf = {'log_name': log_name} app = loadapp(conf['__file__'], global_conf=global_conf) max_clients = int(conf.get('max_clients', '1024')) pool = RestrictedGreenPool(size=max_clients) try: # Disable capitalizing headers in Eventlet if possible. This is # necessary for the AWS SDK to work with swift3 middleware. 
argspec = inspect.getargspec(wsgi.server) if 'capitalize_response_headers' in argspec.args: wsgi.server(sock, app, wsgi_logger, custom_pool=pool, capitalize_response_headers=False) else: wsgi.server(sock, app, wsgi_logger, custom_pool=pool) except socket.error as err: if err[0] != errno.EINVAL: raise pool.waitall() class WorkersStrategy(object): """ WSGI server management strategy object for a single bind port and listen socket shared by a configured number of forked-off workers. Used in :py:func:`run_wsgi`. :param dict conf: Server configuration dictionary. :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor` object. """ def __init__(self, conf, logger): self.conf = conf self.logger = logger self.sock = None self.children = [] self.worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT) def loop_timeout(self): """ We want to keep from busy-waiting, but we also need a non-None value so the main loop gets a chance to tell whether it should keep running or not (e.g. SIGHUP received). So we return 0.5. """ return 0.5 def bind_ports(self): """ Bind the one listen socket for this strategy and drop privileges (since the parent process will never need to bind again). """ try: self.sock = get_socket(self.conf) except ConfigFilePortError: msg = 'bind_port wasn\'t properly set in the config file. ' \ 'It must be explicitly set to a valid port number.' return msg drop_privileges(self.conf.get('user', 'swift')) def no_fork_sock(self): """ Return a server listen socket if the server should run in the foreground (no fork). """ # Useful for profiling [no forks]. if self.worker_count == 0: return self.sock def new_worker_socks(self): """ Yield a sequence of (socket, opqaue_data) tuples for each server which should be forked-off and started. The opaque_data item for each socket will passed into the :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods where it will be ignored. """ while len(self.children) < self.worker_count: yield self.sock, None def post_fork_hook(self): """ Perform any initialization in a forked-off child process prior to starting the wsgi server. """ pass def log_sock_exit(self, sock, _unused): """ Log a server's exit. :param socket sock: The listen socket for the worker just started. :param _unused: The socket's opaque_data yielded by :py:meth:`new_worker_socks`. """ self.logger.notice('Child %d exiting normally' % os.getpid()) def register_worker_start(self, sock, _unused, pid): """ Called when a new worker is started. :param socket sock: The listen socket for the worker just started. :param _unused: The socket's opaque_data yielded by new_worker_socks(). :param int pid: The new worker process' PID """ self.logger.notice('Started child %s' % pid) self.children.append(pid) def register_worker_exit(self, pid): """ Called when a worker has exited. :param int pid: The PID of the worker that exited. """ self.logger.error('Removing dead child %s' % pid) self.children.remove(pid) def shutdown_sockets(self): """ Shutdown any listen sockets. """ greenio.shutdown_safe(self.sock) self.sock.close() class PortPidState(object): """ A helper class for :py:class:`ServersPerPortStrategy` to track listen sockets and PIDs for each port. :param int servers_per_port: The configured number of servers per port. 
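# ---------------------------------------------------------------------------
# Editor's illustrative example (not part of the original wsgi.py):
# WorkersStrategy sizes its pool with config_auto_int_value(), so an explicit
# "workers" count, the literal "auto", and a missing setting behave as shown.
from swift.common.utils import config_auto_int_value

cpu_count = 4  # stand-in for the detected CPU_COUNT
print(config_auto_int_value('3', cpu_count))     # 3
print(config_auto_int_value('auto', cpu_count))  # 4, fall back to CPU count
print(config_auto_int_value(None, cpu_count))    # 4
# ---------------------------------------------------------------------------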
:param logger: The server's :py:class:`~swift.common.utils.LogAdaptor` """ def __init__(self, servers_per_port, logger): self.servers_per_port = servers_per_port self.logger = logger self.sock_data_by_port = {} def sock_for_port(self, port): """ :param int port: The port whose socket is desired. :returns: The bound listen socket for the given port. """ return self.sock_data_by_port[port]['sock'] def port_for_sock(self, sock): """ :param socket sock: A tracked bound listen socket :returns: The port the socket is bound to. """ for port, sock_data in self.sock_data_by_port.items(): if sock_data['sock'] == sock: return port def _pid_to_port_and_index(self, pid): for port, sock_data in self.sock_data_by_port.items(): for server_idx, a_pid in enumerate(sock_data['pids']): if pid == a_pid: return port, server_idx def port_index_pairs(self): """ Returns current (port, server index) pairs. :returns: A set of (port, server_idx) tuples for currently-tracked ports, sockets, and PIDs. """ current_port_index_pairs = set() for port, pid_state in self.sock_data_by_port.items(): current_port_index_pairs |= set( (port, i) for i, pid in enumerate(pid_state['pids']) if pid is not None) return current_port_index_pairs def track_port(self, port, sock): """ Start tracking servers for the given port and listen socket. :param int port: The port to start tracking :param socket sock: The bound listen socket for the port. """ self.sock_data_by_port[port] = { 'sock': sock, 'pids': [None] * self.servers_per_port, } def not_tracking(self, port): """ Return True if the specified port is not being tracked. :param int port: A port to check. """ return port not in self.sock_data_by_port def all_socks(self): """ Yield all current listen sockets. """ for orphan_data in self.sock_data_by_port.itervalues(): yield orphan_data['sock'] def forget_port(self, port): """ Idempotently forget a port, closing the listen socket at most once. """ orphan_data = self.sock_data_by_port.pop(port, None) if orphan_data: greenio.shutdown_safe(orphan_data['sock']) orphan_data['sock'].close() self.logger.notice('Closing unnecessary sock for port %d', port) def add_pid(self, port, index, pid): self.sock_data_by_port[port]['pids'][index] = pid def forget_pid(self, pid): """ Idempotently forget a PID. It's okay if the PID is no longer in our data structure (it could have been removed by the "orphan port" removal in :py:meth:`new_worker_socks`). :param int pid: The PID which exited. """ port_server_idx = self._pid_to_port_and_index(pid) if port_server_idx is None: # This method can lose a race with the "orphan port" removal, when # a ring reload no longer contains a port. So it's okay if we were # unable to find a (port, server_idx) pair. return dead_port, server_idx = port_server_idx self.logger.error('Removing dead child %d (PID: %s) for port %s', server_idx, pid, dead_port) self.sock_data_by_port[dead_port]['pids'][server_idx] = None class ServersPerPortStrategy(object): """ WSGI server management strategy object for an object-server with one listen port per unique local port in the storage policy rings. The `servers_per_port` integer config setting determines how many workers are run per port. Used in :py:func:`run_wsgi`. :param dict conf: Server configuration dictionary. :param logger: The server's :py:class:`~swift.common.utils.LogAdaptor` object. :param int servers_per_port: The number of workers to run per port. 
""" def __init__(self, conf, logger, servers_per_port): self.conf = conf self.logger = logger self.servers_per_port = servers_per_port self.swift_dir = conf.get('swift_dir', '/etc/swift') self.ring_check_interval = int(conf.get('ring_check_interval', 15)) self.port_pid_state = PortPidState(servers_per_port, logger) bind_ip = conf.get('bind_ip', '0.0.0.0') self.cache = BindPortsCache(self.swift_dir, bind_ip) def _reload_bind_ports(self): self.bind_ports = self.cache.all_bind_ports_for_node() def _bind_port(self, port): new_conf = self.conf.copy() new_conf['bind_port'] = port sock = get_socket(new_conf) self.port_pid_state.track_port(port, sock) def loop_timeout(self): """ Return timeout before checking for reloaded rings. :returns: The time to wait for a child to exit before checking for reloaded rings (new ports). """ return self.ring_check_interval def bind_ports(self): """ Bind one listen socket per unique local storage policy ring port. Then do all the work of drop_privileges except the actual dropping of privileges (each forked-off worker will do that post-fork in :py:meth:`post_fork_hook`). """ self._reload_bind_ports() for port in self.bind_ports: self._bind_port(port) # The workers strategy drops privileges here, which we obviously cannot # do if we want to support binding to low ports. But we do want some # of the actions that drop_privileges did. try: os.setsid() except OSError: pass # In case you need to rmdir where you started the daemon: os.chdir('/') # Ensure files are created with the correct privileges: os.umask(0o22) def no_fork_sock(self): """ This strategy does not support running in the foreground. """ pass def new_worker_socks(self): """ Yield a sequence of (socket, server_idx) tuples for each server which should be forked-off and started. Any sockets for "orphaned" ports no longer in any ring will be closed (causing their associated workers to gracefully exit) after all new sockets have been yielded. The server_idx item for each socket will passed into the :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods. """ self._reload_bind_ports() desired_port_index_pairs = set( (p, i) for p in self.bind_ports for i in range(self.servers_per_port)) current_port_index_pairs = self.port_pid_state.port_index_pairs() if desired_port_index_pairs != current_port_index_pairs: # Orphan ports are ports which had object-server processes running, # but which no longer appear in the ring. We'll kill them after we # start missing workers. orphan_port_index_pairs = current_port_index_pairs - \ desired_port_index_pairs # Fork off worker(s) for every port who's supposed to have # worker(s) but doesn't missing_port_index_pairs = desired_port_index_pairs - \ current_port_index_pairs for port, server_idx in sorted(missing_port_index_pairs): if self.port_pid_state.not_tracking(port): try: self._bind_port(port) except Exception as e: self.logger.critical('Unable to bind to port %d: %s', port, e) continue yield self.port_pid_state.sock_for_port(port), server_idx for orphan_pair in orphan_port_index_pairs: # For any port in orphan_port_index_pairs, it is guaranteed # that there should be no listen socket for that port, so we # can close and forget them. self.port_pid_state.forget_port(orphan_pair[0]) def post_fork_hook(self): """ Called in each child process, prior to starting the actual wsgi server, to drop privileges. """ drop_privileges(self.conf.get('user', 'swift'), call_setsid=False) def log_sock_exit(self, sock, server_idx): """ Log a server's exit. 
""" port = self.port_pid_state.port_for_sock(sock) self.logger.notice('Child %d (PID %d, port %d) exiting normally', server_idx, os.getpid(), port) def register_worker_start(self, sock, server_idx, pid): """ Called when a new worker is started. :param socket sock: The listen socket for the worker just started. :param server_idx: The socket's server_idx as yielded by :py:meth:`new_worker_socks`. :param int pid: The new worker process' PID """ port = self.port_pid_state.port_for_sock(sock) self.logger.notice('Started child %d (PID %d) for port %d', server_idx, pid, port) self.port_pid_state.add_pid(port, server_idx, pid) def register_worker_exit(self, pid): """ Called when a worker has exited. :param int pid: The PID of the worker that exited. """ self.port_pid_state.forget_pid(pid) def shutdown_sockets(self): """ Shutdown any listen sockets. """ for sock in self.port_pid_state.all_socks(): greenio.shutdown_safe(sock) sock.close() def run_wsgi(conf_path, app_section, *args, **kwargs): """ Runs the server according to some strategy. The default strategy runs a specified number of workers in pre-fork model. The object-server (only) may use a servers-per-port strategy if its config has a servers_per_port setting with a value greater than zero. :param conf_path: Path to paste.deploy style configuration file/directory :param app_section: App name from conf file to load config from :returns: 0 if successful, nonzero otherwise """ # Load configuration, Set logger and Load request processor try: (conf, logger, log_name) = \ _initrp(conf_path, app_section, *args, **kwargs) except ConfigFileError as e: print(e) return 1 servers_per_port = int(conf.get('servers_per_port', '0') or 0) # NOTE: for now servers_per_port is object-server-only; future work could # be done to test and allow it to be used for account and container # servers, but that has not been done yet. if servers_per_port and app_section == 'object-server': strategy = ServersPerPortStrategy( conf, logger, servers_per_port=servers_per_port) else: strategy = WorkersStrategy(conf, logger) error_msg = strategy.bind_ports() if error_msg: logger.error(error_msg) print(error_msg) return 1 # Ensure the configuration and application can be loaded before proceeding. 
global_conf = {'log_name': log_name} if 'global_conf_callback' in kwargs: kwargs['global_conf_callback'](conf, global_conf) loadapp(conf_path, global_conf=global_conf) # set utils.FALLOCATE_RESERVE if desired reserve = int(conf.get('fallocate_reserve', 0)) if reserve > 0: utils.FALLOCATE_RESERVE = reserve # redirect errors to logger and close stdio capture_stdio(logger) no_fork_sock = strategy.no_fork_sock() if no_fork_sock: run_server(conf, logger, no_fork_sock, global_conf=global_conf) return 0 def kill_children(*args): """Kills the entire process group.""" logger.error('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) running[0] = False os.killpg(0, signal.SIGTERM) def hup(*args): """Shuts down the server, but allows running requests to complete""" logger.error('SIGHUP received') signal.signal(signal.SIGHUP, signal.SIG_IGN) running[0] = False running = [True] signal.signal(signal.SIGTERM, kill_children) signal.signal(signal.SIGHUP, hup) while running[0]: for sock, sock_info in strategy.new_worker_socks(): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) strategy.post_fork_hook() run_server(conf, logger, sock) strategy.log_sock_exit(sock, sock_info) return 0 else: strategy.register_worker_start(sock, sock_info, pid) # The strategy may need to pay attention to something in addition to # child process exits (like new ports showing up in a ring). # # NOTE: a timeout value of None will just instantiate the Timeout # object and not actually schedule it, which is equivalent to no # timeout for the green_os.wait(). loop_timeout = strategy.loop_timeout() with Timeout(loop_timeout, exception=False): try: pid, status = green_os.wait() if os.WIFEXITED(status) or os.WIFSIGNALED(status): strategy.register_worker_exit(pid) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise except KeyboardInterrupt: logger.notice('User quit') running[0] = False break strategy.shutdown_sockets() logger.notice('Exited') return 0 class ConfigFileError(Exception): pass class ConfigFilePortError(ConfigFileError): pass def _initrp(conf_path, app_section, *args, **kwargs): try: conf = appconfig(conf_path, name=app_section) except Exception as e: raise ConfigFileError("Error trying to load config from %s: %s" % (conf_path, e)) validate_configuration() # pre-configure logger log_name = conf.get('log_name', app_section) if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger = get_logger(conf, log_name, log_to_console=kwargs.pop('verbose', False), log_route='wsgi') # disable fallocate if desired if config_true_value(conf.get('disable_fallocate', 'no')): disable_fallocate() monkey_patch_mimetools() return (conf, logger, log_name) def init_request_processor(conf_path, app_section, *args, **kwargs): """ Loads common settings from conf Sets the logger Loads the request processor :param conf_path: Path to paste.deploy style configuration file/directory :param app_section: App name from conf file to load config from :returns: the loaded application entry point :raises ConfigFileError: Exception is raised for config file error """ (conf, logger, log_name) = _initrp(conf_path, app_section, *args, **kwargs) app = loadapp(conf_path, global_conf={'log_name': log_name}) return (app, conf, logger, log_name) class WSGIContext(object): """ This class provides a means to provide context (scope) for a middleware filter to have access to the wsgi start_response results like the request status and headers. 
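    A minimal sketch of how a subclass might be used (``ExampleMiddleware``
    and the 404 check are illustrative, not part of this module)::

        class ExampleMiddleware(WSGIContext):
            def __call__(self, env, start_response):
                # _app_call runs self.app and captures the status, headers
                # and exc_info that the app passed to start_response
                resp_iter = self._app_call(env)
                if self._get_status_int() == 404:
                    # the captured headers are available here via
                    # self._response_headers for inspection or rewriting
                    pass
                start_response(self._response_status,
                               self._response_headers,
                               self._response_exc_info)
                return resp_iter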
""" def __init__(self, wsgi_app): self.app = wsgi_app def _start_response(self, status, headers, exc_info=None): """ Saves response info without sending it to the remote client. Uses the same semantics as the usual WSGI start_response. """ self._response_status = status self._response_headers = headers self._response_exc_info = exc_info def _app_call(self, env): """ Ensures start_response has been called before returning. """ self._response_status = None self._response_headers = None self._response_exc_info = None resp = self.app(env, self._start_response) # if start_response has been called, just return the iter if self._response_status is not None: return resp resp = iter(resp) try: first_chunk = next(resp) except StopIteration: return iter([]) else: # We got a first_chunk return CloseableChain([first_chunk], resp) def _get_status_int(self): """ Returns the HTTP status int from the last called self._start_response result. """ return int(self._response_status.split(' ', 1)[0]) def _response_header_value(self, key): "Returns str of value for given header key or None" for h_key, val in self._response_headers: if h_key.lower() == key.lower(): return val return None def make_env(env, method=None, path=None, agent='Swift', query_string=None, swift_source=None): """ Returns a new fresh WSGI environment. :param env: The WSGI environment to base the new environment on. :param method: The new REQUEST_METHOD or None to use the original. :param path: The new path_info or none to use the original. path should NOT be quoted. When building a url, a Webob Request (in accordance with wsgi spec) will quote env['PATH_INFO']. url += quote(environ['PATH_INFO']) :param query_string: The new query_string or none to use the original. When building a url, a Webob Request will append the query string directly to the url. url += '?' + env['QUERY_STRING'] :param agent: The HTTP user agent to use; default 'Swift'. You can put %(orig)s in the agent to have it replaced with the original env's HTTP_USER_AGENT, such as '%(orig)s StaticWeb'. You also set agent to None to use the original env's HTTP_USER_AGENT or '' to have no HTTP_USER_AGENT. :param swift_source: Used to mark the request as originating out of middleware. Will be logged in proxy logs. :returns: Fresh WSGI environment. """ newenv = {} for name in ('HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD', 'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD', 'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'swift.trans_id', 'swift.authorize_override', 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID', 'HTTP_REFERER'): if name in env: newenv[name] = env[name] if method: newenv['REQUEST_METHOD'] = method if path: newenv['PATH_INFO'] = path newenv['SCRIPT_NAME'] = '' if query_string is not None: newenv['QUERY_STRING'] = query_string if agent: newenv['HTTP_USER_AGENT'] = ( agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip() elif agent == '' and 'HTTP_USER_AGENT' in newenv: del newenv['HTTP_USER_AGENT'] if swift_source: newenv['swift.source'] = swift_source newenv['wsgi.input'] = BytesIO() if 'SCRIPT_NAME' not in newenv: newenv['SCRIPT_NAME'] = '' return newenv def make_subrequest(env, method=None, path=None, body=None, headers=None, agent='Swift', swift_source=None, make_env=make_env): """ Makes a new swob.Request based on the current env but with the parameters specified. :param env: The WSGI environment to base the new request on. 
:param method: HTTP method of new request; default is from the original env. :param path: HTTP path of new request; default is from the original env. path should be compatible with what you would send to Request.blank. path should be quoted and it can include a query string. for example: '/a%20space?unicode_str%E8%AA%9E=y%20es' :param body: HTTP body of new request; empty by default. :param headers: Extra HTTP headers of new request; None by default. :param agent: The HTTP user agent to use; default 'Swift'. You can put %(orig)s in the agent to have it replaced with the original env's HTTP_USER_AGENT, such as '%(orig)s StaticWeb'. You also set agent to None to use the original env's HTTP_USER_AGENT or '' to have no HTTP_USER_AGENT. :param swift_source: Used to mark the request as originating out of middleware. Will be logged in proxy logs. :param make_env: make_subrequest calls this make_env to help build the swob.Request. :returns: Fresh swob.Request object. """ query_string = None path = path or '' if path and '?' in path: path, query_string = path.split('?', 1) newenv = make_env(env, method, path=unquote(path), agent=agent, query_string=query_string, swift_source=swift_source) if not headers: headers = {} if body: return Request.blank(path, environ=newenv, body=body, headers=headers) else: return Request.blank(path, environ=newenv, headers=headers) def make_pre_authed_env(env, method=None, path=None, agent='Swift', query_string=None, swift_source=None): """Same as :py:func:`make_env` but with preauthorization.""" newenv = make_env( env, method=method, path=path, agent=agent, query_string=query_string, swift_source=swift_source) newenv['swift.authorize'] = lambda req: None newenv['swift.authorize_override'] = True newenv['REMOTE_USER'] = '.wsgi.pre_authed' return newenv def make_pre_authed_request(env, method=None, path=None, body=None, headers=None, agent='Swift', swift_source=None): """Same as :py:func:`make_subrequest` but with preauthorization.""" return make_subrequest( env, method=method, path=path, body=body, headers=headers, agent=agent, swift_source=swift_source, make_env=make_pre_authed_env) swift-2.7.0/swift/common/internal_client.py0000664000567000056710000010320512675204045022173 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import sleep, Timeout from eventlet.green import httplib, socket, urllib2 import json import six from six.moves import range from six.moves import urllib import struct from sys import exc_info, exit import zlib from swift import gettext_ as _ from time import gmtime, strftime, time from zlib import compressobj from swift.common.exceptions import ClientException from swift.common.http import HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES from swift.common.swob import Request from swift.common.utils import quote from swift.common.wsgi import loadapp, pipeline_property class UnexpectedResponse(Exception): """ Exception raised on invalid responses to InternalClient.make_request(). 
:param message: Exception message. :param resp: The unexpected response. """ def __init__(self, message, resp): super(UnexpectedResponse, self).__init__(message) self.resp = resp class CompressingFileReader(object): """ Wrapper for file object to compress object while reading. Can be used to wrap file objects passed to InternalClient.upload_object(). Used in testing of InternalClient. :param file_obj: File object to wrap. :param compresslevel: Compression level, defaults to 9. :param chunk_size: Size of chunks read when iterating using object, defaults to 4096. """ def __init__(self, file_obj, compresslevel=9, chunk_size=4096): self._f = file_obj self.compresslevel = compresslevel self.chunk_size = chunk_size self.set_initial_state() def set_initial_state(self): """ Sets the object to the state needed for the first read. """ self._f.seek(0) self._compressor = compressobj( self.compresslevel, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) self.done = False self.first = True self.crc32 = 0 self.total_size = 0 def read(self, *a, **kw): """ Reads a chunk from the file object. Params are passed directly to the underlying file object's read(). :returns: Compressed chunk from file object. """ if self.done: return '' x = self._f.read(*a, **kw) if x: self.crc32 = zlib.crc32(x, self.crc32) & 0xffffffff self.total_size += len(x) compressed = self._compressor.compress(x) if not compressed: compressed = self._compressor.flush(zlib.Z_SYNC_FLUSH) else: compressed = self._compressor.flush(zlib.Z_FINISH) crc32 = struct.pack("= HTTP_MULTIPLE_CHOICES: ''.join(resp.app_iter) break data = json.loads(resp.body) if not data: break for item in data: yield item marker = data[-1]['name'].encode('utf8') def make_path(self, account, container=None, obj=None): """ Returns a swift path for a request quoting and utf-8 encoding the path parts as need be. :param account: swift account :param container: container, defaults to None :param obj: object, defaults to None :raises ValueError: Is raised if obj is specified and container is not. """ path = '/v1/%s' % quote(account) if container: path += '/%s' % quote(container) if obj: path += '/%s' % quote(obj) elif obj: raise ValueError('Object specified without container') return path def _set_metadata( self, path, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets metadata on path using metadata_prefix to set values in headers of POST request. :param path: Path to do POST on. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ headers = {} for k, v in metadata.items(): if k.lower().startswith(metadata_prefix): headers[k] = v else: headers['%s%s' % (metadata_prefix, k)] = v self.make_request('POST', path, headers, acceptable_statuses) # account methods def iter_containers( self, account, marker='', end_marker='', acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns an iterator of containers dicts from an account. :param account: Account on which to do the container listing. :param marker: Prefix of first desired item, defaults to ''. :param end_marker: Last item returned will be 'less' than this, defaults to ''. 
:param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) return self._iter_items(path, marker, end_marker, acceptable_statuses) def get_account_info( self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns (container_count, object_count) for an account. :param account: Account on which to get the information. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) resp = self.make_request('HEAD', path, {}, acceptable_statuses) if not resp.status_int // 100 == 2: return (0, 0) return (int(resp.headers.get('x-account-container-count', 0)), int(resp.headers.get('x-account-object-count', 0))) def get_account_metadata( self, account, metadata_prefix='', acceptable_statuses=(2,)): """ Gets account metadata. :param account: Account on which to get the metadata. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :returns : Returns dict of account metadata. Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) return self._get_metadata(path, metadata_prefix, acceptable_statuses) def set_account_metadata( self, account, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets account metadata. A call to this will add to the account metadata and not overwrite all of it with values in the metadata dict. To clear an account metadata value, pass an empty string as the value for the key in the metadata dict. :param account: Account on which to get the metadata. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) # container methods def container_exists(self, account, container): """ Checks to see if a container exists. :param account: The container's account. :param container: Container to check. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. :returns : True if container exists, false otherwise. """ path = self.make_path(account, container) resp = self.make_request('HEAD', path, {}, (2, HTTP_NOT_FOUND)) return not resp.status_int == HTTP_NOT_FOUND def create_container( self, account, container, headers=None, acceptable_statuses=(2,)): """ Creates container. 
:param account: The container's account. :param container: Container to create. :param headers: Defaults to empty dict. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ headers = headers or {} path = self.make_path(account, container) self.make_request('PUT', path, headers, acceptable_statuses) def delete_container( self, account, container, acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Deletes a container. :param account: The container's account. :param container: Container to delete. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) self.make_request('DELETE', path, {}, acceptable_statuses) def get_container_metadata( self, account, container, metadata_prefix='', acceptable_statuses=(2,)): """ Gets container metadata. :param account: The container's account. :param container: Container to get metadata on. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :returns : Returns dict of container metadata. Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) return self._get_metadata(path, metadata_prefix, acceptable_statuses) def iter_objects( self, account, container, marker='', end_marker='', acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns an iterator of object dicts from a container. :param account: The container's account. :param container: Container to iterate objects on. :param marker: Prefix of first desired item, defaults to ''. :param end_marker: Last item returned will be 'less' than this, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) return self._iter_items(path, marker, end_marker, acceptable_statuses) def set_container_metadata( self, account, container, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets container metadata. A call to this will add to the container metadata and not overwrite all of it with values in the metadata dict. To clear a container metadata value, pass an empty string as the value for the key in the metadata dict. :param account: The container's account. :param container: Container to set metadata on. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). 
:raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) # object methods def delete_object( self, account, container, obj, acceptable_statuses=(2, HTTP_NOT_FOUND), headers=None): """ Deletes an object. :param account: The object's account. :param container: The object's container. :param obj: The object. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :param headers: extra headers to send with request :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) self.make_request('DELETE', path, (headers or {}), acceptable_statuses) def get_object_metadata( self, account, container, obj, metadata_prefix='', acceptable_statuses=(2,), headers=None): """ Gets object metadata. :param account: The object's account. :param container: The object's container. :param obj: The object. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :param headers: extra headers to send with request :returns : Dict of object metadata. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) return self._get_metadata(path, metadata_prefix, acceptable_statuses, headers=headers) def get_object(self, account, container, obj, headers, acceptable_statuses=(2,)): """ Returns a 3-tuple (status, headers, iterator of object body) """ headers = headers or {} path = self.make_path(account, container, obj) resp = self.make_request('GET', path, headers, acceptable_statuses) return (resp.status_int, resp.headers, resp.app_iter) def iter_object_lines( self, account, container, obj, headers=None, acceptable_statuses=(2,)): """ Returns an iterator of object lines from an uncompressed or compressed text object. Uncompress object as it is read if the object's name ends with '.gz'. :param account: The object's account. :param container: The object's container. :param obj: The object. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. 
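        Illustrative use only (the client instance, account, container and
        object names here are made up)::

            for line in client.iter_object_lines(
                    'AUTH_example', 'log-archive', 'server.log.gz'):
                handle(line)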
""" headers = headers or {} path = self.make_path(account, container, obj) resp = self.make_request('GET', path, headers, acceptable_statuses) if not resp.status_int // 100 == 2: return last_part = '' compressed = obj.endswith('.gz') # magic in the following zlib.decompressobj argument is courtesy of # Python decompressing gzip chunk-by-chunk # http://stackoverflow.com/questions/2423866 d = zlib.decompressobj(16 + zlib.MAX_WBITS) for chunk in resp.app_iter: if compressed: chunk = d.decompress(chunk) parts = chunk.split('\n') if len(parts) == 1: last_part = last_part + parts[0] else: parts[0] = last_part + parts[0] for part in parts[:-1]: yield part last_part = parts[-1] if last_part: yield last_part def set_object_metadata( self, account, container, obj, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets an object's metadata. The object's metadata will be overwritten by the values in the metadata dict. :param account: The object's account. :param container: The object's container. :param obj: The object. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) def upload_object( self, fobj, account, container, obj, headers=None): """ :param fobj: File object to read object's content from. :param account: The object's account. :param container: The object's container. :param obj: The object. :param headers: Headers to send with request, defaults ot empty dict. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. 
""" headers = dict(headers or {}) if 'Content-Length' not in headers: headers['Transfer-Encoding'] = 'chunked' path = self.make_path(account, container, obj) self.make_request('PUT', path, headers, (2,), fobj) def get_auth(url, user, key, auth_version='1.0', **kwargs): if auth_version != '1.0': exit('ERROR: swiftclient missing, only auth v1.0 supported') req = urllib2.Request(url) req.add_header('X-Auth-User', user) req.add_header('X-Auth-Key', key) conn = urllib2.urlopen(req) headers = conn.info() return ( headers.getheader('X-Storage-Url'), headers.getheader('X-Auth-Token')) class SimpleClient(object): """ Simple client that is used in bin/swift-dispersion-* and container sync """ def __init__(self, url=None, token=None, starting_backoff=1, max_backoff=5, retries=5): self.url = url self.token = token self.attempts = 0 # needed in swif-dispersion-populate self.starting_backoff = starting_backoff self.max_backoff = max_backoff self.retries = retries def base_request(self, method, container=None, name=None, prefix=None, headers=None, proxy=None, contents=None, full_listing=None, logger=None, additional_info=None, timeout=None, marker=None): # Common request method trans_start = time() url = self.url if full_listing: info, body_data = self.base_request( method, container, name, prefix, headers, proxy, timeout=timeout, marker=marker) listing = body_data while listing: marker = listing[-1]['name'] info, listing = self.base_request( method, container, name, prefix, headers, proxy, timeout=timeout, marker=marker) if listing: body_data.extend(listing) return [info, body_data] if headers is None: headers = {} if self.token: headers['X-Auth-Token'] = self.token if container: url = '%s/%s' % (url.rstrip('/'), quote(container)) if name: url = '%s/%s' % (url.rstrip('/'), quote(name)) else: url += '?format=json' if prefix: url += '&prefix=%s' % prefix if marker: url += '&marker=%s' % quote(marker) req = urllib2.Request(url, headers=headers, data=contents) if proxy: proxy = urllib.parse.urlparse(proxy) req.set_proxy(proxy.netloc, proxy.scheme) req.get_method = lambda: method conn = urllib2.urlopen(req, timeout=timeout) body = conn.read() info = conn.info() try: body_data = json.loads(body) except ValueError: body_data = None trans_stop = time() if logger: sent_content_length = 0 for n, v in headers.items(): nl = n.lower() if nl == 'content-length': try: sent_content_length = int(v) break except ValueError: pass logger.debug("-> " + " ".join( quote(str(x) if x else "-", ":/") for x in ( strftime('%Y-%m-%dT%H:%M:%S', gmtime(trans_stop)), method, url, conn.getcode(), sent_content_length, info['content-length'], trans_start, trans_stop, trans_stop - trans_start, additional_info ))) return [info, body_data] def retry_request(self, method, **kwargs): retries = kwargs.pop('retries', self.retries) self.attempts = 0 backoff = self.starting_backoff while self.attempts <= retries: self.attempts += 1 try: return self.base_request(method, **kwargs) except (socket.error, httplib.HTTPException, urllib2.URLError) \ as err: if self.attempts > retries: if isinstance(err, urllib2.HTTPError): raise ClientException('Raise too many retries', http_status=err.getcode()) else: raise sleep(backoff) backoff = min(backoff * 2, self.max_backoff) def get_account(self, *args, **kwargs): # Used in swift-dispersion-populate return self.retry_request('GET', **kwargs) def put_container(self, container, **kwargs): # Used in swift-dispersion-populate return self.retry_request('PUT', container=container, **kwargs) def get_container(self, 
container, **kwargs): # Used in swift-dispersion-populate return self.retry_request('GET', container=container, **kwargs) def put_object(self, container, name, contents, **kwargs): # Used in swift-dispersion-populate return self.retry_request('PUT', container=container, name=name, contents=contents.read(), **kwargs) def head_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) return client.retry_request('HEAD', **kwargs) def put_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) client.retry_request('PUT', **kwargs) def delete_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) client.retry_request('DELETE', **kwargs) swift-2.7.0/swift/common/middleware/0000775000567000056710000000000012675204211020556 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/common/middleware/slo.py0000664000567000056710000013332312675204037021740 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Middleware that will provide Static Large Object (SLO) support. This feature is very similar to Dynamic Large Object (DLO) support in that it allows the user to upload many objects concurrently and afterwards download them as a single object. It is different in that it does not rely on eventually consistent container listings to do so. Instead, a user defined manifest of the object segments is used. ---------------------- Uploading the Manifest ---------------------- After the user has uploaded the objects to be concatenated, a manifest is uploaded. The request must be a PUT with the query parameter:: ?multipart-manifest=put The body of this request will be an ordered list of segment descriptions in JSON format. The data to be supplied for each segment is: =========== ======================================================== Key Description =========== ======================================================== path the path to the segment object (not including account) /container/object_name etag the ETag given back when the segment object was PUT, or null size_bytes the size of the complete segment object in bytes, or null range (optional) the (inclusive) range within the object to use as a segment. If omitted, the entire object is used. =========== ======================================================== The format of the list will be: .. code:: [{"path": "/cont/object", "etag": "etagoftheobjectsegment", "size_bytes": 10485760, "range": "1048576-2097151"}, ...] The number of object segments is limited to a configurable amount, default 1000. Each segment must be at least 1 byte. On upload, the middleware will head every segment passed in to verify: 1. the segment exists (i.e. the HEAD was successful); 2. the segment meets minimum size requirements; 3. if the user provided a non-null etag, the etag matches; 4. if the user provided a non-null size_bytes, the size_bytes matches; and 5. 
if the user provided a range, it is a singular, syntactically correct range that is satisfiable given the size of the object. Note that the etag and size_bytes keys are still required; this acts as a guard against user errors such as typos. If any of the objects fail to verify (not found, size/etag mismatch, below minimum size, invalid range) then the user will receive a 4xx error response. If everything does match, the user will receive a 2xx response and the SLO object is ready for downloading. Behind the scenes, on success, a json manifest generated from the user input is sent to object servers with an extra "X-Static-Large-Object: True" header and a modified Content-Type. The items in this manifest will include the etag and size_bytes for each segment, regardless of whether the client specified them for verification. The parameter: swift_bytes=$total_size will be appended to the existing Content-Type, where total_size is the sum of all the included segments' size_bytes. This extra parameter will be hidden from the user. Manifest files can reference objects in separate containers, which will improve concurrent upload speed. Objects can be referenced by multiple manifests. The segments of a SLO manifest can even be other SLO manifests. Treat them as any other object i.e., use the Etag and Content-Length given on the PUT of the sub-SLO in the manifest to the parent SLO. ------------------- Range Specification ------------------- Users now have the ability to specify ranges for SLO segments. Users can now include an optional 'range' field in segment descriptions to specify which bytes from the underlying object should be used for the segment data. Only one range may be specified per segment. .. note:: The 'etag' and 'size_bytes' fields still describe the backing object as a whole. If a user uploads this manifest: .. code:: [{"path": "/con/obj_seg_1", "etag": null, "size_bytes": 2097152, "range": "0-1048576"}, {"path": "/con/obj_seg_2", "etag": null, "size_bytes": 2097152, "range": "512-1550000"}, {"path": "/con/obj_seg_1", "etag": null, "size_bytes": 2097152, "range": "-2048"}] The segment will consist of the first 1048576 bytes of /con/obj_seg_1, followed by bytes 513 through 1550000 (inclusive) of /con/obj_seg_2, and finally bytes 2095104 through 2097152 (i.e., the last 2048 bytes) of /con/obj_seg_1. .. note:: The minimum sized range is 1 byte. This is the same as the minimum segment size. ------------------------- Retrieving a Large Object ------------------------- A GET request to the manifest object will return the concatenation of the objects from the manifest much like DLO. If any of the segments from the manifest are not found or their Etag/Content Length have changed since upload, the connection will drop. In this case a 409 Conflict will be logged in the proxy logs and the user will receive incomplete results. Note that this will be enforced regardless of whether the user perfomed per-segment validation during upload. The headers from this GET or HEAD request will return the metadata attached to the manifest object itself with some exceptions:: Content-Length: the total size of the SLO (the sum of the sizes of the segments in the manifest) X-Static-Large-Object: True Etag: the etag of the SLO (generated the same way as DLO) A GET request with the query parameter:: ?multipart-manifest=get will return a transformed version of the original manifest, containing additional fields and different key names. 
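For example (an illustrative sketch, not output captured from a real
cluster), a segment uploaded with "path", "etag" and "size_bytes" keys may
come back from ``?multipart-manifest=get`` in a form like:

.. code::

    [{"name": "/cont/object",
      "hash": "etagoftheobjectsegment",
      "bytes": 10485760,
      "content_type": "application/octet-stream",
      "last_modified": "2016-03-22T01:02:03.000000"}, ...]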
A GET request with the query parameters:: ?multipart-manifest=get&format=raw will return the contents of the original manifest as it was sent by the client. The main purpose for both calls is solely debugging. When the manifest object is uploaded you are more or less guaranteed that every segment in the manifest exists and matched the specifications. However, there is nothing that prevents the user from breaking the SLO download by deleting/replacing a segment referenced in the manifest. It is left to the user to use caution in handling the segments. ----------------------- Deleting a Large Object ----------------------- A DELETE request will just delete the manifest object itself. A DELETE with a query parameter:: ?multipart-manifest=delete will delete all the segments referenced in the manifest and then the manifest itself. The failure response will be similar to the bulk delete middleware. ------------------------ Modifying a Large Object ------------------------ PUTs / POSTs will work as expected, PUTs will just overwrite the manifest object for example. ------------------ Container Listings ------------------ In a container listing the size listed for SLO manifest objects will be the total_size of the concatenated segments in the manifest. The overall X-Container-Bytes-Used for the container (and subsequently for the account) will not reflect total_size of the manifest but the actual size of the json data stored. The reason for this somewhat confusing discrepancy is we want the container listing to reflect the size of the manifest object when it is downloaded. We do not, however, want to count the bytes-used twice (for both the manifest and the segments it's referring to) in the container and account metadata which can be used for stats purposes. """ from six.moves import range from datetime import datetime import json import mimetypes import re import six from six import BytesIO from hashlib import md5 from swift.common.exceptions import ListingIterError, SegmentError from swift.common.swob import Request, HTTPBadRequest, HTTPServerError, \ HTTPMethodNotAllowed, HTTPRequestEntityTooLarge, HTTPLengthRequired, \ HTTPOk, HTTPPreconditionFailed, HTTPException, HTTPNotFound, \ HTTPUnauthorized, HTTPConflict, HTTPRequestedRangeNotSatisfiable,\ Response, Range from swift.common.utils import get_logger, config_true_value, \ get_valid_utf8_str, override_bytes_from_content_type, split_path, \ register_swift_info, RateLimitedIterator, quote, close_if_possible, \ closing_if_possible from swift.common.request_helpers import SegmentedIterable from swift.common.constraints import check_utf8, MAX_BUFFERED_SLO_SEGMENTS from swift.common.http import HTTP_NOT_FOUND, HTTP_UNAUTHORIZED, is_success from swift.common.wsgi import WSGIContext, make_subrequest from swift.common.middleware.bulk import get_response_body, \ ACCEPTABLE_FORMATS, Bulk DEFAULT_RATE_LIMIT_UNDER_SIZE = 1024 * 1024 # 1 MiB DEFAULT_MAX_MANIFEST_SEGMENTS = 1000 DEFAULT_MAX_MANIFEST_SIZE = 1024 * 1024 * 2 # 2 MiB REQUIRED_SLO_KEYS = set(['path', 'etag', 'size_bytes']) OPTIONAL_SLO_KEYS = set(['range']) ALLOWED_SLO_KEYS = REQUIRED_SLO_KEYS | OPTIONAL_SLO_KEYS def parse_and_validate_input(req_body, req_path): """ Given a request body, parses it and returns a list of dictionaries. The output structure is nearly the same as the input structure, but it is not an exact copy. 
Given a valid input dictionary `d_in`, its corresponding output dictionary `d_out` will be as follows: * d_out['etag'] == d_in['etag'] * d_out['path'] == d_in['path'] * d_in['size_bytes'] can be a string ("12") or an integer (12), but d_out['size_bytes'] is an integer. * (optional) d_in['range'] is a string of the form "M-N", "M-", or "-N", where M and N are non-negative integers. d_out['range'] is the corresponding swob.Range object. If d_in does not have a key 'range', neither will d_out. :raises: HTTPException on parse errors or semantic errors (e.g. bogus JSON structure, syntactically invalid ranges) :returns: a list of dictionaries on success """ try: parsed_data = json.loads(req_body) except ValueError: raise HTTPBadRequest("Manifest must be valid JSON.\n") if not isinstance(parsed_data, list): raise HTTPBadRequest("Manifest must be a list.\n") # If we got here, req_path refers to an object, so this won't ever raise # ValueError. vrs, account, _junk = split_path(req_path, 3, 3, True) errors = [] for seg_index, seg_dict in enumerate(parsed_data): if not isinstance(seg_dict, dict): errors.append("Index %d: not a JSON object" % seg_index) continue missing_keys = [k for k in REQUIRED_SLO_KEYS if k not in seg_dict] if missing_keys: errors.append( "Index %d: missing keys %s" % (seg_index, ", ".join('"%s"' % (mk,) for mk in sorted(missing_keys)))) continue extraneous_keys = [k for k in seg_dict if k not in ALLOWED_SLO_KEYS] if extraneous_keys: errors.append( "Index %d: extraneous keys %s" % (seg_index, ", ".join('"%s"' % (ek,) for ek in sorted(extraneous_keys)))) continue if not isinstance(seg_dict['path'], basestring): errors.append("Index %d: \"path\" must be a string" % seg_index) continue if not (seg_dict['etag'] is None or isinstance(seg_dict['etag'], basestring)): errors.append( "Index %d: \"etag\" must be a string or null" % seg_index) continue if '/' not in seg_dict['path'].strip('/'): errors.append( "Index %d: path does not refer to an object. Path must be of " "the form /container/object." % seg_index) continue seg_size = seg_dict['size_bytes'] if seg_size is not None: try: seg_size = int(seg_size) seg_dict['size_bytes'] = seg_size except (TypeError, ValueError): errors.append("Index %d: invalid size_bytes" % seg_index) continue if seg_size < 1: errors.append("Index %d: too small; each segment must be " "at least 1 byte." % (seg_index,)) continue obj_path = '/'.join(['', vrs, account, seg_dict['path'].lstrip('/')]) if req_path == quote(obj_path): errors.append( "Index %d: manifest must not include itself as a segment" % seg_index) continue if seg_dict.get('range'): try: seg_dict['range'] = Range('bytes=%s' % seg_dict['range']) except ValueError: errors.append("Index %d: invalid range" % seg_index) continue if len(seg_dict['range'].ranges) > 1: errors.append("Index %d: multiple ranges (only one allowed)" % seg_index) continue # If the user *told* us the object's size, we can check range # satisfiability right now. If they lied about the size, we'll # fail that validation later. 
if (seg_size is not None and len(seg_dict['range'].ranges_for_length(seg_size)) != 1): errors.append("Index %d: unsatisfiable range" % seg_index) continue if errors: error_message = "".join(e + "\n" for e in errors) raise HTTPBadRequest(error_message, headers={"Content-Type": "text/plain"}) return parsed_data class SloPutContext(WSGIContext): def __init__(self, slo, slo_etag): super(SloPutContext, self).__init__(slo.app) self.slo_etag = '"' + slo_etag.hexdigest() + '"' def handle_slo_put(self, req, start_response): app_resp = self._app_call(req.environ) for i in range(len(self._response_headers)): if self._response_headers[i][0].lower() == 'etag': self._response_headers[i] = ('Etag', self.slo_etag) break start_response(self._response_status, self._response_headers, self._response_exc_info) return app_resp class SloGetContext(WSGIContext): max_slo_recursion_depth = 10 def __init__(self, slo): self.slo = slo self.first_byte = None self.last_byte = None super(SloGetContext, self).__init__(slo.app) def _fetch_sub_slo_segments(self, req, version, acc, con, obj): """ Fetch the submanifest, parse it, and return it. Raise exception on failures. """ sub_req = make_subrequest( req.environ, path='/'.join(['', version, acc, con, obj]), method='GET', headers={'x-auth-token': req.headers.get('x-auth-token')}, agent=('%(orig)s ' + 'SLO MultipartGET'), swift_source='SLO') sub_resp = sub_req.get_response(self.slo.app) if not is_success(sub_resp.status_int): close_if_possible(sub_resp.app_iter) raise ListingIterError( 'ERROR: while fetching %s, GET of submanifest %s ' 'failed with status %d' % (req.path, sub_req.path, sub_resp.status_int)) try: with closing_if_possible(sub_resp.app_iter): return json.loads(''.join(sub_resp.app_iter)) except ValueError as err: raise ListingIterError( 'ERROR: while fetching %s, JSON-decoding of submanifest %s ' 'failed with %s' % (req.path, sub_req.path, err)) def _segment_length(self, seg_dict): """ Returns the number of bytes that will be fetched from the specified segment on a plain GET request for this SLO manifest. """ seg_range = seg_dict.get('range') if seg_range is not None: # The range is of the form N-M, where N and M are both positive # decimal integers. We know this because this middleware is the # only thing that creates the SLO manifests stored in the # cluster. range_start, range_end = [int(x) for x in seg_range.split('-')] return range_end - range_start + 1 else: return int(seg_dict['bytes']) def _segment_listing_iterator(self, req, version, account, segments, recursion_depth=1): for seg_dict in segments: if config_true_value(seg_dict.get('sub_slo')): override_bytes_from_content_type(seg_dict, logger=self.slo.logger) # We handle the range stuff here so that we can be smart about # skipping unused submanifests. For example, if our first segment is a # submanifest referencing 50 MiB total, but start_byte falls in # the 51st MiB, then we can avoid fetching the first submanifest. # # If we were to make SegmentedIterable handle all the range # calculations, we would be unable to make this optimization. 
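            # A concrete (illustrative) example of the arithmetic below:
            # with segment lengths [100, 200, 50] and a request for bytes
            # 150-249, first_byte starts at 150 and last_byte at 249. The
            # first segment is skipped entirely (150 >= 100), leaving
            # first_byte=50 and last_byte=149, so only bytes 50-149 of the
            # second segment are yielded and the third segment is never
            # fetched.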
total_length = sum(self._segment_length(seg) for seg in segments) if self.first_byte is None: self.first_byte = 0 if self.last_byte is None: self.last_byte = total_length - 1 last_sub_path = None for seg_dict in segments: seg_length = self._segment_length(seg_dict) if self.first_byte >= seg_length: # don't need any bytes from this segment self.first_byte -= seg_length self.last_byte -= seg_length continue if self.last_byte < 0: # no bytes are needed from this or any future segment break seg_range = seg_dict.get('range') if seg_range is None: range_start, range_end = 0, seg_length - 1 else: # We already validated and supplied concrete values # for the range on upload range_start, range_end = map(int, seg_range.split('-')) if config_true_value(seg_dict.get('sub_slo')): # do this check here so that we can avoid fetching this last # manifest before raising the exception if recursion_depth >= self.max_slo_recursion_depth: raise ListingIterError("Max recursion depth exceeded") sub_path = get_valid_utf8_str(seg_dict['name']) sub_cont, sub_obj = split_path(sub_path, 2, 2, True) if last_sub_path != sub_path: sub_segments = self._fetch_sub_slo_segments( req, version, account, sub_cont, sub_obj) last_sub_path = sub_path # Use the existing machinery to slice into the sub-SLO. # This requires that we save off our current state, and # restore at the other end. orig_start, orig_end = self.first_byte, self.last_byte self.first_byte = range_start + max(0, self.first_byte) self.last_byte = min(range_end, range_start + self.last_byte) for sub_seg_dict, sb, eb in self._segment_listing_iterator( req, version, account, sub_segments, recursion_depth=recursion_depth + 1): yield sub_seg_dict, sb, eb # Restore the first/last state self.first_byte, self.last_byte = orig_start, orig_end else: if isinstance(seg_dict['name'], six.text_type): seg_dict['name'] = seg_dict['name'].encode("utf-8") yield (seg_dict, max(0, self.first_byte) + range_start, min(range_end, range_start + self.last_byte)) self.first_byte -= seg_length self.last_byte -= seg_length def _need_to_refetch_manifest(self, req): """ Just because a response shows that an object is a SLO manifest does not mean that response's body contains the entire SLO manifest. If it doesn't, we need to make a second request to actually get the whole thing. Note: this assumes that X-Static-Large-Object has already been found. """ if req.method == 'HEAD': return True response_status = int(self._response_status[:3]) # These are based on etag, and the SLO's etag is almost certainly not # the manifest object's etag. Still, it's highly likely that the # submitted If-None-Match won't match the manifest object's etag, so # we can avoid re-fetching the manifest if we got a successful # response. if ((req.if_match or req.if_none_match) and not is_success(response_status)): return True if req.range and response_status in (206, 416): content_range = '' for header, value in self._response_headers: if header.lower() == 'content-range': content_range = value break # e.g. Content-Range: bytes 0-14289/14290 match = re.match('bytes (\d+)-(\d+)/(\d+)$', content_range) if not match: # Malformed or missing, so we don't know what we got. return True first_byte, last_byte, length = [int(x) for x in match.groups()] # If and only if we actually got back the full manifest body, then # we can avoid re-fetching the object. 
got_everything = (first_byte == 0 and last_byte == length - 1) return not got_everything return False def handle_slo_get_or_head(self, req, start_response): """ Takes a request and a start_response callable and does the normal WSGI thing with them. Returns an iterator suitable for sending up the WSGI chain. :param req: swob.Request object; is a GET or HEAD request aimed at what may be a static large object manifest (or may not). :param start_response: WSGI start_response callable """ resp_iter = self._app_call(req.environ) # make sure this response is for a static large object manifest for header, value in self._response_headers: if (header.lower() == 'x-static-large-object' and config_true_value(value)): break else: # Not a static large object manifest. Just pass it through. start_response(self._response_status, self._response_headers, self._response_exc_info) return resp_iter # Handle pass-through request for the manifest itself if req.params.get('multipart-manifest') == 'get': if req.params.get('format') == 'raw': resp_iter = self.convert_segment_listing( self._response_headers, resp_iter) new_headers = [] for header, value in self._response_headers: if header.lower() == 'content-type': new_headers.append(('Content-Type', 'application/json; charset=utf-8')) else: new_headers.append((header, value)) self._response_headers = new_headers start_response(self._response_status, self._response_headers, self._response_exc_info) return resp_iter if self._need_to_refetch_manifest(req): req.environ['swift.non_client_disconnect'] = True close_if_possible(resp_iter) del req.environ['swift.non_client_disconnect'] get_req = make_subrequest( req.environ, method='GET', headers={'x-auth-token': req.headers.get('x-auth-token')}, agent=('%(orig)s ' + 'SLO MultipartGET'), swift_source='SLO') resp_iter = self._app_call(get_req.environ) # Any Content-Range from a manifest is almost certainly wrong for the # full large object. 
resp_headers = [(h, v) for h, v in self._response_headers if not h.lower() == 'content-range'] response = self.get_or_head_response( req, resp_headers, resp_iter) return response(req.environ, start_response) def convert_segment_listing(self, resp_headers, resp_iter): """ Converts the manifest data to match with the format that was put in through ?multipart-manifest=put :param resp_headers: response headers :param resp_iter: a response iterable """ segments = self._get_manifest_read(resp_iter) for seg_dict in segments: seg_dict.pop('content_type', None) seg_dict.pop('last_modified', None) seg_dict.pop('sub_slo', None) seg_dict['path'] = seg_dict.pop('name', None) seg_dict['size_bytes'] = seg_dict.pop('bytes', None) seg_dict['etag'] = seg_dict.pop('hash', None) json_data = json.dumps(segments) # convert to string if six.PY3: json_data = json_data.encode('utf-8') new_headers = [] for header, value in resp_headers: if header.lower() == 'content-length': new_headers.append(('Content-Length', len(json_data))) else: new_headers.append((header, value)) self._response_headers = new_headers return [json_data] def _get_manifest_read(self, resp_iter): with closing_if_possible(resp_iter): resp_body = ''.join(resp_iter) try: segments = json.loads(resp_body) except ValueError: segments = [] return segments def get_or_head_response(self, req, resp_headers, resp_iter): segments = self._get_manifest_read(resp_iter) etag = md5() content_length = 0 for seg_dict in segments: if seg_dict.get('range'): etag.update('%s:%s;' % (seg_dict['hash'], seg_dict['range'])) else: etag.update(seg_dict['hash']) if config_true_value(seg_dict.get('sub_slo')): override_bytes_from_content_type( seg_dict, logger=self.slo.logger) content_length += self._segment_length(seg_dict) response_headers = [(h, v) for h, v in resp_headers if h.lower() not in ('etag', 'content-length')] response_headers.append(('Content-Length', str(content_length))) response_headers.append(('Etag', '"%s"' % etag.hexdigest())) if req.method == 'HEAD': return self._manifest_head_response(req, response_headers) else: return self._manifest_get_response( req, content_length, response_headers, segments) def _manifest_head_response(self, req, response_headers): return HTTPOk(request=req, headers=response_headers, body='', conditional_response=True) def _manifest_get_response(self, req, content_length, response_headers, segments): self.first_byte, self.last_byte = None, None if req.range: byteranges = req.range.ranges_for_length(content_length) if len(byteranges) == 0: return HTTPRequestedRangeNotSatisfiable(request=req) elif len(byteranges) == 1: self.first_byte, self.last_byte = byteranges[0] # For some reason, swob.Range.ranges_for_length adds 1 to the # last byte's position. 
self.last_byte -= 1 else: req.range = None ver, account, _junk = req.split_path(3, 3, rest_with_last=True) plain_listing_iter = self._segment_listing_iterator( req, ver, account, segments) def is_small_segment((seg_dict, start_byte, end_byte)): start = 0 if start_byte is None else start_byte end = int(seg_dict['bytes']) - 1 if end_byte is None else end_byte is_small = (end - start + 1) < self.slo.rate_limit_under_size return is_small ratelimited_listing_iter = RateLimitedIterator( plain_listing_iter, self.slo.rate_limit_segments_per_sec, limit_after=self.slo.rate_limit_after_segment, ratelimit_if=is_small_segment) # self._segment_listing_iterator gives us 3-tuples of (segment dict, # start byte, end byte), but SegmentedIterable wants (obj path, etag, # size, start byte, end byte), so we clean that up here segment_listing_iter = ( ("/{ver}/{acc}/{conobj}".format( ver=ver, acc=account, conobj=seg_dict['name'].lstrip('/')), seg_dict['hash'], int(seg_dict['bytes']), start_byte, end_byte) for seg_dict, start_byte, end_byte in ratelimited_listing_iter) segmented_iter = SegmentedIterable( req, self.slo.app, segment_listing_iter, name=req.path, logger=self.slo.logger, ua_suffix="SLO MultipartGET", swift_source="SLO", max_get_time=self.slo.max_get_time) try: segmented_iter.validate_first_segment() except (ListingIterError, SegmentError): # Copy from the SLO explanation in top of this file. # If any of the segments from the manifest are not found or # their Etag/Content Length no longer match the connection # will drop. In this case a 409 Conflict will be logged in # the proxy logs and the user will receive incomplete results. return HTTPConflict(request=req) response = Response(request=req, content_length=content_length, headers=response_headers, conditional_response=True, app_iter=segmented_iter) if req.range: response.headers.pop('Etag') return response class StaticLargeObject(object): """ StaticLargeObject Middleware See above for a full description. The proxy logs created for any subrequests made will have swift.source set to "SLO". :param app: The next WSGI filter or app in the paste.deploy chain. :param conf: The configuration dict for the middleware. """ def __init__(self, app, conf, max_manifest_segments=DEFAULT_MAX_MANIFEST_SEGMENTS, max_manifest_size=DEFAULT_MAX_MANIFEST_SIZE): self.conf = conf self.app = app self.logger = get_logger(conf, log_route='slo') self.max_manifest_segments = max_manifest_segments self.max_manifest_size = max_manifest_size self.max_get_time = int(self.conf.get('max_get_time', 86400)) self.rate_limit_under_size = int(self.conf.get( 'rate_limit_under_size', DEFAULT_RATE_LIMIT_UNDER_SIZE)) self.rate_limit_after_segment = int(self.conf.get( 'rate_limit_after_segment', '10')) self.rate_limit_segments_per_sec = int(self.conf.get( 'rate_limit_segments_per_sec', '1')) self.bulk_deleter = Bulk(app, {}, logger=self.logger) def handle_multipart_get_or_head(self, req, start_response): """ Handles the GET or HEAD of a SLO manifest. The response body (only on GET, of course) will consist of the concatenation of the segments. 
:params req: a swob.Request with a path referencing an object :raises: HttpException on errors """ return SloGetContext(self).handle_slo_get_or_head(req, start_response) def copy_hook(self, inner_hook): def slo_hook(source_req, source_resp, sink_req): x_slo = source_resp.headers.get('X-Static-Large-Object') if (config_true_value(x_slo) and source_req.params.get('multipart-manifest') != 'get' and 'swift.post_as_copy' not in source_req.environ): source_resp = SloGetContext(self).get_or_head_response( source_req, source_resp.headers.items(), source_resp.app_iter) return inner_hook(source_req, source_resp, sink_req) return slo_hook def handle_multipart_put(self, req, start_response): """ Will handle the PUT of a SLO manifest. Heads every object in manifest to check if is valid and if so will save a manifest generated from the user input. Uses WSGIContext to call self and start_response and returns a WSGI iterator. :params req: a swob.Request with an obj in path :raises: HttpException on errors """ try: vrs, account, container, obj = req.split_path(1, 4, True) except ValueError: return self.app(req.environ, start_response) if req.content_length > self.max_manifest_size: raise HTTPRequestEntityTooLarge( "Manifest File > %d bytes" % self.max_manifest_size) if req.headers.get('X-Copy-From'): raise HTTPMethodNotAllowed( 'Multipart Manifest PUTs cannot be COPY requests') if req.content_length is None and \ req.headers.get('transfer-encoding', '').lower() != 'chunked': raise HTTPLengthRequired(request=req) parsed_data = parse_and_validate_input( req.body_file.read(self.max_manifest_size), req.path) problem_segments = [] if len(parsed_data) > self.max_manifest_segments: raise HTTPRequestEntityTooLarge( 'Number of segments must be <= %d' % self.max_manifest_segments) total_size = 0 out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS) if not out_content_type: out_content_type = 'text/plain' data_for_storage = [] slo_etag = md5() last_obj_path = None for index, seg_dict in enumerate(parsed_data): obj_name = seg_dict['path'] if isinstance(obj_name, six.text_type): obj_name = obj_name.encode('utf-8') obj_path = '/'.join(['', vrs, account, obj_name.lstrip('/')]) new_env = req.environ.copy() new_env['PATH_INFO'] = obj_path new_env['REQUEST_METHOD'] = 'HEAD' new_env['swift.source'] = 'SLO' del(new_env['wsgi.input']) del(new_env['QUERY_STRING']) new_env['CONTENT_LENGTH'] = 0 new_env['HTTP_USER_AGENT'] = \ '%s MultipartPUT' % req.environ.get('HTTP_USER_AGENT') if obj_path != last_obj_path: last_obj_path = obj_path head_seg_resp = \ Request.blank(obj_path, new_env).get_response(self) if head_seg_resp.is_success: segment_length = head_seg_resp.content_length if seg_dict.get('range'): # Since we now know the length, we can normalize the # range. We know that there is exactly one range # requested since we checked that earlier in # parse_and_validate_input(). ranges = seg_dict['range'].ranges_for_length( head_seg_resp.content_length) if not ranges: problem_segments.append([quote(obj_name), 'Unsatisfiable Range']) elif ranges == [(0, head_seg_resp.content_length)]: # Just one range, and it exactly matches the object. # Why'd we do this again? 
del seg_dict['range'] segment_length = head_seg_resp.content_length else: rng = ranges[0] seg_dict['range'] = '%d-%d' % (rng[0], rng[1] - 1) segment_length = rng[1] - rng[0] if segment_length < 1: problem_segments.append( [quote(obj_name), 'Too small; each segment must be at least 1 byte.']) total_size += segment_length if seg_dict['size_bytes'] is not None and \ seg_dict['size_bytes'] != head_seg_resp.content_length: problem_segments.append([quote(obj_name), 'Size Mismatch']) if seg_dict['etag'] is None or \ seg_dict['etag'] == head_seg_resp.etag: if seg_dict.get('range'): slo_etag.update('%s:%s;' % (head_seg_resp.etag, seg_dict['range'])) else: slo_etag.update(head_seg_resp.etag) else: problem_segments.append([quote(obj_name), 'Etag Mismatch']) if head_seg_resp.last_modified: last_modified = head_seg_resp.last_modified else: # shouldn't happen last_modified = datetime.now() last_modified_formatted = \ last_modified.strftime('%Y-%m-%dT%H:%M:%S.%f') seg_data = {'name': '/' + seg_dict['path'].lstrip('/'), 'bytes': head_seg_resp.content_length, 'hash': head_seg_resp.etag, 'content_type': head_seg_resp.content_type, 'last_modified': last_modified_formatted} if seg_dict.get('range'): seg_data['range'] = seg_dict['range'] if config_true_value( head_seg_resp.headers.get('X-Static-Large-Object')): seg_data['sub_slo'] = True data_for_storage.append(seg_data) else: problem_segments.append([quote(obj_name), head_seg_resp.status]) if problem_segments: resp_body = get_response_body( out_content_type, {}, problem_segments) raise HTTPBadRequest(resp_body, content_type=out_content_type) env = req.environ if not env.get('CONTENT_TYPE'): guessed_type, _junk = mimetypes.guess_type(req.path_info) env['CONTENT_TYPE'] = guessed_type or 'application/octet-stream' env['swift.content_type_overridden'] = True env['CONTENT_TYPE'] += ";swift_bytes=%d" % total_size env['HTTP_X_STATIC_LARGE_OBJECT'] = 'True' json_data = json.dumps(data_for_storage) if six.PY3: json_data = json_data.encode('utf-8') env['CONTENT_LENGTH'] = str(len(json_data)) env['wsgi.input'] = BytesIO(json_data) slo_put_context = SloPutContext(self, slo_etag) return slo_put_context.handle_slo_put(req, start_response) def get_segments_to_delete_iter(self, req): """ A generator function to be used to delete all the segments and sub-segments referenced in a manifest. :params req: a swob.Request with an SLO manifest in path :raises HTTPPreconditionFailed: on invalid UTF8 in request path :raises HTTPBadRequest: on too many buffered sub segments and on invalid SLO manifest path """ if not check_utf8(req.path_info): raise HTTPPreconditionFailed( request=req, body='Invalid UTF8 or contains NULL') vrs, account, container, obj = req.split_path(4, 4, True) segments = [{ 'sub_slo': True, 'name': ('/%s/%s' % (container, obj)).decode('utf-8')}] while segments: if len(segments) > MAX_BUFFERED_SLO_SEGMENTS: raise HTTPBadRequest( 'Too many buffered slo segments to delete.') seg_data = segments.pop(0) if seg_data.get('sub_slo'): try: segments.extend( self.get_slo_segments(seg_data['name'], req)) except HTTPException as err: # allow bulk delete response to report errors seg_data['error'] = {'code': err.status_int, 'message': err.body} # add manifest back to be deleted after segments seg_data['sub_slo'] = False segments.append(seg_data) else: seg_data['name'] = seg_data['name'].encode('utf-8') yield seg_data def get_slo_segments(self, obj_name, req): """ Performs a swob.Request and returns the SLO manifest's segments. 
:raises HTTPServerError: on unable to load obj_name or on unable to load the SLO manifest data. :raises HTTPBadRequest: on not an SLO manifest :raises HTTPNotFound: on SLO manifest not found :returns: SLO manifest's segments """ vrs, account, _junk = req.split_path(2, 3, True) new_env = req.environ.copy() new_env['REQUEST_METHOD'] = 'GET' del(new_env['wsgi.input']) new_env['QUERY_STRING'] = 'multipart-manifest=get' new_env['CONTENT_LENGTH'] = 0 new_env['HTTP_USER_AGENT'] = \ '%s MultipartDELETE' % new_env.get('HTTP_USER_AGENT') new_env['swift.source'] = 'SLO' new_env['PATH_INFO'] = ( '/%s/%s/%s' % (vrs, account, obj_name.lstrip('/')) ).encode('utf-8') resp = Request.blank('', new_env).get_response(self.app) if resp.is_success: if config_true_value(resp.headers.get('X-Static-Large-Object')): try: return json.loads(resp.body) except ValueError: raise HTTPServerError('Unable to load SLO manifest') else: raise HTTPBadRequest('Not an SLO manifest') elif resp.status_int == HTTP_NOT_FOUND: raise HTTPNotFound('SLO manifest not found') elif resp.status_int == HTTP_UNAUTHORIZED: raise HTTPUnauthorized('401 Unauthorized') else: raise HTTPServerError('Unable to load SLO manifest or segment.') def handle_multipart_delete(self, req): """ Will delete all the segments in the SLO manifest and then, if successful, will delete the manifest file. :params req: a swob.Request with an obj in path :returns: swob.Response whose app_iter set to Bulk.handle_delete_iter """ req.headers['Content-Type'] = None # Ignore content-type from client resp = HTTPOk(request=req) out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS) if out_content_type: resp.content_type = out_content_type resp.app_iter = self.bulk_deleter.handle_delete_iter( req, objs_to_delete=self.get_segments_to_delete_iter(req), user_agent='MultipartDELETE', swift_source='SLO', out_content_type=out_content_type) return resp def __call__(self, env, start_response): """ WSGI entry point """ req = Request(env) try: vrs, account, container, obj = req.split_path(4, 4, True) except ValueError: return self.app(env, start_response) # install our COPY-callback hook env['swift.copy_hook'] = self.copy_hook( env.get('swift.copy_hook', lambda src_req, src_resp, sink_req: src_resp)) try: if req.method == 'PUT' and \ req.params.get('multipart-manifest') == 'put': return self.handle_multipart_put(req, start_response) if req.method == 'DELETE' and \ req.params.get('multipart-manifest') == 'delete': return self.handle_multipart_delete(req)(env, start_response) if req.method == 'GET' or req.method == 'HEAD': return self.handle_multipart_get_or_head(req, start_response) if 'X-Static-Large-Object' in req.headers: raise HTTPBadRequest( request=req, body='X-Static-Large-Object is a reserved header. 
' 'To create a static large object add query param '
                    'multipart-manifest=put.')
        except HTTPException as err_resp:
            return err_resp(env, start_response)

        return self.app(env, start_response)


def filter_factory(global_conf, **local_conf):
    conf = global_conf.copy()
    conf.update(local_conf)

    max_manifest_segments = int(conf.get('max_manifest_segments',
                                         DEFAULT_MAX_MANIFEST_SEGMENTS))
    max_manifest_size = int(conf.get('max_manifest_size',
                                     DEFAULT_MAX_MANIFEST_SIZE))

    register_swift_info('slo',
                        max_manifest_segments=max_manifest_segments,
                        max_manifest_size=max_manifest_size,
                        # this used to be configurable; report it as 1 for
                        # clients that might still care
                        min_segment_size=1)

    def slo_filter(app):
        return StaticLargeObject(
            app, conf,
            max_manifest_segments=max_manifest_segments,
            max_manifest_size=max_manifest_size)
    return slo_filter
swift-2.7.0/swift/common/middleware/versioned_writes.py0000664000567000056710000005252712675204037024534 0ustar jenkinsjenkins00000000000000# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Object versioning in swift is implemented by setting a flag on the container
to tell swift to version all objects in the container. The flag is the
``X-Versions-Location`` header on the container, and its value is the
container where the versions are stored. It is recommended to use a different
``X-Versions-Location`` container for each container that is being versioned.

When data is ``PUT`` into a versioned container (a container with the
versioning flag turned on), the existing data in the file is redirected to a
new object and the data in the ``PUT`` request is saved as the data for the
versioned object. The new object name (for the previous version) is
``<length><object_name>/<timestamp>``, where ``length`` is the 3-character
zero-padded hexadecimal length of the ``<object_name>`` and ``<timestamp>``
is the timestamp of when the previous version was created.

A ``GET`` to a versioned object will return the current version of the object
without having to do any request redirects or metadata lookups.

A ``POST`` to a versioned object will update the object metadata as normal,
but will not create a new version of the object. In other words, new versions
are only created when the content of the object changes.

A ``DELETE`` to a versioned object will only remove the current version of
the object. If you have 5 total versions of the object, you must delete the
object 5 times to completely remove the object.

--------------------------------------------------
How to Enable Object Versioning in a Swift Cluster
--------------------------------------------------

This middleware was written as an effort to refactor parts of the proxy
server, so this functionality was already available in previous releases and
every attempt was made to maintain backwards compatibility. To allow
operators to perform a seamless upgrade, it is not required to add the
middleware to the proxy pipeline, and the flag ``allow_versions`` in the
container server configuration files is still valid.
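If you do choose to add the middleware to the proxy pipeline, that means
adding ``versioned_writes`` to the proxy server's ``pipeline`` line and
declaring a filter section for it. A minimal sketch of such a filter section
(pipeline placement and any other options are deliberately omitted here)::

    [filter:versioned_writes]
    use = egg:swift#versioned_writes
    allow_versioned_writes = true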
In future releases, ``allow_versions`` will be deprecated in favor of adding
this middleware to the pipeline to enable or disable the feature.

In case the middleware is added to the proxy pipeline, you must also set
``allow_versioned_writes`` to ``True`` in the middleware options to enable
the information about this middleware to be returned in a /info request.

Upgrade considerations: If ``allow_versioned_writes`` is set in the filter
configuration, you can leave the ``allow_versions`` flag in the container
server configuration files untouched. If you decide to disable or remove the
``allow_versions`` flag, you must re-set any existing containers that had the
'X-Versions-Location' flag configured so that it can now be tracked by the
versioned_writes middleware.

-----------------------
Examples Using ``curl``
-----------------------

First, create a container with the ``X-Versions-Location`` header or add the
header to an existing container. Also make sure the container referenced by
the ``X-Versions-Location`` exists. In this example, the name of that
container is "versions"::

    curl -i -XPUT -H "X-Auth-Token: <token>" \
    -H "X-Versions-Location: versions" http://<storage_url>/container
    curl -i -XPUT -H "X-Auth-Token: <token>" http://<storage_url>/versions

Create an object (the first version)::

    curl -i -XPUT --data-binary 1 -H "X-Auth-Token: <token>" \
    http://<storage_url>/container/myobject

Now create a new version of that object::

    curl -i -XPUT --data-binary 2 -H "X-Auth-Token: <token>" \
    http://<storage_url>/container/myobject

See a listing of the older versions of the object::

    curl -i -H "X-Auth-Token: <token>" \
    http://<storage_url>/versions?prefix=008myobject/

Now delete the current version of the object and see that the older version
is gone from the 'versions' container and back in the 'container' container::

    curl -i -XDELETE -H "X-Auth-Token: <token>" \
    http://<storage_url>/container/myobject
    curl -i -H "X-Auth-Token: <token>" \
    http://<storage_url>/versions?prefix=008myobject/
    curl -i -XGET -H "X-Auth-Token: <token>" \
    http://<storage_url>/container/myobject

---------------------------------------------------
How to Disable Object Versioning in a Swift Cluster
---------------------------------------------------

If you want to disable all functionality, set ``allow_versioned_writes`` to
``False`` in the middleware options.
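For example, disabling it amounts to something like the following in the
filter section sketched above (again illustrative only)::

    [filter:versioned_writes]
    use = egg:swift#versioned_writes
    allow_versioned_writes = false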
Disable versioning from a container (x is any value except empty):: curl -i -XPOST -H "X-Auth-Token: " \ -H "X-Remove-Versions-Location: x" http:///container """ import calendar import json import six from six.moves.urllib.parse import quote, unquote import time from swift.common.utils import get_logger, Timestamp, \ register_swift_info, config_true_value from swift.common.request_helpers import get_sys_meta_prefix from swift.common.wsgi import WSGIContext, make_pre_authed_request from swift.common.swob import Request, HTTPException from swift.common.constraints import ( check_account_format, check_container_format, check_destination_header) from swift.proxy.controllers.base import get_container_info from swift.common.http import ( is_success, is_client_error, HTTP_NOT_FOUND) from swift.common.swob import HTTPPreconditionFailed, HTTPServiceUnavailable, \ HTTPServerError from swift.common.exceptions import ( ListingIterNotFound, ListingIterError) class VersionedWritesContext(WSGIContext): def __init__(self, wsgi_app, logger): WSGIContext.__init__(self, wsgi_app) self.logger = logger def _listing_iter(self, account_name, lcontainer, lprefix, req): try: for page in self._listing_pages_iter(account_name, lcontainer, lprefix, req.environ): for item in page: yield item except ListingIterNotFound: pass except HTTPPreconditionFailed: raise HTTPPreconditionFailed(request=req) except ListingIterError: raise HTTPServerError(request=req) def _listing_pages_iter(self, account_name, lcontainer, lprefix, env): marker = '' while True: lreq = make_pre_authed_request( env, method='GET', swift_source='VW', path='/v1/%s/%s' % (account_name, lcontainer)) lreq.environ['QUERY_STRING'] = \ 'format=json&prefix=%s&reverse=on&marker=%s' % ( quote(lprefix), quote(marker)) lresp = lreq.get_response(self.app) if not is_success(lresp.status_int): if lresp.status_int == HTTP_NOT_FOUND: raise ListingIterNotFound() elif is_client_error(lresp.status_int): raise HTTPPreconditionFailed() else: raise ListingIterError() if not lresp.body: break sublisting = json.loads(lresp.body) if not sublisting: break marker = sublisting[-1]['name'].encode('utf-8') yield sublisting def handle_obj_versions_put(self, req, object_versions, object_name, policy_index): ret = None # do a HEAD request to check object versions _headers = {'X-Newest': 'True', 'X-Backend-Storage-Policy-Index': policy_index, 'x-auth-token': req.headers.get('x-auth-token')} # make a pre_auth request in case the user has write access # to container, but not READ. 
This was allowed in previous version # (i.e., before middleware) so keeping the same behavior here head_req = make_pre_authed_request( req.environ, path=req.path_info, headers=_headers, method='HEAD', swift_source='VW') hresp = head_req.get_response(self.app) is_dlo_manifest = 'X-Object-Manifest' in req.headers or \ 'X-Object-Manifest' in hresp.headers # if there's an existing object, then copy it to # X-Versions-Location if is_success(hresp.status_int) and not is_dlo_manifest: lcontainer = object_versions.split('/')[0] prefix_len = '%03x' % len(object_name) lprefix = prefix_len + object_name + '/' ts_source = hresp.environ.get('swift_x_timestamp') if ts_source is None: ts_source = calendar.timegm(time.strptime( hresp.headers['last-modified'], '%a, %d %b %Y %H:%M:%S GMT')) new_ts = Timestamp(ts_source).internal vers_obj_name = lprefix + new_ts copy_headers = { 'Destination': '%s/%s' % (lcontainer, vers_obj_name), 'x-auth-token': req.headers.get('x-auth-token')} # COPY implementation sets X-Newest to True when it internally # does a GET on source object. So, we don't have to explicity # set it in request headers here. copy_req = make_pre_authed_request( req.environ, path=req.path_info, headers=copy_headers, method='COPY', swift_source='VW') copy_resp = copy_req.get_response(self.app) if is_success(copy_resp.status_int): # success versioning previous existing object # return None and handle original request ret = None else: if is_client_error(copy_resp.status_int): # missing container or bad permissions ret = HTTPPreconditionFailed(request=req) else: # could not copy the data, bail ret = HTTPServiceUnavailable(request=req) else: if hresp.status_int == HTTP_NOT_FOUND or is_dlo_manifest: # nothing to version # return None and handle original request ret = None else: # if not HTTP_NOT_FOUND, return error immediately ret = hresp return ret def handle_obj_versions_delete(self, req, object_versions, account_name, container_name, object_name): lcontainer = object_versions.split('/')[0] prefix_len = '%03x' % len(object_name) lprefix = prefix_len + object_name + '/' item_iter = self._listing_iter(account_name, lcontainer, lprefix, req) authed = False for previous_version in item_iter: if not authed: # we're about to start making COPY requests - need to # validate the write access to the versioned container if 'swift.authorize' in req.environ: container_info = get_container_info( req.environ, self.app) req.acl = container_info.get('write_acl') aresp = req.environ['swift.authorize'](req) if aresp: return aresp authed = True # there are older versions so copy the previous version to the # current object and delete the previous version prev_obj_name = previous_version['name'].encode('utf-8') copy_path = '/v1/' + account_name + '/' + \ lcontainer + '/' + prev_obj_name copy_headers = {'X-Newest': 'True', 'Destination': container_name + '/' + object_name, 'x-auth-token': req.headers.get('x-auth-token')} copy_req = make_pre_authed_request( req.environ, path=copy_path, headers=copy_headers, method='COPY', swift_source='VW') copy_resp = copy_req.get_response(self.app) # if the version isn't there, keep trying with previous version if copy_resp.status_int == HTTP_NOT_FOUND: continue if not is_success(copy_resp.status_int): if is_client_error(copy_resp.status_int): # some user error, maybe permissions return HTTPPreconditionFailed(request=req) else: # could not copy the data, bail return HTTPServiceUnavailable(request=req) # reset these because the COPY changed them new_del_req = make_pre_authed_request( 
req.environ, path=copy_path, method='DELETE', swift_source='VW') req = new_del_req # remove 'X-If-Delete-At', since it is not for the older copy if 'X-If-Delete-At' in req.headers: del req.headers['X-If-Delete-At'] break # handle DELETE request here in case it was modified return req.get_response(self.app) def handle_container_request(self, env, start_response): app_resp = self._app_call(env) if self._response_headers is None: self._response_headers = [] sysmeta_version_hdr = get_sys_meta_prefix('container') + \ 'versions-location' location = '' for key, val in self._response_headers: if key.lower() == sysmeta_version_hdr: location = val if location: self._response_headers.extend([('X-Versions-Location', location)]) start_response(self._response_status, self._response_headers, self._response_exc_info) return app_resp class VersionedWritesMiddleware(object): def __init__(self, app, conf): self.app = app self.conf = conf self.logger = get_logger(conf, log_route='versioned_writes') def container_request(self, req, start_response, enabled): sysmeta_version_hdr = get_sys_meta_prefix('container') + \ 'versions-location' # set version location header as sysmeta if 'X-Versions-Location' in req.headers: val = req.headers.get('X-Versions-Location') if val: # differently from previous version, we are actually # returning an error if user tries to set versions location # while feature is explicitly disabled. if not config_true_value(enabled) and \ req.method in ('PUT', 'POST'): raise HTTPPreconditionFailed( request=req, content_type='text/plain', body='Versioned Writes is disabled') location = check_container_format(req, val) req.headers[sysmeta_version_hdr] = location # reset original header to maintain sanity # now only sysmeta is source of Versions Location req.headers['X-Versions-Location'] = '' # if both headers are in the same request # adding location takes precendence over removing if 'X-Remove-Versions-Location' in req.headers: del req.headers['X-Remove-Versions-Location'] else: # empty value is the same as X-Remove-Versions-Location req.headers['X-Remove-Versions-Location'] = 'x' # handle removing versions container val = req.headers.get('X-Remove-Versions-Location') if val: req.headers.update({sysmeta_version_hdr: ''}) req.headers.update({'X-Versions-Location': ''}) del req.headers['X-Remove-Versions-Location'] # send request and translate sysmeta headers from response vw_ctx = VersionedWritesContext(self.app, self.logger) return vw_ctx.handle_container_request(req.environ, start_response) def object_request(self, req, version, account, container, obj, allow_versioned_writes): account_name = unquote(account) container_name = unquote(container) object_name = unquote(obj) container_info = None resp = None is_enabled = config_true_value(allow_versioned_writes) if req.method in ('PUT', 'DELETE'): container_info = get_container_info( req.environ, self.app) elif req.method == 'COPY' and 'Destination' in req.headers: if 'Destination-Account' in req.headers: account_name = req.headers.get('Destination-Account') account_name = check_account_format(req, account_name) container_name, object_name = check_destination_header(req) req.environ['PATH_INFO'] = "/%s/%s/%s/%s" % ( version, account_name, container_name, object_name) container_info = get_container_info( req.environ, self.app) if not container_info: return self.app # To maintain backwards compatibility, container version # location could be stored as sysmeta or not, need to check both. # If stored as sysmeta, check if middleware is enabled. 
If sysmeta # is not set, but versions property is set in container_info, then # for backwards compatibility feature is enabled. object_versions = container_info.get( 'sysmeta', {}).get('versions-location') if object_versions and isinstance(object_versions, six.text_type): object_versions = object_versions.encode('utf-8') elif not object_versions: object_versions = container_info.get('versions') # if allow_versioned_writes is not set in the configuration files # but 'versions' is configured, enable feature to maintain # backwards compatibility if not allow_versioned_writes and object_versions: is_enabled = True if is_enabled and object_versions: object_versions = unquote(object_versions) vw_ctx = VersionedWritesContext(self.app, self.logger) if req.method in ('PUT', 'COPY'): policy_idx = req.headers.get( 'X-Backend-Storage-Policy-Index', container_info['storage_policy']) resp = vw_ctx.handle_obj_versions_put( req, object_versions, object_name, policy_idx) else: # handle DELETE resp = vw_ctx.handle_obj_versions_delete( req, object_versions, account_name, container_name, object_name) if resp: return resp else: return self.app def __call__(self, env, start_response): # making a duplicate, because if this is a COPY request, we will # modify the PATH_INFO to find out if the 'Destination' is in a # versioned container req = Request(env.copy()) try: (version, account, container, obj) = req.split_path(3, 4, True) except ValueError: return self.app(env, start_response) # In case allow_versioned_writes is set in the filter configuration, # the middleware becomes the authority on whether object # versioning is enabled or not. In case it is not set, then # the option in the container configuration is still checked # for backwards compatibility # For a container request, first just check if option is set, # can be either true or false. # If set, check if enabled when actually trying to set container # header. If not set, let request be handled by container server # for backwards compatibility. # For an object request, also check if option is set (either T or F). # If set, check if enabled when checking versions container in # sysmeta property. If it is not set check 'versions' property in # container_info allow_versioned_writes = self.conf.get('allow_versioned_writes') if allow_versioned_writes and container and not obj: try: return self.container_request(req, start_response, allow_versioned_writes) except HTTPException as error_response: return error_response(env, start_response) elif obj and req.method in ('PUT', 'COPY', 'DELETE'): try: return self.object_request( req, version, account, container, obj, allow_versioned_writes)(env, start_response) except HTTPException as error_response: return error_response(env, start_response) else: return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) if config_true_value(conf.get('allow_versioned_writes')): register_swift_info('versioned_writes') def obj_versions_filter(app): return VersionedWritesMiddleware(app, conf) return obj_versions_filter swift-2.7.0/swift/common/middleware/xprofile.py0000664000567000056710000002262712675204037022777 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Profiling middleware for Swift Servers.

The current implementation is based on an eventlet-aware profiler. (In the
future, more profilers could be added to collect more data for analysis.)
It profiles all incoming requests and accumulates CPU timing statistics for
performance tuning and optimization. A mini web UI is also provided for
profiling data analysis. It can be accessed from the URLs below.

Index page for browsing profile data::

    http://SERVER_IP:PORT/__profile__

List all profiles to return profile ids in json format::

    http://SERVER_IP:PORT/__profile__/
    http://SERVER_IP:PORT/__profile__/all

Retrieve specific profile data in different formats::

    http://SERVER_IP:PORT/__profile__/PROFILE_ID?format=[default|json|csv|ods]
    http://SERVER_IP:PORT/__profile__/current?format=[default|json|csv|ods]
    http://SERVER_IP:PORT/__profile__/all?format=[default|json|csv|ods]

Retrieve metrics from a specific function in json format::

    http://SERVER_IP:PORT/__profile__/PROFILE_ID/NFL?format=json
    http://SERVER_IP:PORT/__profile__/current/NFL?format=json
    http://SERVER_IP:PORT/__profile__/all/NFL?format=json

NFL is defined by the concatenation of file name, function name and the first
line number. e.g.::

    account.py:50(GETorHEAD)

or with full path:

    opt/stack/swift/swift/proxy/controllers/account.py:50(GETorHEAD)

A list of URL examples:

    http://localhost:8080/__profile__ (proxy server)
    http://localhost:6000/__profile__/all (object server)
    http://localhost:6001/__profile__/current (container server)
    http://localhost:6002/__profile__/12345?format=json (account server)

The profiling middleware can be configured in the paste file for WSGI servers
such as the proxy, account, container and object servers. Please refer to the
sample configuration files in the etc directory.

The profiling data is provided in four formats: binary (by default), json,
csv and odf spreadsheet; the last requires the odfpy library to be installed.

    sudo pip install odfpy

There's also a simple visualization capability, enabled by the matplotlib
toolkit; it must also be installed if you want to visualize statistical data.
sudo apt-get install python-matplotlib """ import os import sys import time from eventlet import greenthread, GreenPool, patcher import eventlet.green.profile as eprofile import six from six.moves import urllib from swift import gettext_ as _ from swift.common.utils import get_logger, config_true_value from swift.common.swob import Request from x_profile.exceptions import NotFoundException, MethodNotAllowed,\ ProfileException from x_profile.html_viewer import HTMLViewer from x_profile.profile_model import ProfileLog DEFAULT_PROFILE_PREFIX = '/tmp/log/swift/profile/default.profile' # unwind the iterator; it may call start_response, do lots of work, etc PROFILE_EXEC_EAGER = """ app_iter = self.app(environ, start_response) app_iter_ = list(app_iter) if hasattr(app_iter, 'close'): app_iter.close() """ # don't unwind the iterator (don't consume resources) PROFILE_EXEC_LAZY = """ app_iter_ = self.app(environ, start_response) """ thread = patcher.original('thread') # non-monkeypatched module needed # This monkey patch code fix the problem of eventlet profile tool # which can not accumulate profiling results across multiple calls # of runcalls and runctx. def new_setup(self): self._has_setup = True self.cur = None self.timings = {} self.current_tasklet = greenthread.getcurrent() self.thread_id = thread.get_ident() self.simulate_call("profiler") def new_runctx(self, cmd, globals, locals): if not getattr(self, '_has_setup', False): self._setup() try: return self.base.runctx(self, cmd, globals, locals) finally: self.TallyTimings() def new_runcall(self, func, *args, **kw): if not getattr(self, '_has_setup', False): self._setup() try: return self.base.runcall(self, func, *args, **kw) finally: self.TallyTimings() class ProfileMiddleware(object): def __init__(self, app, conf): self.app = app self.logger = get_logger(conf, log_route='profile') self.log_filename_prefix = conf.get('log_filename_prefix', DEFAULT_PROFILE_PREFIX) dirname = os.path.dirname(self.log_filename_prefix) # Notes: this effort may fail due to permission denied. # it is better to be created and authorized to current # user in advance. 
if not os.path.exists(dirname): os.makedirs(dirname) self.dump_interval = float(conf.get('dump_interval', 5.0)) self.dump_timestamp = config_true_value(conf.get( 'dump_timestamp', 'no')) self.flush_at_shutdown = config_true_value(conf.get( 'flush_at_shutdown', 'no')) self.path = conf.get('path', '__profile__').replace('/', '') self.unwind = config_true_value(conf.get('unwind', 'no')) self.profile_module = conf.get('profile_module', 'eventlet.green.profile') self.profiler = get_profiler(self.profile_module) self.profile_log = ProfileLog(self.log_filename_prefix, self.dump_timestamp) self.viewer = HTMLViewer(self.path, self.profile_module, self.profile_log) self.dump_pool = GreenPool(1000) self.last_dump_at = None def __del__(self): if self.flush_at_shutdown: self.profile_log.clear(str(os.getpid())) def _combine_body_qs(self, request): wsgi_input = request.environ['wsgi.input'] query_dict = request.params qs_in_body = wsgi_input.read() query_dict.update(urllib.parse.parse_qs(qs_in_body, keep_blank_values=True, strict_parsing=False)) return query_dict def dump_checkpoint(self): current_time = time.time() if self.last_dump_at is None or self.last_dump_at +\ self.dump_interval < current_time: self.dump_pool.spawn_n(self.profile_log.dump_profile, self.profiler, os.getpid()) self.last_dump_at = current_time def __call__(self, environ, start_response): request = Request(environ) path_entry = request.path_info.split('/') # hijack favicon request sent by browser so that it doesn't # invoke profiling hook and contaminate the data. if path_entry[1] == 'favicon.ico': start_response('200 OK', []) return '' elif path_entry[1] == self.path: try: self.dump_checkpoint() query_dict = self._combine_body_qs(request) content, headers = self.viewer.render(request.url, request.method, path_entry, query_dict, self.renew_profile) start_response('200 OK', headers) if isinstance(content, six.text_type): content = content.encode('utf-8') return [content] except MethodNotAllowed as mx: start_response('405 Method Not Allowed', []) return '%s' % mx except NotFoundException as nx: start_response('404 Not Found', []) return '%s' % nx except ProfileException as pf: start_response('500 Internal Server Error', []) return '%s' % pf except Exception as ex: start_response('500 Internal Server Error', []) return _('Error on render profiling results: %s') % ex else: _locals = locals() code = self.unwind and PROFILE_EXEC_EAGER or\ PROFILE_EXEC_LAZY self.profiler.runctx(code, globals(), _locals) app_iter = _locals['app_iter_'] self.dump_checkpoint() return app_iter def renew_profile(self): self.profiler = get_profiler(self.profile_module) def get_profiler(profile_module): if profile_module == 'eventlet.green.profile': eprofile.Profile._setup = new_setup eprofile.Profile.runctx = new_runctx eprofile.Profile.runcall = new_runcall # hacked method to import profile module supported in python 2.6 __import__(profile_module) return sys.modules[profile_module].Profile() def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def profile_filter(app): return ProfileMiddleware(app, conf) return profile_filter swift-2.7.0/swift/common/middleware/catch_errors.py0000664000567000056710000000605112675204037023616 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swift import gettext_ as _ from swift.common.swob import Request, HTTPServerError from swift.common.utils import get_logger, generate_trans_id from swift.common.wsgi import WSGIContext class CatchErrorsContext(WSGIContext): def __init__(self, app, logger, trans_id_suffix=''): super(CatchErrorsContext, self).__init__(app) self.logger = logger self.trans_id_suffix = trans_id_suffix def handle_request(self, env, start_response): trans_id_suffix = self.trans_id_suffix trans_id_extra = env.get('HTTP_X_TRANS_ID_EXTRA') if trans_id_extra: trans_id_suffix += '-' + trans_id_extra[:32] trans_id = generate_trans_id(trans_id_suffix) env['swift.trans_id'] = trans_id self.logger.txn_id = trans_id try: # catch any errors in the pipeline resp = self._app_call(env) except: # noqa self.logger.exception(_('Error: An error occurred')) resp = HTTPServerError(request=Request(env), body='An error occurred', content_type='text/plain') resp.headers['X-Trans-Id'] = trans_id return resp(env, start_response) # make sure the response has the trans_id if self._response_headers is None: self._response_headers = [] self._response_headers.append(('X-Trans-Id', trans_id)) start_response(self._response_status, self._response_headers, self._response_exc_info) return resp class CatchErrorMiddleware(object): """ Middleware that provides high-level error handling and ensures that a transaction id will be set for every request. """ def __init__(self, app, conf): self.app = app self.logger = get_logger(conf, log_route='catch-errors') self.trans_id_suffix = conf.get('trans_id_suffix', '') def __call__(self, env, start_response): """ If used, this should be the first middleware in pipeline. """ context = CatchErrorsContext(self.app, self.logger, self.trans_id_suffix) return context.handle_request(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def except_filter(app): return CatchErrorMiddleware(app, conf) return except_filter swift-2.7.0/swift/common/middleware/recon.py0000664000567000056710000004103212675204037022244 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import errno import json import os import time from swift import gettext_ as _ from swift import __version__ as swiftver from swift.common.storage_policy import POLICIES from swift.common.swob import Request, Response from swift.common.utils import get_logger, config_true_value, \ SWIFT_CONF_FILE from swift.common.constraints import check_mount from resource import getpagesize from hashlib import md5 class ReconMiddleware(object): """ Recon middleware used for monitoring. /recon/load|mem|async... will return various system metrics. Needs to be added to the pipeline and requires a filter declaration in the object-server.conf: [filter:recon] use = egg:swift#recon recon_cache_path = /var/cache/swift """ def __init__(self, app, conf, *args, **kwargs): self.app = app self.devices = conf.get('devices', '/srv/node') swift_dir = conf.get('swift_dir', '/etc/swift') self.logger = get_logger(conf, log_route='recon') self.recon_cache_path = conf.get('recon_cache_path', '/var/cache/swift') self.object_recon_cache = os.path.join(self.recon_cache_path, 'object.recon') self.container_recon_cache = os.path.join(self.recon_cache_path, 'container.recon') self.account_recon_cache = os.path.join(self.recon_cache_path, 'account.recon') self.drive_recon_cache = os.path.join(self.recon_cache_path, 'drive.recon') self.account_ring_path = os.path.join(swift_dir, 'account.ring.gz') self.container_ring_path = os.path.join(swift_dir, 'container.ring.gz') self.rings = [self.account_ring_path, self.container_ring_path] # include all object ring files (for all policies) for policy in POLICIES: self.rings.append(os.path.join(swift_dir, policy.ring_name + '.ring.gz')) self.mount_check = config_true_value(conf.get('mount_check', 'true')) def _from_recon_cache(self, cache_keys, cache_file, openr=open): """retrieve values from a recon cache file :params cache_keys: list of cache items to retrieve :params cache_file: cache file to retrieve items from. 
:params openr: open to use [for unittests] :return: dict of cache items and their values or none if not found """ try: with openr(cache_file, 'r') as f: recondata = json.load(f) return dict((key, recondata.get(key)) for key in cache_keys) except IOError: self.logger.exception(_('Error reading recon cache file')) except ValueError: self.logger.exception(_('Error parsing recon cache file')) except Exception: self.logger.exception(_('Error retrieving recon data')) return dict((key, None) for key in cache_keys) def get_version(self): """get swift version""" verinfo = {'version': swiftver} return verinfo def get_mounted(self, openr=open): """get ALL mounted fs from /proc/mounts""" mounts = [] with openr('/proc/mounts', 'r') as procmounts: for line in procmounts: mount = {} mount['device'], mount['path'], opt1, opt2, opt3, \ opt4 = line.rstrip().split() mounts.append(mount) return mounts def get_load(self, openr=open): """get info from /proc/loadavg""" loadavg = {} with openr('/proc/loadavg', 'r') as f: onemin, fivemin, ftmin, tasks, procs = f.read().rstrip().split() loadavg['1m'] = float(onemin) loadavg['5m'] = float(fivemin) loadavg['15m'] = float(ftmin) loadavg['tasks'] = tasks loadavg['processes'] = int(procs) return loadavg def get_mem(self, openr=open): """get info from /proc/meminfo""" meminfo = {} with openr('/proc/meminfo', 'r') as memlines: for i in memlines: entry = i.rstrip().split(":") meminfo[entry[0]] = entry[1].strip() return meminfo def get_async_info(self): """get # of async pendings""" return self._from_recon_cache(['async_pending'], self.object_recon_cache) def get_driveaudit_error(self): """get # of drive audit errors""" return self._from_recon_cache(['drive_audit_errors'], self.drive_recon_cache) def get_replication_info(self, recon_type): """get replication info""" replication_list = ['replication_time', 'replication_stats', 'replication_last'] if recon_type == 'account': return self._from_recon_cache(replication_list, self.account_recon_cache) elif recon_type == 'container': return self._from_recon_cache(replication_list, self.container_recon_cache) elif recon_type == 'object': replication_list += ['object_replication_time', 'object_replication_last'] return self._from_recon_cache(replication_list, self.object_recon_cache) else: return None def get_device_info(self): """get devices""" try: return {self.devices: os.listdir(self.devices)} except Exception: self.logger.exception(_('Error listing devices')) return {self.devices: None} def get_updater_info(self, recon_type): """get updater info""" if recon_type == 'container': return self._from_recon_cache(['container_updater_sweep'], self.container_recon_cache) elif recon_type == 'object': return self._from_recon_cache(['object_updater_sweep'], self.object_recon_cache) else: return None def get_expirer_info(self, recon_type): """get expirer info""" if recon_type == 'object': return self._from_recon_cache(['object_expiration_pass', 'expired_last_pass'], self.object_recon_cache) def get_auditor_info(self, recon_type): """get auditor info""" if recon_type == 'account': return self._from_recon_cache(['account_audits_passed', 'account_auditor_pass_completed', 'account_audits_since', 'account_audits_failed'], self.account_recon_cache) elif recon_type == 'container': return self._from_recon_cache(['container_audits_passed', 'container_auditor_pass_completed', 'container_audits_since', 'container_audits_failed'], self.container_recon_cache) elif recon_type == 'object': return self._from_recon_cache(['object_auditor_stats_ALL', 
'object_auditor_stats_ZBF'], self.object_recon_cache) else: return None def get_unmounted(self): """list unmounted (failed?) devices""" mountlist = [] for entry in os.listdir(self.devices): if not os.path.isdir(os.path.join(self.devices, entry)): continue try: mounted = check_mount(self.devices, entry) except OSError as err: mounted = str(err) mpoint = {'device': entry, 'mounted': mounted} if mpoint['mounted'] is not True: mountlist.append(mpoint) return mountlist def get_diskusage(self): """get disk utilization statistics""" devices = [] for entry in os.listdir(self.devices): if not os.path.isdir(os.path.join(self.devices, entry)): continue try: mounted = check_mount(self.devices, entry) except OSError as err: devices.append({'device': entry, 'mounted': str(err), 'size': '', 'used': '', 'avail': ''}) continue if mounted: path = os.path.join(self.devices, entry) disk = os.statvfs(path) capacity = disk.f_bsize * disk.f_blocks available = disk.f_bsize * disk.f_bavail used = disk.f_bsize * (disk.f_blocks - disk.f_bavail) devices.append({'device': entry, 'mounted': True, 'size': capacity, 'used': used, 'avail': available}) else: devices.append({'device': entry, 'mounted': False, 'size': '', 'used': '', 'avail': ''}) return devices def get_ring_md5(self, openr=open): """get all ring md5sum's""" sums = {} for ringfile in self.rings: md5sum = md5() if os.path.exists(ringfile): try: with openr(ringfile, 'rb') as f: block = f.read(4096) while block: md5sum.update(block) block = f.read(4096) sums[ringfile] = md5sum.hexdigest() except IOError as err: sums[ringfile] = None if err.errno != errno.ENOENT: self.logger.exception(_('Error reading ringfile')) return sums def get_swift_conf_md5(self, openr=open): """get md5 of swift.conf""" md5sum = md5() try: with openr(SWIFT_CONF_FILE, 'r') as fh: chunk = fh.read(4096) while chunk: md5sum.update(chunk) chunk = fh.read(4096) except IOError as err: if err.errno != errno.ENOENT: self.logger.exception(_('Error reading swift.conf')) hexsum = None else: hexsum = md5sum.hexdigest() return {SWIFT_CONF_FILE: hexsum} def get_quarantine_count(self): """get obj/container/account quarantine counts""" qcounts = {"objects": 0, "containers": 0, "accounts": 0, "policies": {}} qdir = "quarantined" for device in os.listdir(self.devices): qpath = os.path.join(self.devices, device, qdir) if os.path.exists(qpath): for qtype in os.listdir(qpath): qtgt = os.path.join(qpath, qtype) linkcount = os.lstat(qtgt).st_nlink if linkcount > 2: if qtype.startswith('objects'): if '-' in qtype: pkey = qtype.split('-', 1)[1] else: pkey = '0' qcounts['policies'].setdefault(pkey, {'objects': 0}) qcounts['policies'][pkey]['objects'] \ += linkcount - 2 qcounts['objects'] += linkcount - 2 else: qcounts[qtype] += linkcount - 2 return qcounts def get_socket_info(self, openr=open): """ get info from /proc/net/sockstat and sockstat6 Note: The mem value is actually kernel pages, but we return bytes allocated based on the systems page size. 
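A sketch of the kind of dict this method returns (the key names match those
set in the code below; the numeric values here are made up)::

    {'tcp_in_use': 30, 'orphan': 0, 'time_wait': 151,
     'tcp_mem_allocated_bytes': 65536, 'tcp6_in_use': 2}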
""" sockstat = {} try: with openr('/proc/net/sockstat', 'r') as proc_sockstat: for entry in proc_sockstat: if entry.startswith("TCP: inuse"): tcpstats = entry.split() sockstat['tcp_in_use'] = int(tcpstats[2]) sockstat['orphan'] = int(tcpstats[4]) sockstat['time_wait'] = int(tcpstats[6]) sockstat['tcp_mem_allocated_bytes'] = \ int(tcpstats[10]) * getpagesize() except IOError as e: if e.errno != errno.ENOENT: raise try: with openr('/proc/net/sockstat6', 'r') as proc_sockstat6: for entry in proc_sockstat6: if entry.startswith("TCP6: inuse"): sockstat['tcp6_in_use'] = int(entry.split()[2]) except IOError as e: if e.errno != errno.ENOENT: raise return sockstat def get_time(self): """get current time""" return time.time() def GET(self, req): root, rcheck, rtype = req.split_path(1, 3, True) all_rtypes = ['account', 'container', 'object'] if rcheck == "mem": content = self.get_mem() elif rcheck == "load": content = self.get_load() elif rcheck == "async": content = self.get_async_info() elif rcheck == 'replication' and rtype in all_rtypes: content = self.get_replication_info(rtype) elif rcheck == 'replication' and rtype is None: # handle old style object replication requests content = self.get_replication_info('object') elif rcheck == "devices": content = self.get_device_info() elif rcheck == "updater" and rtype in ['container', 'object']: content = self.get_updater_info(rtype) elif rcheck == "auditor" and rtype in all_rtypes: content = self.get_auditor_info(rtype) elif rcheck == "expirer" and rtype == 'object': content = self.get_expirer_info(rtype) elif rcheck == "mounted": content = self.get_mounted() elif rcheck == "unmounted": content = self.get_unmounted() elif rcheck == "diskusage": content = self.get_diskusage() elif rcheck == "ringmd5": content = self.get_ring_md5() elif rcheck == "swiftconfmd5": content = self.get_swift_conf_md5() elif rcheck == "quarantined": content = self.get_quarantine_count() elif rcheck == "sockstat": content = self.get_socket_info() elif rcheck == "version": content = self.get_version() elif rcheck == "driveaudit": content = self.get_driveaudit_error() elif rcheck == "time": content = self.get_time() else: content = "Invalid path: %s" % req.path return Response(request=req, status="404 Not Found", body=content, content_type="text/plain") if content is not None: return Response(request=req, body=json.dumps(content), content_type="application/json") else: return Response(request=req, status="500 Server Error", body="Internal server error.", content_type="text/plain") def __call__(self, env, start_response): req = Request(env) if req.path.startswith('/recon/'): return self.GET(req)(env, start_response) else: return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def recon_filter(app): return ReconMiddleware(app, conf) return recon_filter swift-2.7.0/swift/common/middleware/proxy_logging.py0000664000567000056710000003566212675204037024041 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2011 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ Logging middleware for the Swift proxy. This serves as both the default logging implementation and an example of how to plug in your own logging format/method. The logging format implemented below is as follows: client_ip remote_addr datetime request_method request_path protocol status_int referer user_agent auth_token bytes_recvd bytes_sent client_etag transaction_id headers request_time source log_info request_start_time request_end_time These values are space-separated, and each is url-encoded, so that they can be separated with a simple .split() * remote_addr is the contents of the REMOTE_ADDR environment variable, while client_ip is swift's best guess at the end-user IP, extracted variously from the X-Forwarded-For header, X-Cluster-Ip header, or the REMOTE_ADDR environment variable. * source (swift.source in the WSGI environment) indicates the code that generated the request, such as most middleware. (See below for more detail.) * log_info (swift.log_info in the WSGI environment) is for additional information that could prove quite useful, such as any x-delete-at value or other "behind the scenes" activity that might not otherwise be detectable from the plain log information. Code that wishes to add additional log information should use code like ``env.setdefault('swift.log_info', []).append(your_info)`` so as to not disturb others' log information. * Values that are missing (e.g. due to a header not being present) or zero are generally represented by a single hyphen ('-'). The proxy-logging can be used twice in the proxy server's pipeline when there is middleware installed that can return custom responses that don't follow the standard pipeline to the proxy server. For example, with staticweb, the middleware might intercept a request to /v1/AUTH_acc/cont/, make a subrequest to the proxy to retrieve /v1/AUTH_acc/cont/index.html and, in effect, respond to the client's original request using the 2nd request's body. In this instance the subrequest will be logged by the rightmost middleware (with a swift.source set) and the outgoing request (with body overridden) will be logged by leftmost middleware. Requests that follow the normal pipeline (use the same wsgi environment throughout) will not be double logged because an environment variable (swift.proxy_access_log_made) is checked/set when a log is made. All middleware making subrequests should take care to set swift.source when needed. With the doubled proxy logs, any consumer/processor of swift's proxy logs should look at the swift.source field, the rightmost log value, to decide if this is a middleware subrequest or not. A log processor calculating bandwidth usage will want to only sum up logs with no swift.source. """ import sys import time import six from six.moves.urllib.parse import quote, unquote from swift.common.swob import Request from swift.common.utils import (get_logger, get_remote_client, get_valid_utf8_str, config_true_value, InputProxy, list_from_csv, get_policy_index) from swift.common.storage_policy import POLICIES QUOTE_SAFE = '/:' class ProxyLoggingMiddleware(object): """ Middleware that logs Swift proxy requests in the swift log format. 
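The log line itself is space-separated and url-encoded, as described in the
module docstring above, so it can be split back into fields with a sketch
like this (``parse_access_log_line`` is just an illustrative name, not part
of this module)::

    from six.moves.urllib.parse import unquote

    def parse_access_log_line(line):
        # one url-encoded value per documented field, in order
        return [unquote(field) for field in line.split()]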
""" def __init__(self, app, conf, logger=None): self.app = app self.log_hdrs = config_true_value(conf.get( 'access_log_headers', conf.get('log_headers', 'no'))) log_hdrs_only = list_from_csv(conf.get( 'access_log_headers_only', '')) self.log_hdrs_only = [x.title() for x in log_hdrs_only] # The leading access_* check is in case someone assumes that # log_statsd_valid_http_methods behaves like the other log_statsd_* # settings. self.valid_methods = conf.get( 'access_log_statsd_valid_http_methods', conf.get('log_statsd_valid_http_methods', 'GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS')) self.valid_methods = [m.strip().upper() for m in self.valid_methods.split(',') if m.strip()] access_log_conf = {} for key in ('log_facility', 'log_name', 'log_level', 'log_udp_host', 'log_udp_port', 'log_statsd_host', 'log_statsd_port', 'log_statsd_default_sample_rate', 'log_statsd_sample_rate_factor', 'log_statsd_metric_prefix'): value = conf.get('access_' + key, conf.get(key, None)) if value: access_log_conf[key] = value self.access_logger = logger or get_logger(access_log_conf, log_route='proxy-access') self.access_logger.set_statsd_prefix('proxy-server') self.reveal_sensitive_prefix = int( conf.get('reveal_sensitive_prefix', 16)) def method_from_req(self, req): return req.environ.get('swift.orig_req_method', req.method) def req_already_logged(self, env): return env.get('swift.proxy_access_log_made') def mark_req_logged(self, env): env['swift.proxy_access_log_made'] = True def obscure_sensitive(self, value): if value and len(value) > self.reveal_sensitive_prefix: return value[:self.reveal_sensitive_prefix] + '...' return value def log_request(self, req, status_int, bytes_received, bytes_sent, start_time, end_time, resp_headers=None): """ Log a request. :param req: swob.Request object for the request :param status_int: integer code for the response status :param bytes_received: bytes successfully read from the request body :param bytes_sent: bytes yielded to the WSGI server :param start_time: timestamp request started :param end_time: timestamp request completed :param resp_headers: dict of the response headers """ resp_headers = resp_headers or {} req_path = get_valid_utf8_str(req.path) the_request = quote(unquote(req_path), QUOTE_SAFE) if req.query_string: the_request = the_request + '?' 
+ req.query_string logged_headers = None if self.log_hdrs: if self.log_hdrs_only: logged_headers = '\n'.join('%s: %s' % (k, v) for k, v in req.headers.items() if k in self.log_hdrs_only) else: logged_headers = '\n'.join('%s: %s' % (k, v) for k, v in req.headers.items()) method = self.method_from_req(req) end_gmtime_str = time.strftime('%d/%b/%Y/%H/%M/%S', time.gmtime(end_time)) duration_time_str = "%.4f" % (end_time - start_time) start_time_str = "%.9f" % start_time end_time_str = "%.9f" % end_time policy_index = get_policy_index(req.headers, resp_headers) self.access_logger.info(' '.join( quote(str(x) if x else '-', QUOTE_SAFE) for x in ( get_remote_client(req), req.remote_addr, end_gmtime_str, method, the_request, req.environ.get('SERVER_PROTOCOL'), status_int, req.referer, req.user_agent, self.obscure_sensitive(req.headers.get('x-auth-token')), bytes_received, bytes_sent, req.headers.get('etag', None), req.environ.get('swift.trans_id'), logged_headers, duration_time_str, req.environ.get('swift.source'), ','.join(req.environ.get('swift.log_info') or ''), start_time_str, end_time_str, policy_index ))) # Log timing and bytes-transferred data to StatsD metric_name = self.statsd_metric_name(req, status_int, method) metric_name_policy = self.statsd_metric_name_policy(req, status_int, method, policy_index) # Only log data for valid controllers (or SOS) to keep the metric count # down (egregious errors will get logged by the proxy server itself). if metric_name: self.access_logger.timing(metric_name + '.timing', (end_time - start_time) * 1000) self.access_logger.update_stats(metric_name + '.xfer', bytes_received + bytes_sent) if metric_name_policy: self.access_logger.timing(metric_name_policy + '.timing', (end_time - start_time) * 1000) self.access_logger.update_stats(metric_name_policy + '.xfer', bytes_received + bytes_sent) def get_metric_name_type(self, req): if req.path.startswith('/v1/'): try: stat_type = [None, 'account', 'container', 'object'][req.path.strip('/').count('/')] except IndexError: stat_type = 'object' else: stat_type = req.environ.get('swift.source') return stat_type def statsd_metric_name(self, req, status_int, method): stat_type = self.get_metric_name_type(req) if stat_type is None: return None stat_method = method if method in self.valid_methods \ else 'BAD_METHOD' return '.'.join((stat_type, stat_method, str(status_int))) def statsd_metric_name_policy(self, req, status_int, method, policy_index): if policy_index is None: return None stat_type = self.get_metric_name_type(req) if stat_type == 'object': stat_method = method if method in self.valid_methods \ else 'BAD_METHOD' # The policy may not exist policy = POLICIES.get_by_index(policy_index) if policy: return '.'.join((stat_type, 'policy', str(policy_index), stat_method, str(status_int))) else: return None else: return None def __call__(self, env, start_response): if self.req_already_logged(env): return self.app(env, start_response) self.mark_req_logged(env) start_response_args = [None] input_proxy = InputProxy(env['wsgi.input']) env['wsgi.input'] = input_proxy start_time = time.time() def my_start_response(status, headers, exc_info=None): start_response_args[0] = (status, list(headers), exc_info) def status_int_for_logging(client_disconnect=False, start_status=None): # log disconnected clients as '499' status code if client_disconnect or input_proxy.client_disconnect: ret_status_int = 499 elif start_status is None: ret_status_int = int( start_response_args[0][0].split(' ', 1)[0]) else: ret_status_int = start_status 
return ret_status_int def iter_response(iterable): iterator = iter(iterable) try: chunk = next(iterator) while not chunk: chunk = next(iterator) except StopIteration: chunk = '' for h, v in start_response_args[0][1]: if h.lower() in ('content-length', 'transfer-encoding'): break else: if not chunk: start_response_args[0][1].append(('Content-Length', '0')) elif isinstance(iterable, list): start_response_args[0][1].append( ('Content-Length', str(sum(len(i) for i in iterable)))) resp_headers = dict(start_response_args[0][1]) start_response(*start_response_args[0]) req = Request(env) # Log timing information for time-to-first-byte (GET requests only) method = self.method_from_req(req) if method == 'GET': status_int = status_int_for_logging() policy_index = get_policy_index(req.headers, resp_headers) metric_name = self.statsd_metric_name(req, status_int, method) metric_name_policy = self.statsd_metric_name_policy( req, status_int, method, policy_index) if metric_name: self.access_logger.timing_since( metric_name + '.first-byte.timing', start_time) if metric_name_policy: self.access_logger.timing_since( metric_name_policy + '.first-byte.timing', start_time) bytes_sent = 0 client_disconnect = False try: while chunk: bytes_sent += len(chunk) yield chunk chunk = next(iterator) except GeneratorExit: # generator was closed before we finished client_disconnect = True raise finally: status_int = status_int_for_logging(client_disconnect) self.log_request( req, status_int, input_proxy.bytes_received, bytes_sent, start_time, time.time(), resp_headers=resp_headers) close_method = getattr(iterable, 'close', None) if callable(close_method): close_method() try: iterable = self.app(env, my_start_response) except Exception: exc_type, exc_value, exc_traceback = sys.exc_info() req = Request(env) status_int = status_int_for_logging(start_status=500) self.log_request( req, status_int, input_proxy.bytes_received, 0, start_time, time.time()) six.reraise(exc_type, exc_value, exc_traceback) else: return iter_response(iterable) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def proxy_logger(app): return ProxyLoggingMiddleware(app, conf) return proxy_logger swift-2.7.0/swift/common/middleware/tempurl.py0000664000567000056710000006143712675204037022641 0ustar jenkinsjenkins00000000000000# Copyright (c) 2011-2014 Greg Holt # Copyright (c) 2012-2013 John Dickinson # Copyright (c) 2012 Felipe Reyes # Copyright (c) 2012 Peter Portante # Copyright (c) 2012 Victor Rodionov # Copyright (c) 2013-2014 Samuel Merritt # Copyright (c) 2013 Chuck Thier # Copyright (c) 2013 David Goetz # Copyright (c) 2013 Dirk Mueller # Copyright (c) 2013 Donagh McCabe # Copyright (c) 2013 Fabien Boucher # Copyright (c) 2013 Greg Lange # Copyright (c) 2013 Kun Huang # Copyright (c) 2013 Richard Hawkins # Copyright (c) 2013 Tong Li # Copyright (c) 2013 ZhiQiang Fan # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ TempURL Middleware Allows the creation of URLs to provide temporary access to objects. 
For example, a website may wish to provide a link to download a large object in Swift, but the Swift account has no public access. The website can generate a URL that will provide GET access for a limited time to the resource. When the web browser user clicks on the link, the browser will download the object directly from Swift, obviating the need for the website to act as a proxy for the request. If the user were to share the link with all his friends, or accidentally post it on a forum, etc. the direct access would be limited to the expiration time set when the website created the link. ------------ Client Usage ------------ To create such temporary URLs, first an ``X-Account-Meta-Temp-URL-Key`` header must be set on the Swift account. Then, an HMAC-SHA1 (RFC 2104) signature is generated using the HTTP method to allow (``GET``, ``PUT``, ``DELETE``, etc.), the Unix timestamp the access should be allowed until, the full path to the object, and the key set on the account. For example, here is code generating the signature for a ``GET`` for 60 seconds on ``/v1/AUTH_account/container/object``:: import hmac from hashlib import sha1 from time import time method = 'GET' expires = int(time() + 60) path = '/v1/AUTH_account/container/object' key = 'mykey' hmac_body = '%s\\n%s\\n%s' % (method, expires, path) sig = hmac.new(key, hmac_body, sha1).hexdigest() Be certain to use the full path, from the ``/v1/`` onward. Let's say ``sig`` ends up equaling ``da39a3ee5e6b4b0d3255bfef95601890afd80709`` and ``expires`` ends up ``1323479485``. Then, for example, the website could provide a link to:: https://swift-cluster.example.com/v1/AUTH_account/container/object? temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709& temp_url_expires=1323479485 Any alteration of the resource path or query arguments would result in ``401 Unauthorized``. Similarly, a ``PUT`` where ``GET`` was the allowed method would be rejected with ``401 Unauthorized``. However, ``HEAD`` is allowed if ``GET``, ``PUT``, or ``POST`` is allowed. Using this in combination with browser form post translation middleware could also allow direct-from-browser uploads to specific locations in Swift. TempURL supports both account and container level keys. Each allows up to two keys to be set, allowing key rotation without invalidating all existing temporary URLs. Account keys are specified by ``X-Account-Meta-Temp-URL-Key`` and ``X-Account-Meta-Temp-URL-Key-2``, while container keys are specified by ``X-Container-Meta-Temp-URL-Key`` and ``X-Container-Meta-Temp-URL-Key-2``. Signatures are checked against account and container keys, if present. With ``GET`` TempURLs, a ``Content-Disposition`` header will be set on the response so that browsers will interpret this as a file attachment to be saved. The filename chosen is based on the object name, but you can override this with a filename query parameter. Modifying the above example:: https://swift-cluster.example.com/v1/AUTH_account/container/object? temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709& temp_url_expires=1323479485&filename=My+Test+File.pdf If you do not want the object to be downloaded, you can cause ``Content-Disposition: inline`` to be set on the response by adding the ``inline`` parameter to the query string, like so:: https://swift-cluster.example.com/v1/AUTH_account/container/object? 
temp_url_sig=da39a3ee5e6b4b0d3255bfef95601890afd80709& temp_url_expires=1323479485&inline --------------------- Cluster Configuration --------------------- This middleware understands the following configuration settings: ``incoming_remove_headers`` A whitespace-delimited list of the headers to remove from incoming requests. Names may optionally end with ``*`` to indicate a prefix match. ``incoming_allow_headers`` is a list of exceptions to these removals. Default: ``x-timestamp`` ``incoming_allow_headers`` A whitespace-delimited list of the headers allowed as exceptions to ``incoming_remove_headers``. Names may optionally end with ``*`` to indicate a prefix match. Default: None ``outgoing_remove_headers`` A whitespace-delimited list of the headers to remove from outgoing responses. Names may optionally end with ``*`` to indicate a prefix match. ``outgoing_allow_headers`` is a list of exceptions to these removals. Default: ``x-object-meta-*`` ``outgoing_allow_headers`` A whitespace-delimited list of the headers allowed as exceptions to ``outgoing_remove_headers``. Names may optionally end with ``*`` to indicate a prefix match. Default: ``x-object-meta-public-*`` ``methods`` A whitespace delimited list of request methods that are allowed to be used with a temporary URL. Default: ``GET HEAD PUT POST DELETE`` """ __all__ = ['TempURL', 'filter_factory', 'DEFAULT_INCOMING_REMOVE_HEADERS', 'DEFAULT_INCOMING_ALLOW_HEADERS', 'DEFAULT_OUTGOING_REMOVE_HEADERS', 'DEFAULT_OUTGOING_ALLOW_HEADERS'] from os.path import basename from time import time from six.moves.urllib.parse import parse_qs from six.moves.urllib.parse import urlencode from swift.proxy.controllers.base import get_account_info, get_container_info from swift.common.header_key_dict import HeaderKeyDict from swift.common.swob import header_to_environ_key, HTTPUnauthorized, \ HTTPBadRequest from swift.common.utils import split_path, get_valid_utf8_str, \ register_swift_info, get_hmac, streq_const_time, quote DISALLOWED_INCOMING_HEADERS = 'x-object-manifest' #: Default headers to remove from incoming requests. Simply a whitespace #: delimited list of header names and names can optionally end with '*' to #: indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS is a list of #: exceptions to these removals. DEFAULT_INCOMING_REMOVE_HEADERS = 'x-timestamp' #: Default headers as exceptions to DEFAULT_INCOMING_REMOVE_HEADERS. Simply a #: whitespace delimited list of header names and names can optionally end with #: '*' to indicate a prefix match. DEFAULT_INCOMING_ALLOW_HEADERS = '' #: Default headers to remove from outgoing responses. Simply a whitespace #: delimited list of header names and names can optionally end with '*' to #: indicate a prefix match. DEFAULT_OUTGOING_ALLOW_HEADERS is a list of #: exceptions to these removals. DEFAULT_OUTGOING_REMOVE_HEADERS = 'x-object-meta-*' #: Default headers as exceptions to DEFAULT_OUTGOING_REMOVE_HEADERS. Simply a #: whitespace delimited list of header names and names can optionally end with #: '*' to indicate a prefix match. DEFAULT_OUTGOING_ALLOW_HEADERS = 'x-object-meta-public-*' CONTAINER_SCOPE = 'container' ACCOUNT_SCOPE = 'account' def get_tempurl_keys_from_metadata(meta): """ Extracts the tempurl keys from metadata. 
:param meta: account metadata :returns: list of keys found (possibly empty if no keys set) Example: meta = get_account_info(...)['meta'] keys = get_tempurl_keys_from_metadata(meta) """ return [get_valid_utf8_str(value) for key, value in meta.items() if key.lower() in ('temp-url-key', 'temp-url-key-2')] def disposition_format(filename): return '''attachment; filename="%s"; filename*=UTF-8''%s''' % ( quote(filename, safe=' /'), quote(filename)) def authorize_same_account(account_to_match): def auth_callback_same_account(req): try: _ver, acc, _rest = req.split_path(2, 3, True) except ValueError: return HTTPUnauthorized(request=req) if acc == account_to_match: return None else: return HTTPUnauthorized(request=req) return auth_callback_same_account def authorize_same_container(account_to_match, container_to_match): def auth_callback_same_container(req): try: _ver, acc, con, _rest = req.split_path(3, 4, True) except ValueError: return HTTPUnauthorized(request=req) if acc == account_to_match and con == container_to_match: return None else: return HTTPUnauthorized(request=req) return auth_callback_same_container class TempURL(object): """ WSGI Middleware to grant temporary URLs specific access to Swift resources. See the overview for more information. The proxy logs created for any subrequests made will have swift.source set to "TU". :param app: The next WSGI filter or app in the paste.deploy chain. :param conf: The configuration dict for the middleware. """ def __init__(self, app, conf): #: The next WSGI application/filter in the paste.deploy pipeline. self.app = app #: The filter configuration dict. self.conf = conf self.disallowed_headers = set( header_to_environ_key(h) for h in DISALLOWED_INCOMING_HEADERS.split()) headers = [header_to_environ_key(h) for h in conf.get('incoming_remove_headers', DEFAULT_INCOMING_REMOVE_HEADERS.split())] #: Headers to remove from incoming requests. Uppercase WSGI env style, #: like `HTTP_X_PRIVATE`. self.incoming_remove_headers = \ [h for h in headers if not h.endswith('*')] #: Header with match prefixes to remove from incoming requests. #: Uppercase WSGI env style, like `HTTP_X_SENSITIVE_*`. self.incoming_remove_headers_startswith = \ [h[:-1] for h in headers if h.endswith('*')] headers = [header_to_environ_key(h) for h in conf.get('incoming_allow_headers', DEFAULT_INCOMING_ALLOW_HEADERS.split())] #: Headers to allow in incoming requests. Uppercase WSGI env style, #: like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY`. self.incoming_allow_headers = \ [h for h in headers if not h.endswith('*')] #: Header with match prefixes to allow in incoming requests. Uppercase #: WSGI env style, like `HTTP_X_MATCHES_REMOVE_PREFIX_BUT_OKAY_*`. self.incoming_allow_headers_startswith = \ [h[:-1] for h in headers if h.endswith('*')] headers = [h.title() for h in conf.get('outgoing_remove_headers', DEFAULT_OUTGOING_REMOVE_HEADERS.split())] #: Headers to remove from outgoing responses. Lowercase, like #: `x-account-meta-temp-url-key`. self.outgoing_remove_headers = \ [h for h in headers if not h.endswith('*')] #: Header with match prefixes to remove from outgoing responses. #: Lowercase, like `x-account-meta-private-*`. self.outgoing_remove_headers_startswith = \ [h[:-1] for h in headers if h.endswith('*')] headers = [h.title() for h in conf.get('outgoing_allow_headers', DEFAULT_OUTGOING_ALLOW_HEADERS.split())] #: Headers to allow in outgoing responses. Lowercase, like #: `x-matches-remove-prefix-but-okay`. 
self.outgoing_allow_headers = \ [h for h in headers if not h.endswith('*')] #: Header with match prefixes to allow in outgoing responses. #: Lowercase, like `x-matches-remove-prefix-but-okay-*`. self.outgoing_allow_headers_startswith = \ [h[:-1] for h in headers if h.endswith('*')] #: HTTP user agent to use for subrequests. self.agent = '%(orig)s TempURL' def __call__(self, env, start_response): """ Main hook into the WSGI paste.deploy filter/app pipeline. :param env: The WSGI environment dict. :param start_response: The WSGI start_response hook. :returns: Response as per WSGI. """ if env['REQUEST_METHOD'] == 'OPTIONS': return self.app(env, start_response) info = self._get_temp_url_info(env) temp_url_sig, temp_url_expires, filename, inline_disposition = info if temp_url_sig is None and temp_url_expires is None: return self.app(env, start_response) if not temp_url_sig or not temp_url_expires: return self._invalid(env, start_response) account, container = self._get_account_and_container(env) if not account: return self._invalid(env, start_response) keys = self._get_keys(env) if not keys: return self._invalid(env, start_response) if env['REQUEST_METHOD'] == 'HEAD': hmac_vals = ( self._get_hmacs(env, temp_url_expires, keys) + self._get_hmacs(env, temp_url_expires, keys, request_method='GET') + self._get_hmacs(env, temp_url_expires, keys, request_method='POST') + self._get_hmacs(env, temp_url_expires, keys, request_method='PUT')) else: hmac_vals = self._get_hmacs(env, temp_url_expires, keys) is_valid_hmac = False hmac_scope = None for hmac, scope in hmac_vals: # While it's true that we short-circuit, this doesn't affect the # timing-attack resistance since the only way this will # short-circuit is when a valid signature is passed in. if streq_const_time(temp_url_sig, hmac): is_valid_hmac = True hmac_scope = scope break if not is_valid_hmac: return self._invalid(env, start_response) # disallowed headers prevent accidentally allowing upload of a pointer # to data that the PUT tempurl would not otherwise allow access for. # It should be safe to provide a GET tempurl for data that an # untrusted client just uploaded with a PUT tempurl. 
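        # For example, X-Object-Manifest (listed in
        # DISALLOWED_INCOMING_HEADERS above) is rejected by the check below,
        # so a PUT tempurl cannot create a manifest pointing at objects the
        # signature was never meant to expose.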
resp = self._clean_disallowed_headers(env, start_response) if resp: return resp self._clean_incoming_headers(env) if hmac_scope == ACCOUNT_SCOPE: env['swift.authorize'] = authorize_same_account(account) else: env['swift.authorize'] = authorize_same_container(account, container) env['swift.authorize_override'] = True env['REMOTE_USER'] = '.wsgi.tempurl' qs = {'temp_url_sig': temp_url_sig, 'temp_url_expires': temp_url_expires} if filename: qs['filename'] = filename env['QUERY_STRING'] = urlencode(qs) def _start_response(status, headers, exc_info=None): headers = self._clean_outgoing_headers(headers) if env['REQUEST_METHOD'] == 'GET' and status[0] == '2': # figure out the right value for content-disposition # 1) use the value from the query string # 2) use the value from the object metadata # 3) use the object name (default) out_headers = [] existing_disposition = None for h, v in headers: if h.lower() != 'content-disposition': out_headers.append((h, v)) else: existing_disposition = v if inline_disposition: disposition_value = 'inline' elif filename: disposition_value = disposition_format(filename) elif existing_disposition: disposition_value = existing_disposition else: name = basename(env['PATH_INFO'].rstrip('/')) disposition_value = disposition_format(name) # this is probably just paranoia, I couldn't actually get a # newline into existing_disposition value = disposition_value.replace('\n', '%0A') out_headers.append(('Content-Disposition', value)) headers = out_headers return start_response(status, headers, exc_info) return self.app(env, _start_response) def _get_account_and_container(self, env): """ Returns just the account and container for the request, if it's an object request and one of the configured methods; otherwise, None is returned. :param env: The WSGI environment for the request. :returns: (Account str, container str) or (None, None). """ if env['REQUEST_METHOD'] in self.conf['methods']: try: ver, acc, cont, obj = split_path(env['PATH_INFO'], 4, 4, True) except ValueError: return (None, None) if ver == 'v1' and obj.strip('/'): return (acc, cont) return (None, None) def _get_temp_url_info(self, env): """ Returns the provided temporary URL parameters (sig, expires), if given and syntactically valid. Either sig or expires could be None if not provided. If provided, expires is also converted to an int if possible or 0 if not, and checked for expiration (returns 0 if expired). :param env: The WSGI environment for the request. :returns: (sig, expires, filename, inline) as described above. """ temp_url_sig = temp_url_expires = filename = inline = None qs = parse_qs(env.get('QUERY_STRING', ''), keep_blank_values=True) if 'temp_url_sig' in qs: temp_url_sig = qs['temp_url_sig'][0] if 'temp_url_expires' in qs: try: temp_url_expires = int(qs['temp_url_expires'][0]) except ValueError: temp_url_expires = 0 if temp_url_expires < time(): temp_url_expires = 0 if 'filename' in qs: filename = qs['filename'][0] if 'inline' in qs: inline = True return temp_url_sig, temp_url_expires, filename, inline def _get_keys(self, env): """ Returns the X-[Account|Container]-Meta-Temp-URL-Key[-2] header values for the account or container, or an empty list if none are set. Each value comes as a 2-tuple (key, scope), where scope is either CONTAINER_SCOPE or ACCOUNT_SCOPE. Returns 0-4 elements depending on how many keys are set in the account's or container's metadata. :param env: The WSGI environment for the request. 
:returns: [ (X-Account-Meta-Temp-URL-Key str value, ACCOUNT_SCOPE) if set, (X-Account-Meta-Temp-URL-Key-2 str value, ACCOUNT_SCOPE if set, (X-Container-Meta-Temp-URL-Key str value, CONTAINER_SCOPE) if set, (X-Container-Meta-Temp-URL-Key-2 str value, CONTAINER_SCOPE if set, ] """ account_info = get_account_info(env, self.app, swift_source='TU') account_keys = get_tempurl_keys_from_metadata(account_info['meta']) container_info = get_container_info(env, self.app, swift_source='TU') container_keys = get_tempurl_keys_from_metadata( container_info.get('meta', [])) return ([(ak, ACCOUNT_SCOPE) for ak in account_keys] + [(ck, CONTAINER_SCOPE) for ck in container_keys]) def _get_hmacs(self, env, expires, scoped_keys, request_method=None): """ :param env: The WSGI environment for the request. :param expires: Unix timestamp as an int for when the URL expires. :param scoped_keys: (key, scope) tuples like _get_keys() returns :param request_method: Optional override of the request in the WSGI env. For example, if a HEAD does not match, you may wish to override with GET to still allow the HEAD. :returns: a list of (hmac, scope) 2-tuples """ if not request_method: request_method = env['REQUEST_METHOD'] return [ (get_hmac(request_method, env['PATH_INFO'], expires, key), scope) for (key, scope) in scoped_keys] def _invalid(self, env, start_response): """ Performs the necessary steps to indicate a WSGI 401 Unauthorized response to the request. :param env: The WSGI environment for the request. :param start_response: The WSGI start_response hook. :returns: 401 response as per WSGI. """ if env['REQUEST_METHOD'] == 'HEAD': body = None else: body = '401 Unauthorized: Temp URL invalid\n' return HTTPUnauthorized(body=body)(env, start_response) def _clean_disallowed_headers(self, env, start_response): """ Validate the absence of disallowed headers for "unsafe" operations. :returns: None for safe operations or swob.HTTPBadResponse if the request includes disallowed headers. """ if env['REQUEST_METHOD'] in ('GET', 'HEAD', 'OPTIONS'): return for h in env: if h in self.disallowed_headers: return HTTPBadRequest( body='The header %r is not allowed in this tempurl' % h[len('HTTP_'):].title().replace('_', '-'))( env, start_response) def _clean_incoming_headers(self, env): """ Removes any headers from the WSGI environment as per the middleware configuration for incoming requests. :param env: The WSGI environment for the request. """ for h in env.keys(): if h in self.incoming_allow_headers: continue for p in self.incoming_allow_headers_startswith: if h.startswith(p): break else: if h in self.incoming_remove_headers: del env[h] continue for p in self.incoming_remove_headers_startswith: if h.startswith(p): del env[h] break def _clean_outgoing_headers(self, headers): """ Removes any headers as per the middleware configuration for outgoing responses. :param headers: A WSGI start_response style list of headers, [('header1', 'value), ('header2', 'value), ...] :returns: The same headers list, but with some headers removed as per the middlware configuration for outgoing responses. 
""" headers = HeaderKeyDict(headers) for h in headers.keys(): if h in self.outgoing_allow_headers: continue for p in self.outgoing_allow_headers_startswith: if h.startswith(p): break else: if h in self.outgoing_remove_headers: del headers[h] continue for p in self.outgoing_remove_headers_startswith: if h.startswith(p): del headers[h] break return headers.items() def filter_factory(global_conf, **local_conf): """Returns the WSGI filter for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) defaults = { 'methods': 'GET HEAD PUT POST DELETE', 'incoming_remove_headers': DEFAULT_INCOMING_REMOVE_HEADERS, 'incoming_allow_headers': DEFAULT_INCOMING_ALLOW_HEADERS, 'outgoing_remove_headers': DEFAULT_OUTGOING_REMOVE_HEADERS, 'outgoing_allow_headers': DEFAULT_OUTGOING_ALLOW_HEADERS, } info_conf = {k: conf.get(k, v).split() for k, v in defaults.items()} register_swift_info('tempurl', **info_conf) conf.update(info_conf) return lambda app: TempURL(app, conf) swift-2.7.0/swift/common/middleware/name_check.py0000664000567000056710000001172412675204037023220 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. ''' Created on February 27, 2012 A filter that disallows any paths that contain defined forbidden characters or that exceed a defined length. Place early in the proxy-server pipeline after the left-most occurrence of the ``proxy-logging`` middleware (if present) and before the final ``proxy-logging`` middleware (if present) or the ``proxy-serer`` app itself, e.g.:: [pipeline:main] pipeline = catch_errors healthcheck proxy-logging name_check cache \ ratelimit tempauth sos proxy-logging proxy-server [filter:name_check] use = egg:swift#name_check forbidden_chars = '"`<> maximum_length = 255 There are default settings for forbidden_chars (FORBIDDEN_CHARS) and maximum_length (MAX_LENGTH) The filter returns HTTPBadRequest if path is invalid. 
@author: eamonn-otoole ''' from six.moves.urllib.parse import unquote import re from swift.common.utils import get_logger from swift.common.swob import Request, HTTPBadRequest FORBIDDEN_CHARS = "\'\"`<>" MAX_LENGTH = 255 FORBIDDEN_REGEXP = "/\./|/\.\./|/\.$|/\.\.$" class NameCheckMiddleware(object): def __init__(self, app, conf): self.app = app self.conf = conf self.forbidden_chars = self.conf.get('forbidden_chars', FORBIDDEN_CHARS) self.maximum_length = int(self.conf.get('maximum_length', MAX_LENGTH)) self.forbidden_regexp = self.conf.get('forbidden_regexp', FORBIDDEN_REGEXP) if self.forbidden_regexp: self.forbidden_regexp_compiled = re.compile(self.forbidden_regexp) else: self.forbidden_regexp_compiled = None self.logger = get_logger(self.conf, log_route='name_check') def check_character(self, req): ''' Checks req.path for any forbidden characters Returns True if there are any forbidden characters Returns False if there aren't any forbidden characters ''' self.logger.debug("name_check: path %s" % req.path) self.logger.debug("name_check: self.forbidden_chars %s" % self.forbidden_chars) return any((c in unquote(req.path)) for c in self.forbidden_chars) def check_length(self, req): ''' Checks that req.path doesn't exceed the defined maximum length Returns True if the length exceeds the maximum Returns False if the length is <= the maximum ''' length = len(unquote(req.path)) return length > self.maximum_length def check_regexp(self, req): ''' Checks that req.path doesn't contain a substring matching regexps. Returns True if there are any forbidden substring Returns False if there aren't any forbidden substring ''' if self.forbidden_regexp_compiled is None: return False self.logger.debug("name_check: path %s" % req.path) self.logger.debug("name_check: self.forbidden_regexp %s" % self.forbidden_regexp) unquoted_path = unquote(req.path) match = self.forbidden_regexp_compiled.search(unquoted_path) return (match is not None) def __call__(self, env, start_response): req = Request(env) if self.check_character(req): return HTTPBadRequest( request=req, body=("Object/Container/Account name contains forbidden " "chars from %s" % self.forbidden_chars))(env, start_response) elif self.check_length(req): return HTTPBadRequest( request=req, body=("Object/Container/Account name longer than the " "allowed maximum " "%s" % self.maximum_length))(env, start_response) elif self.check_regexp(req): return HTTPBadRequest( request=req, body=("Object/Container/Account name contains a forbidden " "substring from regular expression %s" % self.forbidden_regexp))(env, start_response) else: # Pass on to downstream WSGI component return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def name_check_filter(app): return NameCheckMiddleware(app, conf) return name_check_filter swift-2.7.0/swift/common/middleware/cname_lookup.py0000664000567000056710000001515612675204037023622 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. 
# See the License for the specific language governing permissions and # limitations under the License. """ CNAME Lookup Middleware Middleware that translates an unknown domain in the host header to something that ends with the configured storage_domain by looking up the given domain's CNAME record in DNS. This middleware will continue to follow a CNAME chain in DNS until it finds a record ending in the configured storage domain or it reaches the configured maximum lookup depth. If a match is found, the environment's Host header is rewritten and the request is passed further down the WSGI chain. """ from six.moves import range import socket from swift import gettext_ as _ try: import dns.resolver from dns.exception import DNSException from dns.resolver import NXDOMAIN, NoAnswer except ImportError: # catch this to allow docs to be built without the dependency MODULE_DEPENDENCY_MET = False else: # executed if the try block finishes with no errors MODULE_DEPENDENCY_MET = True from swift.common.swob import Request, HTTPBadRequest from swift.common.utils import cache_from_env, get_logger, list_from_csv def lookup_cname(domain): # pragma: no cover """ Given a domain, returns its DNS CNAME mapping and DNS ttl. :param domain: domain to query on :returns: (ttl, result) """ try: answer = dns.resolver.query(domain, 'CNAME').rrset ttl = answer.ttl result = answer.items[0].to_text() result = result.rstrip('.') return ttl, result except (DNSException, NXDOMAIN, NoAnswer): return 0, None def is_ip(domain): try: socket.inet_pton(socket.AF_INET, domain) return True except socket.error: try: socket.inet_pton(socket.AF_INET6, domain) return True except socket.error: return False class CNAMELookupMiddleware(object): """ CNAME Lookup Middleware See above for a full description. :param app: The next WSGI filter or app in the paste.deploy chain. :param conf: The configuration dict for the middleware. """ def __init__(self, app, conf): if not MODULE_DEPENDENCY_MET: # reraise the exception if the dependency wasn't met raise ImportError('dnspython is required for this module') self.app = app storage_domain = conf.get('storage_domain', 'example.com') self.storage_domain = ['.' 
+ s for s in list_from_csv(storage_domain) if not s.startswith('.')] self.storage_domain += [s for s in list_from_csv(storage_domain) if s.startswith('.')] self.lookup_depth = int(conf.get('lookup_depth', '1')) self.memcache = None self.logger = get_logger(conf, log_route='cname-lookup') def _domain_endswith_in_storage_domain(self, a_domain): for domain in self.storage_domain: if a_domain.endswith(domain): return True return False def __call__(self, env, start_response): if not self.storage_domain: return self.app(env, start_response) if 'HTTP_HOST' in env: given_domain = env['HTTP_HOST'] else: given_domain = env['SERVER_NAME'] port = '' if ':' in given_domain: given_domain, port = given_domain.rsplit(':', 1) if is_ip(given_domain): return self.app(env, start_response) a_domain = given_domain if not self._domain_endswith_in_storage_domain(a_domain): if self.memcache is None: self.memcache = cache_from_env(env) error = True for tries in range(self.lookup_depth): found_domain = None if self.memcache: memcache_key = ''.join(['cname-', a_domain]) found_domain = self.memcache.get(memcache_key) if not found_domain: ttl, found_domain = lookup_cname(a_domain) if self.memcache: memcache_key = ''.join(['cname-', given_domain]) self.memcache.set(memcache_key, found_domain, time=ttl) if found_domain is None or found_domain == a_domain: # no CNAME records or we're at the last lookup error = True found_domain = None break elif self._domain_endswith_in_storage_domain(found_domain): # Found it! self.logger.info( _('Mapped %(given_domain)s to %(found_domain)s') % {'given_domain': given_domain, 'found_domain': found_domain}) if port: env['HTTP_HOST'] = ':'.join([found_domain, port]) else: env['HTTP_HOST'] = found_domain error = False break else: # try one more deep in the chain self.logger.debug( _('Following CNAME chain for ' '%(given_domain)s to %(found_domain)s') % {'given_domain': given_domain, 'found_domain': found_domain}) a_domain = found_domain if error: if found_domain: msg = 'CNAME lookup failed after %d tries' % \ self.lookup_depth else: msg = 'CNAME lookup failed to resolve to a valid domain' resp = HTTPBadRequest(request=Request(env), body=msg, content_type='text/plain') return resp(env, start_response) return self.app(env, start_response) def filter_factory(global_conf, **local_conf): # pragma: no cover conf = global_conf.copy() conf.update(local_conf) def cname_filter(app): return CNAMELookupMiddleware(app, conf) return cname_filter swift-2.7.0/swift/common/middleware/domain_remap.py0000664000567000056710000001420012675204037023566 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Domain Remap Middleware Middleware that translates container and account parts of a domain to path parameters that the proxy server understands. 
container.account.storageurl/object gets translated to container.account.storageurl/path_root/account/container/object account.storageurl/path_root/container/object gets translated to account.storageurl/path_root/account/container/object Browsers can convert a host header to lowercase, so check that reseller prefix on the account is the correct case. This is done by comparing the items in the reseller_prefixes config option to the found prefix. If they match except for case, the item from reseller_prefixes will be used instead of the found reseller prefix. When none match, the default reseller prefix is used. When no default reseller prefix is configured, any request with an account prefix not in that list will be ignored by this middleware. reseller_prefixes defaults to 'AUTH'. Note that this middleware requires that container names and account names (except as described above) must be DNS-compatible. This means that the account name created in the system and the containers created by users cannot exceed 63 characters or have UTF-8 characters. These are restrictions over and above what swift requires and are not explicitly checked. Simply put, the this middleware will do a best-effort attempt to derive account and container names from elements in the domain name and put those derived values into the URL path (leaving the Host header unchanged). Also note that using container sync with remapped domain names is not advised. With container sync, you should use the true storage end points as sync destinations. """ from swift.common.swob import Request, HTTPBadRequest from swift.common.utils import list_from_csv, register_swift_info class DomainRemapMiddleware(object): """ Domain Remap Middleware See above for a full description. :param app: The next WSGI filter or app in the paste.deploy chain. :param conf: The configuration dict for the middleware. """ def __init__(self, app, conf): self.app = app self.storage_domain = conf.get('storage_domain', 'example.com') if self.storage_domain and not self.storage_domain.startswith('.'): self.storage_domain = '.' 
+ self.storage_domain self.path_root = conf.get('path_root', 'v1').strip('/') prefixes = conf.get('reseller_prefixes', 'AUTH') self.reseller_prefixes = list_from_csv(prefixes) self.reseller_prefixes_lower = [x.lower() for x in self.reseller_prefixes] self.default_reseller_prefix = conf.get('default_reseller_prefix') def __call__(self, env, start_response): if not self.storage_domain: return self.app(env, start_response) if 'HTTP_HOST' in env: given_domain = env['HTTP_HOST'] else: given_domain = env['SERVER_NAME'] port = '' if ':' in given_domain: given_domain, port = given_domain.rsplit(':', 1) if given_domain.endswith(self.storage_domain): parts_to_parse = given_domain[:-len(self.storage_domain)] parts_to_parse = parts_to_parse.strip('.').split('.') len_parts_to_parse = len(parts_to_parse) if len_parts_to_parse == 2: container, account = parts_to_parse elif len_parts_to_parse == 1: container, account = None, parts_to_parse[0] else: resp = HTTPBadRequest(request=Request(env), body='Bad domain in host header', content_type='text/plain') return resp(env, start_response) if len(self.reseller_prefixes) > 0: if '_' not in account and '-' in account: account = account.replace('-', '_', 1) account_reseller_prefix = account.split('_', 1)[0].lower() if account_reseller_prefix in self.reseller_prefixes_lower: prefix_index = self.reseller_prefixes_lower.index( account_reseller_prefix) real_prefix = self.reseller_prefixes[prefix_index] if not account.startswith(real_prefix): account_suffix = account[len(real_prefix):] account = real_prefix + account_suffix elif self.default_reseller_prefix: # account prefix is not in config list. Add default one. account = "%s_%s" % (self.default_reseller_prefix, account) else: # account prefix is not in config list. bail. return self.app(env, start_response) path = env['PATH_INFO'].strip('/') new_path_parts = ['', self.path_root, account] if container: new_path_parts.append(container) if path.startswith(self.path_root): path = path[len(self.path_root):].lstrip('/') if path: new_path_parts.append(path) new_path = '/'.join(new_path_parts) env['PATH_INFO'] = new_path return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) register_swift_info( 'domain_remap', default_reseller_prefix=conf.get('default_reseller_prefix')) def domain_filter(app): return DomainRemapMiddleware(app, conf) return domain_filter swift-2.7.0/swift/common/middleware/list_endpoints.py0000664000567000056710000002354212675204037024202 0ustar jenkinsjenkins00000000000000# Copyright (c) 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ List endpoints for an object, account or container. This middleware makes it possible to integrate swift with software that relies on data locality information to avoid network overhead, such as Hadoop. 
Using the original API, answers requests of the form:: /endpoints/{account}/{container}/{object} /endpoints/{account}/{container} /endpoints/{account} /endpoints/v1/{account}/{container}/{object} /endpoints/v1/{account}/{container} /endpoints/v1/{account} with a JSON-encoded list of endpoints of the form:: http://{server}:{port}/{dev}/{part}/{acc}/{cont}/{obj} http://{server}:{port}/{dev}/{part}/{acc}/{cont} http://{server}:{port}/{dev}/{part}/{acc} correspondingly, e.g.:: http://10.1.1.1:6000/sda1/2/a/c2/o1 http://10.1.1.1:6000/sda1/2/a/c2 http://10.1.1.1:6000/sda1/2/a Using the v2 API, answers requests of the form:: /endpoints/v2/{account}/{container}/{object} /endpoints/v2/{account}/{container} /endpoints/v2/{account} with a JSON-encoded dictionary containing a key 'endpoints' that maps to a list of endpoints having the same form as described above, and a key 'headers' that maps to a dictionary of headers that should be sent with a request made to the endpoints, e.g.:: { "endpoints": {"http://10.1.1.1:6010/sda1/2/a/c3/o1", "http://10.1.1.1:6030/sda3/2/a/c3/o1", "http://10.1.1.1:6040/sda4/2/a/c3/o1"}, "headers": {"X-Backend-Storage-Policy-Index": "1"}} In this example, the 'headers' dictionary indicates that requests to the endpoint URLs should include the header 'X-Backend-Storage-Policy-Index: 1' because the object's container is using storage policy index 1. The '/endpoints/' path is customizable ('list_endpoints_path' configuration parameter). Intended for consumption by third-party services living inside the cluster (as the endpoints make sense only inside the cluster behind the firewall); potentially written in a different language. This is why it's provided as a REST API and not just a Python API: to avoid requiring clients to write their own ring parsers in their languages, and to avoid the necessity to distribute the ring file to clients and keep it up-to-date. Note that the call is not authenticated, which means that a proxy with this middleware enabled should not be open to an untrusted environment (everyone can query the locality data using this middleware). """ import json from six.moves.urllib.parse import quote, unquote from swift.common.ring import Ring from swift.common.utils import get_logger, split_path from swift.common.swob import Request, Response from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed from swift.common.storage_policy import POLICIES from swift.proxy.controllers.base import get_container_info RESPONSE_VERSIONS = (1.0, 2.0) class ListEndpointsMiddleware(object): """ List endpoints for an object, account or container. See above for a full description. Uses configuration parameter `swift_dir` (default `/etc/swift`). :param app: The next WSGI filter or app in the paste.deploy chain. :param conf: The configuration dict for the middleware. """ def __init__(self, app, conf): self.app = app self.logger = get_logger(conf, log_route='endpoints') self.swift_dir = conf.get('swift_dir', '/etc/swift') self.account_ring = Ring(self.swift_dir, ring_name='account') self.container_ring = Ring(self.swift_dir, ring_name='container') self.endpoints_path = conf.get('list_endpoints_path', '/endpoints/') if not self.endpoints_path.endswith('/'): self.endpoints_path += '/' self.default_response_version = 1.0 self.response_map = { 1.0: self.v1_format_response, 2.0: self.v2_format_response, } def get_object_ring(self, policy_idx): """ Get the ring object to use to handle a request based on its policy. 
:policy_idx: policy index as defined in swift.conf :returns: appropriate ring object """ return POLICIES.get_object_ring(policy_idx, self.swift_dir) def _parse_version(self, raw_version): err_msg = 'Unsupported version %r' % raw_version try: version = float(raw_version.lstrip('v')) except ValueError: raise ValueError(err_msg) if not any(version == v for v in RESPONSE_VERSIONS): raise ValueError(err_msg) return version def _parse_path(self, request): """ Parse path parts of request into a tuple of version, account, container, obj. Unspecified path parts are filled in as None, except version which is always returned as a float using the configured default response version if not specified in the request. :param request: the swob request :returns: parsed path parts as a tuple with version filled in as configured default response version if not specified. :raises: ValueError if path is invalid, message will say why. """ clean_path = request.path[len(self.endpoints_path) - 1:] # try to peel off version try: raw_version, rest = split_path(clean_path, 1, 2, True) except ValueError: raise ValueError('No account specified') try: version = self._parse_version(raw_version) except ValueError: if raw_version.startswith('v') and '_' not in raw_version: # looks more like a invalid version than an account raise # probably no version specified, but if the client really # said /endpoints/v_3/account they'll probably be sorta # confused by the useless response and lack of error. version = self.default_response_version rest = clean_path else: rest = '/' + rest if rest else '/' try: account, container, obj = split_path(rest, 1, 3, True) except ValueError: raise ValueError('No account specified') return version, account, container, obj def v1_format_response(self, req, endpoints, **kwargs): return Response(json.dumps(endpoints), content_type='application/json') def v2_format_response(self, req, endpoints, storage_policy_index, **kwargs): resp = { 'endpoints': endpoints, 'headers': {}, } if storage_policy_index is not None: resp['headers'][ 'X-Backend-Storage-Policy-Index'] = str(storage_policy_index) return Response(json.dumps(resp), content_type='application/json') def __call__(self, env, start_response): request = Request(env) if not request.path.startswith(self.endpoints_path): return self.app(env, start_response) if request.method != 'GET': return HTTPMethodNotAllowed( req=request, headers={"Allow": "GET"})(env, start_response) try: version, account, container, obj = self._parse_path(request) except ValueError as err: return HTTPBadRequest(str(err))(env, start_response) if account is not None: account = unquote(account) if container is not None: container = unquote(container) if obj is not None: obj = unquote(obj) storage_policy_index = None if obj is not None: container_info = get_container_info( {'PATH_INFO': '/v1/%s/%s' % (account, container)}, self.app, swift_source='LE') storage_policy_index = container_info['storage_policy'] obj_ring = self.get_object_ring(storage_policy_index) partition, nodes = obj_ring.get_nodes( account, container, obj) endpoint_template = 'http://{ip}:{port}/{device}/{partition}/' + \ '{account}/{container}/{obj}' elif container is not None: partition, nodes = self.container_ring.get_nodes( account, container) endpoint_template = 'http://{ip}:{port}/{device}/{partition}/' + \ '{account}/{container}' else: partition, nodes = self.account_ring.get_nodes( account) endpoint_template = 'http://{ip}:{port}/{device}/{partition}/' + \ '{account}' endpoints = [] for node in nodes: 
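            # Build one URL per primary node for the partition; the account,
            # container and object were unquoted above, so they are re-quoted
            # here for use in the endpoint URL.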
endpoint = endpoint_template.format( ip=node['ip'], port=node['port'], device=node['device'], partition=partition, account=quote(account), container=quote(container or ''), obj=quote(obj or '')) endpoints.append(endpoint) resp = self.response_map[version]( request, endpoints=endpoints, storage_policy_index=storage_policy_index) return resp(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def list_endpoints_filter(app): return ListEndpointsMiddleware(app, conf) return list_endpoints_filter swift-2.7.0/swift/common/middleware/bulk.py0000664000567000056710000007363012675204037022104 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Middleware that will perform many operations on a single request. --------------- Extract Archive --------------- Expand tar files into a Swift account. Request must be a PUT with the query parameter ``?extract-archive=format`` specifying the format of archive file. Accepted formats are tar, tar.gz, and tar.bz2. For a PUT to the following url:: /v1/AUTH_Account/$UPLOAD_PATH?extract-archive=tar.gz UPLOAD_PATH is where the files will be expanded to. UPLOAD_PATH can be a container, a pseudo-directory within a container, or an empty string. The destination of a file in the archive will be built as follows:: /v1/AUTH_Account/$UPLOAD_PATH/$FILE_PATH Where FILE_PATH is the file name from the listing in the tar file. If the UPLOAD_PATH is an empty string, containers will be auto created accordingly and files in the tar that would not map to any container (files in the base directory) will be ignored. Only regular files will be uploaded. Empty directories, symlinks, etc will not be uploaded. ------------ Content Type ------------ If the content-type header is set in the extract-archive call, Swift will assign that content-type to all the underlying files. The bulk middleware will extract the archive file and send the internal files using PUT operations using the same headers from the original request (e.g. auth-tokens, content-Type, etc.). Notice that any middleware call that follows the bulk middleware does not know if this was a bulk request or if these were individual requests sent by the user. In order to make Swift detect the content-type for the files based on the file extension, the content-type in the extract-archive call should not be set. Alternatively, it is possible to explicitly tell Swift to detect the content type using this header:: X-Detect-Content-Type: true For example:: curl -X PUT http://127.0.0.1/v1/AUTH_acc/cont/$?extract-archive=tar -T backup.tar -H "Content-Type: application/x-tar" -H "X-Auth-Token: xxx" -H "X-Detect-Content-Type: true" ------------------ Assigning Metadata ------------------ The tar file format (1) allows for UTF-8 key/value pairs to be associated with each file in an archive. If a file has extended attributes, then tar will store those as key/value pairs. 
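For instance, after running the ``setfattr`` commands shown above, an
archive carrying those attributes could be created and uploaded roughly
as follows (a sketch only; the host, token and file names are
illustrative, and ``--xattrs`` assumes a reasonably recent GNU tar)::

    tar --create --xattrs -f backup.tar setup.py
    curl -X PUT "http://127.0.0.1/v1/AUTH_acc/cont/?extract-archive=tar" -T backup.tar -H "X-Auth-Token: xxx"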
The bulk middleware can read those extended attributes and convert them to Swift object metadata. Attributes starting with "user.meta" are converted to object metadata, and "user.mime_type" is converted to Content-Type. For example:: setfattr -n user.mime_type -v "application/python-setup" setup.py setfattr -n user.meta.lunch -v "burger and fries" setup.py setfattr -n user.meta.dinner -v "baked ziti" setup.py setfattr -n user.stuff -v "whee" setup.py Will get translated to headers:: Content-Type: application/python-setup X-Object-Meta-Lunch: burger and fries X-Object-Meta-Dinner: baked ziti The bulk middleware will handle xattrs stored by both GNU and BSD tar (2). Only xattrs ``user.mime_type`` and ``user.meta.*`` are processed. Other attributes are ignored. Notes: (1) The POSIX 1003.1-2001 (pax) format. The default format on GNU tar 1.27.1 or later. (2) Even with pax-format tarballs, different encoders store xattrs slightly differently; for example, GNU tar stores the xattr "user.userattribute" as pax header "SCHILY.xattr.user.userattribute", while BSD tar (which uses libarchive) stores it as "LIBARCHIVE.xattr.user.userattribute". -------- Response -------- The response from bulk operations functions differently from other Swift responses. This is because a short request body sent from the client could result in many operations on the proxy server and precautions need to be made to prevent the request from timing out due to lack of activity. To this end, the client will always receive a 200 OK response, regardless of the actual success of the call. The body of the response must be parsed to determine the actual success of the operation. In addition to this the client may receive zero or more whitespace characters prepended to the actual response body while the proxy server is completing the request. The format of the response body defaults to text/plain but can be either json or xml depending on the ``Accept`` header. Acceptable formats are ``text/plain``, ``application/json``, ``application/xml``, and ``text/xml``. An example body is as follows:: {"Response Status": "201 Created", "Response Body": "", "Errors": [], "Number Files Created": 10} If all valid files were uploaded successfully the Response Status will be 201 Created. If any files failed to be created the response code corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on server errors), etc. In both cases the response body will specify the number of files successfully uploaded and a list of the files that failed. There are proxy logs created for each file (which becomes a subrequest) in the tar. The subrequest's proxy log will have a swift.source set to "EA" the log's content length will reflect the unzipped size of the file. If double proxy-logging is used the leftmost logger will not have a swift.source set and the content length will reflect the size of the payload sent to the proxy (the unexpanded size of the tar.gz). ----------- Bulk Delete ----------- Will delete multiple objects or containers from their account with a single request. Responds to POST requests with query parameter ``?bulk-delete`` set. The request url is your storage url. The Content-Type should be set to ``text/plain``. The body of the POST request will be a newline separated list of url encoded objects to delete. You can delete 10,000 (configurable) objects per request. 
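A request might look like this (a sketch; the endpoint, token, and file
name are illustrative)::

    curl -X POST "http://127.0.0.1/v1/AUTH_acc?bulk-delete" -H "X-Auth-Token: xxx" -H "Content-Type: text/plain" --data-binary @to_delete.txt

where ``to_delete.txt`` holds the newline separated, URL encoded paths
described just below, one per line.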
The objects specified in the POST request body must be URL encoded and in the form:: /container_name/obj_name or for a container (which must be empty at time of delete):: /container_name The response is similar to extract archive as in every response will be a 200 OK and you must parse the response body for actual results. An example response is:: {"Number Not Found": 0, "Response Status": "200 OK", "Response Body": "", "Errors": [], "Number Deleted": 6} If all items were successfully deleted (or did not exist), the Response Status will be 200 OK. If any failed to delete, the response code corresponds to the subrequest's error. Possible codes are 400, 401, 502 (on server errors), etc. In all cases the response body will specify the number of items successfully deleted, not found, and a list of those that failed. The return body will be formatted in the way specified in the request's ``Accept`` header. Acceptable formats are ``text/plain``, ``application/json``, ``application/xml``, and ``text/xml``. There are proxy logs created for each object or container (which becomes a subrequest) that is deleted. The subrequest's proxy log will have a swift.source set to "BD" the log's content length of 0. If double proxy-logging is used the leftmost logger will not have a swift.source set and the content length will reflect the size of the payload sent to the proxy (the list of objects/containers to be deleted). """ import json from six.moves.urllib.parse import quote, unquote import tarfile from xml.sax import saxutils from time import time from eventlet import sleep import zlib from swift.common.swob import Request, HTTPBadGateway, \ HTTPCreated, HTTPBadRequest, HTTPNotFound, HTTPUnauthorized, HTTPOk, \ HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPNotAcceptable, \ HTTPLengthRequired, HTTPException, HTTPServerError, wsgify from swift.common.utils import get_logger, register_swift_info from swift.common import constraints from swift.common.http import HTTP_UNAUTHORIZED, HTTP_NOT_FOUND, HTTP_CONFLICT class CreateContainerError(Exception): def __init__(self, msg, status_int, status): self.status_int = status_int self.status = status super(CreateContainerError, self).__init__(msg) ACCEPTABLE_FORMATS = ['text/plain', 'application/json', 'application/xml', 'text/xml'] def get_response_body(data_format, data_dict, error_list): """ Returns a properly formatted response body according to format. Handles json and xml, otherwise will return text/plain. Note: xml response does not include xml declaration. :params data_format: resulting format :params data_dict: generated data about results. 
    :params error_list: list of quoted filenames that failed
    """
    if data_format == 'application/json':
        data_dict['Errors'] = error_list
        return json.dumps(data_dict)
    if data_format and data_format.endswith('/xml'):
        # Build an XML document listing the result fields and the errors.
        output = '<delete>\n'
        for key in sorted(data_dict):
            xml_key = key.replace(' ', '_').lower()
            output += '<%s>%s</%s>\n' % (xml_key, data_dict[key], xml_key)
        output += '<errors>\n'
        output += '\n'.join(
            ['<object>'
             '<name>%s</name><status>%s</status>'
             '</object>' % (saxutils.escape(name), status)
             for name, status in error_list])
        output += '</errors>\n</delete>\n'
        return output
    # Fall back to a simple text/plain rendering.
    output = ''
    for key in sorted(data_dict):
        output += '%s: %s\n' % (key, data_dict[key])
    output += 'Errors:\n'
    output += '\n'.join(
        ['%s, %s' % (name, status)
         for name, status in error_list])
    return output


def pax_key_to_swift_header(pax_key):
    if (pax_key == u"SCHILY.xattr.user.mime_type" or
            pax_key == u"LIBARCHIVE.xattr.user.mime_type"):
        return "Content-Type"
    elif pax_key.startswith(u"SCHILY.xattr.user.meta."):
        useful_part = pax_key[len(u"SCHILY.xattr.user.meta."):]
        return "X-Object-Meta-" + useful_part.encode("utf-8")
    elif pax_key.startswith(u"LIBARCHIVE.xattr.user.meta."):
        useful_part = pax_key[len(u"LIBARCHIVE.xattr.user.meta."):]
        return "X-Object-Meta-" + useful_part.encode("utf-8")
    else:
        # You can get things like atime/mtime/ctime or filesystem ACLs in
        # pax headers; those aren't really user metadata. The same goes for
        # other, non-user metadata.
        return None


class Bulk(object):

    def __init__(self, app, conf, max_containers_per_extraction=10000,
                 max_failed_extractions=1000, max_deletes_per_request=10000,
                 max_failed_deletes=1000, yield_frequency=10, retry_count=0,
                 retry_interval=1.5, logger=None):
        self.app = app
        self.logger = logger or get_logger(conf, log_route='bulk')
        self.max_containers = max_containers_per_extraction
        self.max_failed_extractions = max_failed_extractions
        self.max_failed_deletes = max_failed_deletes
        self.max_deletes_per_request = max_deletes_per_request
        self.yield_frequency = yield_frequency
        self.retry_count = retry_count
        self.retry_interval = retry_interval
        self.max_path_length = constraints.MAX_OBJECT_NAME_LENGTH \
            + constraints.MAX_CONTAINER_NAME_LENGTH + 2

    def create_container(self, req, container_path):
        """
        Checks if the container exists and, if not, tries to create it.

        :params container_path: an unquoted path to a container to be created
        :returns: True if created container, False if container exists
        :raises: CreateContainerError when unable to create container
        """
        new_env = req.environ.copy()
        new_env['PATH_INFO'] = container_path
        new_env['swift.source'] = 'EA'
        new_env['REQUEST_METHOD'] = 'HEAD'
        head_cont_req = Request.blank(container_path, environ=new_env)
        resp = head_cont_req.get_response(self.app)
        if resp.is_success:
            return False
        if resp.status_int == 404:
            new_env = req.environ.copy()
            new_env['PATH_INFO'] = container_path
            new_env['swift.source'] = 'EA'
            new_env['REQUEST_METHOD'] = 'PUT'
            create_cont_req = Request.blank(container_path, environ=new_env)
            resp = create_cont_req.get_response(self.app)
            if resp.is_success:
                return True
        raise CreateContainerError(
            "Create Container Failed: " + container_path,
            resp.status_int, resp.status)

    def get_objs_to_delete(self, req):
        """
        Will populate objs_to_delete with data from request input.

        :params req: a Swob request
        :returns: a list of the contents of req.body when separated by newline.
:raises: HTTPException on failures """ line = '' data_remaining = True objs_to_delete = [] if req.content_length is None and \ req.headers.get('transfer-encoding', '').lower() != 'chunked': raise HTTPLengthRequired(request=req) while data_remaining: if '\n' in line: obj_to_delete, line = line.split('\n', 1) obj_to_delete = obj_to_delete.strip() objs_to_delete.append( {'name': unquote(obj_to_delete)}) else: data = req.body_file.read(self.max_path_length) if data: line += data else: data_remaining = False obj_to_delete = line.strip() if obj_to_delete: objs_to_delete.append( {'name': unquote(obj_to_delete)}) if len(objs_to_delete) > self.max_deletes_per_request: raise HTTPRequestEntityTooLarge( 'Maximum Bulk Deletes: %d per request' % self.max_deletes_per_request) if len(line) > self.max_path_length * 2: raise HTTPBadRequest('Invalid File Name') return objs_to_delete def handle_delete_iter(self, req, objs_to_delete=None, user_agent='BulkDelete', swift_source='BD', out_content_type='text/plain'): """ A generator that can be assigned to a swob Response's app_iter which, when iterated over, will delete the objects specified in request body. Will occasionally yield whitespace while request is being processed. When the request is completed will yield a response body that can be parsed to determine success. See above documentation for details. :params req: a swob Request :params objs_to_delete: a list of dictionaries that specifies the objects to be deleted. If None, uses self.get_objs_to_delete to query request. """ last_yield = time() separator = '' failed_files = [] resp_dict = {'Response Status': HTTPOk().status, 'Response Body': '', 'Number Deleted': 0, 'Number Not Found': 0} try: if not out_content_type: raise HTTPNotAcceptable(request=req) if out_content_type.endswith('/xml'): yield '\n' try: vrs, account, _junk = req.split_path(2, 3, True) except ValueError: raise HTTPNotFound(request=req) incoming_format = req.headers.get('Content-Type') if incoming_format and \ not incoming_format.startswith('text/plain'): # For now only accept newline separated object names raise HTTPNotAcceptable(request=req) if objs_to_delete is None: objs_to_delete = self.get_objs_to_delete(req) failed_file_response = {'type': HTTPBadRequest} req.environ['eventlet.minimum_write_chunk_size'] = 0 for obj_to_delete in objs_to_delete: if last_yield + self.yield_frequency < time(): separator = '\r\n\r\n' last_yield = time() yield ' ' obj_name = obj_to_delete['name'] if not obj_name: continue if len(failed_files) >= self.max_failed_deletes: raise HTTPBadRequest('Max delete failures exceeded') if obj_to_delete.get('error'): if obj_to_delete['error']['code'] == HTTP_NOT_FOUND: resp_dict['Number Not Found'] += 1 else: failed_files.append([quote(obj_name), obj_to_delete['error']['message']]) continue delete_path = '/'.join(['', vrs, account, obj_name.lstrip('/')]) if not constraints.check_utf8(delete_path): failed_files.append([quote(obj_name), HTTPPreconditionFailed().status]) continue new_env = req.environ.copy() new_env['PATH_INFO'] = delete_path del(new_env['wsgi.input']) new_env['CONTENT_LENGTH'] = 0 new_env['REQUEST_METHOD'] = 'DELETE' new_env['HTTP_USER_AGENT'] = \ '%s %s' % (req.environ.get('HTTP_USER_AGENT'), user_agent) new_env['swift.source'] = swift_source self._process_delete(delete_path, obj_name, new_env, resp_dict, failed_files, failed_file_response) if failed_files: resp_dict['Response Status'] = \ failed_file_response['type']().status elif not (resp_dict['Number Deleted'] or resp_dict['Number Not Found']): 
resp_dict['Response Status'] = HTTPBadRequest().status resp_dict['Response Body'] = 'Invalid bulk delete.' except HTTPException as err: resp_dict['Response Status'] = err.status resp_dict['Response Body'] = err.body except Exception: self.logger.exception('Error in bulk delete.') resp_dict['Response Status'] = HTTPServerError().status yield separator + get_response_body(out_content_type, resp_dict, failed_files) def handle_extract_iter(self, req, compress_type, out_content_type='text/plain'): """ A generator that can be assigned to a swob Response's app_iter which, when iterated over, will extract and PUT the objects pulled from the request body. Will occasionally yield whitespace while request is being processed. When the request is completed will yield a response body that can be parsed to determine success. See above documentation for details. :params req: a swob Request :params compress_type: specifying the compression type of the tar. Accepts '', 'gz', or 'bz2' """ resp_dict = {'Response Status': HTTPCreated().status, 'Response Body': '', 'Number Files Created': 0} failed_files = [] last_yield = time() separator = '' containers_accessed = set() try: if not out_content_type: raise HTTPNotAcceptable(request=req) if out_content_type.endswith('/xml'): yield '\n' if req.content_length is None and \ req.headers.get('transfer-encoding', '').lower() != 'chunked': raise HTTPLengthRequired(request=req) try: vrs, account, extract_base = req.split_path(2, 3, True) except ValueError: raise HTTPNotFound(request=req) extract_base = extract_base or '' extract_base = extract_base.rstrip('/') tar = tarfile.open(mode='r|' + compress_type, fileobj=req.body_file) failed_response_type = HTTPBadRequest req.environ['eventlet.minimum_write_chunk_size'] = 0 containers_created = 0 while True: if last_yield + self.yield_frequency < time(): separator = '\r\n\r\n' last_yield = time() yield ' ' tar_info = next(tar) if tar_info is None or \ len(failed_files) >= self.max_failed_extractions: break if tar_info.isfile(): obj_path = tar_info.name if obj_path.startswith('./'): obj_path = obj_path[2:] obj_path = obj_path.lstrip('/') if extract_base: obj_path = extract_base + '/' + obj_path if '/' not in obj_path: continue # ignore base level file destination = '/'.join( ['', vrs, account, obj_path]) container = obj_path.split('/', 1)[0] if not constraints.check_utf8(destination): failed_files.append( [quote(obj_path[:self.max_path_length]), HTTPPreconditionFailed().status]) continue if tar_info.size > constraints.MAX_FILE_SIZE: failed_files.append([ quote(obj_path[:self.max_path_length]), HTTPRequestEntityTooLarge().status]) continue container_failure = None if container not in containers_accessed: cont_path = '/'.join(['', vrs, account, container]) try: if self.create_container(req, cont_path): containers_created += 1 if containers_created > self.max_containers: raise HTTPBadRequest( 'More than %d containers to create ' 'from tar.' 
% self.max_containers) except CreateContainerError as err: # the object PUT to this container still may # succeed if acls are set container_failure = [ quote(cont_path[:self.max_path_length]), err.status] if err.status_int == HTTP_UNAUTHORIZED: raise HTTPUnauthorized(request=req) except ValueError: failed_files.append([ quote(obj_path[:self.max_path_length]), HTTPBadRequest().status]) continue tar_file = tar.extractfile(tar_info) new_env = req.environ.copy() new_env['REQUEST_METHOD'] = 'PUT' new_env['wsgi.input'] = tar_file new_env['PATH_INFO'] = destination new_env['CONTENT_LENGTH'] = tar_info.size new_env['swift.source'] = 'EA' new_env['HTTP_USER_AGENT'] = \ '%s BulkExpand' % req.environ.get('HTTP_USER_AGENT') create_obj_req = Request.blank(destination, new_env) for pax_key, pax_value in tar_info.pax_headers.items(): header_name = pax_key_to_swift_header(pax_key) if header_name: # Both pax_key and pax_value are unicode # strings; the key is already UTF-8 encoded, but # we still have to encode the value. create_obj_req.headers[header_name] = \ pax_value.encode("utf-8") resp = create_obj_req.get_response(self.app) containers_accessed.add(container) if resp.is_success: resp_dict['Number Files Created'] += 1 else: if container_failure: failed_files.append(container_failure) if resp.status_int == HTTP_UNAUTHORIZED: failed_files.append([ quote(obj_path[:self.max_path_length]), HTTPUnauthorized().status]) raise HTTPUnauthorized(request=req) if resp.status_int // 100 == 5: failed_response_type = HTTPBadGateway failed_files.append([ quote(obj_path[:self.max_path_length]), resp.status]) if failed_files: resp_dict['Response Status'] = failed_response_type().status elif not resp_dict['Number Files Created']: resp_dict['Response Status'] = HTTPBadRequest().status resp_dict['Response Body'] = 'Invalid Tar File: No Valid Files' except HTTPException as err: resp_dict['Response Status'] = err.status resp_dict['Response Body'] = err.body except (tarfile.TarError, zlib.error) as tar_error: resp_dict['Response Status'] = HTTPBadRequest().status resp_dict['Response Body'] = 'Invalid Tar File: %s' % tar_error except Exception: self.logger.exception('Error in extract archive.') resp_dict['Response Status'] = HTTPServerError().status yield separator + get_response_body( out_content_type, resp_dict, failed_files) def _process_delete(self, delete_path, obj_name, env, resp_dict, failed_files, failed_file_response, retry=0): delete_obj_req = Request.blank(delete_path, env) resp = delete_obj_req.get_response(self.app) if resp.status_int // 100 == 2: resp_dict['Number Deleted'] += 1 elif resp.status_int == HTTP_NOT_FOUND: resp_dict['Number Not Found'] += 1 elif resp.status_int == HTTP_UNAUTHORIZED: failed_files.append([quote(obj_name), HTTPUnauthorized().status]) elif resp.status_int == HTTP_CONFLICT and \ self.retry_count > 0 and self.retry_count > retry: retry += 1 sleep(self.retry_interval ** retry) self._process_delete(delete_path, obj_name, env, resp_dict, failed_files, failed_file_response, retry) else: if resp.status_int // 100 == 5: failed_file_response['type'] = HTTPBadGateway failed_files.append([quote(obj_name), resp.status]) @wsgify def __call__(self, req): extract_type = req.params.get('extract-archive') resp = None if extract_type is not None and req.method == 'PUT': archive_type = { 'tar': '', 'tar.gz': 'gz', 'tar.bz2': 'bz2'}.get(extract_type.lower().strip('.')) if archive_type is not None: resp = HTTPOk(request=req) out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS) if out_content_type: 
resp.content_type = out_content_type resp.app_iter = self.handle_extract_iter( req, archive_type, out_content_type=out_content_type) else: resp = HTTPBadRequest("Unsupported archive format") if 'bulk-delete' in req.params and req.method in ['POST', 'DELETE']: resp = HTTPOk(request=req) out_content_type = req.accept.best_match(ACCEPTABLE_FORMATS) if out_content_type: resp.content_type = out_content_type resp.app_iter = self.handle_delete_iter( req, out_content_type=out_content_type) return resp or self.app def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) max_containers_per_extraction = \ int(conf.get('max_containers_per_extraction', 10000)) max_failed_extractions = int(conf.get('max_failed_extractions', 1000)) max_deletes_per_request = int(conf.get('max_deletes_per_request', 10000)) max_failed_deletes = int(conf.get('max_failed_deletes', 1000)) yield_frequency = int(conf.get('yield_frequency', 10)) retry_count = int(conf.get('delete_container_retry_count', 0)) retry_interval = 1.5 register_swift_info( 'bulk_upload', max_containers_per_extraction=max_containers_per_extraction, max_failed_extractions=max_failed_extractions) register_swift_info( 'bulk_delete', max_deletes_per_request=max_deletes_per_request, max_failed_deletes=max_failed_deletes) def bulk_filter(app): return Bulk( app, conf, max_containers_per_extraction=max_containers_per_extraction, max_failed_extractions=max_failed_extractions, max_deletes_per_request=max_deletes_per_request, max_failed_deletes=max_failed_deletes, yield_frequency=yield_frequency, retry_count=retry_count, retry_interval=retry_interval) return bulk_filter swift-2.7.0/swift/common/middleware/crossdomain.py0000664000567000056710000000661612675204037023470 0ustar jenkinsjenkins00000000000000# Copyright (c) 2013 OpenStack Foundation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swift.common.swob import Request, Response from swift.common.utils import register_swift_info class CrossDomainMiddleware(object): """ Cross domain middleware used to respond to requests for cross domain policy information. If the path is /crossdomain.xml it will respond with an xml cross domain policy document. This allows web pages hosted elsewhere to use client side technologies such as Flash, Java and Silverlight to interact with the Swift API. To enable this middleware, add it to the pipeline in your proxy-server.conf file. It should be added before any authentication (e.g., tempauth or keystone) middleware. In this example ellipsis (...) indicate other middleware you may have chosen to use:: [pipeline:main] pipeline = ... crossdomain ... authtoken ... proxy-server And add a filter section, such as:: [filter:crossdomain] use = egg:swift#crossdomain cross_domain_policy = For continuation lines, put some whitespace before the continuation text. Ensure you put a completely blank line to terminate the cross_domain_policy value. The cross_domain_policy name/value is optional. 
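As an illustration, a complete filter section with an explicit policy might
look like the following sketch (the ``allow-access-from`` element is the
standard Adobe cross-domain policy directive; the domain value shown is
only a placeholder)::

    [filter:crossdomain]
    use = egg:swift#crossdomain
    cross_domain_policy = <allow-access-from domain="*.example.com" />

Additional policy elements can be supplied as continuation lines, following
the whitespace rule described above.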
If omitted, the policy defaults as if you had specified:: cross_domain_policy = """ def __init__(self, app, conf, *args, **kwargs): self.app = app self.conf = conf default_domain_policy = '' self.cross_domain_policy = self.conf.get('cross_domain_policy', default_domain_policy) def GET(self, req): """Returns a 200 response with cross domain policy information """ body = '\n' \ '\n' \ '\n' \ '%s\n' \ '' % self.cross_domain_policy return Response(request=req, body=body, content_type="application/xml") def __call__(self, env, start_response): req = Request(env) if req.path == '/crossdomain.xml' and req.method == 'GET': return self.GET(req)(env, start_response) else: return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) register_swift_info('crossdomain') def crossdomain_filter(app): return CrossDomainMiddleware(app, conf) return crossdomain_filter swift-2.7.0/swift/common/middleware/x_profile/0000775000567000056710000000000012675204211022545 5ustar jenkinsjenkins00000000000000swift-2.7.0/swift/common/middleware/x_profile/exceptions.py0000664000567000056710000000204112675204037025303 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swift import gettext_ as _ class ProfileException(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return _('Profiling Error: %s') % self.msg class NotFoundException(ProfileException): pass class MethodNotAllowed(ProfileException): pass class ODFLIBNotInstalled(ProfileException): pass class PLOTLIBNotInstalled(ProfileException): pass class DataLoadFailure(ProfileException): pass swift-2.7.0/swift/common/middleware/x_profile/html_viewer.py0000664000567000056710000005105712675204037025462 0ustar jenkinsjenkins00000000000000# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import cgi import os import random import re import string import tempfile from swift import gettext_ as _ from exceptions import PLOTLIBNotInstalled, ODFLIBNotInstalled,\ NotFoundException, MethodNotAllowed, DataLoadFailure, ProfileException from profile_model import Stats2 PLOTLIB_INSTALLED = True try: import matplotlib # use agg backend for writing to file, not for rendering in a window. 
# otherwise some platform will complain "no display name and $DISPLAY # environment variable" matplotlib.use('agg') import matplotlib.pyplot as plt except ImportError: PLOTLIB_INSTALLED = False empty_description = """ The default profile of current process or the profile you requested is empty. """ profile_tmpl = """ """ sort_tmpl = """ """ limit_tmpl = """ """ fulldirs_tmpl = """ """ mode_tmpl = """ """ nfl_filter_tmpl = """ """ formelements_tmpl = """
Profile Sort Limit Full Path Filter Plot Metric Plot Type Format
${profile} ${sort} ${limit} ${fulldirs} ${nfl_filter}
""" index_tmpl = """ profile results
${description}

${formelements}
${profilehtml}
    
""" class HTMLViewer(object): format_dict = {'default': 'application/octet-stream', 'json': 'application/json', 'csv': 'text/csv', 'ods': 'application/vnd.oasis.opendocument.spreadsheet', 'python': 'text/html'} def __init__(self, app_path, profile_module, profile_log): self.app_path = app_path self.profile_module = profile_module self.profile_log = profile_log def _get_param(self, query_dict, key, default=None, multiple=False): value = query_dict.get(key, default) if value is None or value == '': return default if multiple: return value if isinstance(value, list): return eval(value[0]) if isinstance(default, int) else value[0] else: return value def render(self, url, method, path_entry, query_dict, clear_callback): plot = self._get_param(query_dict, 'plot', None) download = self._get_param(query_dict, 'download', None) clear = self._get_param(query_dict, 'clear', None) action = plot or download or clear profile_id = self._get_param(query_dict, 'profile', 'current') sort = self._get_param(query_dict, 'sort', 'time') limit = self._get_param(query_dict, 'limit', -1) fulldirs = self._get_param(query_dict, 'fulldirs', 0) nfl_filter = self._get_param(query_dict, 'nfl_filter', '').strip() metric_selected = self._get_param(query_dict, 'metric', 'cc') plot_type = self._get_param(query_dict, 'plottype', 'bar') download_format = self._get_param(query_dict, 'format', 'default') content = '' # GET /__profile, POST /__profile if len(path_entry) == 2 and method in ['GET', 'POST']: log_files = self.profile_log.get_logfiles(profile_id) if action == 'plot': content, headers = self.plot(log_files, sort, limit, nfl_filter, metric_selected, plot_type) elif action == 'download': content, headers = self.download(log_files, sort, limit, nfl_filter, download_format) else: if action == 'clear': self.profile_log.clear(profile_id) clear_callback and clear_callback() content, headers = self.index_page(log_files, sort, limit, fulldirs, nfl_filter, profile_id, url) # GET /__profile__/all # GET /__profile__/current # GET /__profile__/profile_id # GET /__profile__/profile_id/ # GET /__profile__/profile_id/account.py:50(GETorHEAD) # GET /__profile__/profile_id/swift/proxy/controllers # /account.py:50(GETorHEAD) # with QUERY_STRING: ?format=[default|json|csv|ods] elif len(path_entry) > 2 and method == 'GET': profile_id = path_entry[2] log_files = self.profile_log.get_logfiles(profile_id) pids = self.profile_log.get_all_pids() # return all profiles in a json format by default. 
# GET /__profile__/ if profile_id == '': content = '{"profile_ids": ["' + '","'.join(pids) + '"]}' headers = [('content-type', self.format_dict['json'])] else: if len(path_entry) > 3 and path_entry[3] != '': nfl_filter = '/'.join(path_entry[3:]) if path_entry[-1].find(':0') == -1: nfl_filter = '/' + nfl_filter content, headers = self.download(log_files, sort, -1, nfl_filter, download_format) headers.append(('Access-Control-Allow-Origin', '*')) else: raise MethodNotAllowed(_('method %s is not allowed.') % method) return content, headers def index_page(self, log_files=None, sort='time', limit=-1, fulldirs=0, nfl_filter='', profile_id='current', url='#'): headers = [('content-type', 'text/html')] if len(log_files) == 0: return empty_description, headers try: stats = Stats2(*log_files) except (IOError, ValueError): raise DataLoadFailure(_('Can not load profile data from %s.') % log_files) if not fulldirs: stats.strip_dirs() stats.sort_stats(sort) nfl_filter_esc =\ nfl_filter.replace('(', '\(').replace(')', '\)') amount = [nfl_filter_esc, limit] if nfl_filter_esc else [limit] profile_html = self.generate_stats_html(stats, self.app_path, profile_id, *amount) description = "Profiling information is generated by using\ '%s' profiler." % self.profile_module sort_repl = '' % (p, p) for p in self.profile_log.get_all_pids()]) profile_element = string.Template(profile_tmpl).substitute( {'profile_list': plist}) profile_repl = '