swift-2.17.1/0000775000175000017500000000000013435012120012747 5ustar zuulzuul00000000000000swift-2.17.1/swift.egg-info/0000775000175000017500000000000013435012120015575 5ustar zuulzuul00000000000000swift-2.17.1/swift.egg-info/not-zip-safe0000664000175000017500000000000113435012120020023 0ustar zuulzuul00000000000000 swift-2.17.1/swift.egg-info/entry_points.txt0000664000175000017500000000424713435012120021102 0ustar zuulzuul00000000000000[paste.app_factory] account = swift.account.server:app_factory container = swift.container.server:app_factory mem_object = swift.obj.mem_server:app_factory object = swift.obj.server:app_factory proxy = swift.proxy.server:app_factory [paste.filter_factory] account_quotas = swift.common.middleware.account_quotas:filter_factory bulk = swift.common.middleware.bulk:filter_factory catch_errors = swift.common.middleware.catch_errors:filter_factory cname_lookup = swift.common.middleware.cname_lookup:filter_factory container_quotas = swift.common.middleware.container_quotas:filter_factory container_sync = swift.common.middleware.container_sync:filter_factory copy = swift.common.middleware.copy:filter_factory crossdomain = swift.common.middleware.crossdomain:filter_factory dlo = swift.common.middleware.dlo:filter_factory domain_remap = swift.common.middleware.domain_remap:filter_factory encryption = swift.common.middleware.crypto:filter_factory formpost = swift.common.middleware.formpost:filter_factory gatekeeper = swift.common.middleware.gatekeeper:filter_factory healthcheck = swift.common.middleware.healthcheck:filter_factory keymaster = swift.common.middleware.crypto.keymaster:filter_factory keystoneauth = swift.common.middleware.keystoneauth:filter_factory kms_keymaster = swift.common.middleware.crypto.kms_keymaster:filter_factory list_endpoints = swift.common.middleware.list_endpoints:filter_factory listing_formats = swift.common.middleware.listing_formats:filter_factory memcache = swift.common.middleware.memcache:filter_factory name_check = swift.common.middleware.name_check:filter_factory proxy_logging = swift.common.middleware.proxy_logging:filter_factory ratelimit = swift.common.middleware.ratelimit:filter_factory recon = swift.common.middleware.recon:filter_factory slo = swift.common.middleware.slo:filter_factory staticweb = swift.common.middleware.staticweb:filter_factory symlink = swift.common.middleware.symlink:filter_factory tempauth = swift.common.middleware.tempauth:filter_factory tempurl = swift.common.middleware.tempurl:filter_factory versioned_writes = swift.common.middleware.versioned_writes:filter_factory xprofile = swift.common.middleware.xprofile:filter_factory swift-2.17.1/swift.egg-info/requires.txt0000664000175000017500000000074313435012120020201 0ustar zuulzuul00000000000000dnspython>=1.14.0 eventlet>=0.17.4 greenlet>=0.3.1 netifaces!=0.10.0,!=0.10.1,>=0.5 pastedeploy>=1.3.3 six>=1.9.0 xattr>=0.4 PyECLib>=1.3.1 cryptography!=2.0,>=1.6 [kms_keymaster] oslo.config!=4.3.0,!=4.4.0,>=4.0.0 castellan>=0.13.0 [test] hacking<0.12,>=0.11.0 coverage>=3.6 nose nosexcover nosehtmloutput>=0.0.3 sphinx>=1.6.2 openstackdocstheme>=1.11.0 os-api-ref>=1.0.0 os-testr>=0.8.0 mock>=2.0 python-swiftclient python-keystoneclient!=2.1.0,>=2.0.0 reno>=1.8.0 bandit>=1.1.0 swift-2.17.1/swift.egg-info/top_level.txt0000664000175000017500000000000613435012120020323 0ustar zuulzuul00000000000000swift swift-2.17.1/swift.egg-info/dependency_links.txt0000664000175000017500000000000113435012120021643 0ustar zuulzuul00000000000000 
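The ``[paste.filter_factory]`` entries in entry_points.txt above are how paste.deploy locates Swift's middleware: each name maps to a ``filter_factory`` callable in the named module, which returns a function that wraps the next WSGI app in the pipeline. The sketch below shows that factory pattern; the ``HeaderStamp`` class and its ``X-Sketch`` header are hypothetical stand-ins, not Swift code (for a real, minimal example see swift/common/middleware/healthcheck.py)::

    # Sketch of the paste.deploy filter_factory pattern named in
    # entry_points.txt ("name = module:filter_factory"). HeaderStamp and
    # X-Sketch are hypothetical; real Swift middlewares follow this shape.

    class HeaderStamp(object):
        def __init__(self, app, conf):
            self.app = app    # next WSGI app (middleware or the proxy app)
            self.conf = conf  # merged options from the [filter:...] section

        def __call__(self, env, start_response):
            def stamped_start_response(status, headers, exc_info=None):
                # add a response header, then continue the WSGI handshake
                headers.append(('X-Sketch', self.conf.get('stamp', 'yes')))
                return start_response(status, headers, exc_info)
            # delegate the request down the pipeline
            return self.app(env, stamped_start_response)


    def filter_factory(global_conf, **local_conf):
        conf = global_conf.copy()
        conf.update(local_conf)

        def header_stamp_filter(app):
            return HeaderStamp(app, conf)
        return header_stamp_filter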
swift-2.17.1/swift.egg-info/PKG-INFO0000664000175000017500000001732013435012120016675 0ustar zuulzuul00000000000000Metadata-Version: 2.1
Name: swift
Version: 2.17.1
Summary: OpenStack Object Storage
Home-page: https://docs.openstack.org/swift/latest/
Author: OpenStack
Author-email: openstack-dev@lists.openstack.org
License: UNKNOWN
Description: ========================
        Team and repository tags
        ========================

        .. image:: https://governance.openstack.org/badges/swift.svg
            :target: https://governance.openstack.org/reference/tags/index.html

        .. Change things from this point on

        Swift
        =====

        A distributed object storage system designed to scale from a single
        machine to thousands of servers. Swift is optimized for multi-tenancy
        and high concurrency. Swift is ideal for backups, web and mobile
        content, and any other unstructured data that can grow without bound.

        Swift provides a simple, REST-based API fully documented at
        https://docs.openstack.org/.

        Swift was originally developed as the basis for Rackspace's Cloud
        Files and was open-sourced in 2010 as part of the OpenStack project.
        It has since grown to include contributions from many companies and
        has spawned a thriving ecosystem of 3rd-party tools. Swift's
        contributors are listed in the AUTHORS file.

        Docs
        ----

        To build the documentation, install sphinx (``pip install sphinx``),
        run ``python setup.py build_sphinx``, and then browse to
        /doc/build/html/index.html. These docs are auto-generated after every
        commit and are available online at
        https://docs.openstack.org/swift/latest/.

        For Developers
        --------------

        Getting Started
        ~~~~~~~~~~~~~~~

        Swift is part of OpenStack and follows the code contribution, review,
        and testing processes common to all OpenStack projects. If you would
        like to start contributing, check out these
        `notes <https://docs.openstack.org/swift/latest/first_contribution_swift.html>`__
        to help you get started.

        The best place to get started is the
        `"SAIO - Swift All In One" <https://docs.openstack.org/swift/latest/development_saio.html>`__
        document, which will walk you through setting up a development
        cluster of Swift in a VM. The SAIO environment is ideal for running
        small-scale tests against Swift and trying out new features and bug
        fixes.

        Tests
        ~~~~~

        There are three types of tests included in Swift's source tree.

        #. Unit tests
        #. Functional tests
        #. Probe tests

        Unit tests check that small sections of the code behave properly. For
        example, a unit test may exercise a single function to ensure that
        various inputs give the expected output (see the sketch below). This
        validates that the code is correct and that regressions are not
        introduced.

        Functional tests check that the client API is working as expected.
        These can be run against any endpoint claiming to support the Swift
        API (although some tests require multiple accounts with different
        privilege levels). These are "black box" tests that ensure that
        client apps written against Swift will continue to work.

        Probe tests are "white box" tests that validate the internal workings
        of a Swift cluster. They are written to work against the
        `"SAIO - Swift All In One" <https://docs.openstack.org/swift/latest/development_saio.html>`__
        dev environment. For example, a probe test may create an object,
        delete one replica, and ensure that the background consistency
        processes find and correct the error.

        You can run unit tests with ``.unittests``, functional tests with
        ``.functests``, and probe tests with ``.probetests``. There is an
        additional ``.alltests`` script that wraps the other three.

        To fully run the tests, the target environment must use a filesystem
        that supports large xattrs. XFS is strongly recommended.
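        For illustration, a unit test in this style might look like the
        following sketch. ``split_path`` is a real helper in
        ``swift.common.utils`` (the same call the proxy server uses to parse
        request paths); the test class itself is illustrative and not part of
        Swift's test suite::

            import unittest

            from swift.common.utils import split_path


            class TestSplitPath(unittest.TestCase):
                def test_account_container_object(self):
                    # /v1/AUTH_test/cont/obj -> version, account, container,
                    # object (same call get_controller() makes in the proxy)
                    self.assertEqual(
                        split_path('/v1/AUTH_test/cont/obj', 1, 4, True),
                        ['v1', 'AUTH_test', 'cont', 'obj'])

                def test_too_few_segments_raises(self):
                    # paths with fewer segments than minsegs are rejected
                    self.assertRaises(ValueError, split_path, '/v1', 2, 4)


            if __name__ == '__main__':
                unittest.main()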
        For unit tests and in-process functional tests, either mount ``/tmp``
        with XFS or provide another XFS filesystem via the ``TMPDIR``
        environment variable. Without this setting, tests should still pass,
        but a very large number will be skipped.

        Code Organization
        ~~~~~~~~~~~~~~~~~

        -  bin/: Executable scripts that are the processes run by the deployer
        -  doc/: Documentation
        -  etc/: Sample config files
        -  examples/: Config snippets used in the docs
        -  swift/: Core code

           -  account/: account server
           -  cli/: code that backs some of the CLI tools in bin/
           -  common/: code shared by different modules

              -  middleware/: "standard", officially-supported middleware
              -  ring/: code implementing Swift's ring

           -  container/: container server
           -  locale/: internationalization (translation) data
           -  obj/: object server
           -  proxy/: proxy server

        -  test/: Unit, functional, and probe tests

        Data Flow
        ~~~~~~~~~

        Swift is a WSGI application and uses eventlet's WSGI server. After
        the processes are running, the entry point for new requests is the
        ``Application`` class in ``swift/proxy/server.py``. From there, a
        controller is chosen, and the request is processed. The proxy may
        choose to forward the request to a back-end server. For example, the
        entry point for requests to the object server is the
        ``ObjectController`` class in ``swift/obj/server.py``.

        For Deployers
        -------------

        Deployer docs are also available at
        https://docs.openstack.org/swift/latest/. A good starting point is
        the deployment guide at
        https://docs.openstack.org/swift/latest/deployment_guide.html.

        There is an
        `ops runbook <https://docs.openstack.org/swift/latest/ops_runbook/index.html>`__
        that gives information about how to diagnose and troubleshoot common
        issues when running a Swift cluster.

        You can run functional tests against a Swift cluster with
        ``.functests``. These functional tests require
        ``/etc/swift/test.conf`` to run. A sample config file can be found in
        this source tree in ``test/sample.conf``.

        For Client Apps
        ---------------

        For client applications, official Python language bindings are
        provided at https://github.com/openstack/python-swiftclient (see the
        example below). Complete API documentation is available at
        https://developer.openstack.org/api-ref/object-store/.

        There is a large ecosystem of applications and libraries that support
        and work with OpenStack Swift. Several are listed on the
        `associated projects <https://docs.openstack.org/swift/latest/associated_projects.html>`__
        page.

        --------------

        For more information, come hang out in #openstack-swift on freenode.
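        As a taste of those bindings, the sketch below uploads and fetches an
        object. The auth URL and the ``test:tester``/``testing`` credentials
        are the TempAuth defaults of an SAIO-style dev cluster, not universal
        values; substitute your own endpoint and credentials::

            import swiftclient

            # v1 (TempAuth-style) authentication against a dev cluster
            conn = swiftclient.Connection(
                authurl='http://127.0.0.1:8080/auth/v1.0',
                user='test:tester',
                key='testing')

            conn.put_container('photos')
            conn.put_object('photos', 'hello.txt',
                            contents=b'Hello, Swift!',
                            content_type='text/plain')

            headers, body = conn.get_object('photos', 'hello.txt')
            print(body)  # b'Hello, Swift!'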
Thanks, The Swift Development Team Platform: UNKNOWN Classifier: Development Status :: 5 - Production/Stable Classifier: Environment :: OpenStack Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Provides-Extra: kms_keymaster Provides-Extra: test swift-2.17.1/swift.egg-info/SOURCES.txt0000664000175000017500000005151413435012120017467 0ustar zuulzuul00000000000000.alltests .coveragerc .functests .mailmap .manpages .probetests .testr.conf .unittests .zuul.yaml AUTHORS CHANGELOG CONTRIBUTING.rst LICENSE MANIFEST.in README.rst REVIEW_GUIDELINES.rst babel.cfg bandit.yaml bindep.txt requirements.txt setup.cfg setup.py test-requirements.txt tox.ini api-ref/source/conf.py api-ref/source/index.rst api-ref/source/metadata_header_encoding.inc api-ref/source/metadata_header_syntax.inc api-ref/source/parameters.yaml api-ref/source/storage-account-services.inc api-ref/source/storage-container-services.inc api-ref/source/storage-object-services.inc api-ref/source/storage_endpoints.inc api-ref/source/storage_info.inc api-ref/source/samples/account-containers-list-http-request-json.txt api-ref/source/samples/account-containers-list-http-request-xml.txt api-ref/source/samples/account-containers-list-http-response-json.txt api-ref/source/samples/account-containers-list-http-response-xml.txt api-ref/source/samples/account-containers-list-response.json api-ref/source/samples/account-containers-list-response.xml api-ref/source/samples/capabilities-list-response.json api-ref/source/samples/containers-list-http-request.txt api-ref/source/samples/containers-list-http-response.txt api-ref/source/samples/endpoints-list-response-headers.json api-ref/source/samples/endpoints-list-response.json api-ref/source/samples/goodbyeworld.txt api-ref/source/samples/helloworld.txt api-ref/source/samples/objects-list-http-response-json.txt api-ref/source/samples/objects-list-http-response-xml.txt api-ref/source/samples/objects-list-response.json api-ref/source/samples/objects-list-response.xml bin/swift-account-audit bin/swift-account-auditor bin/swift-account-info bin/swift-account-reaper bin/swift-account-replicator bin/swift-account-server bin/swift-config bin/swift-container-auditor bin/swift-container-info bin/swift-container-reconciler bin/swift-container-replicator bin/swift-container-server bin/swift-container-sync bin/swift-container-updater bin/swift-dispersion-populate bin/swift-dispersion-report bin/swift-drive-audit bin/swift-form-signature bin/swift-get-nodes bin/swift-init bin/swift-object-auditor bin/swift-object-expirer bin/swift-object-info bin/swift-object-reconstructor bin/swift-object-relinker bin/swift-object-replicator bin/swift-object-server bin/swift-object-updater bin/swift-oldies bin/swift-orphans bin/swift-proxy-server bin/swift-recon bin/swift-recon-cron bin/swift-reconciler-enqueue bin/swift-ring-builder bin/swift-ring-builder-analyzer doc/manpages/account-server.conf.5 doc/manpages/container-reconciler.conf.5 doc/manpages/container-server.conf.5 doc/manpages/container-sync-realms.conf.5 doc/manpages/dispersion.conf.5 doc/manpages/object-expirer.conf.5 doc/manpages/object-server.conf.5 doc/manpages/proxy-server.conf.5 doc/manpages/swift-account-audit.1 doc/manpages/swift-account-auditor.1 
doc/manpages/swift-account-info.1 doc/manpages/swift-account-reaper.1 doc/manpages/swift-account-replicator.1 doc/manpages/swift-account-server.1 doc/manpages/swift-config.1 doc/manpages/swift-container-auditor.1 doc/manpages/swift-container-info.1 doc/manpages/swift-container-reconciler.1 doc/manpages/swift-container-replicator.1 doc/manpages/swift-container-server.1 doc/manpages/swift-container-sync.1 doc/manpages/swift-container-updater.1 doc/manpages/swift-dispersion-populate.1 doc/manpages/swift-dispersion-report.1 doc/manpages/swift-drive-audit.1 doc/manpages/swift-form-signature.1 doc/manpages/swift-get-nodes.1 doc/manpages/swift-init.1 doc/manpages/swift-object-auditor.1 doc/manpages/swift-object-expirer.1 doc/manpages/swift-object-info.1 doc/manpages/swift-object-reconstructor.1 doc/manpages/swift-object-replicator.1 doc/manpages/swift-object-server.1 doc/manpages/swift-object-updater.1 doc/manpages/swift-oldies.1 doc/manpages/swift-orphans.1 doc/manpages/swift-proxy-server.1 doc/manpages/swift-recon-cron.1 doc/manpages/swift-recon.1 doc/manpages/swift-reconciler-enqueue.1 doc/manpages/swift-ring-builder-analyzer.1 doc/manpages/swift-ring-builder.1 doc/manpages/swift.conf.5 doc/saio/rsyncd.conf doc/saio/bin/remakerings doc/saio/bin/resetswift doc/saio/bin/startmain doc/saio/bin/startrest doc/saio/rsyslog.d/10-swift.conf doc/saio/swift/container-reconciler.conf doc/saio/swift/container-sync-realms.conf doc/saio/swift/object-expirer.conf doc/saio/swift/proxy-server.conf doc/saio/swift/swift.conf doc/saio/swift/account-server/1.conf doc/saio/swift/account-server/2.conf doc/saio/swift/account-server/3.conf doc/saio/swift/account-server/4.conf doc/saio/swift/container-server/1.conf doc/saio/swift/container-server/2.conf doc/saio/swift/container-server/3.conf doc/saio/swift/container-server/4.conf doc/saio/swift/object-server/1.conf doc/saio/swift/object-server/2.conf doc/saio/swift/object-server/3.conf doc/saio/swift/object-server/4.conf doc/source/account.rst doc/source/admin_guide.rst doc/source/apache_deployment_guide.rst doc/source/associated_projects.rst doc/source/conf.py doc/source/container.rst doc/source/cors.rst doc/source/crossdomain.rst doc/source/db.rst doc/source/deployment_guide.rst doc/source/development_auth.rst doc/source/development_guidelines.rst doc/source/development_middleware.rst doc/source/development_ondisk_backends.rst doc/source/development_saio.rst doc/source/first_contribution_swift.rst doc/source/getting_started.rst doc/source/howto_installmultinode.rst doc/source/index.rst doc/source/logs.rst doc/source/middleware.rst doc/source/misc.rst doc/source/object.rst doc/source/overview_acl.rst doc/source/overview_architecture.rst doc/source/overview_auth.rst doc/source/overview_backing_store.rst doc/source/overview_container_sync.rst doc/source/overview_encryption.rst doc/source/overview_erasure_code.rst doc/source/overview_expiring_objects.rst doc/source/overview_global_cluster.rst doc/source/overview_large_objects.rst doc/source/overview_object_versioning.rst doc/source/overview_policies.rst doc/source/overview_reaper.rst doc/source/overview_replication.rst doc/source/overview_ring.rst doc/source/policies_saio.rst doc/source/proxy.rst doc/source/ratelimit.rst doc/source/replication_network.rst doc/source/ring.rst doc/source/ring_background.rst doc/source/ring_partpower.rst doc/source/test-cors.html doc/source/_extra/.htaccess doc/source/admin/index.rst doc/source/admin/objectstorage-EC.rst doc/source/admin/objectstorage-account-reaper.rst 
doc/source/admin/objectstorage-admin.rst doc/source/admin/objectstorage-arch.rst doc/source/admin/objectstorage-auditors.rst doc/source/admin/objectstorage-characteristics.rst doc/source/admin/objectstorage-components.rst doc/source/admin/objectstorage-features.rst doc/source/admin/objectstorage-intro.rst doc/source/admin/objectstorage-large-objects.rst doc/source/admin/objectstorage-monitoring.rst doc/source/admin/objectstorage-replication.rst doc/source/admin/objectstorage-ringbuilder.rst doc/source/admin/objectstorage-tenant-specific-image-storage.rst doc/source/admin/objectstorage-troubleshoot.rst doc/source/admin/figures/objectstorage-accountscontainers.png doc/source/admin/figures/objectstorage-arch.png doc/source/admin/figures/objectstorage-buildingblocks.png doc/source/admin/figures/objectstorage-nodes.png doc/source/admin/figures/objectstorage-partitions.png doc/source/admin/figures/objectstorage-replication.png doc/source/admin/figures/objectstorage-ring.png doc/source/admin/figures/objectstorage-usecase.png doc/source/admin/figures/objectstorage-zones.png doc/source/admin/figures/objectstorage.png doc/source/api/authentication.rst doc/source/api/container_quotas.rst doc/source/api/discoverability.rst doc/source/api/form_post_middleware.rst doc/source/api/large_objects.rst doc/source/api/object_api_v1_overview.rst doc/source/api/object_versioning.rst doc/source/api/temporary_url_middleware.rst doc/source/api/use_content-encoding_metadata.rst doc/source/api/use_the_content-disposition_metadata.rst doc/source/images/ec_overview.png doc/source/install/controller-common_prerequisites.txt doc/source/install/controller-include.txt doc/source/install/controller-install-debian.rst doc/source/install/controller-install-obs.rst doc/source/install/controller-install-rdo.rst doc/source/install/controller-install-ubuntu.rst doc/source/install/controller-install.rst doc/source/install/edit_hosts_file.txt doc/source/install/environment-networking.rst doc/source/install/finalize-installation-obs.rst doc/source/install/finalize-installation-rdo.rst doc/source/install/finalize-installation-ubuntu-debian.rst doc/source/install/finalize-installation.rst doc/source/install/get_started.rst doc/source/install/index.rst doc/source/install/initial-rings.rst doc/source/install/next-steps.rst doc/source/install/storage-include1.txt doc/source/install/storage-include2.txt doc/source/install/storage-include3.txt doc/source/install/storage-install-obs.rst doc/source/install/storage-install-rdo.rst doc/source/install/storage-install-ubuntu-debian.rst doc/source/install/storage-install.rst doc/source/install/verify.rst doc/source/ops_runbook/diagnose.rst doc/source/ops_runbook/index.rst doc/source/ops_runbook/maintenance.rst doc/source/ops_runbook/procedures.rst doc/source/ops_runbook/troubleshooting.rst etc/account-server.conf-sample etc/container-reconciler.conf-sample etc/container-server.conf-sample etc/container-sync-realms.conf-sample etc/dispersion.conf-sample etc/drive-audit.conf-sample etc/internal-client.conf-sample etc/keymaster.conf-sample etc/memcache.conf-sample etc/mime.types-sample etc/object-expirer.conf-sample etc/object-server.conf-sample etc/proxy-server.conf-sample etc/rsyncd.conf-sample etc/swift-rsyslog.conf-sample etc/swift.conf-sample examples/apache2/account-server.template examples/apache2/container-server.template examples/apache2/object-server.template examples/apache2/proxy-server.template examples/wsgi/account-server.wsgi.template examples/wsgi/container-server.wsgi.template 
examples/wsgi/object-server.wsgi.template examples/wsgi/proxy-server.wsgi.template releasenotes/notes/2_10_0_release-666a76f4975657a5.yaml releasenotes/notes/2_11_0_release-ac1d256e455d347e.yaml releasenotes/notes/2_12_0_release-06af226abc7b91ef.yaml releasenotes/notes/2_13_0_release-875e1fb1ef59f015.yaml releasenotes/notes/2_14_0_release-7c3ef515ebded888.yaml releasenotes/notes/2_15_0_release-0a05a011fb85a9c9.yaml releasenotes/notes/2_15_1_release-be25e67bfc5e886a.yaml releasenotes/notes/2_16_0_release-d48cb9b2629df8ab.yaml releasenotes/notes/2_17_0_release-bd35f18c41c5ef18.yaml releasenotes/notes/2_17_1_release-dd6e6879cbb94f85.yaml releasenotes/source/conf.py releasenotes/source/current.rst releasenotes/source/index.rst releasenotes/source/newton.rst releasenotes/source/ocata.rst releasenotes/source/pike.rst swift/__init__.py swift.egg-info/PKG-INFO swift.egg-info/SOURCES.txt swift.egg-info/dependency_links.txt swift.egg-info/entry_points.txt swift.egg-info/not-zip-safe swift.egg-info/pbr.json swift.egg-info/requires.txt swift.egg-info/top_level.txt swift/account/__init__.py swift/account/auditor.py swift/account/backend.py swift/account/reaper.py swift/account/replicator.py swift/account/server.py swift/account/utils.py swift/cli/__init__.py swift/cli/dispersion_report.py swift/cli/form_signature.py swift/cli/info.py swift/cli/recon.py swift/cli/relinker.py swift/cli/ring_builder_analyzer.py swift/cli/ringbuilder.py swift/common/__init__.py swift/common/base_storage_server.py swift/common/bufferedhttp.py swift/common/constraints.py swift/common/container_sync_realms.py swift/common/daemon.py swift/common/db.py swift/common/db_replicator.py swift/common/direct_client.py swift/common/exceptions.py swift/common/header_key_dict.py swift/common/http.py swift/common/internal_client.py swift/common/linkat.py swift/common/manager.py swift/common/memcached.py swift/common/request_helpers.py swift/common/splice.py swift/common/storage_policy.py swift/common/swob.py swift/common/utils.py swift/common/wsgi.py swift/common/middleware/__init__.py swift/common/middleware/account_quotas.py swift/common/middleware/acl.py swift/common/middleware/bulk.py swift/common/middleware/catch_errors.py swift/common/middleware/cname_lookup.py swift/common/middleware/container_quotas.py swift/common/middleware/container_sync.py swift/common/middleware/copy.py swift/common/middleware/crossdomain.py swift/common/middleware/dlo.py swift/common/middleware/domain_remap.py swift/common/middleware/formpost.py swift/common/middleware/gatekeeper.py swift/common/middleware/healthcheck.py swift/common/middleware/keystoneauth.py swift/common/middleware/list_endpoints.py swift/common/middleware/listing_formats.py swift/common/middleware/memcache.py swift/common/middleware/name_check.py swift/common/middleware/proxy_logging.py swift/common/middleware/ratelimit.py swift/common/middleware/recon.py swift/common/middleware/slo.py swift/common/middleware/staticweb.py swift/common/middleware/symlink.py swift/common/middleware/tempauth.py swift/common/middleware/tempurl.py swift/common/middleware/versioned_writes.py swift/common/middleware/xprofile.py swift/common/middleware/crypto/__init__.py swift/common/middleware/crypto/crypto_utils.py swift/common/middleware/crypto/decrypter.py swift/common/middleware/crypto/encrypter.py swift/common/middleware/crypto/keymaster.py swift/common/middleware/crypto/kms_keymaster.py swift/common/middleware/x_profile/__init__.py swift/common/middleware/x_profile/exceptions.py 
swift/common/middleware/x_profile/html_viewer.py swift/common/middleware/x_profile/profile_model.py swift/common/ring/__init__.py swift/common/ring/builder.py swift/common/ring/composite_builder.py swift/common/ring/ring.py swift/common/ring/utils.py swift/container/__init__.py swift/container/auditor.py swift/container/backend.py swift/container/reconciler.py swift/container/replicator.py swift/container/server.py swift/container/sync.py swift/container/sync_store.py swift/container/updater.py swift/locale/de/LC_MESSAGES/swift.po swift/locale/en_GB/LC_MESSAGES/swift.po swift/locale/es/LC_MESSAGES/swift.po swift/locale/fr/LC_MESSAGES/swift.po swift/locale/it/LC_MESSAGES/swift.po swift/locale/ja/LC_MESSAGES/swift.po swift/locale/ko_KR/LC_MESSAGES/swift.po swift/locale/pt_BR/LC_MESSAGES/swift.po swift/locale/ru/LC_MESSAGES/swift.po swift/locale/tr_TR/LC_MESSAGES/swift.po swift/locale/zh_CN/LC_MESSAGES/swift.po swift/locale/zh_TW/LC_MESSAGES/swift.po swift/obj/__init__.py swift/obj/auditor.py swift/obj/diskfile.py swift/obj/expirer.py swift/obj/mem_diskfile.py swift/obj/mem_server.py swift/obj/reconstructor.py swift/obj/replicator.py swift/obj/server.py swift/obj/ssync_receiver.py swift/obj/ssync_sender.py swift/obj/updater.py swift/proxy/__init__.py swift/proxy/server.py swift/proxy/controllers/__init__.py swift/proxy/controllers/account.py swift/proxy/controllers/base.py swift/proxy/controllers/container.py swift/proxy/controllers/info.py swift/proxy/controllers/obj.py test/__init__.py test/sample.conf test/functional/__init__.py test/functional/mock_swift_key_manager.py test/functional/swift_test_client.py test/functional/test_access_control.py test/functional/test_account.py test/functional/test_container.py test/functional/test_dlo.py test/functional/test_object.py test/functional/test_slo.py test/functional/test_symlink.py test/functional/test_tempurl.py test/functional/test_versioned_writes.py test/functional/tests.py test/probe/__init__.py test/probe/brain.py test/probe/common.py test/probe/test_account_failures.py test/probe/test_account_get_fake_responses_match.py test/probe/test_account_reaper.py test/probe/test_container_failures.py test/probe/test_container_merge_policy_index.py test/probe/test_container_sync.py test/probe/test_db_replicator.py test/probe/test_empty_device_handoff.py test/probe/test_object_async_update.py test/probe/test_object_expirer.py test/probe/test_object_failures.py test/probe/test_object_handoff.py test/probe/test_object_metadata_replication.py test/probe/test_object_partpower_increase.py test/probe/test_reconstructor_rebuild.py test/probe/test_reconstructor_revert.py test/probe/test_replication_servers_working.py test/probe/test_signals.py test/unit/__init__.py test/unit/helpers.py test/unit/account/__init__.py test/unit/account/test_auditor.py test/unit/account/test_backend.py test/unit/account/test_reaper.py test/unit/account/test_replicator.py test/unit/account/test_server.py test/unit/account/test_utils.py test/unit/cli/__init__.py test/unit/cli/test_default_output.stub test/unit/cli/test_default_output_id_assigned.stub test/unit/cli/test_default_sorted_output.stub test/unit/cli/test_dispersion_report.py test/unit/cli/test_form_signature.py test/unit/cli/test_info.py test/unit/cli/test_ipv6_output.stub test/unit/cli/test_recon.py test/unit/cli/test_relinker.py test/unit/cli/test_ring_builder_analyzer.py test/unit/cli/test_ringbuilder.py test/unit/common/__init__.py test/unit/common/corrupted_example.db test/unit/common/malformed_example.db 
test/unit/common/malformed_schema_example.db test/unit/common/test_base_storage_server.py test/unit/common/test_bufferedhttp.py test/unit/common/test_constraints.py test/unit/common/test_container_sync_realms.py test/unit/common/test_daemon.py test/unit/common/test_db.py test/unit/common/test_db_replicator.py test/unit/common/test_direct_client.py test/unit/common/test_exceptions.py test/unit/common/test_header_key_dict.py test/unit/common/test_internal_client.py test/unit/common/test_linkat.py test/unit/common/test_manager.py test/unit/common/test_memcached.py test/unit/common/test_request_helpers.py test/unit/common/test_splice.py test/unit/common/test_storage_policy.py test/unit/common/test_swob.py test/unit/common/test_utils.py test/unit/common/test_wsgi.py test/unit/common/middleware/__init__.py test/unit/common/middleware/helpers.py test/unit/common/middleware/test_account_quotas.py test/unit/common/middleware/test_acl.py test/unit/common/middleware/test_bulk.py test/unit/common/middleware/test_cname_lookup.py test/unit/common/middleware/test_container_sync.py test/unit/common/middleware/test_copy.py test/unit/common/middleware/test_crossdomain.py test/unit/common/middleware/test_dlo.py test/unit/common/middleware/test_domain_remap.py test/unit/common/middleware/test_except.py test/unit/common/middleware/test_formpost.py test/unit/common/middleware/test_gatekeeper.py test/unit/common/middleware/test_healthcheck.py test/unit/common/middleware/test_keystoneauth.py test/unit/common/middleware/test_list_endpoints.py test/unit/common/middleware/test_listing_formats.py test/unit/common/middleware/test_memcache.py test/unit/common/middleware/test_name_check.py test/unit/common/middleware/test_proxy_logging.py test/unit/common/middleware/test_quotas.py test/unit/common/middleware/test_ratelimit.py test/unit/common/middleware/test_recon.py test/unit/common/middleware/test_slo.py test/unit/common/middleware/test_staticweb.py test/unit/common/middleware/test_subrequest_logging.py test/unit/common/middleware/test_symlink.py test/unit/common/middleware/test_tempauth.py test/unit/common/middleware/test_tempurl.py test/unit/common/middleware/test_versioned_writes.py test/unit/common/middleware/test_xprofile.py test/unit/common/middleware/crypto/__init__.py test/unit/common/middleware/crypto/crypto_helpers.py test/unit/common/middleware/crypto/test_crypto.py test/unit/common/middleware/crypto/test_crypto_utils.py test/unit/common/middleware/crypto/test_decrypter.py test/unit/common/middleware/crypto/test_encrypter.py test/unit/common/middleware/crypto/test_encryption.py test/unit/common/middleware/crypto/test_keymaster.py test/unit/common/middleware/crypto/test_kms_keymaster.py test/unit/common/ring/__init__.py test/unit/common/ring/test_builder.py test/unit/common/ring/test_composite_builder.py test/unit/common/ring/test_ring.py test/unit/common/ring/test_utils.py test/unit/container/__init__.py test/unit/container/test_auditor.py test/unit/container/test_backend.py test/unit/container/test_reconciler.py test/unit/container/test_replicator.py test/unit/container/test_server.py test/unit/container/test_sync.py test/unit/container/test_sync_store.py test/unit/container/test_updater.py test/unit/obj/__init__.py test/unit/obj/common.py test/unit/obj/test_auditor.py test/unit/obj/test_diskfile.py test/unit/obj/test_expirer.py test/unit/obj/test_reconstructor.py test/unit/obj/test_replicator.py test/unit/obj/test_server.py test/unit/obj/test_ssync.py test/unit/obj/test_ssync_receiver.py 
test/unit/obj/test_ssync_sender.py test/unit/obj/test_updater.py test/unit/proxy/__init__.py test/unit/proxy/test_mem_server.py test/unit/proxy/test_server.py test/unit/proxy/test_sysmeta.py test/unit/proxy/controllers/__init__.py test/unit/proxy/controllers/test_account.py test/unit/proxy/controllers/test_base.py test/unit/proxy/controllers/test_container.py test/unit/proxy/controllers/test_info.py test/unit/proxy/controllers/test_obj.py test/unit/test_locale/README test/unit/test_locale/__init__.py test/unit/test_locale/eo.po test/unit/test_locale/messages.mo test/unit/test_locale/test_locale.py test/unit/test_locale/eo/LC_MESSAGES/swift.mo tools/test-setup.sh tools/playbooks/dsvm/post.yaml tools/playbooks/dsvm/pre.yaml tools/playbooks/dsvm/run.yamlswift-2.17.1/swift.egg-info/pbr.json0000664000175000017500000000005613435012120017254 0ustar zuulzuul00000000000000{"git_version": "ecbf74f", "is_release": true}swift-2.17.1/bandit.yaml0000666000175000017500000001722513435012003015105 0ustar zuulzuul00000000000000 ### This config may optionally select a subset of tests to run or skip by ### filling out the 'tests' and 'skips' lists given below. If no tests are ### specified for inclusion then it is assumed all tests are desired. The skips ### set will remove specific tests from the include set. This can be controlled ### using the -t/-s CLI options. Note that the same test ID should not appear ### in both 'tests' and 'skips', this would be nonsensical and is detected by ### Bandit at runtime. # Available tests: # B101 : assert_used # B102 : exec_used # B103 : set_bad_file_permissions # B104 : hardcoded_bind_all_interfaces # B105 : hardcoded_password_string # B106 : hardcoded_password_funcarg # B107 : hardcoded_password_default # B108 : hardcoded_tmp_directory # B109 : password_config_option_not_marked_secret # B110 : try_except_pass # B111 : execute_with_run_as_root_equals_true # B112 : try_except_continue # B201 : flask_debug_true # B301 : pickle # B302 : marshal # B303 : md5 # B304 : ciphers # B305 : cipher_modes # B306 : mktemp_q # B307 : eval # B308 : mark_safe # B309 : httpsconnection # B310 : urllib_urlopen # B311 : random # B312 : telnetlib # B313 : xml_bad_cElementTree # B314 : xml_bad_ElementTree # B315 : xml_bad_expatreader # B316 : xml_bad_expatbuilder # B317 : xml_bad_sax # B318 : xml_bad_minidom # B319 : xml_bad_pulldom # B320 : xml_bad_etree # B321 : ftplib # B401 : import_telnetlib # B402 : import_ftplib # B403 : import_pickle # B404 : import_subprocess # B405 : import_xml_etree # B406 : import_xml_sax # B407 : import_xml_expat # B408 : import_xml_minidom # B409 : import_xml_pulldom # B410 : import_lxml # B411 : import_xmlrpclib # B412 : import_httpoxy # B501 : request_with_no_cert_validation # B502 : ssl_with_bad_version # B503 : ssl_with_bad_defaults # B504 : ssl_with_no_version # B505 : weak_cryptographic_key # B506 : yaml_load # B601 : paramiko_calls # B602 : subprocess_popen_with_shell_equals_true # B603 : subprocess_without_shell_equals_true # B604 : any_other_function_with_shell_equals_true # B605 : start_process_with_a_shell # B606 : start_process_with_no_shell # B607 : start_process_with_partial_path # B608 : hardcoded_sql_expressions # B609 : linux_commands_wildcard_injection # B701 : jinja2_autoescape_false # B702 : use_of_mako_templates # (optional) list included test IDs here, eg '[B101, B406]': tests: [B102, B103, B302, B306, B308, B309, B310, B401, B501, B502, B506, B601, B602, B609] # (optional) list skipped test IDs here, eg '[B101, B406]': skips: ### (optional) 
plugin settings - some test plugins require configuration data ### that may be given here, per-plugin. All bandit test plugins have a built in ### set of sensible defaults and these will be used if no configuration is ### provided. It is not necessary to provide settings for every (or any) plugin ### if the defaults are acceptable. #any_other_function_with_shell_equals_true: # no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve, os.execvp, # os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, # os.spawnvp, os.spawnvpe, os.startfile] # shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, # popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, # utils.execute, utils.execute_with_timeout] #execute_with_run_as_root_equals_true: # function_names: [ceilometer.utils.execute, cinder.utils.execute, neutron.agent.linux.utils.execute, # nova.utils.execute, nova.utils.trycmd] #hardcoded_tmp_directory: # tmp_dirs: [/tmp, /var/tmp, /dev/shm] #linux_commands_wildcard_injection: # no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve, os.execvp, # os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, # os.spawnvp, os.spawnvpe, os.startfile] # shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, # popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, # utils.execute, utils.execute_with_timeout] #password_config_option_not_marked_secret: # function_names: [oslo.config.cfg.StrOpt, oslo_config.cfg.StrOpt] #ssl_with_bad_defaults: # bad_protocol_versions: [PROTOCOL_SSLv2, SSLv2_METHOD, SSLv23_METHOD, PROTOCOL_SSLv3, # PROTOCOL_TLSv1, SSLv3_METHOD, TLSv1_METHOD] #ssl_with_bad_version: # bad_protocol_versions: [PROTOCOL_SSLv2, SSLv2_METHOD, SSLv23_METHOD, PROTOCOL_SSLv3, # PROTOCOL_TLSv1, SSLv3_METHOD, TLSv1_METHOD] #start_process_with_a_shell: # no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve, os.execvp, # os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, # os.spawnvp, os.spawnvpe, os.startfile] # shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, # popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, # utils.execute, utils.execute_with_timeout] #start_process_with_no_shell: # no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve, os.execvp, # os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, # os.spawnvp, os.spawnvpe, os.startfile] # shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, # popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, # utils.execute, utils.execute_with_timeout] #start_process_with_partial_path: # no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve, os.execvp, # os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, # os.spawnvp, 
os.spawnvpe, os.startfile] # shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, # popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, # utils.execute, utils.execute_with_timeout] #subprocess_popen_with_shell_equals_true: # no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve, os.execvp, # os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, # os.spawnvp, os.spawnvpe, os.startfile] # shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, # popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, # utils.execute, utils.execute_with_timeout] #subprocess_without_shell_equals_true: # no_shell: [os.execl, os.execle, os.execlp, os.execlpe, os.execv, os.execve, os.execvp, # os.execvpe, os.spawnl, os.spawnle, os.spawnlp, os.spawnlpe, os.spawnv, os.spawnve, # os.spawnvp, os.spawnvpe, os.startfile] # shell: [os.system, os.popen, os.popen2, os.popen3, os.popen4, popen2.popen2, popen2.popen3, # popen2.popen4, popen2.Popen3, popen2.Popen4, commands.getoutput, commands.getstatusoutput] # subprocess: [subprocess.Popen, subprocess.call, subprocess.check_call, subprocess.check_output, # utils.execute, utils.execute_with_timeout] #try_except_continue: {check_typed_exception: false} #try_except_pass: {check_typed_exception: false} swift-2.17.1/MANIFEST.in0000666000175000017500000000050113435012015014506 0ustar zuulzuul00000000000000include AUTHORS LICENSE .functests .unittests .probetests test/__init__.py include CHANGELOG CONTRIBUTING.rst README.rst include babel.cfg include test/sample.conf include tox.ini include requirements.txt test-requirements.txt graft doc graft etc graft swift/locale graft test/functional graft test/probe graft test/unit swift-2.17.1/swift/0000775000175000017500000000000013435012120014103 5ustar zuulzuul00000000000000swift-2.17.1/swift/proxy/0000775000175000017500000000000013435012120015264 5ustar zuulzuul00000000000000swift-2.17.1/swift/proxy/server.py0000666000175000017500000007627313435012015017170 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
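# Module overview: this file implements the Swift proxy server. It defines
# the ``Application`` WSGI app (the entry point for client requests),
# ``ProxyOverrideOptions`` (per-storage-policy overrides for node sorting
# and read/write affinity), and ``required_filters`` (middleware that
# ``modify_wsgi_pipeline()`` inserts automatically when a deployer's
# pipeline omits it).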
import mimetypes import os import socket from collections import defaultdict from swift import gettext_ as _ from random import shuffle from time import time import functools import sys from eventlet import Timeout from swift import __canonical_version__ as swift_version from swift.common import constraints from swift.common.storage_policy import POLICIES from swift.common.ring import Ring from swift.common.utils import cache_from_env, get_logger, \ get_remote_client, split_path, config_true_value, generate_trans_id, \ affinity_key_function, affinity_locality_predicate, list_from_csv, \ register_swift_info, readconf, config_auto_int_value from swift.common.constraints import check_utf8, valid_api_version from swift.proxy.controllers import AccountController, ContainerController, \ ObjectControllerRouter, InfoController from swift.proxy.controllers.base import get_container_info, NodeIter, \ DEFAULT_RECHECK_CONTAINER_EXISTENCE, DEFAULT_RECHECK_ACCOUNT_EXISTENCE from swift.common.swob import HTTPBadRequest, HTTPForbidden, \ HTTPMethodNotAllowed, HTTPNotFound, HTTPPreconditionFailed, \ HTTPServerError, HTTPException, Request, HTTPServiceUnavailable from swift.common.exceptions import APIVersionError # List of entry points for mandatory middlewares. # # Fields: # # "name" (required) is the entry point name from setup.py. # # "after_fn" (optional) a function that takes a PipelineWrapper object as its # single argument and returns a list of middlewares that this middleware # should come after. Any middlewares in the returned list that are not present # in the pipeline will be ignored, so you can safely name optional middlewares # to come after. For example, ["catch_errors", "bulk"] would install this # middleware after catch_errors and bulk if both were present, but if bulk # were absent, would just install it after catch_errors. required_filters = [ {'name': 'catch_errors'}, {'name': 'gatekeeper', 'after_fn': lambda pipe: (['catch_errors'] if pipe.startswith('catch_errors') else [])}, {'name': 'listing_formats', 'after_fn': lambda _junk: [ 'catch_errors', 'gatekeeper', 'proxy_logging', 'memcache']}, # Put copy before dlo, slo and versioned_writes {'name': 'copy', 'after_fn': lambda _junk: [ 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}, {'name': 'dlo', 'after_fn': lambda _junk: [ 'copy', 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}, {'name': 'versioned_writes', 'after_fn': lambda _junk: [ 'slo', 'dlo', 'copy', 'staticweb', 'tempauth', 'keystoneauth', 'catch_errors', 'gatekeeper', 'proxy_logging']}, ] def _label_for_policy(policy): if policy is not None: return 'policy %s (%s)' % (policy.idx, policy.name) return '(default)' class ProxyOverrideOptions(object): """ Encapsulates proxy server options that may be overridden e.g. for policy specific configurations. :param conf: the proxy-server config dict. :param override_conf: a dict of overriding configuration options. 
""" def __init__(self, base_conf, override_conf): def get(key, default): return override_conf.get(key, base_conf.get(key, default)) self.sorting_method = get('sorting_method', 'shuffle').lower() self.read_affinity = get('read_affinity', '') try: self.read_affinity_sort_key = affinity_key_function( self.read_affinity) except ValueError as err: # make the message a little more useful raise ValueError("Invalid read_affinity value: %r (%s)" % (self.read_affinity, err.message)) self.write_affinity = get('write_affinity', '') try: self.write_affinity_is_local_fn \ = affinity_locality_predicate(self.write_affinity) except ValueError as err: # make the message a little more useful raise ValueError("Invalid write_affinity value: %r (%s)" % (self.write_affinity, err.message)) self.write_affinity_node_count = get( 'write_affinity_node_count', '2 * replicas').lower() value = self.write_affinity_node_count.split() if len(value) == 1: wanc_value = int(value[0]) self.write_affinity_node_count_fn = lambda replicas: wanc_value elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas': wanc_value = int(value[0]) self.write_affinity_node_count_fn = \ lambda replicas: wanc_value * replicas else: raise ValueError( 'Invalid write_affinity_node_count value: %r' % (' '.join(value))) self.write_affinity_handoff_delete_count = config_auto_int_value( get('write_affinity_handoff_delete_count', 'auto'), None ) def __repr__(self): return '%s({}, {%s})' % (self.__class__.__name__, ', '.join( '%r: %r' % (k, getattr(self, k)) for k in ( 'sorting_method', 'read_affinity', 'write_affinity', 'write_affinity_node_count', 'write_affinity_handoff_delete_count'))) def __eq__(self, other): if not isinstance(other, ProxyOverrideOptions): return False return all(getattr(self, k) == getattr(other, k) for k in ( 'sorting_method', 'read_affinity', 'write_affinity', 'write_affinity_node_count', 'write_affinity_handoff_delete_count')) class Application(object): """WSGI application for the proxy server.""" def __init__(self, conf, memcache=None, logger=None, account_ring=None, container_ring=None): if conf is None: conf = {} if logger is None: self.logger = get_logger(conf, log_route='proxy-server') else: self.logger = logger self._override_options = self._load_per_policy_config(conf) self.sorts_by_timing = any(pc.sorting_method == 'timing' for pc in self._override_options.values()) self._error_limiting = {} swift_dir = conf.get('swift_dir', '/etc/swift') self.swift_dir = swift_dir self.node_timeout = float(conf.get('node_timeout', 10)) self.recoverable_node_timeout = float( conf.get('recoverable_node_timeout', self.node_timeout)) self.conn_timeout = float(conf.get('conn_timeout', 0.5)) self.client_timeout = int(conf.get('client_timeout', 60)) self.put_queue_depth = int(conf.get('put_queue_depth', 10)) self.object_chunk_size = int(conf.get('object_chunk_size', 65536)) self.client_chunk_size = int(conf.get('client_chunk_size', 65536)) self.trans_id_suffix = conf.get('trans_id_suffix', '') self.post_quorum_timeout = float(conf.get('post_quorum_timeout', 0.5)) self.error_suppression_interval = \ int(conf.get('error_suppression_interval', 60)) self.error_suppression_limit = \ int(conf.get('error_suppression_limit', 10)) self.recheck_container_existence = \ int(conf.get('recheck_container_existence', DEFAULT_RECHECK_CONTAINER_EXISTENCE)) self.recheck_account_existence = \ int(conf.get('recheck_account_existence', DEFAULT_RECHECK_ACCOUNT_EXISTENCE)) self.allow_account_management = \ 
            config_true_value(conf.get('allow_account_management', 'no'))
        self.container_ring = container_ring or Ring(swift_dir,
                                                     ring_name='container')
        self.account_ring = account_ring or Ring(swift_dir,
                                                 ring_name='account')
        # ensure rings are loaded for all configured storage policies
        for policy in POLICIES:
            policy.load_ring(swift_dir)
        self.obj_controller_router = ObjectControllerRouter()
        self.memcache = memcache
        mimetypes.init(mimetypes.knownfiles +
                       [os.path.join(swift_dir, 'mime.types')])
        self.account_autocreate = \
            config_true_value(conf.get('account_autocreate', 'no'))
        self.auto_create_account_prefix = (
            conf.get('auto_create_account_prefix') or '.')
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        self.max_containers_per_account = \
            int(conf.get('max_containers_per_account') or 0)
        self.max_containers_whitelist = [
            a.strip()
            for a in conf.get('max_containers_whitelist', '').split(',')
            if a.strip()]
        self.deny_host_headers = [
            host.strip() for host in
            conf.get('deny_host_headers', '').split(',') if host.strip()]
        self.log_handoffs = config_true_value(conf.get('log_handoffs',
                                                       'true'))
        self.cors_allow_origin = [
            a.strip()
            for a in conf.get('cors_allow_origin', '').split(',')
            if a.strip()]
        self.cors_expose_headers = [
            a.strip()
            for a in conf.get('cors_expose_headers', '').split(',')
            if a.strip()]
        self.strict_cors_mode = config_true_value(
            conf.get('strict_cors_mode', 't'))
        self.node_timings = {}
        self.timing_expiry = int(conf.get('timing_expiry', 300))
        self.concurrent_gets = \
            config_true_value(conf.get('concurrent_gets'))
        self.concurrency_timeout = float(conf.get('concurrency_timeout',
                                                  self.conn_timeout))
        value = conf.get('request_node_count', '2 * replicas').lower().split()
        if len(value) == 1:
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value
        elif len(value) == 3 and value[1] == '*' and value[2] == 'replicas':
            rnc_value = int(value[0])
            self.request_node_count = lambda replicas: rnc_value * replicas
        else:
            raise ValueError(
                'Invalid request_node_count value: %r' % ''.join(value))
        # swift_owner_headers are stripped by the account and container
        # controllers; we should extend header stripping to object controller
        # when a privileged object header is implemented.
        swift_owner_headers = conf.get(
            'swift_owner_headers',
            'x-container-read, x-container-write, '
            'x-container-sync-key, x-container-sync-to, '
            'x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, '
            'x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, '
            'x-account-access-control')
        self.swift_owner_headers = [
            name.strip().title()
            for name in swift_owner_headers.split(',') if name.strip()]
        # Initialization was successful, so now apply the client chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we can set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because it affects the client as well, currently, we use the
        # client chunk size as the governor and not the object chunk size.
socket._fileobject.default_bufsize = self.client_chunk_size self.expose_info = config_true_value( conf.get('expose_info', 'yes')) self.disallowed_sections = list_from_csv( conf.get('disallowed_sections', 'swift.valid_api_versions')) self.admin_key = conf.get('admin_key', None) register_swift_info( version=swift_version, strict_cors_mode=self.strict_cors_mode, policies=POLICIES.get_policy_info(), allow_account_management=self.allow_account_management, account_autocreate=self.account_autocreate, **constraints.EFFECTIVE_CONSTRAINTS) def _make_policy_override(self, policy, conf, override_conf): label_for_policy = _label_for_policy(policy) try: override = ProxyOverrideOptions(conf, override_conf) self.logger.debug("Loaded override config for %s: %r" % (label_for_policy, override)) return override except ValueError as err: raise ValueError(err.message + ' for %s' % label_for_policy) def _load_per_policy_config(self, conf): """ Loads per-policy config override values from proxy server conf file. :param conf: the proxy server local conf dict :return: a dict mapping :class:`BaseStoragePolicy` to an instance of :class:`ProxyOverrideOptions` that has policy-specific config attributes """ # the default options will be used when looking up a policy that had no # override options default_options = self._make_policy_override(None, conf, {}) overrides = defaultdict(lambda: default_options) # force None key to be set in the defaultdict so that it is found when # iterating over items in check_config overrides[None] = default_options for index, override_conf in conf.get('policy_config', {}).items(): try: index = int(index) except ValueError: # require policies to be referenced by index; using index *or* # name isn't possible because names such as "3" are allowed raise ValueError( 'Override config must refer to policy index: %r' % index) try: policy = POLICIES[index] except KeyError: raise ValueError( "No policy found for override config, index: %s" % index) override = self._make_policy_override(policy, conf, override_conf) overrides[policy] = override return overrides def get_policy_options(self, policy): """ Return policy specific options. :param policy: an instance of :class:`BaseStoragePolicy` :return: an instance of :class:`ProxyOverrideOptions` """ return self._override_options[policy] def check_config(self): """ Check the configuration for possible errors """ for policy, options in self._override_options.items(): if options.read_affinity and options.sorting_method != 'affinity': self.logger.warning( _("sorting_method is set to '%(method)s', not 'affinity'; " "%(label)s read_affinity setting will have no effect."), {'label': _label_for_policy(policy), 'method': options.sorting_method}) def get_object_ring(self, policy_idx): """ Get the ring object to use to handle a request based on its policy. :param policy_idx: policy index as defined in swift.conf :returns: appropriate ring object """ return POLICIES.get_object_ring(policy_idx, self.swift_dir) def get_controller(self, req): """ Get the controller to handle a request. 
:param req: the request :returns: tuple of (controller class, path dictionary) :raises ValueError: (thrown by split_path) if given invalid path """ if req.path == '/info': d = dict(version=None, expose_info=self.expose_info, disallowed_sections=self.disallowed_sections, admin_key=self.admin_key) return InfoController, d version, account, container, obj = split_path(req.path, 1, 4, True) d = dict(version=version, account_name=account, container_name=container, object_name=obj) if account and not valid_api_version(version): raise APIVersionError('Invalid path') if obj and container and account: info = get_container_info(req.environ, self) policy_index = req.headers.get('X-Backend-Storage-Policy-Index', info['storage_policy']) policy = POLICIES.get_by_index(policy_index) if not policy: # This indicates that a new policy has been created, # with rings, deployed, released (i.e. deprecated = # False), used by a client to create a container via # another proxy that was restarted after the policy # was released, and is now cached - all before this # worker was HUPed to stop accepting new # connections. There should never be an "unknown" # index - but when there is - it's probably operator # error and hopefully temporary. raise HTTPServiceUnavailable('Unknown Storage Policy') return self.obj_controller_router[policy], d elif container and account: return ContainerController, d elif account and not container and not obj: return AccountController, d return None, d def __call__(self, env, start_response): """ WSGI entry point. Wraps env in swob.Request object and passes it down. :param env: WSGI environment dictionary :param start_response: WSGI callable """ try: if self.memcache is None: self.memcache = cache_from_env(env, True) req = self.update_request(Request(env)) return self.handle_request(req)(env, start_response) except UnicodeError: err = HTTPPreconditionFailed( request=req, body='Invalid UTF8 or contains NULL') return err(env, start_response) except (Exception, Timeout): start_response('500 Server Error', [('Content-Type', 'text/plain')]) return ['Internal server error.\n'] def update_request(self, req): if 'x-storage-token' in req.headers and \ 'x-auth-token' not in req.headers: req.headers['x-auth-token'] = req.headers['x-storage-token'] return req def handle_request(self, req): """ Entry point for proxy server. Should return a WSGI-style callable (such as swob.Response). :param req: swob.Request object """ try: self.logger.set_statsd_prefix('proxy-server') if req.content_length and req.content_length < 0: self.logger.increment('errors') return HTTPBadRequest(request=req, body='Invalid Content-Length') try: if not check_utf8(req.path_info): self.logger.increment('errors') return HTTPPreconditionFailed( request=req, body='Invalid UTF8 or contains NULL') except UnicodeError: self.logger.increment('errors') return HTTPPreconditionFailed( request=req, body='Invalid UTF8 or contains NULL') try: controller, path_parts = self.get_controller(req) except APIVersionError: self.logger.increment('errors') return HTTPBadRequest(request=req) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if not controller: self.logger.increment('errors') return HTTPPreconditionFailed(request=req, body='Bad URL') if self.deny_host_headers and \ req.host.split(':')[0] in self.deny_host_headers: return HTTPForbidden(request=req, body='Invalid host header') self.logger.set_statsd_prefix('proxy-server.' 
                                          + controller.server_type.lower())
            controller = controller(self, **path_parts)
            if 'swift.trans_id' not in req.environ:
                # if this wasn't set by an earlier middleware, set it now
                trans_id_suffix = self.trans_id_suffix
                trans_id_extra = req.headers.get('x-trans-id-extra')
                if trans_id_extra:
                    trans_id_suffix += '-' + trans_id_extra[:32]
                trans_id = generate_trans_id(trans_id_suffix)
                req.environ['swift.trans_id'] = trans_id
                self.logger.txn_id = trans_id
            req.headers['x-trans-id'] = req.environ['swift.trans_id']
            controller.trans_id = req.environ['swift.trans_id']
            self.logger.client_ip = get_remote_client(req)
            if req.method not in controller.allowed_methods:
                return HTTPMethodNotAllowed(request=req, headers={
                    'Allow': ', '.join(controller.allowed_methods)})
            handler = getattr(controller, req.method)
            old_authorize = None
            if 'swift.authorize' in req.environ:
                # We call authorize before the handler, always. If
                # authorized, we remove the swift.authorize hook so it isn't
                # ever called again. If not authorized, we return the denial
                # unless the controller's method indicates it'd like to
                # gather more information and try again later.
                resp = req.environ['swift.authorize'](req)
                if not resp:
                    # No resp means authorized, no delayed recheck required.
                    old_authorize = req.environ['swift.authorize']
                else:
                    # Response indicates denial, but we might delay the
                    # denial and recheck later. If not delayed, return the
                    # error now.
                    if not getattr(handler, 'delay_denial', None):
                        return resp
            # Save off original request method (GET, POST, etc.) in case it
            # gets mutated during handling. This way logging can display the
            # method the client actually sent.
            req.environ.setdefault('swift.orig_req_method', req.method)
            try:
                if old_authorize:
                    req.environ.pop('swift.authorize', None)
                return handler(req)
            finally:
                if old_authorize:
                    req.environ['swift.authorize'] = old_authorize
        except HTTPException as error_response:
            return error_response
        except (Exception, Timeout):
            self.logger.exception(_('ERROR Unhandled exception in request'))
            return HTTPServerError(request=req)

    def sort_nodes(self, nodes, policy=None):
        """
        Sorts nodes in-place (and returns the sorted list) according to the
        configured strategy. The default "sorting" is to randomly shuffle
        the nodes. If the "timing" strategy is chosen, the nodes are sorted
        according to the stored timing data.

        :param nodes: a list of nodes
        :param policy: an instance of :class:`BaseStoragePolicy`
        """
        # In the case of timing sorting, shuffling ensures that close timings
        # (ie within the rounding resolution) won't prefer one over another.
        # Python's sort is stable (http://wiki.python.org/moin/HowTo/Sorting/)
        shuffle(nodes)
        policy_options = self.get_policy_options(policy)
        if policy_options.sorting_method == 'timing':
            now = time()

            def key_func(node):
                timing, expires = self.node_timings.get(node['ip'], (-1.0, 0))
                return timing if expires > now else -1.0
            nodes.sort(key=key_func)
        elif policy_options.sorting_method == 'affinity':
            nodes.sort(key=policy_options.read_affinity_sort_key)
        return nodes

    def set_node_timing(self, node, timing):
        if not self.sorts_by_timing:
            return
        now = time()
        timing = round(timing, 3)  # sort timings to the millisecond
        self.node_timings[node['ip']] = (timing, now + self.timing_expiry)

    def _error_limit_node_key(self, node):
        return "{ip}:{port}/{device}".format(**node)

    def error_limited(self, node):
        """
        Check if the node is currently error limited.
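        For example, with the stock defaults of error_suppression_limit = 10
        and error_suppression_interval = 60, a node is skipped once it has
        accumulated more than 10 errors, and remains skipped until 60
        seconds have elapsed since its last recorded error.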
:param node: dictionary of node to check :returns: True if error limited, False otherwise """ now = time() node_key = self._error_limit_node_key(node) error_stats = self._error_limiting.get(node_key) if error_stats is None or 'errors' not in error_stats: return False if 'last_error' in error_stats and error_stats['last_error'] < \ now - self.error_suppression_interval: self._error_limiting.pop(node_key, None) return False limited = error_stats['errors'] > self.error_suppression_limit if limited: self.logger.debug( _('Node error limited %(ip)s:%(port)s (%(device)s)'), node) return limited def error_limit(self, node, msg): """ Mark a node as error limited. This immediately pretends the node received enough errors to trigger error suppression. Use this for errors like Insufficient Storage. For other errors use :func:`error_occurred`. :param node: dictionary of node to error limit :param msg: error message """ node_key = self._error_limit_node_key(node) error_stats = self._error_limiting.setdefault(node_key, {}) error_stats['errors'] = self.error_suppression_limit + 1 error_stats['last_error'] = time() self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'), {'msg': msg, 'ip': node['ip'], 'port': node['port'], 'device': node['device']}) def _incr_node_errors(self, node): node_key = self._error_limit_node_key(node) error_stats = self._error_limiting.setdefault(node_key, {}) error_stats['errors'] = error_stats.get('errors', 0) + 1 error_stats['last_error'] = time() def error_occurred(self, node, msg): """ Handle logging, and handling of errors. :param node: dictionary of node to handle errors for :param msg: error message """ self._incr_node_errors(node) self.logger.error(_('%(msg)s %(ip)s:%(port)s/%(device)s'), {'msg': msg.decode('utf-8'), 'ip': node['ip'], 'port': node['port'], 'device': node['device']}) def iter_nodes(self, ring, partition, node_iter=None, policy=None): return NodeIter(self, ring, partition, node_iter=node_iter, policy=policy) def exception_occurred(self, node, typ, additional_info, **kwargs): """ Handle logging of generic exceptions. :param node: dictionary of node to log the error for :param typ: server type :param additional_info: additional information to log """ self._incr_node_errors(node) if 'level' in kwargs: log = functools.partial(self.logger.log, kwargs.pop('level')) if 'exc_info' not in kwargs: kwargs['exc_info'] = sys.exc_info() else: log = self.logger.exception log(_('ERROR with %(type)s server %(ip)s:%(port)s/%(device)s' ' re: %(info)s'), {'type': typ, 'ip': node['ip'], 'port': node['port'], 'device': node['device'], 'info': additional_info.decode('utf-8')}, **kwargs) def modify_wsgi_pipeline(self, pipe): """ Called during WSGI pipeline creation. Modifies the WSGI pipeline context to ensure that mandatory middleware is present in the pipeline. :param pipe: A PipelineWrapper object """ pipeline_was_modified = False for filter_spec in reversed(required_filters): filter_name = filter_spec['name'] if filter_name not in pipe: afters = filter_spec.get('after_fn', lambda _junk: [])(pipe) insert_at = 0 for after in afters: try: insert_at = max(insert_at, pipe.index(after) + 1) except ValueError: # not in pipeline; ignore it pass self.logger.info( _('Adding required filter %(filter_name)s to pipeline at ' 'position %(insert_at)d'), {'filter_name': filter_name, 'insert_at': insert_at}) ctx = pipe.create_filter(filter_name) pipe.insert_filter(ctx, index=insert_at) pipeline_was_modified = True if pipeline_was_modified: self.logger.info(_("Pipeline was modified. 
" "New pipeline is \"%s\"."), pipe) else: self.logger.debug(_("Pipeline is \"%s\""), pipe) def parse_per_policy_config(conf): """ Search the config file for any per-policy config sections and load those sections to a dict mapping policy reference (name or index) to policy options. :param conf: the proxy server conf dict :return: a dict mapping policy reference -> dict of policy options :raises ValueError: if a policy config section has an invalid name """ policy_config = {} all_conf = readconf(conf['__file__']) policy_section_prefix = conf['__name__'] + ':policy:' for section, options in all_conf.items(): if not section.startswith(policy_section_prefix): continue policy_ref = section[len(policy_section_prefix):] policy_config[policy_ref] = options return policy_config def app_factory(global_conf, **local_conf): """paste.deploy app factory for creating WSGI proxy apps.""" conf = global_conf.copy() conf.update(local_conf) # Do this here so that the use of conf['__file__'] and conf['__name__'] is # isolated from the Application. This also simplifies tests that construct # an Application instance directly. conf['policy_config'] = parse_per_policy_config(conf) app = Application(conf) app.check_config() return app swift-2.17.1/swift/proxy/__init__.py0000666000175000017500000000000013435012003017365 0ustar zuulzuul00000000000000swift-2.17.1/swift/proxy/controllers/0000775000175000017500000000000013435012120017632 5ustar zuulzuul00000000000000swift-2.17.1/swift/proxy/controllers/__init__.py0000666000175000017500000000172613435012003021753 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swift.proxy.controllers.base import Controller from swift.proxy.controllers.info import InfoController from swift.proxy.controllers.obj import ObjectControllerRouter from swift.proxy.controllers.account import AccountController from swift.proxy.controllers.container import ContainerController __all__ = [ 'AccountController', 'ContainerController', 'Controller', 'InfoController', 'ObjectControllerRouter', ] swift-2.17.1/swift/proxy/controllers/container.py0000666000175000017500000002577113435012015022207 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from swift import gettext_ as _ from six.moves.urllib.parse import unquote from swift.common.utils import public, csv_append, Timestamp from swift.common.constraints import check_metadata from swift.common.http import HTTP_ACCEPTED, is_success from swift.proxy.controllers.base import Controller, delay_denial, \ cors_validation, set_info_cache, clear_info_cache from swift.common.storage_policy import POLICIES from swift.common.swob import HTTPBadRequest, HTTPForbidden, \ HTTPNotFound, HTTPServerError class ContainerController(Controller): """WSGI controller for container requests""" server_type = 'Container' # Ensure these are all lowercase pass_through_headers = ['x-container-read', 'x-container-write', 'x-container-sync-key', 'x-container-sync-to', 'x-versions-location'] def __init__(self, app, account_name, container_name, **kwargs): super(ContainerController, self).__init__(app) self.account_name = unquote(account_name) self.container_name = unquote(container_name) def _x_remove_headers(self): st = self.server_type.lower() return ['x-remove-%s-read' % st, 'x-remove-%s-write' % st, 'x-remove-versions-location', 'x-remove-%s-sync-key' % st, 'x-remove-%s-sync-to' % st] def _convert_policy_to_index(self, req): """ Helper method to convert a policy name (from a request from a client) to a policy index (for a request to a backend). :param req: incoming request """ policy_name = req.headers.get('X-Storage-Policy') if not policy_name: return policy = POLICIES.get_by_name(policy_name) if not policy: raise HTTPBadRequest(request=req, content_type="text/plain", body=("Invalid %s '%s'" % ('X-Storage-Policy', policy_name))) if policy.is_deprecated: body = 'Storage Policy %r is deprecated' % (policy.name) raise HTTPBadRequest(request=req, body=body) return int(policy) def clean_acls(self, req): if 'swift.clean_acl' in req.environ: for header in ('x-container-read', 'x-container-write'): if header in req.headers: try: req.headers[header] = \ req.environ['swift.clean_acl'](header, req.headers[header]) except ValueError as err: return HTTPBadRequest(request=req, body=str(err)) return None def GETorHEAD(self, req): """Handler for HTTP GET/HEAD requests.""" ai = self.account_info(self.account_name, req) if not ai[1]: if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: # Don't cache this. It doesn't reflect the state of the # container, just that the user can't access it. return aresp # Don't cache this. The lack of account will be cached, and that # is sufficient. return HTTPNotFound(request=req) part = self.app.container_ring.get_part( self.account_name, self.container_name) concurrency = self.app.container_ring.replica_count \ if self.app.concurrent_gets else 1 node_iter = self.app.iter_nodes(self.app.container_ring, part) params = req.params params['format'] = 'json' req.params = params resp = self.GETorHEAD_base( req, _('Container'), node_iter, part, req.swift_entity_path, concurrency) # Cache this. We just made a request to a storage node and got # up-to-date information for the container. resp.headers['X-Backend-Recheck-Container-Existence'] = str( self.app.recheck_container_existence) set_info_cache(self.app, req.environ, self.account_name, self.container_name, resp) if 'swift.authorize' in req.environ: req.acl = resp.headers.get('x-container-read') aresp = req.environ['swift.authorize'](req) if aresp: # Don't cache this. It doesn't reflect the state of the # container, just that the user can't access it. 
return aresp if not req.environ.get('swift_owner', False): for key in self.app.swift_owner_headers: if key in resp.headers: del resp.headers[key] return resp @public @delay_denial @cors_validation def GET(self, req): """Handler for HTTP GET requests.""" return self.GETorHEAD(req) @public @delay_denial @cors_validation def HEAD(self, req): """Handler for HTTP HEAD requests.""" return self.GETorHEAD(req) @public @cors_validation def PUT(self, req): """HTTP PUT request handler.""" error_response = \ self.clean_acls(req) or check_metadata(req, 'container') if error_response: return error_response policy_index = self._convert_policy_to_index(req) if not req.environ.get('swift_owner'): for key in self.app.swift_owner_headers: req.headers.pop(key, None) length_limit = self.get_name_length_limit() if len(self.container_name) > length_limit: resp = HTTPBadRequest(request=req) resp.body = 'Container name length of %d longer than %d' % \ (len(self.container_name), length_limit) return resp account_partition, accounts, container_count = \ self.account_info(self.account_name, req) if not accounts and self.app.account_autocreate: if not self.autocreate_account(req, self.account_name): return HTTPServerError(request=req) account_partition, accounts, container_count = \ self.account_info(self.account_name, req) if not accounts: return HTTPNotFound(request=req) if 0 < self.app.max_containers_per_account <= container_count and \ self.account_name not in self.app.max_containers_whitelist: container_info = \ self.container_info(self.account_name, self.container_name, req) if not is_success(container_info.get('status')): resp = HTTPForbidden(request=req) resp.body = 'Reached container limit of %s' % \ self.app.max_containers_per_account return resp container_partition, containers = self.app.container_ring.get_nodes( self.account_name, self.container_name) headers = self._backend_requests(req, len(containers), account_partition, accounts, policy_index) resp = self.make_requests( req, self.app.container_ring, container_partition, 'PUT', req.swift_entity_path, headers) clear_info_cache(self.app, req.environ, self.account_name, self.container_name) return resp @public @cors_validation def POST(self, req): """HTTP POST request handler.""" error_response = \ self.clean_acls(req) or check_metadata(req, 'container') if error_response: return error_response if not req.environ.get('swift_owner'): for key in self.app.swift_owner_headers: req.headers.pop(key, None) account_partition, accounts, container_count = \ self.account_info(self.account_name, req) if not accounts: return HTTPNotFound(request=req) container_partition, containers = self.app.container_ring.get_nodes( self.account_name, self.container_name) headers = self.generate_request_headers(req, transfer=True) clear_info_cache(self.app, req.environ, self.account_name, self.container_name) resp = self.make_requests( req, self.app.container_ring, container_partition, 'POST', req.swift_entity_path, [headers] * len(containers)) return resp @public @cors_validation def DELETE(self, req): """HTTP DELETE request handler.""" account_partition, accounts, container_count = \ self.account_info(self.account_name, req) if not accounts: return HTTPNotFound(request=req) container_partition, containers = self.app.container_ring.get_nodes( self.account_name, self.container_name) headers = self._backend_requests(req, len(containers), account_partition, accounts) clear_info_cache(self.app, req.environ, self.account_name, self.container_name) resp = self.make_requests( req, 
self.app.container_ring, container_partition, 'DELETE', req.swift_entity_path, headers) # Indicates no server had the container if resp.status_int == HTTP_ACCEPTED: return HTTPNotFound(request=req) return resp def _backend_requests(self, req, n_outgoing, account_partition, accounts, policy_index=None): additional = {'X-Timestamp': Timestamp.now().internal} if policy_index is None: additional['X-Backend-Storage-Policy-Default'] = \ int(POLICIES.default) else: additional['X-Backend-Storage-Policy-Index'] = str(policy_index) headers = [self.generate_request_headers(req, transfer=True, additional=additional) for _junk in range(n_outgoing)] for i, account in enumerate(accounts): i = i % len(headers) headers[i]['X-Account-Partition'] = account_partition headers[i]['X-Account-Host'] = csv_append( headers[i].get('X-Account-Host'), '%(ip)s:%(port)s' % account) headers[i]['X-Account-Device'] = csv_append( headers[i].get('X-Account-Device'), account['device']) return headers swift-2.17.1/swift/proxy/controllers/obj.py0000666000175000017500000036105313435012015020773 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE: swift_conn # You'll see swift_conn passed around a few places in this file. This is the # source bufferedhttp connection of whatever it is attached to. # It is used when early termination of reading from the connection should # happen, such as when a range request is satisfied but there's still more the # source connection would like to send. To prevent having to read all the data # that could be left, the source connection can be .close() and then reads # commence to empty out any buffers. # These shenanigans are to ensure all related objects can be garbage # collected. We've seen objects hang around forever otherwise. 
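# A minimal sketch of that pattern (the names here are illustrative, not
# the actual helpers defined below):
#
#     source_resp.swift_conn.close()   # stop the server sending more data
#     while source_resp.read(65536):   # drain whatever is already buffered
#         pass
#
# after which nothing keeps a reference to the half-read response, so the
# related objects become collectable.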
from six.moves.urllib.parse import unquote import collections import itertools import json import mimetypes import time import math import random from hashlib import md5 from swift import gettext_ as _ from greenlet import GreenletExit from eventlet import GreenPile from eventlet.queue import Queue from eventlet.timeout import Timeout from swift.common.utils import ( clean_content_type, config_true_value, ContextPool, csv_append, GreenAsyncPile, GreenthreadSafeIterator, Timestamp, normalize_delete_at_timestamp, public, get_expirer_container, document_iters_to_http_response_body, parse_content_range, quorum_size, reiterate, close_if_possible, safe_json_loads) from swift.common.bufferedhttp import http_connect from swift.common.constraints import check_metadata, check_object_creation from swift.common import constraints from swift.common.exceptions import ChunkReadTimeout, \ ChunkWriteTimeout, ConnectionTimeout, ResponseTimeout, \ InsufficientStorage, FooterNotSupported, MultiphasePUTNotSupported, \ PutterConnectError, ChunkReadError from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import ( is_informational, is_success, is_client_error, is_server_error, is_redirection, HTTP_CONTINUE, HTTP_INTERNAL_SERVER_ERROR, HTTP_SERVICE_UNAVAILABLE, HTTP_INSUFFICIENT_STORAGE, HTTP_PRECONDITION_FAILED, HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE) from swift.common.storage_policy import (POLICIES, REPL_POLICY, EC_POLICY, ECDriverError, PolicyError) from swift.proxy.controllers.base import Controller, delay_denial, \ cors_validation, ResumingGetter, update_headers from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPNotFound, \ HTTPPreconditionFailed, HTTPRequestEntityTooLarge, HTTPRequestTimeout, \ HTTPServerError, HTTPServiceUnavailable, HTTPClientDisconnect, \ HTTPUnprocessableEntity, Response, HTTPException, \ HTTPRequestedRangeNotSatisfiable, Range, HTTPInternalServerError from swift.common.request_helpers import update_etag_is_at_header, \ resolve_etag_is_at_header def check_content_type(req): if not req.environ.get('swift.content_type_overridden') and \ ';' in req.headers.get('content-type', ''): for param in req.headers['content-type'].split(';')[1:]: if param.lstrip().startswith('swift_'): return HTTPBadRequest("Invalid Content-Type, " "swift_* is not a valid parameter name.") return None def num_container_updates(container_replicas, container_quorum, object_replicas, object_quorum): """ We need to send container updates via enough object servers such that, if the object PUT succeeds, then the container update is durable (either it's synchronously updated or written to async pendings). Define: Qc = the quorum size for the container ring Qo = the quorum size for the object ring Rc = the replica count for the container ring Ro = the replica count (or EC N+K) for the object ring A durable container update is one that's made it to at least Qc nodes. To always be durable, we have to send enough container updates so that, if only Qo object PUTs succeed, and all the failed object PUTs had container updates, at least Qc updates remain. Since (Ro - Qo) object PUTs may fail, we must have at least Qc + Ro - Qo container updates to ensure that Qc of them remain. Also, each container replica is named in at least one object PUT request so that, when all requests succeed, no work is generated for the container replicator. Thus, at least Rc updates are necessary. 
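    As a worked example (hypothetical numbers): with a 3-replica container
    ring (Rc = 3, Qc = 2) and an object ring with Ro = 12 and Qo = 9, we
    must send max(Qc + Ro - Qo, Rc) = max(2 + 12 - 9, 3) = 5 container
    updates.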
:param container_replicas: replica count for the container ring (Rc) :param container_quorum: quorum size for the container ring (Qc) :param object_replicas: replica count for the object ring (Ro) :param object_quorum: quorum size for the object ring (Qo) """ return max( # Qc + Ro - Qo container_quorum + object_replicas - object_quorum, # Rc container_replicas) class ObjectControllerRouter(object): policy_type_to_controller_map = {} @classmethod def register(cls, policy_type): """ Decorator for Storage Policy implementations to register their ObjectController implementations. This also fills in a policy_type attribute on the class. """ def register_wrapper(controller_cls): if policy_type in cls.policy_type_to_controller_map: raise PolicyError( '%r is already registered for the policy_type %r' % ( cls.policy_type_to_controller_map[policy_type], policy_type)) cls.policy_type_to_controller_map[policy_type] = controller_cls controller_cls.policy_type = policy_type return controller_cls return register_wrapper def __init__(self): self.policy_to_controller_cls = {} for policy in POLICIES: self.policy_to_controller_cls[policy] = \ self.policy_type_to_controller_map[policy.policy_type] def __getitem__(self, policy): return self.policy_to_controller_cls[policy] class BaseObjectController(Controller): """Base WSGI controller for object requests.""" server_type = 'Object' def __init__(self, app, account_name, container_name, object_name, **kwargs): super(BaseObjectController, self).__init__(app) self.account_name = unquote(account_name) self.container_name = unquote(container_name) self.object_name = unquote(object_name) def iter_nodes_local_first(self, ring, partition, policy=None, local_handoffs_first=False): """ Yields nodes for a ring partition. If the 'write_affinity' setting is non-empty, then this will yield N local nodes (as defined by the write_affinity setting) first, then the rest of the nodes as normal. It is a re-ordering of the nodes such that the local ones come first; no node is omitted. The effect is that the request will be serviced by local object servers first, but nonlocal ones will be employed if not enough local ones are available. :param ring: ring to get nodes from :param partition: ring partition to yield nodes for :param policy: optional, an instance of :class:`~swift.common.storage_policy.BaseStoragePolicy` :param local_handoffs_first: optional, if True prefer primaries and local handoff nodes first before looking elsewhere. 
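        For example (illustrative numbers): if there are three primaries of
        which one is local, local_handoffs_first is True and
        write_affinity_handoff_delete_count is unset, the first two local
        handoff nodes are yielded immediately after the primaries, before
        any remote nodes.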
""" policy_options = self.app.get_policy_options(policy) is_local = policy_options.write_affinity_is_local_fn if is_local is None: return self.app.iter_nodes(ring, partition, policy=policy) primary_nodes = ring.get_part_nodes(partition) handoff_nodes = ring.get_more_nodes(partition) all_nodes = itertools.chain(primary_nodes, handoff_nodes) if local_handoffs_first: num_locals = policy_options.write_affinity_handoff_delete_count if num_locals is None: local_primaries = [node for node in primary_nodes if is_local(node)] num_locals = len(primary_nodes) - len(local_primaries) first_local_handoffs = list(itertools.islice( (node for node in handoff_nodes if is_local(node)), num_locals) ) preferred_nodes = primary_nodes + first_local_handoffs else: num_locals = policy_options.write_affinity_node_count_fn( len(primary_nodes) ) preferred_nodes = list(itertools.islice( (node for node in all_nodes if is_local(node)), num_locals) ) # refresh it; it moved when we computed preferred_nodes handoff_nodes = ring.get_more_nodes(partition) all_nodes = itertools.chain(primary_nodes, handoff_nodes) node_iter = itertools.chain( preferred_nodes, (node for node in all_nodes if node not in preferred_nodes) ) return self.app.iter_nodes(ring, partition, node_iter=node_iter, policy=policy) def GETorHEAD(self, req): """Handle HTTP GET or HEAD requests.""" container_info = self.container_info( self.account_name, self.container_name, req) req.acl = container_info['read_acl'] # pass the policy index to storage nodes via req header policy_index = req.headers.get('X-Backend-Storage-Policy-Index', container_info['storage_policy']) policy = POLICIES.get_by_index(policy_index) obj_ring = self.app.get_object_ring(policy_index) req.headers['X-Backend-Storage-Policy-Index'] = policy_index if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: return aresp partition = obj_ring.get_part( self.account_name, self.container_name, self.object_name) node_iter = self.app.iter_nodes(obj_ring, partition, policy=policy) resp = self._get_or_head_response(req, node_iter, partition, policy) if ';' in resp.headers.get('content-type', ''): resp.content_type = clean_content_type( resp.headers['content-type']) return resp @public @cors_validation @delay_denial def GET(self, req): """Handler for HTTP GET requests.""" return self.GETorHEAD(req) @public @cors_validation @delay_denial def HEAD(self, req): """Handler for HTTP HEAD requests.""" return self.GETorHEAD(req) @public @cors_validation @delay_denial def POST(self, req): """HTTP POST request handler.""" container_info = self.container_info( self.account_name, self.container_name, req) container_partition = container_info['partition'] container_nodes = container_info['nodes'] req.acl = container_info['write_acl'] if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: return aresp if not container_nodes: return HTTPNotFound(request=req) error_response = check_metadata(req, 'object') if error_response: return error_response req.headers['X-Timestamp'] = Timestamp.now().internal req, delete_at_container, delete_at_part, \ delete_at_nodes = self._config_obj_expiration(req) # pass the policy index to storage nodes via req header policy_index = req.headers.get('X-Backend-Storage-Policy-Index', container_info['storage_policy']) obj_ring = self.app.get_object_ring(policy_index) req.headers['X-Backend-Storage-Policy-Index'] = policy_index next_part_power = getattr(obj_ring, 'next_part_power', None) if next_part_power: 
req.headers['X-Backend-Next-Part-Power'] = next_part_power partition, nodes = obj_ring.get_nodes( self.account_name, self.container_name, self.object_name) headers = self._backend_requests( req, len(nodes), container_partition, container_nodes, delete_at_container, delete_at_part, delete_at_nodes) return self._post_object(req, obj_ring, partition, headers) def _backend_requests(self, req, n_outgoing, container_partition, containers, delete_at_container=None, delete_at_partition=None, delete_at_nodes=None): policy_index = req.headers['X-Backend-Storage-Policy-Index'] policy = POLICIES.get_by_index(policy_index) headers = [self.generate_request_headers(req, additional=req.headers) for _junk in range(n_outgoing)] def set_container_update(index, container): headers[index]['X-Container-Partition'] = container_partition headers[index]['X-Container-Host'] = csv_append( headers[index].get('X-Container-Host'), '%(ip)s:%(port)s' % container) headers[index]['X-Container-Device'] = csv_append( headers[index].get('X-Container-Device'), container['device']) def set_delete_at_headers(index, delete_at_node): headers[index]['X-Delete-At-Container'] = delete_at_container headers[index]['X-Delete-At-Partition'] = delete_at_partition headers[index]['X-Delete-At-Host'] = csv_append( headers[index].get('X-Delete-At-Host'), '%(ip)s:%(port)s' % delete_at_node) headers[index]['X-Delete-At-Device'] = csv_append( headers[index].get('X-Delete-At-Device'), delete_at_node['device']) n_updates_needed = num_container_updates( len(containers), quorum_size(len(containers)), n_outgoing, policy.quorum) container_iter = itertools.cycle(containers) dan_iter = itertools.cycle(delete_at_nodes or []) existing_updates = 0 while existing_updates < n_updates_needed: index = existing_updates % n_outgoing set_container_update(index, next(container_iter)) if delete_at_nodes: # We reverse the index in order to distribute the updates # across all nodes. set_delete_at_headers(n_outgoing - 1 - index, next(dan_iter)) existing_updates += 1 # Keep the number of expirer-queue deletes to a reasonable number. # # In the best case, at least one object server writes out an # async_pending for an expirer-queue update. In the worst case, no # object server does so, and an expirer-queue row remains that # refers to an already-deleted object. In this case, upon attempting # to delete the object, the object expirer will notice that the # object does not exist and then remove the row from the expirer # queue. # # In other words: expirer-queue updates on object DELETE are nice to # have, but not strictly necessary for correct operation. # # Also, each queue update results in an async_pending record, which # causes the object updater to talk to all container servers. If we # have N async_pendings and Rc container replicas, we cause N * Rc # requests from object updaters to container servers (possibly more, # depending on retries). Thus, it is helpful to keep this number # small. 
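        # As a rough illustration (hypothetical numbers): with 2 queue
        # updates and 3 container replicas, a DELETE produces at most
        # 2 * 3 = 6 updater-to-container-server requests, instead of one
        # async_pending per object server.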
n_desired_queue_updates = 2 for i in range(len(headers)): headers[i].setdefault('X-Backend-Clean-Expiring-Object-Queue', 't' if i < n_desired_queue_updates else 'f') return headers def _get_conn_response(self, putter, path, logger_thread_locals, final_phase, **kwargs): self.app.logger.thread_locals = logger_thread_locals try: resp = putter.await_response( self.app.node_timeout, not final_phase) except (Exception, Timeout): resp = None if final_phase: status_type = 'final' else: status_type = 'commit' self.app.exception_occurred( putter.node, _('Object'), _('Trying to get %(status_type)s status of PUT to %(path)s') % {'status_type': status_type, 'path': path}) return (putter, resp) def _have_adequate_put_responses(self, statuses, num_nodes, min_responses): """ Test for sufficient PUT responses from backend nodes to proceed with PUT handling. :param statuses: a list of response statuses. :param num_nodes: number of backend nodes to which PUT requests may be issued. :param min_responses: (optional) minimum number of nodes required to have responded with satisfactory status code. :return: True if sufficient backend responses have returned a satisfactory status code. """ raise NotImplementedError def _get_put_responses(self, req, putters, num_nodes, final_phase=True, min_responses=None): """ Collect object responses to a PUT request and determine if a satisfactory number of nodes have returned success. Returns lists of accumulated status codes, reasons, bodies and etags. :param req: the request :param putters: list of putters for the request :param num_nodes: number of nodes involved :param final_phase: boolean indicating if this is the last phase :param min_responses: minimum needed when not requiring quorum :return: a tuple of lists of status codes, reasons, bodies and etags. The list of bodies and etags is only populated for the final phase of a PUT transaction. 
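        For example, in the replicated case a PUT to three nodes may be
        deemed successful once two of them (quorum_size(3)) have returned a
        satisfactory status; any still-pending putters are then given
        post_quorum_timeout seconds to finish.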
""" statuses = [] reasons = [] bodies = [] etags = set() pile = GreenAsyncPile(len(putters)) for putter in putters: if putter.failed: continue pile.spawn(self._get_conn_response, putter, req.path, self.app.logger.thread_locals, final_phase=final_phase) def _handle_response(putter, response): statuses.append(response.status) reasons.append(response.reason) if final_phase: body = response.read() else: body = '' bodies.append(body) if response.status == HTTP_INSUFFICIENT_STORAGE: putter.failed = True self.app.error_limit(putter.node, _('ERROR Insufficient Storage')) elif response.status >= HTTP_INTERNAL_SERVER_ERROR: putter.failed = True self.app.error_occurred( putter.node, _('ERROR %(status)d %(body)s From Object Server ' 're: %(path)s') % {'status': response.status, 'body': body[:1024], 'path': req.path}) elif is_success(response.status): etags.add(response.getheader('etag').strip('"')) for (putter, response) in pile: if response: _handle_response(putter, response) if self._have_adequate_put_responses( statuses, num_nodes, min_responses): break else: putter.failed = True # give any pending requests *some* chance to finish finished_quickly = pile.waitall(self.app.post_quorum_timeout) for (putter, response) in finished_quickly: if response: _handle_response(putter, response) if final_phase: while len(statuses) < num_nodes: statuses.append(HTTP_SERVICE_UNAVAILABLE) reasons.append('') bodies.append('') return statuses, reasons, bodies, etags def _config_obj_expiration(self, req): delete_at_container = None delete_at_part = None delete_at_nodes = None req = constraints.check_delete_headers(req) if 'x-delete-at' in req.headers: x_delete_at = int(normalize_delete_at_timestamp( int(req.headers['x-delete-at']))) req.environ.setdefault('swift.log_info', []).append( 'x-delete-at:%s' % x_delete_at) delete_at_container = get_expirer_container( x_delete_at, self.app.expiring_objects_container_divisor, self.account_name, self.container_name, self.object_name) delete_at_part, delete_at_nodes = \ self.app.container_ring.get_nodes( self.app.expiring_objects_account, delete_at_container) return req, delete_at_container, delete_at_part, delete_at_nodes def _update_content_type(self, req): # Sometimes the 'content-type' header exists, but is set to None. detect_content_type = \ config_true_value(req.headers.get('x-detect-content-type')) if detect_content_type or not req.headers.get('content-type'): guessed_type, _junk = mimetypes.guess_type(req.path_info) req.headers['Content-Type'] = guessed_type or \ 'application/octet-stream' if detect_content_type: req.headers.pop('x-detect-content-type') def _update_x_timestamp(self, req): # The container sync feature includes an x-timestamp header with # requests. If present this is checked and preserved, otherwise a fresh # timestamp is added. if 'x-timestamp' in req.headers: try: req_timestamp = Timestamp(req.headers['X-Timestamp']) except ValueError: raise HTTPBadRequest( request=req, content_type='text/plain', body='X-Timestamp should be a UNIX timestamp float value; ' 'was %r' % req.headers['x-timestamp']) req.headers['X-Timestamp'] = req_timestamp.internal else: req.headers['X-Timestamp'] = Timestamp.now().internal return None def _check_failure_put_connections(self, putters, req, min_conns): """ Identify any failed connections and check minimum connection count. 
:param putters: a list of Putter instances :param req: request :param min_conns: minimum number of putter connections required """ if req.if_none_match is not None and '*' in req.if_none_match: statuses = [ putter.resp.status for putter in putters if putter.resp] if HTTP_PRECONDITION_FAILED in statuses: # If we find any copy of the file, it shouldn't be uploaded self.app.logger.debug( _('Object PUT returning 412, %(statuses)r'), {'statuses': statuses}) raise HTTPPreconditionFailed(request=req) if any(putter for putter in putters if putter.resp and putter.resp.status == HTTP_CONFLICT): status_times = ['%(status)s (%(timestamp)s)' % { 'status': putter.resp.status, 'timestamp': HeaderKeyDict( putter.resp.getheaders()).get( 'X-Backend-Timestamp', 'unknown') } for putter in putters if putter.resp] self.app.logger.debug( _('Object PUT returning 202 for 409: ' '%(req_timestamp)s <= %(timestamps)r'), {'req_timestamp': req.timestamp.internal, 'timestamps': ', '.join(status_times)}) raise HTTPAccepted(request=req) self._check_min_conn(req, putters, min_conns) def _make_putter(self, node, part, req, headers): """ Returns a putter object for handling streaming of object to object servers. Subclasses must implement this method. :param node: a storage node :param part: ring partition number :param req: a swob Request :param headers: request headers :return: an instance of a Putter """ raise NotImplementedError def _connect_put_node(self, nodes, part, req, headers, logger_thread_locals): """ Make connection to storage nodes Connects to the first working node that it finds in nodes iter and sends over the request headers. Returns a Putter to handle the rest of the streaming, or None if no working nodes were found. :param nodes: an iterator of the target storage nodes :param part: ring partition number :param req: a swob Request :param headers: request headers :param logger_thread_locals: The thread local values to be set on the self.app.logger to retain transaction logging information. 
        :return: an instance of a Putter
        """
        self.app.logger.thread_locals = logger_thread_locals
        for node in nodes:
            try:
                putter = self._make_putter(node, part, req, headers)
                self.app.set_node_timing(node, putter.connect_duration)
                return putter
            except InsufficientStorage:
                self.app.error_limit(node, _('ERROR Insufficient Storage'))
            except PutterConnectError as e:
                self.app.error_occurred(
                    node, _('ERROR %(status)d Expect: 100-continue '
                            'From Object Server') % {
                                'status': e.status})
            except (Exception, Timeout):
                self.app.exception_occurred(
                    node, _('Object'),
                    _('Expect: 100-continue on %s') %
                    req.swift_entity_path)

    def _get_put_connections(self, req, nodes, partition,
                             outgoing_headers, policy):
        """
        Establish connections to storage nodes for PUT request
        """
        obj_ring = policy.object_ring
        node_iter = GreenthreadSafeIterator(
            self.iter_nodes_local_first(obj_ring, partition, policy=policy))
        pile = GreenPile(len(nodes))

        for nheaders in outgoing_headers:
            # RFC2616:8.2.3 disallows 100-continue without a body
            if (req.content_length > 0) or req.is_chunked:
                nheaders['Expect'] = '100-continue'
            pile.spawn(self._connect_put_node, node_iter, partition,
                       req, nheaders, self.app.logger.thread_locals)

        putters = [putter for putter in pile if putter]

        return putters

    def _check_min_conn(self, req, putters, min_conns, msg=None):
        msg = msg or _('Object PUT returning 503, %(conns)s/%(nodes)s '
                       'required connections')

        if len(putters) < min_conns:
            self.app.logger.error(msg,
                                  {'conns': len(putters),
                                   'nodes': min_conns})
            raise HTTPServiceUnavailable(request=req)

    def _get_footers(self, req):
        footers = HeaderKeyDict()
        footer_callback = req.environ.get(
            'swift.callback.update_footers', lambda _footer: None)
        footer_callback(footers)
        return footers

    def _store_object(self, req, data_source, nodes, partition,
                      outgoing_headers):
        """
        This method is responsible for establishing connection with storage
        nodes and sending the data to each one of those nodes. The process
        of transferring data is specific to each Storage Policy, thus it is
        required for each policy specific ObjectController to provide their
        own implementation of this method.

        :param req: the PUT Request
        :param data_source: an iterator of the source of the data
        :param nodes: an iterator of the target storage nodes
        :param partition: ring partition number
        :param outgoing_headers: system headers to storage nodes
        :return: Response object
        """
        raise NotImplementedError()

    def _delete_object(self, req, obj_ring, partition, headers):
        """Delete object considering write-affinity.

        When deleting an object in a write-affinity deployment, also send
        DELETE requests to the configured number of local handoff nodes,
        instead of only to the primary nodes. When write affinity is
        disabled, only the primary nodes are used, as before.
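        For example (illustrative values): with three primary nodes of
        which two are local and write_affinity_handoff_delete_count unset,
        one extra local handoff is counted, so the DELETE fans out to four
        nodes rather than three.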
:param req: the DELETE Request :param obj_ring: the object ring :param partition: ring partition number :param headers: system headers to storage nodes :return: Response object """ policy_index = req.headers.get('X-Backend-Storage-Policy-Index') policy = POLICIES.get_by_index(policy_index) node_count = None node_iterator = None policy_options = self.app.get_policy_options(policy) is_local = policy_options.write_affinity_is_local_fn if is_local is not None: primaries = obj_ring.get_part_nodes(partition) node_count = len(primaries) local_handoffs = policy_options.write_affinity_handoff_delete_count if local_handoffs is None: local_primaries = [node for node in primaries if is_local(node)] local_handoffs = len(primaries) - len(local_primaries) node_count += local_handoffs node_iterator = self.iter_nodes_local_first( obj_ring, partition, policy=policy, local_handoffs_first=True ) status_overrides = {404: 204} resp = self.make_requests(req, obj_ring, partition, 'DELETE', req.swift_entity_path, headers, overrides=status_overrides, node_count=node_count, node_iterator=node_iterator) return resp def _post_object(self, req, obj_ring, partition, headers): """ send object POST request to storage nodes. :param req: the POST Request :param obj_ring: the object ring :param partition: ring partition number :param headers: system headers to storage nodes :return: Response object """ resp = self.make_requests(req, obj_ring, partition, 'POST', req.swift_entity_path, headers) return resp @public @cors_validation @delay_denial def PUT(self, req): """HTTP PUT request handler.""" if req.if_none_match is not None and '*' not in req.if_none_match: # Sending an etag with if-none-match isn't currently supported return HTTPBadRequest(request=req, content_type='text/plain', body='If-None-Match only supports *') container_info = self.container_info( self.account_name, self.container_name, req) policy_index = req.headers.get('X-Backend-Storage-Policy-Index', container_info['storage_policy']) obj_ring = self.app.get_object_ring(policy_index) container_nodes = container_info['nodes'] container_partition = container_info['partition'] partition, nodes = obj_ring.get_nodes( self.account_name, self.container_name, self.object_name) # pass the policy index to storage nodes via req header req.headers['X-Backend-Storage-Policy-Index'] = policy_index next_part_power = getattr(obj_ring, 'next_part_power', None) if next_part_power: req.headers['X-Backend-Next-Part-Power'] = next_part_power req.acl = container_info['write_acl'] req.environ['swift_sync_key'] = container_info['sync_key'] # is request authorized if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: return aresp if not container_nodes: return HTTPNotFound(request=req) # update content type in case it is missing self._update_content_type(req) self._update_x_timestamp(req) # check constraints on object name and request headers error_response = check_object_creation(req, self.object_name) or \ check_content_type(req) if error_response: return error_response def reader(): try: return req.environ['wsgi.input'].read( self.app.client_chunk_size) except (ValueError, IOError) as e: raise ChunkReadError(str(e)) data_source = iter(reader, '') # check if object is set to be automatically deleted (i.e. 
expired) req, delete_at_container, delete_at_part, \ delete_at_nodes = self._config_obj_expiration(req) # add special headers to be handled by storage nodes outgoing_headers = self._backend_requests( req, len(nodes), container_partition, container_nodes, delete_at_container, delete_at_part, delete_at_nodes) # send object to storage nodes resp = self._store_object( req, data_source, nodes, partition, outgoing_headers) return resp @public @cors_validation @delay_denial def DELETE(self, req): """HTTP DELETE request handler.""" container_info = self.container_info( self.account_name, self.container_name, req) # pass the policy index to storage nodes via req header policy_index = req.headers.get('X-Backend-Storage-Policy-Index', container_info['storage_policy']) obj_ring = self.app.get_object_ring(policy_index) # pass the policy index to storage nodes via req header req.headers['X-Backend-Storage-Policy-Index'] = policy_index next_part_power = getattr(obj_ring, 'next_part_power', None) if next_part_power: req.headers['X-Backend-Next-Part-Power'] = next_part_power container_partition = container_info['partition'] container_nodes = container_info['nodes'] req.acl = container_info['write_acl'] req.environ['swift_sync_key'] = container_info['sync_key'] if 'swift.authorize' in req.environ: aresp = req.environ['swift.authorize'](req) if aresp: return aresp if not container_nodes: return HTTPNotFound(request=req) partition, nodes = obj_ring.get_nodes( self.account_name, self.container_name, self.object_name) self._update_x_timestamp(req) # Include local handoff nodes if write-affinity is enabled. node_count = len(nodes) policy = POLICIES.get_by_index(policy_index) policy_options = self.app.get_policy_options(policy) is_local = policy_options.write_affinity_is_local_fn if is_local is not None: local_handoffs = policy_options.write_affinity_handoff_delete_count if local_handoffs is None: local_primaries = [node for node in nodes if is_local(node)] local_handoffs = len(nodes) - len(local_primaries) node_count += local_handoffs headers = self._backend_requests( req, node_count, container_partition, container_nodes) return self._delete_object(req, obj_ring, partition, headers) @ObjectControllerRouter.register(REPL_POLICY) class ReplicatedObjectController(BaseObjectController): def _get_or_head_response(self, req, node_iter, partition, policy): concurrency = self.app.get_object_ring(policy.idx).replica_count \ if self.app.concurrent_gets else 1 resp = self.GETorHEAD_base( req, _('Object'), node_iter, partition, req.swift_entity_path, concurrency) return resp def _make_putter(self, node, part, req, headers): if req.environ.get('swift.callback.update_footers'): putter = MIMEPutter.connect( node, part, req.swift_entity_path, headers, conn_timeout=self.app.conn_timeout, node_timeout=self.app.node_timeout, logger=self.app.logger, need_multiphase=False) else: putter = Putter.connect( node, part, req.swift_entity_path, headers, conn_timeout=self.app.conn_timeout, node_timeout=self.app.node_timeout, logger=self.app.logger, chunked=req.is_chunked) return putter def _transfer_data(self, req, data_source, putters, nodes): """ Transfer data for a replicated object. 
        This method was added in the PUT method extraction change
        """
        bytes_transferred = 0

        def send_chunk(chunk):
            for putter in list(putters):
                if not putter.failed:
                    putter.send_chunk(chunk)
                else:
                    putter.close()
                    putters.remove(putter)
            self._check_min_conn(
                req, putters, min_conns,
                msg=_('Object PUT exceptions during send, '
                      '%(conns)s/%(nodes)s required connections'))

        min_conns = quorum_size(len(nodes))
        try:
            with ContextPool(len(nodes)) as pool:
                for putter in putters:
                    putter.spawn_sender_greenthread(
                        pool, self.app.put_queue_depth,
                        self.app.node_timeout,
                        self.app.exception_occurred)
                while True:
                    with ChunkReadTimeout(self.app.client_timeout):
                        try:
                            chunk = next(data_source)
                        except StopIteration:
                            break
                    bytes_transferred += len(chunk)
                    if bytes_transferred > constraints.MAX_FILE_SIZE:
                        raise HTTPRequestEntityTooLarge(request=req)
                    send_chunk(chunk)

                if req.content_length and (
                        bytes_transferred < req.content_length):
                    req.client_disconnect = True
                    self.app.logger.warning(
                        _('Client disconnected without sending enough data'))
                    self.app.logger.increment('client_disconnects')
                    raise HTTPClientDisconnect(request=req)

                trail_md = self._get_footers(req)
                for putter in putters:
                    # send any footers set by middleware
                    putter.end_of_object_data(footer_metadata=trail_md)

                for putter in putters:
                    putter.wait()
                self._check_min_conn(
                    req, [p for p in putters if not p.failed], min_conns,
                    msg=_('Object PUT exceptions after last send, '
                          '%(conns)s/%(nodes)s required connections'))
        except ChunkReadTimeout as err:
            self.app.logger.warning(
                _('ERROR Client read timeout (%ss)'), err.seconds)
            self.app.logger.increment('client_timeouts')
            raise HTTPRequestTimeout(request=req)
        except HTTPException:
            raise
        except ChunkReadError:
            req.client_disconnect = True
            self.app.logger.warning(
                _('Client disconnected without sending last chunk'))
            self.app.logger.increment('client_disconnects')
            raise HTTPClientDisconnect(request=req)
        except Timeout:
            self.app.logger.exception(
                _('ERROR Exception causing client disconnect'))
            raise HTTPClientDisconnect(request=req)
        except Exception:
            self.app.logger.exception(
                _('ERROR Exception transferring data to object servers '
                  '%(path)s'), {'path': req.path})
            raise HTTPInternalServerError(request=req)

    def _have_adequate_put_responses(self, statuses, num_nodes,
                                     min_responses):
        return self.have_quorum(statuses, num_nodes)

    def _store_object(self, req, data_source, nodes, partition,
                      outgoing_headers):
        """
        Store a replicated object.

        This method is responsible for establishing connection with storage
        nodes and sending object to each one of those nodes.
After sending the data, the "best" response will be returned based on statuses from all connections """ policy_index = req.headers.get('X-Backend-Storage-Policy-Index') policy = POLICIES.get_by_index(policy_index) if not nodes: return HTTPNotFound() putters = self._get_put_connections( req, nodes, partition, outgoing_headers, policy) min_conns = quorum_size(len(nodes)) try: # check that a minimum number of connections were established and # meet all the correct conditions set in the request self._check_failure_put_connections(putters, req, min_conns) # transfer data self._transfer_data(req, data_source, putters, nodes) # get responses statuses, reasons, bodies, etags = \ self._get_put_responses(req, putters, len(nodes)) except HTTPException as resp: return resp finally: for putter in putters: putter.close() if len(etags) > 1: self.app.logger.error( _('Object servers returned %s mismatched etags'), len(etags)) return HTTPServerError(request=req) etag = etags.pop() if len(etags) else None resp = self.best_response(req, statuses, reasons, bodies, _('Object PUT'), etag=etag) resp.last_modified = math.ceil( float(Timestamp(req.headers['X-Timestamp']))) return resp class ECAppIter(object): """ WSGI iterable that decodes EC fragment archives (or portions thereof) into the original object (or portions thereof). :param path: object's path, sans v1 (e.g. /a/c/o) :param policy: storage policy for this object :param internal_parts_iters: list of the response-document-parts iterators for the backend GET responses. For an M+K erasure code, the caller must supply M such iterables. :param range_specs: list of dictionaries describing the ranges requested by the client. Each dictionary contains the start and end of the client's requested byte range as well as the start and end of the EC segments containing that byte range. :param fa_length: length of the fragment archive, in bytes, if the response is a 200. If it's a 206, then this is ignored. :param obj_length: length of the object, in bytes. Learned from the headers in the GET response from the object server. :param logger: a logger """ def __init__(self, path, policy, internal_parts_iters, range_specs, fa_length, obj_length, logger): self.path = path self.policy = policy self.internal_parts_iters = internal_parts_iters self.range_specs = range_specs self.fa_length = fa_length self.obj_length = obj_length if obj_length is not None else 0 self.boundary = '' self.logger = logger self.mime_boundary = None self.learned_content_type = None self.stashed_iter = None def close(self): # close down the stashed iter first so the ContextPool can # cleanup the frag queue feeding coros that may be currently # executing the internal_parts_iters. if self.stashed_iter: self.stashed_iter.close() for it in self.internal_parts_iters: close_if_possible(it) def kickoff(self, req, resp): """ Start pulling data from the backends so that we can learn things like the real Content-Type that might only be in the multipart/byteranges response body. Update our response accordingly. Also, this is the first point at which we can learn the MIME boundary that our response has in the headers. We grab that so we can also use it in the body. 
:returns: None :raises HTTPException: on error """ self.mime_boundary = resp.boundary try: self.stashed_iter = reiterate(self._real_iter(req, resp.headers)) except Exception: self.close() raise if self.learned_content_type is not None: resp.content_type = self.learned_content_type resp.content_length = self.obj_length def _next_range(self): # Each FA part should have approximately the same headers. We really # only care about Content-Range and Content-Type, and that'll be the # same for all the different FAs. frag_iters = [] headers = None for parts_iter in self.internal_parts_iters: part_info = next(parts_iter) frag_iters.append(part_info['part_iter']) headers = part_info['headers'] headers = HeaderKeyDict(headers) return headers, frag_iters def _actual_range(self, req_start, req_end, entity_length): try: rng = Range("bytes=%s-%s" % ( req_start if req_start is not None else '', req_end if req_end is not None else '')) except ValueError: return (None, None) rfl = rng.ranges_for_length(entity_length) if not rfl: return (None, None) else: # ranges_for_length() adds 1 to the last byte's position # because webob once made a mistake return (rfl[0][0], rfl[0][1] - 1) def _fill_out_range_specs_from_obj_length(self, range_specs): # Add a few fields to each range spec: # # * resp_client_start, resp_client_end: the actual bytes that will # be delivered to the client for the requested range. This may # differ from the requested bytes if, say, the requested range # overlaps the end of the object. # # * resp_segment_start, resp_segment_end: the actual offsets of the # segments that will be decoded for the requested range. These # differ from resp_client_start/end in that these are aligned # to segment boundaries, while resp_client_start/end are not # necessarily so. # # * satisfiable: a boolean indicating whether the range is # satisfiable or not (i.e. the requested range overlaps the # object in at least one byte). # # This is kept separate from _fill_out_range_specs_from_fa_length() # because this computation can be done with just the response # headers from the object servers (in particular # X-Object-Sysmeta-Ec-Content-Length), while the computation in # _fill_out_range_specs_from_fa_length() requires the beginnings of # the response bodies. for spec in range_specs: cstart, cend = self._actual_range( spec['req_client_start'], spec['req_client_end'], self.obj_length) spec['resp_client_start'] = cstart spec['resp_client_end'] = cend spec['satisfiable'] = (cstart is not None and cend is not None) sstart, send = self._actual_range( spec['req_segment_start'], spec['req_segment_end'], self.obj_length) seg_size = self.policy.ec_segment_size if spec['req_segment_start'] is None and sstart % seg_size != 0: # Segment start may, in the case of a suffix request, need # to be rounded up (not down!) to the nearest segment boundary. # This reflects the trimming of leading garbage (partial # fragments) from the retrieved fragments. sstart += seg_size - (sstart % seg_size) spec['resp_segment_start'] = sstart spec['resp_segment_end'] = send def _fill_out_range_specs_from_fa_length(self, fa_length, range_specs): # Add two fields to each range spec: # # * resp_fragment_start, resp_fragment_end: the start and end of # the fragments that compose this byterange. These values are # aligned to fragment boundaries. 
# # This way, ECAppIter has the knowledge it needs to correlate # response byteranges with requested ones for when some byteranges # are omitted from the response entirely and also to put the right # Content-Range headers in a multipart/byteranges response. for spec in range_specs: fstart, fend = self._actual_range( spec['req_fragment_start'], spec['req_fragment_end'], fa_length) spec['resp_fragment_start'] = fstart spec['resp_fragment_end'] = fend def __iter__(self): if self.stashed_iter is not None: return iter(self.stashed_iter) else: raise ValueError("Failed to call kickoff() before __iter__()") def _real_iter(self, req, resp_headers): if not self.range_specs: client_asked_for_range = False range_specs = [{ 'req_client_start': 0, 'req_client_end': (None if self.obj_length is None else self.obj_length - 1), 'resp_client_start': 0, 'resp_client_end': (None if self.obj_length is None else self.obj_length - 1), 'req_segment_start': 0, 'req_segment_end': (None if self.obj_length is None else self.obj_length - 1), 'resp_segment_start': 0, 'resp_segment_end': (None if self.obj_length is None else self.obj_length - 1), 'req_fragment_start': 0, 'req_fragment_end': self.fa_length - 1, 'resp_fragment_start': 0, 'resp_fragment_end': self.fa_length - 1, 'satisfiable': self.obj_length > 0, }] else: client_asked_for_range = True range_specs = self.range_specs self._fill_out_range_specs_from_obj_length(range_specs) multipart = (len([rs for rs in range_specs if rs['satisfiable']]) > 1) # Multipart responses are not required to be in the same order as # the Range header; the parts may be in any order the server wants. # Further, if multiple ranges are requested and only some are # satisfiable, then only the satisfiable ones appear in the response # at all. Thus, we cannot simply iterate over range_specs in order; # we must use the Content-Range header from each part to figure out # what we've been given. # # We do, however, make the assumption that all the object-server # responses have their ranges in the same order. Otherwise, a # streaming decode would be impossible. def convert_ranges_iter(): seen_first_headers = False ranges_for_resp = {} while True: # this'll raise StopIteration and exit the loop next_range = self._next_range() headers, frag_iters = next_range content_type = headers['Content-Type'] content_range = headers.get('Content-Range') if content_range is not None: fa_start, fa_end, fa_length = parse_content_range( content_range) elif self.fa_length <= 0: fa_start = None fa_end = None fa_length = 0 else: fa_start = 0 fa_end = self.fa_length - 1 fa_length = self.fa_length if not seen_first_headers: # This is the earliest we can possibly do this. On a # 200 or 206-single-byterange response, we can learn # the FA's length from the HTTP response headers. # However, on a 206-multiple-byteranges response, we # don't learn it until the first part of the # response body, in the headers of the first MIME # part. # # Similarly, the content type of a # 206-multiple-byteranges response is # "multipart/byteranges", not the object's actual # content type. self._fill_out_range_specs_from_fa_length( fa_length, range_specs) satisfiable = False for range_spec in range_specs: satisfiable |= range_spec['satisfiable'] key = (range_spec['resp_fragment_start'], range_spec['resp_fragment_end']) ranges_for_resp.setdefault(key, []).append(range_spec) # The client may have asked for an unsatisfiable set of # ranges, but when converted to fragments, the object # servers see it as satisfiable. 
For example, imagine a # request for bytes 800-900 of a 750-byte object with a # 1024-byte segment size. The object servers will see a # request for bytes 0-${fragsize-1}, and that's # satisfiable, so they return 206. It's not until we # learn the object size that we can check for this # condition. # # Note that some unsatisfiable ranges *will* be caught # by the object servers, like bytes 1800-1900 of a # 100-byte object with 1024-byte segments. That's not # what we're dealing with here, though. if client_asked_for_range and not satisfiable: req.environ[ 'swift.non_client_disconnect'] = True raise HTTPRequestedRangeNotSatisfiable( request=req, headers=resp_headers) self.learned_content_type = content_type seen_first_headers = True range_spec = ranges_for_resp[(fa_start, fa_end)].pop(0) seg_iter = self._decode_segments_from_fragments(frag_iters) if not range_spec['satisfiable']: # This'll be small; just a single small segment. Discard # it. for x in seg_iter: pass continue byterange_iter = self._iter_one_range(range_spec, seg_iter) converted = { "start_byte": range_spec["resp_client_start"], "end_byte": range_spec["resp_client_end"], "content_type": content_type, "part_iter": byterange_iter} if self.obj_length is not None: converted["entity_length"] = self.obj_length yield converted return document_iters_to_http_response_body( convert_ranges_iter(), self.mime_boundary, multipart, self.logger) def _iter_one_range(self, range_spec, segment_iter): client_start = range_spec['resp_client_start'] client_end = range_spec['resp_client_end'] segment_start = range_spec['resp_segment_start'] segment_end = range_spec['resp_segment_end'] # It's entirely possible that the client asked for a range that # includes some bytes we have and some we don't; for example, a # range of bytes 1000-20000000 on a 1500-byte object. segment_end = (min(segment_end, self.obj_length - 1) if segment_end is not None else self.obj_length - 1) client_end = (min(client_end, self.obj_length - 1) if client_end is not None else self.obj_length - 1) num_segments = int( math.ceil(float(segment_end + 1 - segment_start) / self.policy.ec_segment_size)) # We get full segments here, but the client may have requested a # byte range that begins or ends in the middle of a segment. # Thus, we have some amount of overrun (extra decoded bytes) # that we trim off so the client gets exactly what they # requested. start_overrun = client_start - segment_start end_overrun = segment_end - client_end for i, next_seg in enumerate(segment_iter): # We may have a start_overrun of more than one segment in # the case of suffix-byte-range requests. However, we never # have an end_overrun of more than one segment. if start_overrun > 0: seglen = len(next_seg) if seglen <= start_overrun: start_overrun -= seglen continue else: next_seg = next_seg[start_overrun:] start_overrun = 0 if i == (num_segments - 1) and end_overrun: next_seg = next_seg[:-end_overrun] yield next_seg def _decode_segments_from_fragments(self, fragment_iters): # Decodes the fragments from the object servers and yields one # segment at a time. 
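# ---------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the fan-in
# pattern implemented below, reduced to plain iterators and stripped of
# the greenthread/queue plumbing. One fragment is pulled from every
# archive's iterator in lock-step, and each group of fragments decodes
# to one segment. toy_decode() is a hypothetical stand-in for
# policy.pyeclib_driver.decode(), which reconstructs a segment from the
# fragments.
def toy_decode(fragments):
    # stand-in decode: concatenation; a real EC driver inverts its
    # encode() here
    return b''.join(fragments)

def segments_from_fragment_iters(fragment_iters, decode=toy_decode):
    for fragments in zip(*fragment_iters):
        if not all(fragments):
            # mirror the all-or-nothing check in the real code below
            break
        yield decode(list(fragments))

# usage: two "fragment archives", each yielding two fragments
#   list(segments_from_fragment_iters(
#       [iter([b'ab', b'cd']), iter([b'12', b'34'])]))
#   => [b'ab12', b'cd34']
# ---------------------------------------------------------------------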
queues = [Queue(1) for _junk in range(len(fragment_iters))] def put_fragments_in_queue(frag_iter, queue): try: for fragment in frag_iter: if fragment.startswith(' '): raise Exception('Leading whitespace on fragment.') queue.put(fragment) except GreenletExit: # killed by contextpool pass except ChunkReadTimeout: # unable to resume in GetOrHeadHandler self.logger.exception(_("Timeout fetching fragments for %r"), self.path) except: # noqa self.logger.exception(_("Exception fetching fragments for" " %r"), self.path) finally: queue.resize(2) # ensure there's room queue.put(None) frag_iter.close() with ContextPool(len(fragment_iters)) as pool: for frag_iter, queue in zip(fragment_iters, queues): pool.spawn(put_fragments_in_queue, frag_iter, queue) while True: fragments = [] for queue in queues: fragment = queue.get() queue.task_done() fragments.append(fragment) # If any object server connection yields out a None; we're # done. Either they are all None, and we've finished # successfully; or some un-recoverable failure has left us # with an un-reconstructible list of fragments - so we'll # break out of the iter so WSGI can tear down the broken # connection. if not all(fragments): break try: segment = self.policy.pyeclib_driver.decode(fragments) except ECDriverError: self.logger.exception(_("Error decoding fragments for" " %r"), self.path) raise yield segment def app_iter_range(self, start, end): return self def app_iter_ranges(self, ranges, content_type, boundary, content_size): return self def client_range_to_segment_range(client_start, client_end, segment_size): """ Takes a byterange from the client and converts it into a byterange spanning the necessary segments. Handles prefix, suffix, and fully-specified byte ranges. Examples: client_range_to_segment_range(100, 700, 512) = (0, 1023) client_range_to_segment_range(100, 700, 256) = (0, 767) client_range_to_segment_range(300, None, 256) = (256, None) :param client_start: first byte of the range requested by the client :param client_end: last byte of the range requested by the client :param segment_size: size of an EC segment, in bytes :returns: a 2-tuple (seg_start, seg_end) where * seg_start is the first byte of the first segment, or None if this is a suffix byte range * seg_end is the last byte of the last segment, or None if this is a prefix byte range """ # the index of the first byte of the first segment segment_start = ( int(client_start // segment_size) * segment_size) if client_start is not None else None # the index of the last byte of the last segment segment_end = ( # bytes M- None if client_end is None else # bytes M-N (((int(client_end // segment_size) + 1) * segment_size) - 1) if client_start is not None else # bytes -N: we get some extra bytes to make sure we # have all we need. # # To see why, imagine a 100-byte segment size, a # 340-byte object, and a request for the last 50 # bytes. Naively requesting the last 100 bytes would # result in a truncated first segment and hence a # truncated download. (Of course, the actual # obj-server requests are for fragments, not # segments, but that doesn't change the # calculation.) # # This does mean that we fetch an extra segment if # the object size is an exact multiple of the # segment size. It's a little wasteful, but it's # better to be a little wasteful than to get some # range requests completely wrong. 
(int(math.ceil(( float(client_end) / segment_size) + 1)) # nsegs * segment_size)) return (segment_start, segment_end) def segment_range_to_fragment_range(segment_start, segment_end, segment_size, fragment_size): """ Takes a byterange spanning some segments and converts that into a byterange spanning the corresponding fragments within their fragment archives. Handles prefix, suffix, and fully-specified byte ranges. :param segment_start: first byte of the first segment :param segment_end: last byte of the last segment :param segment_size: size of an EC segment, in bytes :param fragment_size: size of an EC fragment, in bytes :returns: a 2-tuple (frag_start, frag_end) where * frag_start is the first byte of the first fragment, or None if this is a suffix byte range * frag_end is the last byte of the last fragment, or None if this is a prefix byte range """ # Note: segment_start and (segment_end + 1) are # multiples of segment_size, so we don't have to worry # about integer math giving us rounding troubles. # # There's a whole bunch of +1 and -1 in here; that's because HTTP wants # byteranges to be inclusive of the start and end, so e.g. bytes 200-300 # is a range containing 101 bytes. Python has half-inclusive ranges, of # course, so we have to convert back and forth. We try to keep things in # HTTP-style byteranges for consistency. # the index of the first byte of the first fragment fragment_start = (( segment_start / segment_size * fragment_size) if segment_start is not None else None) # the index of the last byte of the last fragment fragment_end = ( # range unbounded on the right None if segment_end is None else # range unbounded on the left; no -1 since we're # asking for the last N bytes, not to have a # particular byte be the last one ((segment_end + 1) / segment_size * fragment_size) if segment_start is None else # range bounded on both sides; the -1 is because the # rest of the expression computes the length of the # fragment, and a range of N bytes starts at index M # and ends at M + N - 1. ((segment_end + 1) / segment_size * fragment_size) - 1) return (fragment_start, fragment_end) NO_DATA_SENT = 1 SENDING_DATA = 2 DATA_SENT = 3 DATA_ACKED = 4 COMMIT_SENT = 5 class Putter(object): """ Putter for backend PUT requests. Encapsulates all the actions required to establish a connection with a storage node and stream data to that node. :param conn: an HTTPConnection instance :param node: dict describing storage node :param resp: an HTTPResponse instance if connect() received final response :param path: the object path to send to the storage node :param connect_duration: time taken to initiate the HTTPConnection :param logger: a Logger instance :param chunked: boolean indicating if the request encoding is chunked """ def __init__(self, conn, node, resp, path, connect_duration, logger, chunked=False): # Note: you probably want to call Putter.connect() instead of # instantiating one of these directly. self.conn = conn self.node = node self.resp = self.final_resp = resp self.path = path self.connect_duration = connect_duration # for handoff nodes node_index is None self.node_index = node.get('index') self.failed = False self.queue = None self.state = NO_DATA_SENT self.chunked = chunked self.logger = logger def await_response(self, timeout, informational=False): """ Get 100-continue response indicating the end of 1st phase of a 2-phase commit or the final response, i.e. the one with status >= 200. Might or might not actually wait for anything. 
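For orientation, an illustrative (not normative) state flow for a
Putter, using the NO_DATA_SENT .. COMMIT_SENT constants defined above;
the quorum wait happens in the controller, not in the Putter itself::

    NO_DATA_SENT  --send_chunk()--------------> SENDING_DATA
    SENDING_DATA  --end_of_object_data()------> DATA_SENT
    DATA_SENT     --send_commit_confirmation(),
                    once a quorum of 100 Continue
                    responses has been seen----> DATA_ACKED
    DATA_ACKED    --commit document queued----> COMMIT_SENT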
If we said Expect: 100-continue but got back a non-100 response, that'll be the thing returned, and we won't do any network IO to get it. OTOH, if we got a 100 Continue response and sent up the PUT request's body, then we'll actually read the 2xx-5xx response off the network here. :param timeout: time to wait for a response :param informational: if True then try to get a 100-continue response, otherwise try to get a final response. :returns: HTTPResponse :raises Timeout: if the response took too long """ # don't do this update of self.resp if the Expect response during # connect() was actually a final response if not self.final_resp: with Timeout(timeout): if informational: self.resp = self.conn.getexpect() else: self.resp = self.conn.getresponse() return self.resp def spawn_sender_greenthread(self, pool, queue_depth, write_timeout, exception_handler): """Call before sending the first chunk of request body""" self.queue = Queue(queue_depth) pool.spawn(self._send_file, write_timeout, exception_handler) def wait(self): if self.queue.unfinished_tasks: self.queue.join() def _start_object_data(self): # Called immediately before the first chunk of object data is sent. # Subclasses may implement custom behaviour pass def send_chunk(self, chunk): if not chunk: # If we're not using chunked transfer-encoding, sending a 0-byte # chunk is just wasteful. If we *are* using chunked # transfer-encoding, sending a 0-byte chunk terminates the # request body. Neither one of these is good. return elif self.state == DATA_SENT: raise ValueError("called send_chunk after end_of_object_data") if self.state == NO_DATA_SENT: self._start_object_data() self.state = SENDING_DATA self.queue.put(chunk) def end_of_object_data(self, **kwargs): """ Call when there is no more data to send. """ if self.state == DATA_SENT: raise ValueError("called end_of_object_data twice") self.queue.put('') self.state = DATA_SENT def _send_file(self, write_timeout, exception_handler): """ Method for a file PUT coroutine. Takes chunks from a queue and sends them down a socket. If something goes wrong, the "failed" attribute will be set to true and the exception handler will be called. 
""" while True: chunk = self.queue.get() if not self.failed: if self.chunked: to_send = "%x\r\n%s\r\n" % (len(chunk), chunk) else: to_send = chunk try: with ChunkWriteTimeout(write_timeout): self.conn.send(to_send) except (Exception, ChunkWriteTimeout): self.failed = True exception_handler(self.node, _('Object'), _('Trying to write to %s') % self.path) self.queue.task_done() def close(self): # release reference to response to ensure connection really does close, # see bug https://bugs.launchpad.net/swift/+bug/1594739 self.resp = self.final_resp = None self.conn.close() @classmethod def _make_connection(cls, node, part, path, headers, conn_timeout, node_timeout): start_time = time.time() with ConnectionTimeout(conn_timeout): conn = http_connect(node['ip'], node['port'], node['device'], part, 'PUT', path, headers) connect_duration = time.time() - start_time with ResponseTimeout(node_timeout): resp = conn.getexpect() if resp.status == HTTP_INSUFFICIENT_STORAGE: raise InsufficientStorage if is_server_error(resp.status): raise PutterConnectError(resp.status) final_resp = None if (is_success(resp.status) or resp.status in (HTTP_CONFLICT, HTTP_UNPROCESSABLE_ENTITY) or (headers.get('If-None-Match', None) is not None and resp.status == HTTP_PRECONDITION_FAILED)): final_resp = resp return conn, resp, final_resp, connect_duration @classmethod def connect(cls, node, part, path, headers, conn_timeout, node_timeout, logger=None, chunked=False, **kwargs): """ Connect to a backend node and send the headers. :returns: Putter instance :raises ConnectionTimeout: if initial connection timed out :raises ResponseTimeout: if header retrieval timed out :raises InsufficientStorage: on 507 response from node :raises PutterConnectError: on non-507 server error response from node """ conn, expect_resp, final_resp, connect_duration = cls._make_connection( node, part, path, headers, conn_timeout, node_timeout) return cls(conn, node, final_resp, path, connect_duration, logger, chunked=chunked) class MIMEPutter(Putter): """ Putter for backend PUT requests that use MIME. This is here mostly to wrap up the fact that all multipart PUTs are chunked because of the mime boundary footer trick and the first half of the two-phase PUT conversation handling. An HTTP PUT request that supports streaming. """ def __init__(self, conn, node, resp, req, connect_duration, logger, mime_boundary, multiphase=False): super(MIMEPutter, self).__init__(conn, node, resp, req, connect_duration, logger) # Note: you probably want to call MimePutter.connect() instead of # instantiating one of these directly. self.chunked = True # MIME requests always send chunked body self.mime_boundary = mime_boundary self.multiphase = multiphase def _start_object_data(self): # We're sending the object plus other stuff in the same request # body, all wrapped up in multipart MIME, so we'd better start # off the MIME document before sending any object data. self.queue.put("--%s\r\nX-Document: object body\r\n\r\n" % (self.mime_boundary,)) def end_of_object_data(self, footer_metadata=None): """ Call when there is no more data to send. Overrides superclass implementation to send any footer metadata after object data. :param footer_metadata: dictionary of metadata items to be sent as footers. 
""" if self.state == DATA_SENT: raise ValueError("called end_of_object_data twice") elif self.state == NO_DATA_SENT and self.mime_boundary: self._start_object_data() footer_body = json.dumps(footer_metadata) footer_md5 = md5(footer_body).hexdigest() tail_boundary = ("--%s" % (self.mime_boundary,)) if not self.multiphase: # this will be the last part sent tail_boundary = tail_boundary + "--" message_parts = [ ("\r\n--%s\r\n" % self.mime_boundary), "X-Document: object metadata\r\n", "Content-MD5: %s\r\n" % footer_md5, "\r\n", footer_body, "\r\n", tail_boundary, "\r\n", ] self.queue.put("".join(message_parts)) self.queue.put('') self.state = DATA_SENT def send_commit_confirmation(self): """ Call when there are > quorum 2XX responses received. Send commit confirmations to all object nodes to finalize the PUT. """ if not self.multiphase: raise ValueError( "called send_commit_confirmation but multiphase is False") if self.state == COMMIT_SENT: raise ValueError("called send_commit_confirmation twice") self.state = DATA_ACKED if self.mime_boundary: body = "put_commit_confirmation" tail_boundary = ("--%s--" % (self.mime_boundary,)) message_parts = [ "X-Document: put commit\r\n", "\r\n", body, "\r\n", tail_boundary, ] self.queue.put("".join(message_parts)) self.queue.put('') self.state = COMMIT_SENT @classmethod def connect(cls, node, part, req, headers, conn_timeout, node_timeout, logger=None, need_multiphase=True, **kwargs): """ Connect to a backend node and send the headers. Override superclass method to notify object of need for support for multipart body with footers and optionally multiphase commit, and verify object server's capabilities. :param need_multiphase: if True then multiphase support is required of the object server :raises FooterNotSupported: if need_metadata_footer is set but backend node can't process footers :raises MultiphasePUTNotSupported: if need_multiphase is set but backend node can't handle multiphase PUT """ mime_boundary = "%.64x" % random.randint(0, 16 ** 64) headers = HeaderKeyDict(headers) # when using a multipart mime request to backend the actual # content-length is not equal to the object content size, so move the # object content size to X-Backend-Obj-Content-Length if that has not # already been set by the EC PUT path. headers.setdefault('X-Backend-Obj-Content-Length', headers.pop('Content-Length', None)) # We're going to be adding some unknown amount of data to the # request, so we can't use an explicit content length, and thus # we must use chunked encoding. 
headers['Transfer-Encoding'] = 'chunked' headers['Expect'] = '100-continue' headers['X-Backend-Obj-Multipart-Mime-Boundary'] = mime_boundary headers['X-Backend-Obj-Metadata-Footer'] = 'yes' if need_multiphase: headers['X-Backend-Obj-Multiphase-Commit'] = 'yes' conn, expect_resp, final_resp, connect_duration = cls._make_connection( node, part, req, headers, conn_timeout, node_timeout) if is_informational(expect_resp.status): continue_headers = HeaderKeyDict(expect_resp.getheaders()) can_send_metadata_footer = config_true_value( continue_headers.get('X-Obj-Metadata-Footer', 'no')) can_handle_multiphase_put = config_true_value( continue_headers.get('X-Obj-Multiphase-Commit', 'no')) if not can_send_metadata_footer: raise FooterNotSupported() if need_multiphase and not can_handle_multiphase_put: raise MultiphasePUTNotSupported() return cls(conn, node, final_resp, req, connect_duration, logger, mime_boundary, multiphase=need_multiphase) def chunk_transformer(policy): """ A generator to transform a source chunk to erasure coded chunks for each `send` call. The number of erasure coded chunks is as policy.ec_n_unique_fragments. """ segment_size = policy.ec_segment_size buf = collections.deque() total_buf_len = 0 chunk = yield while chunk: buf.append(chunk) total_buf_len += len(chunk) if total_buf_len >= segment_size: chunks_to_encode = [] # extract as many chunks as we can from the input buffer while total_buf_len >= segment_size: to_take = segment_size pieces = [] while to_take > 0: piece = buf.popleft() if len(piece) > to_take: buf.appendleft(piece[to_take:]) piece = piece[:to_take] pieces.append(piece) to_take -= len(piece) total_buf_len -= len(piece) chunks_to_encode.append(''.join(pieces)) frags_by_byte_order = [] for chunk_to_encode in chunks_to_encode: frags_by_byte_order.append( policy.pyeclib_driver.encode(chunk_to_encode)) # Sequential calls to encode() have given us a list that # looks like this: # # [[frag_A1, frag_B1, frag_C1, ...], # [frag_A2, frag_B2, frag_C2, ...], ...] # # What we need is a list like this: # # [(frag_A1 + frag_A2 + ...), # destined for node A # (frag_B1 + frag_B2 + ...), # destined for node B # (frag_C1 + frag_C2 + ...), # destined for node C # ...] obj_data = [''.join(frags) for frags in zip(*frags_by_byte_order)] chunk = yield obj_data else: # didn't have enough data to encode chunk = yield None # Now we've gotten an empty chunk, which indicates end-of-input. # Take any leftover bytes and encode them. last_bytes = ''.join(buf) if last_bytes: last_frags = policy.pyeclib_driver.encode(last_bytes) yield last_frags else: yield [''] * policy.ec_n_unique_fragments def trailing_metadata(policy, client_obj_hasher, bytes_transferred_from_client, fragment_archive_index): return HeaderKeyDict({ # etag and size values are being added twice here. # The container override header is used to update the container db # with these values as they represent the correct etag and size for # the whole object and not just the FA. # The object sysmeta headers will be saved on each FA of the object. 'X-Object-Sysmeta-EC-Etag': client_obj_hasher.hexdigest(), 'X-Object-Sysmeta-EC-Content-Length': str(bytes_transferred_from_client), # older style x-backend-container-update-override-* headers are used # here (rather than x-object-sysmeta-container-update-override-* # headers) for backwards compatibility: the request may be to an object # server that has not yet been upgraded to accept the newer style # x-object-sysmeta-container-update-override- headers. 
'X-Backend-Container-Update-Override-Etag': client_obj_hasher.hexdigest(), 'X-Backend-Container-Update-Override-Size': str(bytes_transferred_from_client), 'X-Object-Sysmeta-Ec-Frag-Index': str(fragment_archive_index), # These fields are for debuggability, # AKA "what is this thing?" 'X-Object-Sysmeta-EC-Scheme': policy.ec_scheme_description, 'X-Object-Sysmeta-EC-Segment-Size': str(policy.ec_segment_size), }) class ECGetResponseBucket(object): """ A helper class to encapsulate the properties of buckets in which fragment getters and alternate nodes are collected. """ def __init__(self, policy, timestamp_str): """ :param policy: an instance of ECStoragePolicy :param timestamp_str: a string representation of a timestamp """ self.policy = policy self.timestamp_str = timestamp_str self.gets = collections.defaultdict(list) self.alt_nodes = collections.defaultdict(list) self._durable = False self.status = self.headers = None def set_durable(self): self._durable = True def add_response(self, getter, parts_iter): if not self.gets: self.status = getter.last_status # stash first set of backend headers, which will be used to # populate a client response # TODO: each bucket is for a single *data* timestamp, but sources # in the same bucket may have different *metadata* timestamps if # some backends have more recent .meta files than others. Currently # we just use the last received metadata headers - this behavior is # ok and is consistent with a replication policy GET which # similarly does not attempt to find the backend with the most # recent metadata. We could alternatively choose to the *newest* # metadata headers for self.headers by selecting the source with # the latest X-Timestamp. self.headers = getter.last_headers elif (getter.last_headers.get('X-Object-Sysmeta-Ec-Etag') != self.headers.get('X-Object-Sysmeta-Ec-Etag')): # Fragments at the same timestamp with different etags are never # expected. If somehow it happens then ignore those fragments # to avoid mixing fragments that will not reconstruct otherwise # an exception from pyeclib is almost certain. This strategy leaves # a possibility that a set of consistent frags will be gathered. raise ValueError("ETag mismatch") frag_index = getter.last_headers.get('X-Object-Sysmeta-Ec-Frag-Index') frag_index = int(frag_index) if frag_index is not None else None self.gets[frag_index].append((getter, parts_iter)) def get_responses(self): """ Return a list of all useful sources. Where there are multiple sources associated with the same frag_index then only one is included. :return: a list of sources, each source being a tuple of form (ResumingGetter, iter) """ all_sources = [] for frag_index, sources in self.gets.items(): if frag_index is None: # bad responses don't have a frag_index (and fake good # responses from some unit tests) all_sources.extend(sources) else: all_sources.extend(sources[:1]) return all_sources def add_alternate_nodes(self, node, frag_indexes): for frag_index in frag_indexes: self.alt_nodes[frag_index].append(node) @property def shortfall(self): # A non-durable bucket always has a shortfall of at least 1 result = self.policy.ec_ndata - len(self.get_responses()) return max(result, 0 if self._durable else 1) @property def shortfall_with_alts(self): # The shortfall that we expect to have if we were to send requests # for frags on the alt nodes. 
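        # Worked example (illustrative, assuming ec_ndata == 4) of the
        # shortfall property above and this property:
        #
        #   non-durable bucket, 2 useful responses:
        #       shortfall = max(4 - 2, 1) = 2
        #   the same bucket with 1 unfetched alternate frag index:
        #       shortfall_with_alts = max(4 - (2 + 1), 1) = 1
        #   durable bucket, 4 useful responses:
        #       shortfall = max(4 - 4, 0) = 0
        #   non-durable bucket, 4 useful responses:
        #       shortfall = max(4 - 4, 1) = 1, i.e. a non-durable
        #       bucket never reports a shortfall of zero.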
alts = set(self.alt_nodes.keys()).difference(set(self.gets.keys()))
result = self.policy.ec_ndata - (len(self.get_responses()) + len(alts))
return max(result, 0 if self._durable else 1) def __str__(self):
# return a string summarising bucket state, useful for debugging.
return '<%s, %s, %s, %s(%s), %s>' \ % (self.timestamp_str, self.status, self._durable,
self.shortfall, self.shortfall_with_alts, len(self.gets))
class ECGetResponseCollection(object): """ Manages all successful EC GET responses
gathered by ResumingGetters. A response comprises a tuple of (<getter>, <parts iterator>).
All responses having the same data timestamp are placed in an ECGetResponseBucket for that
timestamp. The buckets are stored in the 'buckets' dict which maps timestamp -> bucket.
This class encapsulates logic for selecting the best bucket from the collection, and for
choosing alternate nodes. """ def __init__(self, policy): """ :param policy: an instance
of ECStoragePolicy """ self.policy = policy self.buckets = {} self.node_iter_count = 0
def _get_bucket(self, timestamp_str): """ :param timestamp_str: a string representation
of a timestamp :return: ECGetResponseBucket for given timestamp """ return
self.buckets.setdefault( timestamp_str, ECGetResponseBucket(self.policy, timestamp_str))
def add_response(self, get, parts_iter): """ Add a response to the collection.
:param get: An instance of :class:`~swift.proxy.controllers.base.ResumingGetter`
:param parts_iter: An iterator over response body parts :raises ValueError: if the
response etag or status code values do not match any values previously received for
the same timestamp """ headers = get.last_headers # Add the response to the appropriate
bucket keyed by data file # timestamp. Fall back to using X-Backend-Timestamp as key for
object # servers that have not been upgraded. t_data_file =
headers.get('X-Backend-Data-Timestamp') t_obj = headers.get('X-Backend-Timestamp',
headers.get('X-Timestamp')) self._get_bucket(t_data_file or t_obj).add_response(get,
parts_iter) # The node may also have alternate fragment indexes (possibly at # different
timestamps). For each list of alternate fragment indexes, # find the bucket for their
data file timestamp and add the node and # list to that bucket's alternate nodes.
frag_sets = safe_json_loads(headers.get('X-Backend-Fragments')) or {} for t_frag,
frag_set in frag_sets.items(): self._get_bucket(t_frag).add_alternate_nodes(get.node,
frag_set) # If the response includes a durable timestamp then mark that bucket as
# durable. Note that this may be a different bucket than the one this # response got
added to, and that we may never go and get a durable # frag from this node; it is
sufficient that we have been told that a # durable frag exists, somewhere, at t_durable.
t_durable = headers.get('X-Backend-Durable-Timestamp') if not t_durable and not
t_data_file: # obj server not upgraded so assume this response's frag is durable
t_durable = t_obj if t_durable: self._get_bucket(t_durable).set_durable()
def _sort_buckets(self): def key_fn(bucket): # Returns a tuple to use for sort ordering:
# buckets with no shortfall sort higher, # otherwise buckets with lowest
shortfall_with_alts sort higher, # finally buckets with newer timestamps sort higher.
return (bucket.shortfall <= 0, (not (bucket.shortfall <= 0) and
(-1 * bucket.shortfall_with_alts)), bucket.timestamp_str) return
sorted(self.buckets.values(), key=key_fn, reverse=True) @property def best_bucket(self):
""" Return the best bucket in the collection.
The "best" bucket is the newest timestamp with sufficient getters, or the closest to having sufficient getters, unless it is bettered by a bucket with potential alternate nodes. :return: An instance of :class:`~ECGetResponseBucket` or None if there are no buckets in the collection. """ sorted_buckets = self._sort_buckets() if sorted_buckets: return sorted_buckets[0] return None def _get_frag_prefs(self): # Construct the current frag_prefs list, with best_bucket prefs first. frag_prefs = [] for bucket in self._sort_buckets(): if bucket.timestamp_str: exclusions = [fi for fi in bucket.gets if fi is not None] prefs = {'timestamp': bucket.timestamp_str, 'exclude': exclusions} frag_prefs.append(prefs) return frag_prefs def get_extra_headers(self): frag_prefs = self._get_frag_prefs() return {'X-Backend-Fragment-Preferences': json.dumps(frag_prefs)} def _get_alternate_nodes(self): if self.node_iter_count <= self.policy.ec_ndata: # It makes sense to wait before starting to use alternate nodes, # because if we find sufficient frags on *distinct* nodes then we # spread work across mode nodes. There's no formal proof that # waiting for ec_ndata GETs is the right answer, but it seems # reasonable to try *at least* that many primary nodes before # resorting to alternate nodes. return None bucket = self.best_bucket if (bucket is None) or (bucket.shortfall <= 0): return None alt_frags = set(bucket.alt_nodes.keys()) got_frags = set(bucket.gets.keys()) wanted_frags = list(alt_frags.difference(got_frags)) # We may have the same frag_index on more than one node so shuffle to # avoid using the same frag_index consecutively, since we may not get a # response from the last node provided before being asked to provide # another node. random.shuffle(wanted_frags) for frag_index in wanted_frags: nodes = bucket.alt_nodes.get(frag_index) if nodes: return nodes return None def has_alternate_node(self): return True if self._get_alternate_nodes() else False def provide_alternate_node(self): """ Callback function that is installed in a NodeIter. Called on every call to NodeIter.next(), which means we can track the number of nodes to which GET requests have been made and selectively inject an alternate node, if we have one. :return: A dict describing a node to which the next GET request should be made. """ self.node_iter_count += 1 nodes = self._get_alternate_nodes() if nodes: return nodes.pop(0).copy() @ObjectControllerRouter.register(EC_POLICY) class ECObjectController(BaseObjectController): def _fragment_GET_request(self, req, node_iter, partition, policy, header_provider=None): """ Makes a GET request for a fragment. """ backend_headers = self.generate_request_headers( req, additional=req.headers) getter = ResumingGetter(self.app, req, 'Object', node_iter, partition, req.swift_entity_path, backend_headers, client_chunk_size=policy.fragment_size, newest=False, header_provider=header_provider) return (getter, getter.response_parts_iter(req)) def _convert_range(self, req, policy): """ Take the requested range(s) from the client and convert it to range(s) to be sent to the object servers. This includes widening requested ranges to full segments, then converting those ranges to fragments so that we retrieve the minimum number of fragments from the object server. Mutates the request passed in. Returns a list of range specs (dictionaries with the different byte indices in them). 
""" # Since segments and fragments have different sizes, we need # to modify the Range header sent to the object servers to # make sure we get the right fragments out of the fragment # archives. segment_size = policy.ec_segment_size fragment_size = policy.fragment_size range_specs = [] new_ranges = [] for client_start, client_end in req.range.ranges: # TODO: coalesce ranges that overlap segments. For # example, "bytes=0-10,20-30,40-50" with a 64 KiB # segment size will result in a a Range header in the # object request of "bytes=0-65535,0-65535,0-65535", # which is wasteful. We should be smarter and only # request that first segment once. segment_start, segment_end = client_range_to_segment_range( client_start, client_end, segment_size) fragment_start, fragment_end = \ segment_range_to_fragment_range( segment_start, segment_end, segment_size, fragment_size) new_ranges.append((fragment_start, fragment_end)) range_specs.append({'req_client_start': client_start, 'req_client_end': client_end, 'req_segment_start': segment_start, 'req_segment_end': segment_end, 'req_fragment_start': fragment_start, 'req_fragment_end': fragment_end}) req.range = "bytes=" + ",".join( "%s-%s" % (s if s is not None else "", e if e is not None else "") for s, e in new_ranges) return range_specs def _get_or_head_response(self, req, node_iter, partition, policy): update_etag_is_at_header(req, "X-Object-Sysmeta-Ec-Etag") if req.method == 'HEAD': # no fancy EC decoding here, just one plain old HEAD request to # one object server because all fragments hold all metadata # information about the object. concurrency = policy.ec_ndata if self.app.concurrent_gets else 1 resp = self.GETorHEAD_base( req, _('Object'), node_iter, partition, req.swift_entity_path, concurrency) self._fix_response(req, resp) return resp # GET request orig_range = None range_specs = [] if req.range: orig_range = req.range range_specs = self._convert_range(req, policy) safe_iter = GreenthreadSafeIterator(node_iter) # Sending the request concurrently to all nodes, and responding # with the first response isn't something useful for EC as all # nodes contain different fragments. Also EC has implemented it's # own specific implementation of concurrent gets to ec_ndata nodes. # So we don't need to worry about plumbing and sending a # concurrency value to ResumingGetter. with ContextPool(policy.ec_ndata) as pool: pile = GreenAsyncPile(pool) buckets = ECGetResponseCollection(policy) node_iter.set_node_provider(buckets.provide_alternate_node) # include what may well be an empty X-Backend-Fragment-Preferences # header from the buckets.get_extra_headers to let the object # server know that it is ok to return non-durable fragments for _junk in range(policy.ec_ndata): pile.spawn(self._fragment_GET_request, req, safe_iter, partition, policy, buckets.get_extra_headers) bad_bucket = ECGetResponseBucket(policy, None) bad_bucket.set_durable() best_bucket = None extra_requests = 0 # max_extra_requests is an arbitrary hard limit for spawning extra # getters in case some unforeseen scenario, or a misbehaving object # server, causes us to otherwise make endless requests e.g. if an # object server were to ignore frag_prefs and always respond with # a frag that is already in a bucket. Now we're assuming it should # be limit at most 2 * replicas. max_extra_requests = ( (policy.object_ring.replica_count * 2) - policy.ec_ndata) for get, parts_iter in pile: if get.last_status is None: # We may have spawned getters that find the node iterator # has been exhausted. Ignore them. 
# TODO: turns out that node_iter.nodes_left can bottom # out at >0 when number of devs in ring is < 2* replicas, # which definitely happens in tests and results in status # of None. We should fix that but keep this guard because # there is also a race between testing nodes_left/spawning # a getter and an existing getter calling next(node_iter). continue try: if is_success(get.last_status): # 2xx responses are managed by a response collection buckets.add_response(get, parts_iter) else: # all other responses are lumped into a single bucket bad_bucket.add_response(get, parts_iter) except ValueError as err: self.app.logger.error( _("Problem with fragment response: %s"), err) shortfall = bad_bucket.shortfall best_bucket = buckets.best_bucket if best_bucket: shortfall = min(best_bucket.shortfall, shortfall) if (extra_requests < max_extra_requests and shortfall > pile._pending and (node_iter.nodes_left > 0 or buckets.has_alternate_node())): # we need more matching responses to reach ec_ndata # than we have pending gets, as long as we still have # nodes in node_iter we can spawn another extra_requests += 1 pile.spawn(self._fragment_GET_request, req, safe_iter, partition, policy, buckets.get_extra_headers) req.range = orig_range if best_bucket and best_bucket.shortfall <= 0: # headers can come from any of the getters resp_headers = best_bucket.headers resp_headers.pop('Content-Range', None) eccl = resp_headers.get('X-Object-Sysmeta-Ec-Content-Length') obj_length = int(eccl) if eccl is not None else None # This is only true if we didn't get a 206 response, but # that's the only time this is used anyway. fa_length = int(resp_headers['Content-Length']) app_iter = ECAppIter( req.swift_entity_path, policy, [parts_iter for _getter, parts_iter in best_bucket.get_responses()], range_specs, fa_length, obj_length, self.app.logger) resp = Response( request=req, conditional_response=True, app_iter=app_iter) update_headers(resp, resp_headers) try: app_iter.kickoff(req, resp) except HTTPException as err_resp: # catch any HTTPException response here so that we can # process response headers uniformly in _fix_response resp = err_resp else: # TODO: we can get here if all buckets are successful but none # have ec_ndata getters, so bad_bucket may have no gets and we will # return a 503 when a 404 may be more appropriate. We can also get # here with less than ec_ndata 416's and may then return a 416 # which is also questionable because a non-range get for same # object would return 404 or 503. statuses = [] reasons = [] bodies = [] headers = [] for getter, _parts_iter in bad_bucket.get_responses(): statuses.extend(getter.statuses) reasons.extend(getter.reasons) bodies.extend(getter.bodies) headers.extend(getter.source_headers) resp = self.best_response( req, statuses, reasons, bodies, 'Object', headers=headers) self._fix_response(req, resp) return resp def _fix_response(self, req, resp): # EC fragment archives each have different bytes, hence different # etags. However, they all have the original object's etag stored in # sysmeta, so we copy that here (if it exists) so the client gets it. resp.headers['Etag'] = resp.headers.get('X-Object-Sysmeta-Ec-Etag') # We're about to invoke conditional response checking so set the # correct conditional etag from wherever X-Backend-Etag-Is-At points, # if it exists at all. 
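        # Illustrative round trip (header values made up; the helpers
        # are the real ones from swift.common.request_helpers):
        #
        #   update_etag_is_at_header(req, 'X-Object-Sysmeta-Ec-Etag')
        #       # done in _get_or_head_response() above
        #   # backend response headers:
        #   #   Etag: "<md5 of this fragment archive>"
        #   #   X-Object-Sysmeta-Ec-Etag: <md5 of the original object>
        #   resolve_etag_is_at_header(req, resp.headers)
        #       # -> <md5 of the original object>, so If-Match /
        #       #    If-None-Match compare against the whole-object
        #       #    etag rather than a per-fragment-archive etag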
resp._conditional_etag = resolve_etag_is_at_header(req, resp.headers) if (is_success(resp.status_int) or is_redirection(resp.status_int) or resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE): resp.accept_ranges = 'bytes' if is_success(resp.status_int): resp.headers['Content-Length'] = resp.headers.get( 'X-Object-Sysmeta-Ec-Content-Length') resp.fix_conditional_response() if resp.status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE: resp.headers['Content-Range'] = 'bytes */%s' % resp.headers[ 'X-Object-Sysmeta-Ec-Content-Length'] def _make_putter(self, node, part, req, headers): return MIMEPutter.connect( node, part, req.swift_entity_path, headers, conn_timeout=self.app.conn_timeout, node_timeout=self.app.node_timeout, logger=self.app.logger, need_multiphase=True) def _determine_chunk_destinations(self, putters, policy): """ Given a list of putters, return a dict where the key is the putter and the value is the frag index to use. This is done so that we line up handoffs using the same frag index (in the primary part list) as the primary that the handoff is standing in for. This lets erasure-code fragment archives wind up on the preferred local primary nodes when possible. :param putters: a list of swift.proxy.controllers.obj.MIMEPutter instance :param policy: A policy instance which should be one of ECStoragePolicy """ # Give each putter a "frag index": the index of the # transformed chunk that we'll send to it. # # For primary nodes, that's just its index (primary 0 gets # chunk 0, primary 1 gets chunk 1, and so on). For handoffs, # we assign the chunk index of a missing primary. handoff_conns = [] putter_to_frag_index = {} for p in putters: if p.node_index is not None: putter_to_frag_index[p] = policy.get_backend_index( p.node_index) else: handoff_conns.append(p) # Note: we may have more holes than handoffs. This is okay; it # just means that we failed to connect to one or more storage # nodes. Holes occur when a storage node is down, in which # case the connection is not replaced, and when a storage node # returns 507, in which case a handoff is used to replace it. # lack_list is a dict of list to keep hole indexes # e.g. if we have 2 holes for frag index 0 with ec_duplication_factor=2 # lack_list is like {0: [0], 1: [0]}, and then, if 1 hole found # for frag index 1, lack_list will be {0: [0, 1], 1: [0]}. # After that, holes will be filled from bigger key # (i.e. 1:[0] at first) # Grouping all missing fragment indexes for each frag_index available_indexes = putter_to_frag_index.values() lack_list = collections.defaultdict(list) for frag_index in range(policy.ec_n_unique_fragments): # Set the missing index to lack_list available_count = available_indexes.count(frag_index) # N.B. it should be duplication_factor >= lack >= 0 lack = policy.ec_duplication_factor - available_count # now we are missing one or more nodes to store the frag index for lack_tier in range(lack): lack_list[lack_tier].append(frag_index) # Extract the lack_list to a flat list holes = [] for lack_tier, indexes in sorted(lack_list.items(), reverse=True): holes.extend(indexes) # Fill putter_to_frag_index list with the hole list for hole, p in zip(holes, handoff_conns): putter_to_frag_index[p] = hole return putter_to_frag_index def _transfer_data(self, req, policy, data_source, putters, nodes, min_conns, etag_hasher): """ Transfer data for an erasure coded object. 
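Illustrative driving of the chunk_transformer() coroutine used below
(``policy`` being the EC storage policy; chunk sizes made up)::

    transform = chunk_transformer(policy)
    transform.send(None)            # prime the coroutine
    frags = transform.send(chunk)   # None until a full segment buffers
    frags = transform.send('')      # flush any buffered remainder

Each non-None result is a list of policy.ec_n_unique_fragments
strings, one per fragment archive.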
This method was added in the PUT method extraction change """ bytes_transferred = 0 chunk_transform = chunk_transformer(policy) chunk_transform.send(None) frag_hashers = collections.defaultdict(md5) def send_chunk(chunk): # Note: there's two different hashers in here. etag_hasher is # hashing the original object so that we can validate the ETag # that the client sent (and etag_hasher is None if the client # didn't send one). The hasher in frag_hashers is hashing the # fragment archive being sent to the client; this lets us guard # against data corruption on the network between proxy and # object server. if etag_hasher: etag_hasher.update(chunk) backend_chunks = chunk_transform.send(chunk) if backend_chunks is None: # If there's not enough bytes buffered for erasure-encoding # or whatever we're doing, the transform will give us None. return updated_frag_indexes = set() for putter in list(putters): frag_index = putter_to_frag_index[putter] backend_chunk = backend_chunks[frag_index] if not putter.failed: # N.B. same frag_index will appear when using # ec_duplication_factor >= 2. So skip to feed the chunk # to hasher if the frag was updated already. if frag_index not in updated_frag_indexes: frag_hashers[frag_index].update(backend_chunk) updated_frag_indexes.add(frag_index) putter.send_chunk(backend_chunk) else: putter.close() putters.remove(putter) self._check_min_conn( req, putters, min_conns, msg=_('Object PUT exceptions during send, ' '%(conns)s/%(nodes)s required connections')) try: with ContextPool(len(putters)) as pool: # build our putter_to_frag_index dict to place handoffs in the # same part nodes index as the primaries they are covering putter_to_frag_index = self._determine_chunk_destinations( putters, policy) for putter in putters: putter.spawn_sender_greenthread( pool, self.app.put_queue_depth, self.app.node_timeout, self.app.exception_occurred) while True: with ChunkReadTimeout(self.app.client_timeout): try: chunk = next(data_source) except StopIteration: break bytes_transferred += len(chunk) if bytes_transferred > constraints.MAX_FILE_SIZE: raise HTTPRequestEntityTooLarge(request=req) send_chunk(chunk) if req.content_length and ( bytes_transferred < req.content_length): req.client_disconnect = True self.app.logger.warning( _('Client disconnected without sending enough data')) self.app.logger.increment('client_disconnects') raise HTTPClientDisconnect(request=req) computed_etag = (etag_hasher.hexdigest() if etag_hasher else None) received_etag = req.headers.get( 'etag', '').strip('"') if (computed_etag and received_etag and computed_etag != received_etag): raise HTTPUnprocessableEntity(request=req) send_chunk('') # flush out any buffered data footers = self._get_footers(req) received_etag = footers.get( 'etag', '').strip('"') if (computed_etag and received_etag and computed_etag != received_etag): raise HTTPUnprocessableEntity(request=req) # Remove any EC reserved metadata names from footers footers = {(k, v) for k, v in footers.items() if not k.lower().startswith('x-object-sysmeta-ec-')} for putter in putters: frag_index = putter_to_frag_index[putter] # Update any footers set by middleware with EC footers trail_md = trailing_metadata( policy, etag_hasher, bytes_transferred, frag_index) trail_md.update(footers) # Etag footer must always be hash of what we sent trail_md['Etag'] = frag_hashers[frag_index].hexdigest() putter.end_of_object_data(footer_metadata=trail_md) for putter in putters: putter.wait() # for storage policies requiring 2-phase commit (e.g. 
# erasure coding), enforce >= 'quorum' number of # 100-continue responses - this indicates successful # object data and metadata commit and is a necessary # condition to be met before starting 2nd PUT phase final_phase = False statuses, reasons, bodies, _junk = \ self._get_put_responses( req, putters, len(nodes), final_phase=final_phase, min_responses=min_conns) if not self.have_quorum( statuses, len(nodes), quorum=min_conns): self.app.logger.error( _('Not enough object servers ack\'ed (got %d)'), statuses.count(HTTP_CONTINUE)) raise HTTPServiceUnavailable(request=req) elif not self._have_adequate_informational( statuses, min_conns): resp = self.best_response(req, statuses, reasons, bodies, _('Object PUT'), quorum_size=min_conns) if is_client_error(resp.status_int): # if 4xx occurred in this state it is absolutely # a bad conversation between proxy-server and # object-server (even if it's # HTTP_UNPROCESSABLE_ENTITY) so we should regard this # as HTTPServiceUnavailable. raise HTTPServiceUnavailable(request=req) else: # Other errors should use raw best_response raise resp # quorum achieved, start 2nd phase - send commit # confirmation to participating object servers # so they write a .durable state file indicating # a successful PUT for putter in putters: putter.send_commit_confirmation() for putter in putters: putter.wait() except ChunkReadTimeout as err: self.app.logger.warning( _('ERROR Client read timeout (%ss)'), err.seconds) self.app.logger.increment('client_timeouts') raise HTTPRequestTimeout(request=req) except ChunkReadError: req.client_disconnect = True self.app.logger.warning( _('Client disconnected without sending last chunk')) self.app.logger.increment('client_disconnects') raise HTTPClientDisconnect(request=req) except HTTPException: raise except Timeout: self.app.logger.exception( _('ERROR Exception causing client disconnect')) raise HTTPClientDisconnect(request=req) except Exception: self.app.logger.exception( _('ERROR Exception transferring data to object servers %s'), {'path': req.path}) raise HTTPInternalServerError(request=req) def _have_adequate_responses( self, statuses, min_responses, conditional_func): """ Given a list of statuses from several requests, determine if a satisfactory number of nodes have responded with 1xx or 2xx statuses to deem the transaction for a successful response to the client. :param statuses: list of statuses returned so far :param min_responses: minimal pass criterion for number of successes :param conditional_func: a callable function to check http status code :returns: True or False, depending on current number of successes """ if sum(1 for s in statuses if (conditional_func(s))) >= min_responses: return True return False def _have_adequate_successes(self, statuses, min_responses): """ Partial method of _have_adequate_responses for 2xx """ return self._have_adequate_responses( statuses, min_responses, is_success) def _have_adequate_informational(self, statuses, min_responses): """ Partial method of _have_adequate_responses for 1xx """ return self._have_adequate_responses( statuses, min_responses, is_informational) def _have_adequate_put_responses(self, statuses, num_nodes, min_responses): # For an EC PUT we require a quorum of responses with success statuses # in order to move on to next phase of PUT request handling without # having to wait for *all* responses. # TODO: this implies that in the first phase of the backend PUTs when # we are actually expecting 1xx responses that we will end up waiting # for *all* responses. 
That seems inefficient since we only need a # quorum of 1xx responses to proceed. return self._have_adequate_successes(statuses, min_responses) def _store_object(self, req, data_source, nodes, partition, outgoing_headers): """ Store an erasure coded object. """ policy_index = int(req.headers.get('X-Backend-Storage-Policy-Index')) policy = POLICIES.get_by_index(policy_index) expected_frag_size = None if req.content_length: # TODO: PyECLib <= 1.2.0 looks to return the segment info # different from the input for aligned data efficiency but # Swift never does. So calculate the fragment length Swift # will actually send to object sever by making two different # get_segment_info calls (until PyECLib fixed). # policy.fragment_size makes the call using segment size, # and the next call is to get info for the last segment # get number of fragments except the tail - use truncation // num_fragments = req.content_length // policy.ec_segment_size expected_frag_size = policy.fragment_size * num_fragments # calculate the tail fragment_size by hand and add it to # expected_frag_size last_segment_size = req.content_length % policy.ec_segment_size if last_segment_size: last_info = policy.pyeclib_driver.get_segment_info( last_segment_size, policy.ec_segment_size) expected_frag_size += last_info['fragment_size'] for headers in outgoing_headers: headers['X-Backend-Obj-Content-Length'] = expected_frag_size # the object server will get different bytes, so these # values do not apply. headers.pop('Content-Length', None) headers.pop('Etag', None) # Since the request body sent from client -> proxy is not # the same as the request body sent proxy -> object, we # can't rely on the object-server to do the etag checking - # so we have to do it here. etag_hasher = md5() min_conns = policy.quorum putters = self._get_put_connections( req, nodes, partition, outgoing_headers, policy) try: # check that a minimum number of connections were established and # meet all the correct conditions set in the request self._check_failure_put_connections(putters, req, min_conns) self._transfer_data(req, policy, data_source, putters, nodes, min_conns, etag_hasher) # The durable state will propagate in a replicated fashion; if # one fragment is durable then the reconstructor will spread the # durable status around. # In order to avoid successfully writing an object, but refusing # to serve it on a subsequent GET because don't have enough # durable data fragments - we require the same number of durable # writes as quorum fragment writes. If object servers are in the # future able to serve their non-durable fragment archives we may # be able to reduce this quorum count if needed. # ignore response etags statuses, reasons, bodies, _etags = \ self._get_put_responses(req, putters, len(nodes), final_phase=True, min_responses=min_conns) except HTTPException as resp: return resp finally: for putter in putters: putter.close() etag = etag_hasher.hexdigest() resp = self.best_response(req, statuses, reasons, bodies, _('Object PUT'), etag=etag, quorum_size=min_conns) resp.last_modified = math.ceil( float(Timestamp(req.headers['X-Timestamp']))) return resp swift-2.17.1/swift/proxy/controllers/base.py0000666000175000017500000024131713435012015021133 0ustar zuulzuul00000000000000# Copyright (c) 2010-2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # NOTE: swift_conn # You'll see swift_conn passed around a few places in this file. This is the # source bufferedhttp connection of whatever it is attached to. # It is used when early termination of reading from the connection should # happen, such as when a range request is satisfied but there's still more the # source connection would like to send. To prevent having to read all the data # that could be left, the source connection can be .close() and then reads # commence to empty out any buffers. # These shenanigans are to ensure all related objects can be garbage # collected. We've seen objects hang around forever otherwise. from six.moves.urllib.parse import quote import os import time import functools import inspect import itertools import operator from copy import deepcopy from sys import exc_info from swift import gettext_ as _ from eventlet import sleep from eventlet.timeout import Timeout import six from swift.common.wsgi import make_pre_authed_env from swift.common.utils import Timestamp, config_true_value, \ public, split_path, list_from_csv, GreenthreadSafeIterator, \ GreenAsyncPile, quorum_size, parse_content_type, \ document_iters_to_http_response_body from swift.common.bufferedhttp import http_connect from swift.common import constraints from swift.common.exceptions import ChunkReadTimeout, ChunkWriteTimeout, \ ConnectionTimeout, RangeAlreadyComplete from swift.common.header_key_dict import HeaderKeyDict from swift.common.http import is_informational, is_success, is_redirection, \ is_server_error, HTTP_OK, HTTP_PARTIAL_CONTENT, HTTP_MULTIPLE_CHOICES, \ HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVICE_UNAVAILABLE, \ HTTP_INSUFFICIENT_STORAGE, HTTP_UNAUTHORIZED, HTTP_CONTINUE, HTTP_GONE from swift.common.swob import Request, Response, Range, \ HTTPException, HTTPRequestedRangeNotSatisfiable, HTTPServiceUnavailable, \ status_map from swift.common.request_helpers import strip_sys_meta_prefix, \ strip_user_meta_prefix, is_user_meta, is_sys_meta, is_sys_or_user_meta, \ http_response_to_document_iters, is_object_transient_sysmeta, \ strip_object_transient_sysmeta_prefix from swift.common.storage_policy import POLICIES DEFAULT_RECHECK_ACCOUNT_EXISTENCE = 60 # seconds DEFAULT_RECHECK_CONTAINER_EXISTENCE = 60 # seconds def update_headers(response, headers): """ Helper function to update headers in the response. :param response: swob.Response object :param headers: dictionary headers """ if hasattr(headers, 'items'): headers = headers.items() for name, value in headers: if name == 'etag': response.headers[name] = value.replace('"', '') elif name.lower() not in ( 'date', 'content-length', 'content-type', 'connection', 'x-put-timestamp', 'x-delete-after'): response.headers[name] = value def source_key(resp): """ Provide the timestamp of the swift http response as a floating point value. Used as a sort key. 
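Example (illustrative header values): for a response carrying both
``X-Backend-Data-Timestamp: 1500000000.00000`` and
``X-Timestamp: 1499999999.00000``, the data timestamp takes
precedence and the key is Timestamp('1500000000.00000').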
:param resp: bufferedhttp response object """ return Timestamp(resp.getheader('x-backend-data-timestamp') or resp.getheader('x-backend-timestamp') or resp.getheader('x-put-timestamp') or resp.getheader('x-timestamp') or 0) def delay_denial(func): """ Decorator to declare which methods should have any swift.authorize call delayed. This is so the method can load the Request object up with additional information that may be needed by the authorization system. :param func: function for which authorization will be delayed """ func.delay_denial = True return func def _prep_headers_to_info(headers, server_type): """ Helper method that iterates once over a dict of headers, converting all keys to lower case and separating into subsets containing user metadata, system metadata and other headers. """ meta = {} sysmeta = {} other = {} for key, val in dict(headers).items(): lkey = key.lower() if is_user_meta(server_type, lkey): meta[strip_user_meta_prefix(server_type, lkey)] = val elif is_sys_meta(server_type, lkey): sysmeta[strip_sys_meta_prefix(server_type, lkey)] = val else: other[lkey] = val return other, meta, sysmeta def headers_to_account_info(headers, status_int=HTTP_OK): """ Construct a cacheable dict of account info based on response headers. """ headers, meta, sysmeta = _prep_headers_to_info(headers, 'account') account_info = { 'status': status_int, # 'container_count' anomaly: # Previous code sometimes expects an int sometimes a string # Current code aligns to str and None, yet translates to int in # deprecated functions as needed 'container_count': headers.get('x-account-container-count'), 'total_object_count': headers.get('x-account-object-count'), 'bytes': headers.get('x-account-bytes-used'), 'storage_policies': {policy.idx: { 'container_count': int(headers.get( 'x-account-storage-policy-{}-container-count'.format( policy.name), 0)), 'object_count': int(headers.get( 'x-account-storage-policy-{}-object-count'.format( policy.name), 0)), 'bytes': int(headers.get( 'x-account-storage-policy-{}-bytes-used'.format( policy.name), 0))} for policy in POLICIES }, 'meta': meta, 'sysmeta': sysmeta, } if is_success(status_int): account_info['account_really_exists'] = not config_true_value( headers.get('x-backend-fake-account-listing')) return account_info def headers_to_container_info(headers, status_int=HTTP_OK): """ Construct a cacheable dict of container info based on response headers. """ headers, meta, sysmeta = _prep_headers_to_info(headers, 'container') return { 'status': status_int, 'read_acl': headers.get('x-container-read'), 'write_acl': headers.get('x-container-write'), 'sync_key': headers.get('x-container-sync-key'), 'object_count': headers.get('x-container-object-count'), 'bytes': headers.get('x-container-bytes-used'), 'versions': headers.get('x-versions-location'), 'storage_policy': headers.get('x-backend-storage-policy-index', '0'), 'cors': { 'allow_origin': meta.get('access-control-allow-origin'), 'expose_headers': meta.get('access-control-expose-headers'), 'max_age': meta.get('access-control-max-age') }, 'meta': meta, 'sysmeta': sysmeta, } def headers_to_object_info(headers, status_int=HTTP_OK): """ Construct a cacheable dict of object info based on response headers. 
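
    For example (an illustrative sketch; the header values are invented)::

        headers_to_object_info({'Content-Length': '1024',
                                'Content-Type': 'text/plain',
                                'X-Object-Meta-Color': 'blue'})

    returns a dict whose ``'length'`` is ``'1024'``, whose ``'type'`` is
    ``'text/plain'`` and whose ``'meta'`` is ``{'color': 'blue'}``.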
""" headers, meta, sysmeta = _prep_headers_to_info(headers, 'object') transient_sysmeta = {} for key, val in six.iteritems(headers): if is_object_transient_sysmeta(key): key = strip_object_transient_sysmeta_prefix(key.lower()) transient_sysmeta[key] = val info = {'status': status_int, 'length': headers.get('content-length'), 'type': headers.get('content-type'), 'etag': headers.get('etag'), 'meta': meta, 'sysmeta': sysmeta, 'transient_sysmeta': transient_sysmeta } return info def cors_validation(func): """ Decorator to check if the request is a CORS request and if so, if it's valid. :param func: function to check """ @functools.wraps(func) def wrapped(*a, **kw): controller = a[0] req = a[1] # The logic here was interpreted from # http://www.w3.org/TR/cors/#resource-requests # Is this a CORS request? req_origin = req.headers.get('Origin', None) if req_origin: # Yes, this is a CORS request so test if the origin is allowed container_info = \ controller.container_info(controller.account_name, controller.container_name, req) cors_info = container_info.get('cors', {}) # Call through to the decorated method resp = func(*a, **kw) if controller.app.strict_cors_mode and \ not controller.is_origin_allowed(cors_info, req_origin): return resp # Expose, # - simple response headers, # http://www.w3.org/TR/cors/#simple-response-header # - swift specific: etag, x-timestamp, x-trans-id # - headers provided by the operator in cors_expose_headers # - user metadata headers # - headers provided by the user in # x-container-meta-access-control-expose-headers if 'Access-Control-Expose-Headers' not in resp.headers: expose_headers = set([ 'cache-control', 'content-language', 'content-type', 'expires', 'last-modified', 'pragma', 'etag', 'x-timestamp', 'x-trans-id', 'x-openstack-request-id']) expose_headers.update(controller.app.cors_expose_headers) for header in resp.headers: if header.startswith('X-Container-Meta') or \ header.startswith('X-Object-Meta'): expose_headers.add(header.lower()) if cors_info.get('expose_headers'): expose_headers = expose_headers.union( [header_line.strip().lower() for header_line in cors_info['expose_headers'].split(' ') if header_line.strip()]) resp.headers['Access-Control-Expose-Headers'] = \ ', '.join(expose_headers) # The user agent won't process the response if the Allow-Origin # header isn't included if 'Access-Control-Allow-Origin' not in resp.headers: if cors_info['allow_origin'] and \ cors_info['allow_origin'].strip() == '*': resp.headers['Access-Control-Allow-Origin'] = '*' else: resp.headers['Access-Control-Allow-Origin'] = req_origin return resp else: # Not a CORS request so make the call as normal return func(*a, **kw) return wrapped def get_object_info(env, app, path=None, swift_source=None): """ Get the info structure for an object, based on env and app. This is useful to middlewares. .. note:: This call bypasses auth. Success does not imply that the request has authorization to the object. """ (version, account, container, obj) = \ split_path(path or env['PATH_INFO'], 4, 4, True) info = _get_object_info(app, env, account, container, obj, swift_source=swift_source) if info: info = deepcopy(info) else: info = headers_to_object_info({}, 0) for field in ('length',): if info.get(field) is None: info[field] = 0 else: info[field] = int(info[field]) return info def get_container_info(env, app, swift_source=None): """ Get the info structure for a container, based on env and app. This is useful to middlewares. .. note:: This call bypasses auth. 
Success does not imply that the request has authorization to the container. """ (version, account, container, unused) = \ split_path(env['PATH_INFO'], 3, 4, True) # Check in environment cache and in memcache (in that order) info = _get_info_from_caches(app, env, account, container) if not info: # Cache miss; go HEAD the container and populate the caches env.setdefault('swift.infocache', {}) # Before checking the container, make sure the account exists. # # If it is an autocreateable account, just assume it exists; don't # HEAD the account, as a GET or HEAD response for an autocreateable # account is successful whether the account actually has .db files # on disk or not. is_autocreate_account = account.startswith( getattr(app, 'auto_create_account_prefix', '.')) if not is_autocreate_account: account_info = get_account_info(env, app, swift_source) if not account_info or not is_success(account_info['status']): return headers_to_container_info({}, 0) req = _prepare_pre_auth_info_request( env, ("/%s/%s/%s" % (version, account, container)), (swift_source or 'GET_CONTAINER_INFO')) resp = req.get_response(app) # Check in infocache to see if the proxy (or anyone else) already # populated the cache for us. If they did, just use what's there. # # See similar comment in get_account_info() for justification. info = _get_info_from_infocache(env, account, container) if info is None: info = set_info_cache(app, env, account, container, resp) if info: info = deepcopy(info) # avoid mutating what's in swift.infocache else: info = headers_to_container_info({}, 0) # Old data format in memcache immediately after a Swift upgrade; clean # it up so consumers of get_container_info() aren't exposed to it. if 'object_count' not in info and 'container_size' in info: info['object_count'] = info.pop('container_size') for field in ('storage_policy', 'bytes', 'object_count'): if info.get(field) is None: info[field] = 0 else: info[field] = int(info[field]) return info def get_account_info(env, app, swift_source=None): """ Get the info structure for an account, based on env and app. This is useful to middlewares. .. note:: This call bypasses auth. Success does not imply that the request has authorization to the account. :raises ValueError: when path doesn't contain an account """ (version, account, _junk, _junk) = \ split_path(env['PATH_INFO'], 2, 4, True) # Check in environment cache and in memcache (in that order) info = _get_info_from_caches(app, env, account) # Cache miss; go HEAD the account and populate the caches if not info: env.setdefault('swift.infocache', {}) req = _prepare_pre_auth_info_request( env, "/%s/%s" % (version, account), (swift_source or 'GET_ACCOUNT_INFO')) resp = req.get_response(app) # Check in infocache to see if the proxy (or anyone else) already # populated the cache for us. If they did, just use what's there. # # The point of this is to avoid setting the value in memcached # twice. Otherwise, we're needlessly sending requests across the # network. # # If the info didn't make it into the cache, we'll compute it from # the response and populate the cache ourselves. # # Note that this is taking "exists in infocache" to imply "exists in # memcache". That's because we're trying to avoid superfluous # network traffic, and checking in memcache prior to setting in # memcache would defeat the purpose. 
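        # Illustrative flow (a sketch, not upstream code): a middleware
        # calling get_account_info(env, app) twice while handling a single
        # request triggers at most one backend HEAD; the second call is
        # answered from env['swift.infocache']:
        #
        #   info1 = get_account_info(env, app)  # may HEAD the account
        #   info2 = get_account_info(env, app)  # served from swift.infocache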
info = _get_info_from_infocache(env, account) if info is None: info = set_info_cache(app, env, account, None, resp) if info: info = info.copy() # avoid mutating what's in swift.infocache else: info = headers_to_account_info({}, 0) for field in ('container_count', 'bytes', 'total_object_count'): if info.get(field) is None: info[field] = 0 else: info[field] = int(info[field]) return info def get_cache_key(account, container=None, obj=None): """ Get the keys for both memcache and env['swift.infocache'] (cache_key) where info about accounts, containers, and objects is cached :param account: The name of the account :param container: The name of the container (or None if account) :param obj: The name of the object (or None if account or container) :returns: a string cache_key """ if obj: if not (account and container): raise ValueError('Object cache key requires account and container') cache_key = 'object/%s/%s/%s' % (account, container, obj) elif container: if not account: raise ValueError('Container cache key requires account') cache_key = 'container/%s/%s' % (account, container) else: cache_key = 'account/%s' % account # Use a unique environment cache key per account and one container. # This allows caching both account and container and ensures that when we # copy this env to form a new request, it won't accidentally reuse the # old container or account info return cache_key def set_info_cache(app, env, account, container, resp): """ Cache info in both memcache and env. :param app: the application object :param account: the unquoted account name :param container: the unquoted container name or None :param resp: the response received or None if info cache should be cleared :returns: the info that was placed into the cache, or None if the request status was not in (404, 410, 2xx). """ infocache = env.setdefault('swift.infocache', {}) cache_time = None if container and resp: cache_time = int(resp.headers.get( 'X-Backend-Recheck-Container-Existence', DEFAULT_RECHECK_CONTAINER_EXISTENCE)) elif resp: cache_time = int(resp.headers.get( 'X-Backend-Recheck-Account-Existence', DEFAULT_RECHECK_ACCOUNT_EXISTENCE)) cache_key = get_cache_key(account, container) if resp: if resp.status_int in (HTTP_NOT_FOUND, HTTP_GONE): cache_time *= 0.1 elif not is_success(resp.status_int): cache_time = None # Next actually set both memcache and the env cache memcache = getattr(app, 'memcache', None) or env.get('swift.cache') if cache_time is None: infocache.pop(cache_key, None) if memcache: memcache.delete(cache_key) return if container: info = headers_to_container_info(resp.headers, resp.status_int) else: info = headers_to_account_info(resp.headers, resp.status_int) if memcache: memcache.set(cache_key, info, time=cache_time) infocache[cache_key] = info return info def set_object_info_cache(app, env, account, container, obj, resp): """ Cache object info in the WSGI environment, but not in memcache. Caching in memcache would lead to cache pressure and mass evictions due to the large number of objects in a typical Swift cluster. This is a per-request cache only. 
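
    For example (an illustrative sketch with made-up names): after a GET of
    ``/v1/AUTH_test/c/o``, the response headers are distilled by
    headers_to_object_info() and stored under the key
    ``'object/AUTH_test/c/o'`` in ``env['swift.infocache']``.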
    :param app: the application object
    :param env: the environment used by the current request
    :param account: the unquoted account name
    :param container: the unquoted container name
    :param obj: the unquoted object name
    :param resp: a GET or HEAD response received from an object server, or
                 None if info cache should be cleared
    :returns: the object info
    """
    cache_key = get_cache_key(account, container, obj)

    if 'swift.infocache' in env and not resp:
        env['swift.infocache'].pop(cache_key, None)
        return

    info = headers_to_object_info(resp.headers, resp.status_int)
    env.setdefault('swift.infocache', {})[cache_key] = info
    return info


def clear_info_cache(app, env, account, container=None):
    """
    Clear the cached info in both memcache and env

    :param app: the application object
    :param env: the WSGI environment
    :param account: the account name
    :param container: the container name, or None if clearing info for an
                      account
    """
    set_info_cache(app, env, account, container, None)


def _get_info_from_infocache(env, account, container=None):
    """
    Get cached account or container information from request-environment
    cache (swift.infocache).

    :param env: the environment used by the current request
    :param account: the account name
    :param container: the container name

    :returns: a dictionary of cached info on cache hit, None on miss
    """
    cache_key = get_cache_key(account, container)
    if 'swift.infocache' in env and cache_key in env['swift.infocache']:
        return env['swift.infocache'][cache_key]
    return None


def _get_info_from_memcache(app, env, account, container=None):
    """
    Get cached account or container information from memcache

    :param app: the application object
    :param env: the environment used by the current request
    :param account: the account name
    :param container: the container name

    :returns: a dictionary of cached info on cache hit, None on miss. Also
      returns None if memcache is not in use.
    """
    cache_key = get_cache_key(account, container)
    memcache = getattr(app, 'memcache', None) or env.get('swift.cache')
    if memcache:
        info = memcache.get(cache_key)
        if info:
            for key in info:
                if isinstance(info[key], six.text_type):
                    info[key] = info[key].encode("utf-8")
                elif isinstance(info[key], dict):
                    for subkey, value in info[key].items():
                        if isinstance(value, six.text_type):
                            info[key][subkey] = value.encode("utf-8")
            env.setdefault('swift.infocache', {})[cache_key] = info
        return info
    return None


def _get_info_from_caches(app, env, account, container=None):
    """
    Get the cached info from env or memcache (if used) in that order.
    Used for both account and container info.

    :param app: the application object
    :param env: the environment used by the current request
    :returns: the cached info or None if not cached
    """
    info = _get_info_from_infocache(env, account, container)
    if info is None:
        info = _get_info_from_memcache(app, env, account, container)
    return info


def _prepare_pre_auth_info_request(env, path, swift_source):
    """
    Prepares a pre authed request to obtain info using a HEAD.

    :param env: the environment used by the current request
    :param path: The unquoted request path
    :param swift_source: value for swift.source in WSGI environment
    :returns: the pre authed request
    """
    # Set the env for the pre_authed call without a query string
    newenv = make_pre_authed_env(env, 'HEAD', path, agent='Swift',
                                 query_string='', swift_source=swift_source)
    # This is a sub request for container metadata - drop the Origin header
    # from the request so that it is not treated as a CORS request.
newenv.pop('HTTP_ORIGIN', None) # ACLs are only shown to account owners, so let's make sure this request # looks like it came from the account owner. newenv['swift_owner'] = True # Note that Request.blank expects quoted path return Request.blank(quote(path), environ=newenv) def get_info(app, env, account, container=None, swift_source=None): """ Get info about accounts or containers Note: This call bypasses auth. Success does not imply that the request has authorization to the info. :param app: the application object :param env: the environment used by the current request :param account: The unquoted name of the account :param container: The unquoted name of the container (or None if account) :param swift_source: swift source logged for any subrequests made while retrieving the account or container info :returns: information about the specified entity in a dictionary. See get_account_info and get_container_info for details on what's in the dictionary. """ env.setdefault('swift.infocache', {}) if container: path = '/v1/%s/%s' % (account, container) path_env = env.copy() path_env['PATH_INFO'] = path return get_container_info(path_env, app, swift_source=swift_source) else: # account info path = '/v1/%s' % (account,) path_env = env.copy() path_env['PATH_INFO'] = path return get_account_info(path_env, app, swift_source=swift_source) def _get_object_info(app, env, account, container, obj, swift_source=None): """ Get the info about object Note: This call bypasses auth. Success does not imply that the request has authorization to the info. :param app: the application object :param env: the environment used by the current request :param account: The unquoted name of the account :param container: The unquoted name of the container :param obj: The unquoted name of the object :returns: the cached info or None if cannot be retrieved """ cache_key = get_cache_key(account, container, obj) info = env.get('swift.infocache', {}).get(cache_key) if info: return info # Not in cache, let's try the object servers path = '/v1/%s/%s/%s' % (account, container, obj) req = _prepare_pre_auth_info_request(env, path, swift_source) resp = req.get_response(app) # Unlike get_account_info() and get_container_info(), we don't save # things in memcache, so we can store the info without network traffic, # *and* the proxy doesn't cache object info for us, so there's no chance # that the object info would be in the environment. Thus, we just # compute the object info based on the response and stash it in # swift.infocache. info = set_object_info_cache(app, env, account, container, obj, resp) return info def close_swift_conn(src): """ Force close the http connection to the backend. :param src: the response from the backend """ try: # Since the backends set "Connection: close" in their response # headers, the response object (src) is solely responsible for the # socket. The connection object (src.swift_conn) has no references # to the socket, so calling its close() method does nothing, and # therefore we don't do it. # # Also, since calling the response's close() method might not # close the underlying socket but only decrement some # reference-counter, we have a special method here that really, # really kills the underlying socket with a close() syscall. src.nuke_from_orbit() # it's the only way to be sure except Exception: pass def bytes_to_skip(record_size, range_start): """ Assume an object is composed of N records, where the first N-1 are all the same size and the last is at most that large, but may be smaller. 
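
    For example (illustrative numbers only), with 4 MiB records a range
    starting at byte 5000000 must discard
    ``bytes_to_skip(4194304, 5000000) == 3388608`` bytes to land on the
    record boundary at byte 8388608.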
When a range request is made, it might start with a partial record. This must be discarded, lest the consumer get bad data. This is particularly true of suffix-byte-range requests, e.g. "Range: bytes=-12345" where the size of the object is unknown at the time the request is made. This function computes the number of bytes that must be discarded to ensure only whole records are yielded. Erasure-code decoding needs this. This function could have been inlined, but it took enough tries to get right that some targeted unit tests were desirable, hence its extraction. """ return (record_size - (range_start % record_size)) % record_size class ResumingGetter(object): def __init__(self, app, req, server_type, node_iter, partition, path, backend_headers, concurrency=1, client_chunk_size=None, newest=None, header_provider=None): self.app = app self.node_iter = node_iter self.server_type = server_type self.partition = partition self.path = path self.backend_headers = backend_headers self.client_chunk_size = client_chunk_size self.skip_bytes = 0 self.bytes_used_from_backend = 0 self.used_nodes = [] self.used_source_etag = '' self.concurrency = concurrency self.node = None self.header_provider = header_provider self.latest_404_timestamp = Timestamp(0) # stuff from request self.req_method = req.method self.req_path = req.path self.req_query_string = req.query_string if newest is None: self.newest = config_true_value(req.headers.get('x-newest', 'f')) else: self.newest = newest # populated when finding source self.statuses = [] self.reasons = [] self.bodies = [] self.source_headers = [] self.sources = [] # populated from response headers self.start_byte = self.end_byte = self.length = None def fast_forward(self, num_bytes): """ Will skip num_bytes into the current ranges. :params num_bytes: the number of bytes that have already been read on this request. This will change the Range header so that the next req will start where it left off. :raises HTTPRequestedRangeNotSatisfiable: if begin + num_bytes > end of range + 1 :raises RangeAlreadyComplete: if begin + num_bytes == end of range + 1 """ try: req_range = Range(self.backend_headers.get('Range')) except ValueError: req_range = None if req_range: begin, end = req_range.ranges[0] if begin is None: # this is a -50 range req (last 50 bytes of file) end -= num_bytes if end == 0: # we sent out exactly the first range's worth of bytes, so # we're done with it raise RangeAlreadyComplete() else: begin += num_bytes if end is not None and begin == end + 1: # we sent out exactly the first range's worth of bytes, so # we're done with it raise RangeAlreadyComplete() if end is not None and (begin > end or end < 0): raise HTTPRequestedRangeNotSatisfiable() req_range.ranges = [(begin, end)] + req_range.ranges[1:] self.backend_headers['Range'] = str(req_range) else: self.backend_headers['Range'] = 'bytes=%d-' % num_bytes # Reset so if we need to do this more than once, we don't double-up self.bytes_used_from_backend = 0 def pop_range(self): """ Remove the first byterange from our Range header. This is used after a byterange has been completely sent to the client; this way, should we need to resume the download from another object server, we do not re-fetch byteranges that the client already has. If we have no Range header, this is a no-op. 
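
        For example (an illustrative sketch): if the backend Range header
        is ``bytes=0-99,200-299`` when the first byterange completes,
        pop_range() rewrites it to ``bytes=200-299``; popping the last
        remaining byterange removes the header entirely.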
""" if 'Range' in self.backend_headers: try: req_range = Range(self.backend_headers['Range']) except ValueError: # there's a Range header, but it's garbage, so get rid of it self.backend_headers.pop('Range') return begin, end = req_range.ranges.pop(0) if len(req_range.ranges) > 0: self.backend_headers['Range'] = str(req_range) else: self.backend_headers.pop('Range') def learn_size_from_content_range(self, start, end, length): """ If client_chunk_size is set, makes sure we yield things starting on chunk boundaries based on the Content-Range header in the response. Sets our Range header's first byterange to the value learned from the Content-Range header in the response; if we were given a fully-specified range (e.g. "bytes=123-456"), this is a no-op. If we were given a half-specified range (e.g. "bytes=123-" or "bytes=-456"), then this changes the Range header to a semantically-equivalent one *and* it lets us resume on a proper boundary instead of just in the middle of a piece somewhere. """ if length == 0: return if self.client_chunk_size: self.skip_bytes = bytes_to_skip(self.client_chunk_size, start) if 'Range' in self.backend_headers: try: req_range = Range(self.backend_headers['Range']) new_ranges = [(start, end)] + req_range.ranges[1:] except ValueError: new_ranges = [(start, end)] else: new_ranges = [(start, end)] self.backend_headers['Range'] = ( "bytes=" + (",".join("%s-%s" % (s if s is not None else '', e if e is not None else '') for s, e in new_ranges))) def is_good_source(self, src): """ Indicates whether or not the request made to the backend found what it was looking for. :param src: the response from the backend :returns: True if found, False if not """ if self.server_type == 'Object' and src.status == 416: return True return is_success(src.status) or is_redirection(src.status) def response_parts_iter(self, req): source, node = self._get_source_and_node() it = None if source: it = self._get_response_parts_iter(req, node, source) return it def _get_response_parts_iter(self, req, node, source): # Someday we can replace this [mess] with python 3's "nonlocal" source = [source] node = [node] try: client_chunk_size = self.client_chunk_size node_timeout = self.app.node_timeout if self.server_type == 'Object': node_timeout = self.app.recoverable_node_timeout # This is safe; it sets up a generator but does not call next() # on it, so no IO is performed. parts_iter = [ http_response_to_document_iters( source[0], read_chunk_size=self.app.object_chunk_size)] def get_next_doc_part(): while True: try: # This call to next() performs IO when we have a # multipart/byteranges response; it reads the MIME # boundary and part headers. # # If we don't have a multipart/byteranges response, # but just a 200 or a single-range 206, then this # performs no IO, and either just returns source or # raises StopIteration. with ChunkReadTimeout(node_timeout): # if StopIteration is raised, it escapes and is # handled elsewhere start_byte, end_byte, length, headers, part = next( parts_iter[0]) return (start_byte, end_byte, length, headers, part) except ChunkReadTimeout: new_source, new_node = self._get_source_and_node() if new_source: self.app.exception_occurred( node[0], _('Object'), _('Trying to read during GET (retrying)')) # Close-out the connection as best as possible. if getattr(source[0], 'swift_conn', None): close_swift_conn(source[0]) source[0] = new_source node[0] = new_node # This is safe; it sets up a generator but does # not call next() on it, so no IO is performed. 
parts_iter[0] = http_response_to_document_iters( new_source, read_chunk_size=self.app.object_chunk_size) else: raise StopIteration() def iter_bytes_from_response_part(part_file): nchunks = 0 buf = '' while True: try: with ChunkReadTimeout(node_timeout): chunk = part_file.read(self.app.object_chunk_size) nchunks += 1 buf += chunk except ChunkReadTimeout: exc_type, exc_value, exc_traceback = exc_info() if self.newest or self.server_type != 'Object': six.reraise(exc_type, exc_value, exc_traceback) try: self.fast_forward(self.bytes_used_from_backend) except (HTTPException, ValueError): six.reraise(exc_type, exc_value, exc_traceback) except RangeAlreadyComplete: break buf = '' new_source, new_node = self._get_source_and_node() if new_source: self.app.exception_occurred( node[0], _('Object'), _('Trying to read during GET (retrying)')) # Close-out the connection as best as possible. if getattr(source[0], 'swift_conn', None): close_swift_conn(source[0]) source[0] = new_source node[0] = new_node # This is safe; it just sets up a generator but # does not call next() on it, so no IO is # performed. parts_iter[0] = http_response_to_document_iters( new_source, read_chunk_size=self.app.object_chunk_size) try: _junk, _junk, _junk, _junk, part_file = \ get_next_doc_part() except StopIteration: # Tried to find a new node from which to # finish the GET, but failed. There's # nothing more to do here. return else: six.reraise(exc_type, exc_value, exc_traceback) else: if buf and self.skip_bytes: if self.skip_bytes < len(buf): buf = buf[self.skip_bytes:] self.bytes_used_from_backend += self.skip_bytes self.skip_bytes = 0 else: self.skip_bytes -= len(buf) self.bytes_used_from_backend += len(buf) buf = '' if not chunk: if buf: with ChunkWriteTimeout( self.app.client_timeout): self.bytes_used_from_backend += len(buf) yield buf buf = '' break if client_chunk_size is not None: while len(buf) >= client_chunk_size: client_chunk = buf[:client_chunk_size] buf = buf[client_chunk_size:] with ChunkWriteTimeout( self.app.client_timeout): self.bytes_used_from_backend += \ len(client_chunk) yield client_chunk else: with ChunkWriteTimeout(self.app.client_timeout): self.bytes_used_from_backend += len(buf) yield buf buf = '' # This is for fairness; if the network is outpacing # the CPU, we'll always be able to read and write # data without encountering an EWOULDBLOCK, and so # eventlet will not switch greenthreads on its own. # We do it manually so that clients don't starve. # # The number 5 here was chosen by making stuff up. # It's not every single chunk, but it's not too big # either, so it seemed like it would probably be an # okay choice. # # Note that we may trampoline to other greenthreads # more often than once every 5 chunks, depending on # how blocking our network IO is; the explicit sleep # here simply provides a lower bound on the rate of # trampolining. 
if nchunks % 5 == 0: sleep() part_iter = None try: while True: start_byte, end_byte, length, headers, part = \ get_next_doc_part() self.learn_size_from_content_range( start_byte, end_byte, length) self.bytes_used_from_backend = 0 part_iter = iter_bytes_from_response_part(part) yield {'start_byte': start_byte, 'end_byte': end_byte, 'entity_length': length, 'headers': headers, 'part_iter': part_iter} self.pop_range() except StopIteration: req.environ['swift.non_client_disconnect'] = True finally: if part_iter: part_iter.close() except ChunkReadTimeout: self.app.exception_occurred(node[0], _('Object'), _('Trying to read during GET')) raise except ChunkWriteTimeout: self.app.logger.warning( _('Client did not read from proxy within %ss') % self.app.client_timeout) self.app.logger.increment('client_timeouts') except GeneratorExit: exc_type, exc_value, exc_traceback = exc_info() warn = True try: req_range = Range(self.backend_headers['Range']) except ValueError: req_range = None if req_range and len(req_range.ranges) == 1: begin, end = req_range.ranges[0] if end is not None and begin is not None: if end - begin + 1 == self.bytes_used_from_backend: warn = False if not req.environ.get('swift.non_client_disconnect') and warn: self.app.logger.warning(_('Client disconnected on read')) six.reraise(exc_type, exc_value, exc_traceback) except Exception: self.app.logger.exception(_('Trying to send to client')) raise finally: # Close-out the connection as best as possible. if getattr(source[0], 'swift_conn', None): close_swift_conn(source[0]) @property def last_status(self): if self.statuses: return self.statuses[-1] else: return None @property def last_headers(self): if self.source_headers: return HeaderKeyDict(self.source_headers[-1]) else: return None def _make_node_request(self, node, node_timeout, logger_thread_locals): self.app.logger.thread_locals = logger_thread_locals if node in self.used_nodes: return False req_headers = dict(self.backend_headers) # a request may be specialised with specific backend headers if self.header_provider: req_headers.update(self.header_provider()) start_node_timing = time.time() try: with ConnectionTimeout(self.app.conn_timeout): conn = http_connect( node['ip'], node['port'], node['device'], self.partition, self.req_method, self.path, headers=req_headers, query_string=self.req_query_string) self.app.set_node_timing(node, time.time() - start_node_timing) with Timeout(node_timeout): possible_source = conn.getresponse() # See NOTE: swift_conn at top of file about this. 
                possible_source.swift_conn = conn
        except (Exception, Timeout):
            self.app.exception_occurred(
                node, self.server_type,
                _('Trying to %(method)s %(path)s') %
                {'method': self.req_method, 'path': self.req_path})
            return False

        if self.is_good_source(possible_source):
            # 404 if we know we don't have a synced copy
            if not float(possible_source.getheader('X-PUT-Timestamp', 1)):
                self.statuses.append(HTTP_NOT_FOUND)
                self.reasons.append('')
                self.bodies.append('')
                self.source_headers.append([])
                close_swift_conn(possible_source)
            else:
                src_headers = dict(
                    (k.lower(), v) for k, v in
                    possible_source.getheaders())
                if self.used_source_etag and \
                        self.used_source_etag != src_headers.get(
                            'x-object-sysmeta-ec-etag',
                            src_headers.get('etag', '')).strip('"'):
                    self.statuses.append(HTTP_NOT_FOUND)
                    self.reasons.append('')
                    self.bodies.append('')
                    self.source_headers.append([])
                    return False

                # a possible source should only be added as a valid source
                # if its timestamp is newer than previously found tombstones
                ps_timestamp = Timestamp(
                    src_headers.get('x-backend-data-timestamp') or
                    src_headers.get('x-backend-timestamp') or
                    src_headers.get('x-put-timestamp') or
                    src_headers.get('x-timestamp') or 0)
                if ps_timestamp >= self.latest_404_timestamp:
                    self.statuses.append(possible_source.status)
                    self.reasons.append(possible_source.reason)
                    self.bodies.append(None)
                    self.source_headers.append(possible_source.getheaders())
                    self.sources.append((possible_source, node))
                    if not self.newest:  # one good source is enough
                        return True
        else:
            self.statuses.append(possible_source.status)
            self.reasons.append(possible_source.reason)
            self.bodies.append(possible_source.read())
            self.source_headers.append(possible_source.getheaders())

            # if 404, record the timestamp. If a good source shows up, its
            # timestamp will be compared to the latest 404.
            # For now checking only on objects, but future work could include
            # the same check for account and containers. See lp 1560574.
            if self.server_type == 'Object' and \
                    possible_source.status == HTTP_NOT_FOUND:
                hdrs = HeaderKeyDict(possible_source.getheaders())
                ts = Timestamp(hdrs.get('X-Backend-Timestamp', 0))
                if ts > self.latest_404_timestamp:
                    self.latest_404_timestamp = ts
            if possible_source.status == HTTP_INSUFFICIENT_STORAGE:
                self.app.error_limit(node, _('ERROR Insufficient Storage'))
            elif is_server_error(possible_source.status):
                self.app.error_occurred(
                    node, _('ERROR %(status)d %(body)s '
                            'From %(type)s Server') %
                    {'status': possible_source.status,
                     'body': self.bodies[-1][:1024],
                     'type': self.server_type})
        return False

    def _get_source_and_node(self):
        self.statuses = []
        self.reasons = []
        self.bodies = []
        self.source_headers = []
        self.sources = []

        nodes = GreenthreadSafeIterator(self.node_iter)

        node_timeout = self.app.node_timeout
        if self.server_type == 'Object' and not self.newest:
            node_timeout = self.app.recoverable_node_timeout

        pile = GreenAsyncPile(self.concurrency)

        for node in nodes:
            pile.spawn(self._make_node_request, node, node_timeout,
                       self.app.logger.thread_locals)
            _timeout = self.app.concurrency_timeout \
                if pile.inflight < self.concurrency else None
            if pile.waitfirst(_timeout):
                break
        else:
            # ran out of nodes, see if any stragglers will finish
            any(pile)

        # this helps weed out any success statuses that were found before a
        # 404 and added to the list in the case of x-newest.
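        # Illustrative scenario (a sketch, not upstream code): with
        # x-newest, if node A answered 200 with data timestamp t1 while
        # node B answered 404 with X-Backend-Timestamp t2 > t1 (a
        # tombstone), latest_404_timestamp becomes t2 and the stale source
        # from node A is dropped by the filter below.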
if self.sources: self.sources = [s for s in self.sources if source_key(s[0]) >= self.latest_404_timestamp] if self.sources: self.sources.sort(key=lambda s: source_key(s[0])) source, node = self.sources.pop() for src, _junk in self.sources: close_swift_conn(src) self.used_nodes.append(node) src_headers = dict( (k.lower(), v) for k, v in source.getheaders()) # Save off the source etag so that, if we lose the connection # and have to resume from a different node, we can be sure that # we have the same object (replication) or a fragment archive # from the same object (EC). Otherwise, if the cluster has two # versions of the same object, we might end up switching between # old and new mid-stream and giving garbage to the client. self.used_source_etag = src_headers.get( 'x-object-sysmeta-ec-etag', src_headers.get('etag', '')).strip('"') self.node = node return source, node return None, None class GetOrHeadHandler(ResumingGetter): def _make_app_iter(self, req, node, source): """ Returns an iterator over the contents of the source (via its read func). There is also quite a bit of cleanup to ensure garbage collection works and the underlying socket of the source is closed. :param req: incoming request object :param source: The httplib.Response object this iterator should read from. :param node: The node the source is reading from, for logging purposes. """ ct = source.getheader('Content-Type') if ct: content_type, content_type_attrs = parse_content_type(ct) is_multipart = content_type == 'multipart/byteranges' else: is_multipart = False boundary = "dontcare" if is_multipart: # we need some MIME boundary; fortunately, the object server has # furnished one for us, so we'll just re-use it boundary = dict(content_type_attrs)["boundary"] parts_iter = self._get_response_parts_iter(req, node, source) def add_content_type(response_part): response_part["content_type"] = \ HeaderKeyDict(response_part["headers"]).get("Content-Type") return response_part return document_iters_to_http_response_body( (add_content_type(pi) for pi in parts_iter), boundary, is_multipart, self.app.logger) def get_working_response(self, req): source, node = self._get_source_and_node() res = None if source: res = Response(request=req) res.status = source.status update_headers(res, source.getheaders()) if req.method == 'GET' and \ source.status in (HTTP_OK, HTTP_PARTIAL_CONTENT): res.app_iter = self._make_app_iter(req, node, source) # See NOTE: swift_conn at top of file about this. res.swift_conn = source.swift_conn if not res.environ: res.environ = {} res.environ['swift_x_timestamp'] = \ source.getheader('x-timestamp') res.accept_ranges = 'bytes' res.content_length = source.getheader('Content-Length') if source.getheader('Content-Type'): res.charset = None res.content_type = source.getheader('Content-Type') return res class NodeIter(object): """ Yields nodes for a ring partition, skipping over error limited nodes and stopping at the configurable number of nodes. If a node yielded subsequently gets error limited, an extra node will be yielded to take its place. Note that if you're going to iterate over this concurrently from multiple greenthreads, you'll want to use a swift.common.utils.GreenthreadSafeIterator to serialize access. Otherwise, you may get ValueErrors from concurrent access. (You also may not, depending on how logging is configured, the vagaries of socket IO and eventlet, and the phase of the moon.) 
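
    For example (an illustrative sketch)::

        node_iter = NodeIter(app, ring, partition)
        safe_iter = GreenthreadSafeIterator(node_iter)
        # each greenthread may now call next(safe_iter) safely

    where ``GreenthreadSafeIterator`` comes from ``swift.common.utils``.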
:param app: a proxy app :param ring: ring to get yield nodes from :param partition: ring partition to yield nodes for :param node_iter: optional iterable of nodes to try. Useful if you want to filter or reorder the nodes. :param policy: an instance of :class:`BaseStoragePolicy`. This should be None for an account or container ring. """ def __init__(self, app, ring, partition, node_iter=None, policy=None): self.app = app self.ring = ring self.partition = partition part_nodes = ring.get_part_nodes(partition) if node_iter is None: node_iter = itertools.chain( part_nodes, ring.get_more_nodes(partition)) num_primary_nodes = len(part_nodes) self.nodes_left = self.app.request_node_count(num_primary_nodes) self.expected_handoffs = self.nodes_left - num_primary_nodes # Use of list() here forcibly yanks the first N nodes (the primary # nodes) from node_iter, so the rest of its values are handoffs. self.primary_nodes = self.app.sort_nodes( list(itertools.islice(node_iter, num_primary_nodes)), policy=policy) self.handoff_iter = node_iter self._node_provider = None def __iter__(self): self._node_iter = self._node_gen() return self def log_handoffs(self, handoffs): """ Log handoff requests if handoff logging is enabled and the handoff was not expected. We only log handoffs when we've pushed the handoff count further than we would normally have expected under normal circumstances, that is (request_node_count - num_primaries), when handoffs goes higher than that it means one of the primaries must have been skipped because of error limiting before we consumed all of our nodes_left. """ if not self.app.log_handoffs: return extra_handoffs = handoffs - self.expected_handoffs if extra_handoffs > 0: self.app.logger.increment('handoff_count') self.app.logger.warning( 'Handoff requested (%d)' % handoffs) if (extra_handoffs == len(self.primary_nodes)): # all the primaries were skipped, and handoffs didn't help self.app.logger.increment('handoff_all_count') def set_node_provider(self, callback): """ Install a callback function that will be used during a call to next() to get an alternate node instead of returning the next node from the iterator. :param callback: A no argument function that should return a node dict or None. 
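
        For example (an illustrative sketch; ``preferred_node`` stands in
        for a hypothetical node dict)::

            node_iter = iter(NodeIter(app, ring, partition))
            node_iter.set_node_provider(lambda: preferred_node)
            next(node_iter)  # yields preferred_node, not a ring node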
""" self._node_provider = callback def _node_gen(self): for node in self.primary_nodes: if not self.app.error_limited(node): yield node if not self.app.error_limited(node): self.nodes_left -= 1 if self.nodes_left <= 0: return handoffs = 0 for node in self.handoff_iter: if not self.app.error_limited(node): handoffs += 1 self.log_handoffs(handoffs) yield node if not self.app.error_limited(node): self.nodes_left -= 1 if self.nodes_left <= 0: return def next(self): if self._node_provider: # give node provider the opportunity to inject a node node = self._node_provider() if node: return node return next(self._node_iter) def __next__(self): return self.next() class Controller(object): """Base WSGI controller class for the proxy""" server_type = 'Base' # Ensure these are all lowercase pass_through_headers = [] def __init__(self, app): """ Creates a controller attached to an application instance :param app: the application instance """ self.account_name = None self.app = app self.trans_id = '-' self._allowed_methods = None @property def allowed_methods(self): if self._allowed_methods is None: self._allowed_methods = set() all_methods = inspect.getmembers(self, predicate=inspect.ismethod) for name, m in all_methods: if getattr(m, 'publicly_accessible', False): self._allowed_methods.add(name) return self._allowed_methods def _x_remove_headers(self): """ Returns a list of headers that must not be sent to the backend :returns: a list of header """ return [] def transfer_headers(self, src_headers, dst_headers): """ Transfer legal headers from an original client request to dictionary that will be used as headers by the backend request :param src_headers: A dictionary of the original client request headers :param dst_headers: A dictionary of the backend request headers """ st = self.server_type.lower() x_remove = 'x-remove-%s-meta-' % st dst_headers.update((k.lower().replace('-remove', '', 1), '') for k in src_headers if k.lower().startswith(x_remove) or k.lower() in self._x_remove_headers()) dst_headers.update((k.lower(), v) for k, v in src_headers.items() if k.lower() in self.pass_through_headers or is_sys_or_user_meta(st, k)) def generate_request_headers(self, orig_req=None, additional=None, transfer=False): """ Create a list of headers to be used in backend requests :param orig_req: the original request sent by the client to the proxy :param additional: additional headers to send to the backend :param transfer: If True, transfer headers from original client request :returns: a dictionary of headers """ # Use the additional headers first so they don't overwrite the headers # we require. headers = HeaderKeyDict(additional) if additional else HeaderKeyDict() if transfer: self.transfer_headers(orig_req.headers, headers) headers.setdefault('x-timestamp', Timestamp.now().internal) if orig_req: referer = orig_req.as_referer() else: referer = '' headers['x-trans-id'] = self.trans_id headers['connection'] = 'close' headers['user-agent'] = 'proxy-server %s' % os.getpid() headers['referer'] = referer return headers def account_info(self, account, req=None): """ Get account information, and also verify that the account exists. 
:param account: name of the account to get the info for :param req: caller's HTTP request context object (optional) :returns: tuple of (account partition, account nodes, container_count) or (None, None, None) if it does not exist """ partition, nodes = self.app.account_ring.get_nodes(account) if req: env = getattr(req, 'environ', {}) else: env = {} env.setdefault('swift.infocache', {}) path_env = env.copy() path_env['PATH_INFO'] = "/v1/%s" % (account,) info = get_account_info(path_env, self.app) if (not info or not is_success(info['status']) or not info.get('account_really_exists', True)): return None, None, None container_count = info['container_count'] return partition, nodes, container_count def container_info(self, account, container, req=None): """ Get container information and thusly verify container existence. This will also verify account existence. :param account: account name for the container :param container: container name to look up :param req: caller's HTTP request context object (optional) :returns: dict containing at least container partition ('partition'), container nodes ('containers'), container read acl ('read_acl'), container write acl ('write_acl'), and container sync key ('sync_key'). Values are set to None if the container does not exist. """ part, nodes = self.app.container_ring.get_nodes(account, container) if req: env = getattr(req, 'environ', {}) else: env = {} env.setdefault('swift.infocache', {}) path_env = env.copy() path_env['PATH_INFO'] = "/v1/%s/%s" % (account, container) info = get_container_info(path_env, self.app) if not info or not is_success(info.get('status')): info = headers_to_container_info({}, 0) info['partition'] = None info['nodes'] = None else: info['partition'] = part info['nodes'] = nodes return info def _make_request(self, nodes, part, method, path, headers, query, logger_thread_locals): """ Iterates over the given node iterator, sending an HTTP request to one node at a time. The first non-informational, non-server-error response is returned. If no non-informational, non-server-error response is received from any of the nodes, returns None. :param nodes: an iterator of the backend server and handoff servers :param part: the partition number :param method: the method to send to the backend :param path: the path to send to the backend (full path ends up being /<$device>/<$part>/<$path>) :param headers: dictionary of headers :param query: query string to send to the backend. :param logger_thread_locals: The thread local values to be set on the self.app.logger to retain transaction logging information. 
:returns: a swob.Response object, or None if no responses were received """ self.app.logger.thread_locals = logger_thread_locals for node in nodes: try: start_node_timing = time.time() with ConnectionTimeout(self.app.conn_timeout): conn = http_connect(node['ip'], node['port'], node['device'], part, method, path, headers=headers, query_string=query) conn.node = node self.app.set_node_timing(node, time.time() - start_node_timing) with Timeout(self.app.node_timeout): resp = conn.getresponse() if not is_informational(resp.status) and \ not is_server_error(resp.status): return resp.status, resp.reason, resp.getheaders(), \ resp.read() elif resp.status == HTTP_INSUFFICIENT_STORAGE: self.app.error_limit(node, _('ERROR Insufficient Storage')) elif is_server_error(resp.status): self.app.error_occurred( node, _('ERROR %(status)d ' 'Trying to %(method)s %(path)s' ' From %(type)s Server') % { 'status': resp.status, 'method': method, 'path': path, 'type': self.server_type}) except (Exception, Timeout): self.app.exception_occurred( node, self.server_type, _('Trying to %(method)s %(path)s') % {'method': method, 'path': path}) def make_requests(self, req, ring, part, method, path, headers, query_string='', overrides=None, node_count=None, node_iterator=None): """ Sends an HTTP request to multiple nodes and aggregates the results. It attempts the primary nodes concurrently, then iterates over the handoff nodes as needed. :param req: a request sent by the client :param ring: the ring used for finding backend servers :param part: the partition number :param method: the method to send to the backend :param path: the path to send to the backend (full path ends up being /<$device>/<$part>/<$path>) :param headers: a list of dicts, where each dict represents one backend request that should be made. :param query_string: optional query string to send to the backend :param overrides: optional return status override map used to override the returned status of a request. :param node_count: optional number of nodes to send request to. :param node_iterator: optional node iterator. :returns: a swob.Response object """ nodes = GreenthreadSafeIterator( node_iterator or self.app.iter_nodes(ring, part) ) node_number = node_count or len(ring.get_part_nodes(part)) pile = GreenAsyncPile(node_number) for head in headers: pile.spawn(self._make_request, nodes, part, method, path, head, query_string, self.app.logger.thread_locals) response = [] statuses = [] for resp in pile: if not resp: continue response.append(resp) statuses.append(resp[0]) if self.have_quorum(statuses, node_number): break # give any pending requests *some* chance to finish finished_quickly = pile.waitall(self.app.post_quorum_timeout) for resp in finished_quickly: if not resp: continue response.append(resp) statuses.append(resp[0]) while len(response) < node_number: response.append((HTTP_SERVICE_UNAVAILABLE, '', '', '')) statuses, reasons, resp_headers, bodies = zip(*response) return self.best_response(req, statuses, reasons, bodies, '%s %s' % (self.server_type, req.method), overrides=overrides, headers=resp_headers) def _quorum_size(self, n): """ Number of successful backend responses needed for the proxy to consider the client request successful. """ return quorum_size(n) def have_quorum(self, statuses, node_count, quorum=None): """ Given a list of statuses from several requests, determine if a quorum response can already be decided. 
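
        For example (illustrative statuses): with ``node_count=3`` the
        default quorum is 2, so ``[201, 201]`` establishes quorum while
        ``[201, 404]`` does not, because no single status class has two
        members.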
:param statuses: list of statuses returned :param node_count: number of nodes being queried (basically ring count) :param quorum: number of statuses required for quorum :returns: True or False, depending on if quorum is established """ if quorum is None: quorum = self._quorum_size(node_count) if len(statuses) >= quorum: for hundred in (HTTP_CONTINUE, HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST): if sum(1 for s in statuses if hundred <= s < hundred + 100) >= quorum: return True return False def best_response(self, req, statuses, reasons, bodies, server_type, etag=None, headers=None, overrides=None, quorum_size=None): """ Given a list of responses from several servers, choose the best to return to the API. :param req: swob.Request object :param statuses: list of statuses returned :param reasons: list of reasons for each status :param bodies: bodies of each response :param server_type: type of server the responses came from :param etag: etag :param headers: headers of each response :param overrides: overrides to apply when lacking quorum :param quorum_size: quorum size to use :returns: swob.Response object with the correct status, body, etc. set """ if quorum_size is None: quorum_size = self._quorum_size(len(statuses)) resp = self._compute_quorum_response( req, statuses, reasons, bodies, etag, headers, quorum_size=quorum_size) if overrides and not resp: faked_up_status_indices = set() transformed = [] for (i, (status, reason, hdrs, body)) in enumerate(zip( statuses, reasons, headers, bodies)): if status in overrides: faked_up_status_indices.add(i) transformed.append((overrides[status], '', '', '')) else: transformed.append((status, reason, hdrs, body)) statuses, reasons, headers, bodies = zip(*transformed) resp = self._compute_quorum_response( req, statuses, reasons, bodies, etag, headers, indices_to_avoid=faked_up_status_indices, quorum_size=quorum_size) if not resp: resp = HTTPServiceUnavailable(request=req) self.app.logger.error(_('%(type)s returning 503 for %(statuses)s'), {'type': server_type, 'statuses': statuses}) return resp def _compute_quorum_response(self, req, statuses, reasons, bodies, etag, headers, quorum_size, indices_to_avoid=()): if not statuses: return None for hundred in (HTTP_OK, HTTP_MULTIPLE_CHOICES, HTTP_BAD_REQUEST): hstatuses = \ [(i, s) for i, s in enumerate(statuses) if hundred <= s < hundred + 100] if len(hstatuses) >= quorum_size: try: status_index, status = max( ((i, stat) for i, stat in hstatuses if i not in indices_to_avoid), key=operator.itemgetter(1)) except ValueError: # All statuses were indices to avoid continue resp = status_map[status](request=req) resp.status = '%s %s' % (status, reasons[status_index]) resp.body = bodies[status_index] if headers: update_headers(resp, headers[status_index]) if etag: resp.headers['etag'] = etag.strip('"') return resp return None @public def GET(self, req): """ Handler for HTTP GET requests. :param req: The client request :returns: the response to the client """ return self.GETorHEAD(req) @public def HEAD(self, req): """ Handler for HTTP HEAD requests. 
:param req: The client request :returns: the response to the client """ return self.GETorHEAD(req) def autocreate_account(self, req, account): """ Autocreate an account :param req: request leading to this autocreate :param account: the unquoted account name """ partition, nodes = self.app.account_ring.get_nodes(account) path = '/%s' % account headers = {'X-Timestamp': Timestamp.now().internal, 'X-Trans-Id': self.trans_id, 'X-Openstack-Request-Id': self.trans_id, 'Connection': 'close'} # transfer any x-account-sysmeta headers from original request # to the autocreate PUT headers.update((k, v) for k, v in req.headers.items() if is_sys_meta('account', k)) resp = self.make_requests(Request.blank('/v1' + path), self.app.account_ring, partition, 'PUT', path, [headers] * len(nodes)) if is_success(resp.status_int): self.app.logger.info(_('autocreate account %r'), path) clear_info_cache(self.app, req.environ, account) return True else: self.app.logger.warning(_('Could not autocreate account %r'), path) return False def GETorHEAD_base(self, req, server_type, node_iter, partition, path, concurrency=1, client_chunk_size=None): """ Base handler for HTTP GET or HEAD requests. :param req: swob.Request object :param server_type: server type used in logging :param node_iter: an iterator to obtain nodes from :param partition: partition :param path: path for the request :param concurrency: number of requests to run concurrently :param client_chunk_size: chunk size for response body iterator :returns: swob.Response object """ backend_headers = self.generate_request_headers( req, additional=req.headers) handler = GetOrHeadHandler(self.app, req, self.server_type, node_iter, partition, path, backend_headers, concurrency, client_chunk_size=client_chunk_size) res = handler.get_working_response(req) if not res: res = self.best_response( req, handler.statuses, handler.reasons, handler.bodies, '%s %s' % (server_type, req.method), headers=handler.source_headers) # if a backend policy index is present in resp headers, translate it # here with the friendly policy name if 'X-Backend-Storage-Policy-Index' in res.headers and \ is_success(res.status_int): policy = \ POLICIES.get_by_index( res.headers['X-Backend-Storage-Policy-Index']) if policy: res.headers['X-Storage-Policy'] = policy.name else: self.app.logger.error( 'Could not translate %s (%r) from %r to policy', 'X-Backend-Storage-Policy-Index', res.headers['X-Backend-Storage-Policy-Index'], path) return res def is_origin_allowed(self, cors_info, origin): """ Is the given Origin allowed to make requests to this resource :param cors_info: the resource's CORS related metadata headers :param origin: the origin making the request :return: True or False """ allowed_origins = set() if cors_info.get('allow_origin'): allowed_origins.update( [a.strip() for a in cors_info['allow_origin'].split(' ') if a.strip()]) if self.app.cors_allow_origin: allowed_origins.update(self.app.cors_allow_origin) return origin in allowed_origins or '*' in allowed_origins @public def OPTIONS(self, req): """ Base handler for OPTIONS requests :param req: swob.Request object :returns: swob.Response object """ # Prepare the default response headers = {'Allow': ', '.join(self.allowed_methods)} resp = Response(status=200, request=req, headers=headers) # If this isn't a CORS pre-flight request then return now req_origin_value = req.headers.get('Origin', None) if not req_origin_value: return resp # This is a CORS preflight request so check it's allowed try: container_info = \ 
                self.container_info(self.account_name, self.container_name,
                                    req)
        except AttributeError:
            # This should only happen for requests to the Account. A future
            # change could allow CORS requests to the Account level as well.
            return resp
        cors = container_info.get('cors', {})

        # If the CORS origin isn't allowed, return a 401
        if not self.is_origin_allowed(cors, req_origin_value) or (
                req.headers.get('Access-Control-Request-Method') not in
                self.allowed_methods):
            resp.status = HTTP_UNAUTHORIZED
            return resp

        # Populate the response with the CORS preflight headers
        if cors.get('allow_origin') and \
                cors.get('allow_origin').strip() == '*':
            headers['access-control-allow-origin'] = '*'
        else:
            headers['access-control-allow-origin'] = req_origin_value
            if 'vary' in headers:
                headers['vary'] += ', Origin'
            else:
                headers['vary'] = 'Origin'

        if cors.get('max_age') is not None:
            headers['access-control-max-age'] = cors.get('max_age')
        headers['access-control-allow-methods'] = \
            ', '.join(self.allowed_methods)

        # Allow all headers requested in the request. The CORS
        # specification does leave the door open for this, as mentioned in
        # http://www.w3.org/TR/cors/#resource-preflight-requests
        # Note: Since the list of headers can be unbounded, simply echoing
        # the requested headers back can be enough.
        allow_headers = set(
            list_from_csv(req.headers.get('Access-Control-Request-Headers')))
        if allow_headers:
            headers['access-control-allow-headers'] = ', '.join(allow_headers)
            if 'vary' in headers:
                headers['vary'] += ', Access-Control-Request-Headers'
            else:
                headers['vary'] = 'Access-Control-Request-Headers'

        resp.headers = headers

        return resp

    def get_name_length_limit(self):
        if self.account_name.startswith(self.app.auto_create_account_prefix):
            multiplier = 2
        else:
            multiplier = 1

        if self.server_type == 'Account':
            return constraints.MAX_ACCOUNT_NAME_LENGTH * multiplier
        elif self.server_type == 'Container':
            return constraints.MAX_CONTAINER_NAME_LENGTH * multiplier
        else:
            raise ValueError(
                "server_type can only be 'Account' or 'Container'")
swift-2.17.1/swift/proxy/controllers/info.py0000666000175000017500000000731113435012015021146 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
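# A minimal, illustrative sketch (not part of this module; the endpoint URL
# and key below are hypothetical) of how a client signs a request for the
# unrestricted /info document handled by InfoController.GETorHEAD() below:
#
#     import time
#     from swift.common.utils import get_hmac
#
#     admin_key = 'secret'             # hypothetical proxy admin_key
#     expires = int(time.time()) + 60  # signature valid for 60 seconds
#     sig = get_hmac('GET', '/info', expires, admin_key)
#     url = ('http://proxy:8080/info?swiftinfo_sig=%s&swiftinfo_expires=%d'
#            % (sig, expires))
#
# Fetching that URL with GET before the expiry time satisfies the
# constant-time signature check below and includes the admin-only sections
# in the response.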
import json

from time import time

from swift.common.utils import public, get_hmac, get_swift_info, \
    streq_const_time
from swift.proxy.controllers.base import Controller, delay_denial
from swift.common.swob import HTTPOk, HTTPForbidden, HTTPUnauthorized


class InfoController(Controller):
    """WSGI controller for info requests"""
    server_type = 'Info'

    def __init__(self, app, version, expose_info, disallowed_sections,
                 admin_key):
        super(InfoController, self).__init__(app)
        self.expose_info = expose_info
        self.disallowed_sections = disallowed_sections
        self.admin_key = admin_key
        self.allowed_hmac_methods = {
            'HEAD': ['HEAD', 'GET'],
            'GET': ['GET']}

    @public
    @delay_denial
    def GET(self, req):
        return self.GETorHEAD(req)

    @public
    @delay_denial
    def HEAD(self, req):
        return self.GETorHEAD(req)

    @public
    @delay_denial
    def OPTIONS(self, req):
        return HTTPOk(request=req, headers={'Allow': 'HEAD, GET, OPTIONS'})

    def GETorHEAD(self, req):
        """
        Handler for HTTP GET/HEAD requests to /info.

        Should return a WSGI-style callable (such as swob.Response).

        :param req: swob.Request object
        """
        if not self.expose_info:
            return HTTPForbidden(request=req)

        admin_request = False
        sig = req.params.get('swiftinfo_sig', '')
        expires = req.params.get('swiftinfo_expires', '')

        if sig != '' or expires != '':
            admin_request = True
            if not self.admin_key:
                return HTTPForbidden(request=req)
            try:
                expires = int(expires)
            except ValueError:
                return HTTPUnauthorized(request=req)
            if expires < time():
                return HTTPUnauthorized(request=req)

            valid_sigs = []
            for method in self.allowed_hmac_methods[req.method]:
                valid_sigs.append(get_hmac(method,
                                           '/info',
                                           expires,
                                           self.admin_key))

            # While it's true that any() will short-circuit, this doesn't
            # affect the timing-attack resistance since the only way this will
            # short-circuit is when a valid signature is passed in.
            is_valid_hmac = any(streq_const_time(valid_sig, sig)
                                for valid_sig in valid_sigs)
            if not is_valid_hmac:
                return HTTPUnauthorized(request=req)

        headers = {}
        if 'Origin' in req.headers:
            headers['Access-Control-Allow-Origin'] = req.headers['Origin']
            headers['Access-Control-Expose-Headers'] = ', '.join(
                ['x-trans-id'])

        info = json.dumps(get_swift_info(
            admin=admin_request,
            disallowed_sections=self.disallowed_sections))

        return HTTPOk(request=req,
                      headers=headers,
                      body=info,
                      content_type='application/json; charset=UTF-8')
swift-2.17.1/swift/proxy/controllers/account.py0000666000175000017500000002027013435012003021643 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
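# A short, illustrative sketch (header values are hypothetical) of the ACL
# translation done by AccountController.add_acls_from_sys_metadata() below.
# The account ACL is persisted as JSON in the internal header
# X-Account-Sysmeta-Core-Access-Control and surfaced to clients as
# X-Account-Access-Control (then stripped again for non-swift_owner
# requests):
#
#     from swift.common.middleware.acl import parse_acl, format_acl
#
#     stored = '{"admin": ["alice"], "read-only": ["bob"]}'
#     acl_dict = parse_acl(version=2, data=stored)
#     # acl_dict == {'admin': ['alice'], 'read-only': ['bob']}
#     public_value = format_acl(version=2, acl_dict=acl_dict)
#     # public_value is the JSON string sent as X-Account-Access-Control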
from six.moves.urllib.parse import unquote

from swift import gettext_ as _

from swift.account.utils import account_listing_response
from swift.common.middleware.acl import parse_acl, format_acl
from swift.common.utils import public
from swift.common.constraints import check_metadata
from swift.common.http import HTTP_NOT_FOUND, HTTP_GONE
from swift.proxy.controllers.base import Controller, clear_info_cache, \
    set_info_cache
from swift.common.middleware import listing_formats
from swift.common.swob import HTTPBadRequest, HTTPMethodNotAllowed
from swift.common.request_helpers import get_sys_meta_prefix


class AccountController(Controller):
    """WSGI controller for account requests"""
    server_type = 'Account'

    def __init__(self, app, account_name, **kwargs):
        super(AccountController, self).__init__(app)
        self.account_name = unquote(account_name)
        if not self.app.allow_account_management:
            self.allowed_methods.remove('PUT')
            self.allowed_methods.remove('DELETE')

    def add_acls_from_sys_metadata(self, resp):
        if resp.environ['REQUEST_METHOD'] in ('HEAD', 'GET', 'PUT', 'POST'):
            prefix = get_sys_meta_prefix('account') + 'core-'
            name = 'access-control'
            (extname, intname) = ('x-account-' + name, prefix + name)
            acl_dict = parse_acl(version=2, data=resp.headers.pop(intname))
            if acl_dict:  # treat empty dict as empty header
                resp.headers[extname] = format_acl(
                    version=2, acl_dict=acl_dict)

    def GETorHEAD(self, req):
        """Handler for HTTP GET/HEAD requests."""
        length_limit = self.get_name_length_limit()
        if len(self.account_name) > length_limit:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Account name length of %d longer than %d' % \
                        (len(self.account_name), length_limit)
            # Don't cache this. We know the account doesn't exist because
            # the name is bad; we don't need to cache that because it's
            # really cheap to recompute.
            return resp

        partition = self.app.account_ring.get_part(self.account_name)
        concurrency = self.app.account_ring.replica_count \
            if self.app.concurrent_gets else 1
        node_iter = self.app.iter_nodes(self.app.account_ring, partition)
        params = req.params
        params['format'] = 'json'
        req.params = params
        resp = self.GETorHEAD_base(
            req, _('Account'), node_iter, partition,
            req.swift_entity_path.rstrip('/'), concurrency)
        if resp.status_int == HTTP_NOT_FOUND:
            if resp.headers.get('X-Account-Status', '').lower() == 'deleted':
                resp.status = HTTP_GONE
            elif self.app.account_autocreate:
                # This is kind of a lie; we pretend like the account is
                # there, but it's not. We'll create it as soon as something
                # tries to write to it, but we don't need databases on disk
                # to tell us that nothing's there.
                #
                # We set a header so that certain consumers can tell it's a
                # fake listing. The important one is the PUT of a container
                # to an autocreate account; the proxy checks to see if the
                # account exists before actually performing the PUT and
                # creates the account if necessary. If we feed it a perfect
                # lie, it'll just try to create the container without
                # creating the account, and that'll fail.
                resp = account_listing_response(
                    self.account_name, req,
                    listing_formats.get_listing_content_type(req))
                resp.headers['X-Backend-Fake-Account-Listing'] = 'yes'

        # Cache this. We just made a request to a storage node and got
        # up-to-date information for the account.
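        # (set_info_cache() below stores this info in both memcache and the
        # WSGI environment; the X-Backend-Recheck-Account-Existence header
        # carries the proxy's recheck_account_existence setting, in
        # seconds, i.e. how long the cached info may be reused.)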
        resp.headers['X-Backend-Recheck-Account-Existence'] = str(
            self.app.recheck_account_existence)
        set_info_cache(self.app, req.environ, self.account_name, None, resp)

        if req.environ.get('swift_owner'):
            self.add_acls_from_sys_metadata(resp)
        else:
            for header in self.app.swift_owner_headers:
                resp.headers.pop(header, None)
        return resp

    @public
    def PUT(self, req):
        """HTTP PUT request handler."""
        if not self.app.allow_account_management:
            return HTTPMethodNotAllowed(
                request=req,
                headers={'Allow': ', '.join(self.allowed_methods)})
        error_response = check_metadata(req, 'account')
        if error_response:
            return error_response
        length_limit = self.get_name_length_limit()
        if len(self.account_name) > length_limit:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Account name length of %d longer than %d' % \
                        (len(self.account_name), length_limit)
            return resp
        account_partition, accounts = \
            self.app.account_ring.get_nodes(self.account_name)
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.make_requests(
            req, self.app.account_ring, account_partition, 'PUT',
            req.swift_entity_path, [headers] * len(accounts))
        self.add_acls_from_sys_metadata(resp)
        return resp

    @public
    def POST(self, req):
        """HTTP POST request handler."""
        length_limit = self.get_name_length_limit()
        if len(self.account_name) > length_limit:
            resp = HTTPBadRequest(request=req)
            resp.body = 'Account name length of %d longer than %d' % \
                        (len(self.account_name), length_limit)
            return resp
        error_response = check_metadata(req, 'account')
        if error_response:
            return error_response
        account_partition, accounts = \
            self.app.account_ring.get_nodes(self.account_name)
        headers = self.generate_request_headers(req, transfer=True)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.make_requests(
            req, self.app.account_ring, account_partition, 'POST',
            req.swift_entity_path, [headers] * len(accounts))
        if resp.status_int == HTTP_NOT_FOUND and self.app.account_autocreate:
            self.autocreate_account(req, self.account_name)
            resp = self.make_requests(
                req, self.app.account_ring, account_partition, 'POST',
                req.swift_entity_path, [headers] * len(accounts))
        self.add_acls_from_sys_metadata(resp)
        return resp

    @public
    def DELETE(self, req):
        """HTTP DELETE request handler."""
        # Extra safety in case someone typos a query string for an
        # account-level DELETE request that was really meant to be caught by
        # some middleware.
        if req.query_string:
            return HTTPBadRequest(request=req)
        if not self.app.allow_account_management:
            return HTTPMethodNotAllowed(
                request=req,
                headers={'Allow': ', '.join(self.allowed_methods)})
        account_partition, accounts = \
            self.app.account_ring.get_nodes(self.account_name)
        headers = self.generate_request_headers(req)
        clear_info_cache(self.app, req.environ, self.account_name)
        resp = self.make_requests(
            req, self.app.account_ring, account_partition, 'DELETE',
            req.swift_entity_path, [headers] * len(accounts))
        return resp
swift-2.17.1/swift/locale/0000775000175000017500000000000013435012120015342 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/ko_KR/0000775000175000017500000000000013435012120016347 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/ko_KR/LC_MESSAGES/0000775000175000017500000000000013435012120020134 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/ko_KR/LC_MESSAGES/swift.po0000666000175000017500000007212713435012015021646 0ustar zuulzuul00000000000000# Translations template for swift.
# Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Mario Cho , 2014 # Ying Chun Guo , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ko_KR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Korean (South Korea)\n" msgid "" "\n" "user quit" msgstr "" "\n" "ì‚¬ìš©ìž ì¢…ë£Œ" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - 병렬, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)dê°œ 접미부를 검사함 - %(hashed).2f%%ê°œ 해시ë¨, %(synced).2f%%ê°œ ë™" "기화ë¨" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d(%(percentage).2f%%)ê°œ íŒŒí‹°ì…˜ì´ %(time).2fì´ˆ" "(%(rate).2f/ì´ˆ, %(remaining)s 남ìŒ) ì•ˆì— ë³µì œë¨" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)sê°œ 성공, %(failure)sê°œ 실패" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)sì—서 %(statuses)sì— ëŒ€í•´ 503ì„ ë¦¬í„´í•¨" #, python-format msgid "%s already started..." msgstr "%sì´(ê°€) ì´ë¯¸ 시작ë˜ì—ˆìŒ..." #, python-format msgid "%s does not exist" msgstr "%sì´(ê°€) 존재하지 않ìŒ" #, python-format msgid "%s is not mounted" msgstr "%sì´(ê°€) 마운트ë˜ì§€ 않ìŒ" #, python-format msgid "%s responded as unmounted" msgstr "%sì´(ê°€) 마운트 í•´ì œëœ ê²ƒìœ¼ë¡œ ì‘답" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 피어ì—서 ì—°ê²° 재설정" #, python-format msgid ", %s containers deleted" msgstr ", %s 지워진 컨테ì´ë„ˆ" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s ì—¬ë¶„ì˜ ì»¨í…Œì´ë„ˆ" #, python-format msgid ", %s containers remaining" msgstr ", %s ë‚¨ì€ ì»¨í…Œì´ë„ˆ" #, python-format msgid ", %s objects deleted" msgstr ", %s 지워진 오브ì íЏ" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s oì—¬ë¶„ì˜ ì˜¤ë¸Œì íЏ" #, python-format msgid ", %s objects remaining" msgstr ", %s ë‚¨ì€ ì˜¤ë¸Œì íЏ" #, python-format msgid ", elapsed: %.02fs" msgstr ", 경과ë¨: %.02fs" msgid ", return codes: " msgstr ", 반환 코드들:" msgid "Account" msgstr "계정" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "계정 ê°ì‚¬ \"한 번\"모드가 완료: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "ì •ìƒìœ¼ë¡œ íŒì •난 계정: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "%(time).5fì´ˆ(%(rate).5f/s)ì— %(count)dê°œì˜ ë°ì´í„°ë² ì´ìŠ¤ë¥¼ 복제하려고 함" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "ìž˜ëª»ëœ rsync 리턴 코드: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "계정 ê°ì‚¬ \"한 번\"모드로 시작" msgid "Begin account audit pass." msgstr "계정 검사 시작." msgid "Begin container audit \"once\" mode" msgstr "컨테ì´ë„ˆ ê°ì‚¬ \"ì¼ íšŒ\" 모드 시작" msgid "Begin container audit pass." 
msgstr "컨테ì´ë„ˆ ê°ì‚¬ ì „ë‹¬ì´ ì‹œìž‘ë©ë‹ˆë‹¤." msgid "Begin container sync \"once\" mode" msgstr "컨테ì´ë„ˆ ë™ê¸°í™” \"ì¼ íšŒ\" 모드 시작" msgid "Begin container update single threaded sweep" msgstr "컨테ì´ë„ˆ ì—…ë°ì´íЏ ë‹¨ì¼ ìŠ¤ë ˆë“œ 스윕 시작" msgid "Begin container update sweep" msgstr "컨테ì´ë„ˆ ì—…ë°ì´íЏ 스윕 시작" msgid "Begin object update single threaded sweep" msgstr "오브ì íЏ ì—…ë°ì´íЏ ë‹¨ì¼ ìŠ¤ë ˆë“œ 스윕 시작" msgid "Begin object update sweep" msgstr "오브ì íЏ ì—…ë°ì´íЏ 스윕 시작" #, python-format msgid "Beginning pass on account %s" msgstr "계정 패스 시작 %s" msgid "Beginning replication run" msgstr "복제 실행 시작" msgid "Broker error trying to rollback locked connection" msgstr "잠긴 ì—°ê²°ì„ ë¡¤ë°±í•˜ëŠ” 중 브로커 오류 ë°œìƒ" #, python-format msgid "Can not access the file %s." msgstr "íŒŒì¼ %sì— ì•¡ì„¸ìŠ¤í•  수 없습니다." #, python-format msgid "Can not load profile data from %s." msgstr "%sì—서 í”„ë¡œíŒŒì¼ ë°ì´í„°ë¥¼ 로드할 수 없습니다." #, python-format msgid "Client did not read from proxy within %ss" msgstr "í´ë¼ì´ì–¸íЏì—서 %ss ë‚´ì— í”„ë¡ì‹œë¥¼ ì½ì„ 수 없었ìŒ" msgid "Client disconnected on read" msgstr "ì½ê¸° 시 í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" msgid "Client disconnected without sending enough data" msgstr "ë°ì´í„°ë¥¼ ëª¨ë‘ ì „ì†¡í•˜ê¸° ì „ì— í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" msgid "Client disconnected without sending last chunk" msgstr "마지막 ì²­í¬ë¥¼ 전송하기 ì „ì— í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "í´ë¼ì´ì–¸íЏ 경로 %(client)sì´(ê°€) 오브ì íЏ 메타ë°ì´í„° %(meta)sì— ì €ìž¥ëœ ê²½ë¡œ" "와 ì¼ì¹˜í•˜ì§€ 않ìŒ" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "구성 옵션 internal_client_conf_pathê°€ ì •ì˜ë˜ì§€ 않았습니다. 기본 구성 사용 시 " "internal-client.conf-sampleì—서 ì˜µì…˜ì„ ì°¸ì¡°í•˜ì‹­ì‹œì˜¤." 
msgid "Connection refused" msgstr "ì—°ê²°ì´ ê±°ë¶€ë¨" msgid "Connection timeout" msgstr "ì—°ê²° 제한시간 초과" msgid "Container" msgstr "컨테ì´ë„ˆ" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "컨테ì´ë„ˆ ê°ì‚¬ \"ì¼ íšŒ\" 모드 완료: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "컨테ì´ë„ˆ ê°ì‚¬ 전달 완료: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "컨테ì´ë„ˆ ë™ê¸°í™” \"ì¼ íšŒ\" 모드 완료: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "컨테ì´ë„ˆ ì—…ë°ì´íЏ ë‹¨ì¼ ìŠ¤ë ˆë“œ 스윕 완료: %(elapsed).02fs, %(success)sê°œ 성" "ê³µ, %(fail)sê°œ 실패, %(no_change)sê°œ 변경 ì—†ìŒ" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "컨테ì´ë„ˆ ì—…ë°ì´íЏ 스윕 완료: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)sì˜ ì»¨í…Œì´ë„ˆ ì—…ë°ì´íЏ 스윕 완료: %(elapsed).02fs, %(success)sê°œ 성공, " "%(fail)sê°œ 실패, %(no_change)sê°œ 변경 ì—†ìŒ" #, python-format msgid "Data download error: %s" msgstr "ë°ì´í„° 다운로드 오류: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "장치 패스 완료 : %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "오류 %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "오류 %(status)d %(body)s, %(type)s 서버 발신" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "오류 %(status)d %(body)s, 오브ì íЏ 서버 발신, 회신: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "오류 %(status)d. 예ìƒ: 100-continue, 오브ì íЏ 서버 발신" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 ì—…ë°ì´íЏ 실패(ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„): " "ì‘답 %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "오류. %(host)sì˜ ìž˜ëª»ëœ ì‘답 %(status)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR í´ë¼ì´ì–¸íЏ ì½ê¸° 시간 초과 (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "오류. 컨테ì´ë„ˆ ì—…ë°ì´íЏ 실패(ì´í›„ 비ë™ê¸° ì—…ë°ì´íŠ¸ìš©ìœ¼ë¡œ 저장): %(status)dì‘" "답. 출처: %(ip)s:%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "오류는 %sì˜ ê³„ì • 정보를 ì–»ì„ ìˆ˜ 없습니다" #, python-format msgid "ERROR Could not get container info %s" msgstr "오류. 컨테ì´ë„ˆ ì •ë³´ %sì„(를) 가져올 수 ì—†ìŒ" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "오류. ë””ìŠ¤í¬ íŒŒì¼ %(data_file)s 닫기 실패: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "오류. 예외로 ì¸í•´ í´ë¼ì´ì–¸íЏ ì—°ê²°ì´ ëŠì–´ì§" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "ERROR 오브ì íЏ 서버 %sì— ë°ì´í„°ë¥¼ 전송하는 ì¤‘ì— ì˜ˆì™¸ ë°œìƒ" msgid "ERROR Failed to get my own IPs?" msgstr "오류. ìžì²´ IP를 가져오는 중 오류 ë°œìƒ ì—¬ë¶€" msgid "ERROR Insufficient Storage" msgstr "오류. 스토리지 ê³µê°„ì´ ì¶©ë¶„í•˜ì§€ 않ìŒ" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "오류. 
오브ì íЏ %(obj)sì˜ ê°ì‚¬ê°€ 실패하여 격리ë¨: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "오류. 문제가 ë°œìƒí•¨, %s 격리 중" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "오류. ì›ê²© 드ë¼ì´ë¸Œê°€ 마운트ë˜ì§€ 않ìŒ. %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "%(db_file)s %(row)s ë™ê¸°í™” 오류" #, python-format msgid "ERROR Syncing %s" msgstr "%s ë™ê¸°í™” 오류" #, python-format msgid "ERROR Trying to audit %s" msgstr "%s ê°ì‚¬ 중 오류 ë°œìƒ" msgid "ERROR Unhandled exception in request" msgstr "오류. ìš”ì²­ì— ì²˜ë¦¬ë˜ì§€ ì•Šì€ ì˜ˆì™¸ê°€ 있ìŒ" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "오류. %(method)s %(path)sì— __call__ 오류 ë°œìƒ" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 ì—…ë°ì´íЏ 실패(ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "오류. %(ip)s:%(port)s/%(device)s(으)로 계정 ì—…ë°ì´íЏ 실패(ë‚˜ì¤‘ì— ë‹¤ì‹œ 시ë„): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "오류. 비ë™ê¸° 보류 파ì¼ì— 예ìƒì¹˜ 못한 ì´ë¦„ %sì„(를) 사용함" msgid "ERROR auditing" msgstr "검사 오류" #, python-format msgid "ERROR auditing: %s" msgstr "ê°ì‚¬ 오류: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "오류. %(ip)s:%(port)s/%(dev)s(으)로 컨테ì´ë„ˆ ì—…ë°ì´íЏ 실패(ì´í›„ 비ë™ê¸° ì—…ë°ì´" "트용으로 저장)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "%sì—서 HTTP ì‘ë‹µì„ ì½ëŠ” 중 오류 ë°œìƒ" #, python-format msgid "ERROR reading db %s" msgstr "ë°ì´í„°ë² ì´ìФ %sì„(를) ì½ëŠ” 중 오류 ë°œìƒ" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "오류. 
%(code)sì˜ rsyncê°€ 실패함: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "%(file)sì„(를) 노드 %(node)sê³¼(와) ë™ê¸°í™”하는 중 오류 ë°œìƒ" msgid "ERROR trying to replicate" msgstr "복제 중 오류 ë°œìƒ" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s 정리 중 오류 ë°œìƒ" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s 서버 %(ip)s:%(port)s/%(device)s 오류, 회신: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "%sì—서 억제를 로드하는 중 오류 ë°œìƒ: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ì›ê²© 서버 %(ip)s:%(port)s/%(device)sì— ì˜¤ë¥˜ ë°œìƒ" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "오류: 드ë¼ì´ë¸Œ íŒŒí‹°ì…˜ì— ëŒ€í•œ 경로를 가져오지 못함: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "오류: %(path)sì— ì•¡ì„¸ìŠ¤í•  수 ì—†ìŒ: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "오류: ê°ì‚¬ë¥¼ 실행할 수 ì—†ìŒ: %s" msgid "Error hashing suffix" msgstr "접미부를 해싱하는 중 오류 ë°œìƒ" msgid "Error listing devices" msgstr "디바ì´ìФ 나열 중 오류 ë°œìƒ" #, python-format msgid "Error on render profiling results: %s" msgstr "프로파ì¼ë§ 결과를 ë Œë”ë§í•˜ëŠ” 중 오류 ë°œìƒ: %s" msgid "Error parsing recon cache file" msgstr "ì¡°ì • ìºì‹œ 파ì¼ì„ 구문 ë¶„ì„하는 중 오류 ë°œìƒ" msgid "Error reading recon cache file" msgstr "ì¡°ì • ìºì‹œ 파ì¼ì„ ì½ëŠ” 중 오류 ë°œìƒ" msgid "Error reading ringfile" msgstr "ë§ íŒŒì¼ì„ ì½ëŠ” 중 오류 ë°œìƒ" msgid "Error reading swift.conf" msgstr "swift.conf를 ì½ëŠ” 중 오류 ë°œìƒ" msgid "Error retrieving recon data" msgstr "ì¡°ì • ë°ì´í„°ë¥¼ 검색하는 ì¤‘ì— ì˜¤ë¥˜ ë°œìƒ" msgid "Error syncing handoff partition" msgstr "핸드오프 파티션 ë™ê¸°í™” 중 오류 ë°œìƒ" msgid "Error syncing partition" msgstr "파티션 ë™ê¸° 오류 " #, python-format msgid "Error syncing with node: %s" msgstr "노드 ë™ê¸° 오류: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "%(path)s policy#%(policy)d frag#%(frag_index)sì„(를) 다시 빌드하려는 중 오류 " "ë°œìƒ" msgid "Error: An error occurred" msgstr "오류: 오류 ë°œìƒ" msgid "Error: missing config path argument" msgstr "오류: 구성 경로 ì¸ìˆ˜ 누ë½" #, python-format msgid "Error: unable to locate %s" msgstr "오류: %sì„(를) ì°¾ì„ ìˆ˜ ì—†ìŒ" msgid "Exception dumping recon cache" msgstr "ì¡°ì • ìºì‹œ ë¤í”„ 중 예외 ë°œìƒ" msgid "Exception in top-level account reaper loop" msgstr "최ìƒìœ„ 계정 ë£¨í”„ì˜ ì˜ˆì™¸ " msgid "Exception in top-level replication loop" msgstr "최ìƒìœ„ 레벨 복제 루프ì—서 예외 ë°œìƒ" msgid "Exception in top-levelreconstruction loop" msgstr "최ìƒìœ„ 레벨 재구성 루프ì—서 예외 ë°œìƒ" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 예외" #, python-format msgid "Exception with account %s" msgstr "예외 계정 %s" #, python-format msgid "Exception with containers for account %s" msgstr "계정 콘테ì´ë„ˆì˜ 예외 %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "계정 %(account)sì˜ ì»¨í…Œì´ë„ˆ %(container)sì— ëŒ€í•œ 오브ì íŠ¸ì— ì˜ˆì™¸ ë°œìƒ" #, python-format msgid "Expect: 100-continue on %s" msgstr "%sì—서 100-continue 예ìƒ" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)sì—서 %(found_domain)s(으)ë¡œì˜ ë‹¤ìŒ CNAME ì²´ì¸" msgid "Found configs:" msgstr "구성 발견:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "핸드오프 첫 ëª¨ë“œì— ì—¬ì „ížˆ 핸드오프가 남아 있습니다. 
현재 복제 ì „ë‹¬ì„ ì¤‘ë‹¨í•©ë‹ˆ" "다." msgid "Host unreachable" msgstr "호스트 ë„달 불가능" #, python-format msgid "Incomplete pass on account %s" msgstr "계정 패스 미완료 %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "올바르지 ì•Šì€ X-Container-Sync-To í˜•ì‹ %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-Toì— ì˜¬ë°”ë¥´ì§€ ì•Šì€ í˜¸ìŠ¤íŠ¸ %rì´(ê°€) 있ìŒ" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "올바르지 ì•Šì€ ë³´ë¥˜ 항목 %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "%(full_path)sì—서 올바르지 ì•Šì€ ì‘답 %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)sì˜ ì˜¬ë°”ë¥´ì§€ ì•Šì€ ì‘답 %(resp)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To 올바르지 ì•Šì€ ìŠ¤í‚¤ë§ˆ %rì´(ê°€) 있습니다. \"//\", \"http\" " "ë˜ëŠ” \"https\"여야 합니다." #, python-format msgid "Killing long-running rsync: %s" msgstr "장기 실행 ì¤‘ì¸ rsync ê°•ì œ 종료: %s" msgid "Lockup detected.. killing live coros." msgstr "잠금 발견.. 활성 coros를 ê°•ì œ 종료합니다." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)sì„(를) %(found_domain)s(으)로 맵핑함" #, python-format msgid "No %s running" msgstr "%sì´(ê°€) 실행ë˜ì§€ 않ìŒ" #, python-format msgid "No permission to signal PID %d" msgstr "PID %dì„(를) 표시할 ê¶Œí•œì´ ì—†ìŒ" #, python-format msgid "No policy with index %s" msgstr "ì¸ë±ìŠ¤ê°€ %sì¸ ì •ì±…ì´ ì—†ìŒ" #, python-format msgid "No realm key for %r" msgstr "%rì— ëŒ€í•œ ì˜ì—­ 키가 ì—†ìŒ" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "노드 오류로 %(ip)s:%(port)s(%(device)s)ì´(ê°€) 제한ë¨" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "승ì¸ëœ 오브ì íЏ 서버가 부족함(%dì„(를) ë°›ìŒ)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "ì°¾ì„ ìˆ˜ ì—†ìŒ %(sync_from)r => %(sync_to)r - 오브ì " "트%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%sì´ˆ ë™ì•ˆ ìž¬êµ¬ì„±ëœ ê²ƒì´ ì—†ìŠµë‹ˆë‹¤." #, python-format msgid "Nothing replicated for %s seconds." msgstr "%sì´ˆ ë™ì•ˆ ë³µì œëœ ê²ƒì´ ì—†ìŠµë‹ˆë‹¤." msgid "Object" msgstr "오브ì íЏ" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Object PUTì—서 409ì— ëŒ€í•´ 202를 리턴함: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Object PUTì—서 412를 리턴함, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "오브ì íЏ ê°ì‚¬(%(type)s) \"%(mode)s\" 모드 완료: %(elapsed).02fs. ì´ ê²©ë¦¬ í•­" "목: %(quars)d, ì´ ì˜¤ë¥˜ 수: %(errors)d, ì´ íŒŒì¼/ì´ˆ: %(frate).2f, ì´ ë°”ì´íЏ/" "ì´ˆ: %(brate).2f, ê°ì‚¬ 시간: %(audit).2f, ì†ë„: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "오브ì íЏ ê°ì‚¬(%(type)s). 
%(start_time)s ì´í›„: 로컬: %(passes)dê°œ 통과, " "%(quars)dê°œ 격리, %(errors)dê°œ 오류, 파ì¼/ì´ˆ: %(frate).2f, ë°”ì´íЏ/ì´ˆ: " "%(brate).2f, ì´ ì‹œê°„: %(total).2f, ê°ì‚¬ 시간: %(audit).2f, ì†ë„: " "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "오브ì íЏ ê°ì‚¬ 통계: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "오브ì íЏ 재구성 완료(ì¼ íšŒ). (%.02fë¶„)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "오브ì íЏ 재구성 완료. (%.02fë¶„)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "오브ì íЏ 복제 완료(ì¼ íšŒ). (%.02fë¶„)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "오브ì íЏ 복제 완료. (%.02fë¶„)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "오브ì íЏ 서버ì—서 %sê°œì˜ ë¶ˆì¼ì¹˜ etag를 리턴함" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "오브ì íЏ ì—…ë°ì´íЏ 스윕 완료: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-Toì— ë§¤ê°œë³€ìˆ˜, 조회, ë‹¨íŽ¸ì´ í—ˆìš©ë˜ì§€ 않ìŒ" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "파티션 시간: 최대 %(max).4fì´ˆ, 최소 %(min).4fì´ˆ, 중간 %(med).4fì´ˆ" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-Toì— ê²½ë¡œê°€ 필요함" #, python-format msgid "Problem cleaning up %s" msgstr "%s 정리 문제 ë°œìƒ" #, python-format msgid "Profiling Error: %s" msgstr "프로파ì¼ë§ 오류: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "디렉토리가 아니어서 %(hsh_path)sì„(를) %(quar_path)sì— ê²©ë¦¬í•¨" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "디렉토리가 아니어서 %(object_path)sì„(를) %(quar_path)sì— ê²©ë¦¬í•¨" #, python-format msgid "Quarantining DB %s" msgstr "ë°ì´í„°ë² ì´ìФ %s 격리" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "%(account)s/%(container)s/%(object)sì— ëŒ€í•œ Ratelimit 휴면 로그: %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d ë°ì´í„°ë² ì´ìŠ¤ë¥¼ 제거함" #, python-format msgid "Removing %s objects" msgstr "%s 오브ì íЏ 제거 중" #, python-format msgid "Removing partition: %s" msgstr "파티션 제거: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "ìž˜ëª»ëœ pid %(pid)dì˜ pid íŒŒì¼ %(pid_file)s 제거" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "pidê°€ 올바르지 ì•Šì€ pid íŒŒì¼ %s 제거" #, python-format msgid "Removing stale pid file %s" msgstr "ì‹œê°„ì´ ê²½ê³¼ëœ pid íŒŒì¼ %sì„(를) 제거하는 중 " msgid "Replication run OVER" msgstr "복제 실행 대ìƒ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "블랙리스트 지정으로 ì¸í•´ 497ì´ ë¦¬í„´ë¨: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s(으)로 %(meth)sì— ëŒ€í•œ 498ì„ ë¦¬í„´í•©ë‹ˆë‹¤. 전송률 제한" "(최대 휴면) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "ë§ ë³€ê²½ì´ ë°œê²¬ë˜ì—ˆìŠµë‹ˆë‹¤. 현재 재구성 ì „ë‹¬ì„ ì¤‘ë‹¨í•©ë‹ˆë‹¤." msgid "Ring change detected. Aborting current replication pass." msgstr "ë§ ë³€ê²½ì´ ë°œê²¬ë˜ì—ˆìŠµë‹ˆë‹¤. 현재 복제 ì „ë‹¬ì„ ì¤‘ë‹¨í•©ë‹ˆë‹¤." #, python-format msgid "Running %s once" msgstr "%sì„(를) 한 번 실행" msgid "Running object reconstructor in script mode." msgstr "오브ì íЏ 재구성ìžë¥¼ 스í¬ë¦½íЏ 모드로 실행 중입니다." msgid "Running object replicator in script mode." 
msgstr "오브ì íЏ 복제ìžë¥¼ 스í¬ë¦½íЏ 모드로 실행 중입니다." #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s ì´í›„: %(sync)s ë™ê¸°í™”ë¨ [%(delete)s ì‚­ì œ, %(put)s 배치], %(skip)s ê±´" "너뜀, %(fail)s 실패" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "검사 경과 시간 %(time)s: 계정 검사A: %(passed)s ì •ìƒ ,%(failed)s 실패" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "%(time)s ì´í›„: 컨테ì´ë„ˆ ê°ì‚¬: %(pass)s ê°ì‚¬ 전달, %(fail)s ê°ì‚¬ 실패" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "마운트ë˜ì§€ 않았으므로 %(device)sì„(를) 건너뜀" #, python-format msgid "Skipping %s as it is not mounted" msgstr "마운트ë˜ì§€ 않는 %s를 건너 뛰기" #, python-format msgid "Starting %s" msgstr "%s 시작 중" msgid "Starting object reconstruction pass." msgstr "오브ì íЏ 재구성 ì „ë‹¬ì„ ì‹œìž‘í•©ë‹ˆë‹¤." msgid "Starting object reconstructor in daemon mode." msgstr "오브ì íЏ 재구성ìžë¥¼ 디먼 모드로 시작합니다." msgid "Starting object replication pass." msgstr "오브ì íЏ 복제 ì „ë‹¬ì„ ì‹œìž‘í•©ë‹ˆë‹¤." msgid "Starting object replicator in daemon mode." msgstr "오브ì íЏ 복제ìžë¥¼ 디먼 모드로 시작합니다." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s(%(time).03f)ì—서 %(src)sì˜ rsync 성공" msgid "The file type are forbidden to access!" msgstr "ì´ íŒŒì¼ ìœ í˜•ì— ëŒ€í•œ 액세스가 금지ë˜ì—ˆìŠµë‹ˆë‹¤!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "컨테ì´ë„ˆì˜ ì´ %(key)sê°€ (%(total)s) ê³¼ %(key)sì˜ ì´í•© (%(sum)s)ê°€ ì¼ì¹˜í•˜ì§€ " "않습니다." #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)sì—서 제한시간 초과 예외 ë°œìƒ" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s ì‹œë„ ì¤‘" #, python-format msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s ì‹œë„ ì¤‘" msgid "Trying to read during GET" msgstr "가져오기 중 ì½ê¸°ë¥¼ 시ë„함" msgid "Trying to read during GET (retrying)" msgstr "가져오기(재시ë„) 중 ì½ê¸°ë¥¼ 시ë„함" msgid "Trying to send to client" msgstr "í´ë¼ì´ì–¸íŠ¸ë¡œ 전송 ì‹œë„ ì¤‘" #, python-format msgid "Trying to sync suffixes with %s" msgstr "%sê³¼(와) 접미사를 ë™ê¸°í™”하려고 시ë„" #, python-format msgid "Trying to write to %s" msgstr "%sì— ì“°ê¸° ì‹œë„ ì¤‘" msgid "UNCAUGHT EXCEPTION" msgstr "미발견 예외" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "libcì—서 %sì„(를) ì°¾ì„ ìˆ˜ 없습니다. no-op로 남겨 둡니다." #, python-format msgid "Unable to locate config for %s" msgstr "%sì˜ êµ¬ì„±ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "libcì—서 fallocate, posix_fallocate를 ì°¾ì„ ìˆ˜ 없습니다. no-op로 남겨 둡니다." 
#, python-format msgid "Unable to read config from %s" msgstr "%sì—서 êµ¬ì„±ì„ ì½ì„ 수 ì—†ìŒ" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "권한 부여 í•´ì œ %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "처리ë˜ì§€ ì•Šì€ ì˜ˆì™¸" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "GETì„ ì‹œë„하는 중 알 수 없는 예외 ë°œìƒ: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)sì˜ ì—…ë°ì´íЏ 보고서 실패" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)sì˜ ì—…ë°ì´íЏ 보고서를 발송함" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "경고: SSLì€ í…ŒìŠ¤íŠ¸ìš©ìœ¼ë¡œë§Œ 사용해야 합니다. 프로ë•ì…˜ 배치ì—는 외부 SSL 종료" "를 사용하십시오." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "경고: íŒŒì¼ ë””ìŠ¤í¬ë¦½í„° 한계를 수정할 수 없습니다. 비루트로 실행 중ì¸ì§€ 확ì¸í•˜" "십시오." msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "경고: 최대 프로세스 한계를 수정할 수 없습니다. 비루트로 실행 중ì¸ì§€ 확ì¸í•˜ì‹­" "시오." msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "경고: 메모리 한계를 수정할 수 없습니다. 비루트로 실행 중ì¸ì§€ 확ì¸í•˜ì‹­ì‹œì˜¤." msgid "Warning: Cannot ratelimit without a memcached client" msgstr "경고: memcached í´ë¼ì´ì–¸íЏ ì—†ì´ ì „ì†¡ë¥ ì„ ì œí•œí•  수 ì—†ìŒ" #, python-format msgid "method %s is not allowed." msgstr "메소드 %sì´(ê°€) 허용ë˜ì§€ 않습니다." msgid "no log file found" msgstr "로그 파ì¼ì„ ì°¾ì„ ìˆ˜ ì—†ìŒ" msgid "odfpy not installed." msgstr "odfpyê°€ 설치ë˜ì–´ 있지 않습니다." #, python-format msgid "plotting results failed due to %s" msgstr "%s(으)로 ì¸í•´ ê²°ê³¼ 표시 실패" msgid "python-matplotlib not installed." msgstr "python-matplotlibê°€ 설치ë˜ì–´ 있지 않습니다." swift-2.17.1/swift/locale/pt_BR/0000775000175000017500000000000013435012120016350 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/pt_BR/LC_MESSAGES/0000775000175000017500000000000013435012120020135 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/pt_BR/LC_MESSAGES/swift.po0000666000175000017500000007135713435012015021653 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andre Campos Bezerra , 2015 # Lucas Ribeiro , 2014 # thiagol , 2015 # Volmar Oliveira Junior , 2014 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: pt_BR\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Portuguese (Brazil)\n" msgid "" "\n" "user quit" msgstr "" "\n" "encerramento do usuário" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sufixos verificados – %(hashed).2f%% de hash, %(synced).2f%% " "sincronizados" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partições replicadas em " "%(time).2fs (%(rate).2f/seg, %(remaining)s restantes)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s sucessos, %(failure)s falhas" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s retornando 503 para %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s já iniciado..." #, python-format msgid "%s does not exist" msgstr "%s não existe" #, python-format msgid "%s is not mounted" msgstr "%s não está montado" #, python-format msgid "%s responded as unmounted" msgstr "%s respondeu como não montado" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Reconfiguração da conexão por peer" #, python-format msgid ", %s containers deleted" msgstr ", %s containers apagados" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s containers possivelmente restando" #, python-format msgid ", %s containers remaining" msgstr ", %s containers restando" #, python-format msgid ", %s objects deleted" msgstr ", %s objetos apagados" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objetos possivelmente restando" #, python-format msgid ", %s objects remaining" msgstr ", %s objetos restando" #, python-format msgid ", elapsed: %.02fs" msgstr ", passados: %.02fs" msgid ", return codes: " msgstr ", códigos de retorno:" msgid "Account" msgstr "Conta" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoria de conta em modo \"único\" finalizado: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Passo de auditoria de conta finalizado: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Tentativa de replicação do %(count)d dbs em%(time).5f segundos (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de ressincronização inválido: %(ret)d <-%(args)s" msgid "Begin account audit \"once\" mode" msgstr "Iniciar auditoria de conta em modo \"único\"" msgid "Begin account audit pass." msgstr "Iniciando passo de auditoria de conta." msgid "Begin container audit \"once\" mode" msgstr "Inicie o modo \"único\" da auditoria do contêiner" msgid "Begin container audit pass." 
msgstr "Inicie a aprovação da auditoria do contêiner." msgid "Begin container sync \"once\" mode" msgstr "Inicie o modo \"único\" de sincronização do contêiner" msgid "Begin container update single threaded sweep" msgstr "Inicie a varredura de encadeamento único da atualização do contêiner" msgid "Begin container update sweep" msgstr "Inicie a varredura de atualização do contêiner" msgid "Begin object update single threaded sweep" msgstr "Inicie a varredura de encadeamento único da atualização do objeto" msgid "Begin object update sweep" msgstr "Inicie a varredura da atualização do objeto" #, python-format msgid "Beginning pass on account %s" msgstr "Iniciando a estapa nas contas %s" msgid "Beginning replication run" msgstr "Começando execução de replicação" msgid "Broker error trying to rollback locked connection" msgstr "Erro do Broker ao tentar retroceder a conexão bloqueada" #, python-format msgid "Can not access the file %s." msgstr "Não é possível acessar o arquivo %s." #, python-format msgid "Can not load profile data from %s." msgstr "Não é possível carregar dados do perfil a partir de %s." #, python-format msgid "Client did not read from proxy within %ss" msgstr "O cliente não leu no proxy dentro de %ss" msgid "Client disconnected on read" msgstr "Cliente desconectado durante leitura" msgid "Client disconnected without sending enough data" msgstr "Cliente desconecatdo sem ter enviado dados suficientes" msgid "Client disconnected without sending last chunk" msgstr "Cliente desconectado sem ter enviado o último chunk" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Caminho do cliente %(client)s não corresponde ao caminho armazenado nos " "metadados do objeto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Opção de configuração internal_client_conf_path não definida. Usando a " "configuração padrão. 
Consulte internal-client.conf-sample para obter opções" msgid "Connection refused" msgstr "Conexão recusada" msgid "Connection timeout" msgstr "Tempo limite de conexão" msgid "Container" msgstr "Contêiner" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Modo \"único\" da auditoria do contêiner concluído: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Aprovação da auditoria do contêiner concluída: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Modo \"único\" de sincronização do contêiner concluído: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Varredura de encadeamento único da atualização do contêiner concluída: " "%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s " "sem alterações" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Varredura da atualização do contêiner concluída: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Varredura da atualização do contêiner de %(path)s concluída: " "%(elapsed).02fs, %(success)s com êxito, %(fail)s com falha, %(no_change)s " "sem alterações" #, python-format msgid "Data download error: %s" msgstr "Erro ao fazer download de dados: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Dispositivos finalizados: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRO %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERRO %(status)d %(body)s Do Servidor %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERRO %(status)d %(body)s No Servidor de Objetos re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRO %(status)d Expectativa: 100-continuar Do Servidor de Objeto" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde): Resposta %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERRO Resposta inválida %(status)s a partir de %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERRO Tempo limite de leitura do cliente (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERRO A atualização do contêiner falhou (salvando para atualização assíncrona " "posterior): %(status)d resposta do %(ip)s:%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERRO Não foi possível recuperar as informações da conta %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERRO Não foi possível obter informações do contêiner %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERROR DiskFile %(data_file)s falha ao fechar: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERRO Exceção causando clientes a desconectar" #, python-format msgid 
"ERROR Exception transferring data to object servers %s" msgstr "ERRO Exceção ao transferir dados para os servidores de objeto %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERRO Falha ao pegar meu próprio IPs?" msgid "ERROR Insufficient Storage" msgstr "ERRO Capacidade insuficiente" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "ERRO O objeto %(obj)s falhou ao auditar e ficou em quarentena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERRO Problema de seleção, em quarentena %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERRO Drive remoto não montado %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERRO Sincronizando %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERRO Sincronizando %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERRO Tentando auditar %s" msgid "ERROR Unhandled exception in request" msgstr "ERRO Exceção não manipulada na solicitação" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ erro com %(method)s %(path)s" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERRO A atualização da conta falhou com %(ip)s:%(port)s/%(device)s (tente " "novamente mais tarde): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERRO arquivo pendente assíncrono com nome inesperado %s" msgid "ERROR auditing" msgstr "Erro auditando" #, python-format msgid "ERROR auditing: %s" msgstr "ERRO auditoria: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERRO A atualização de contêiner falhou com %(ip)s:%(port)s/%(dev)s (salvando " "para atualização assíncrona posterior)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERRO lendo resposta HTTP de %s" #, python-format msgid "ERROR reading db %s" msgstr "ERRO lendo db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERRO rsync falhou com %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERRO sincronizando %(file)s com nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERRO tentando replicar" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERRO enquanto tentaava limpar %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERRO com %(type)s do servidor %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERRO com as supressões de carregamento a partir de %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERRO com o servidor remoto %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERRO: Falha ao obter caminhos para partições de unidade: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERRO: Não é possível acessar %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERRO: Não é possível executar a auditoria: %s" msgid "Error hashing suffix" 
msgstr "Erro ao efetuar hash do sufixo" msgid "Error listing devices" msgstr "Erro ao listar dispositivos" #, python-format msgid "Error on render profiling results: %s" msgstr "Erro na renderização de resultados de criação de perfil: %s" msgid "Error parsing recon cache file" msgstr "Erro ao analisar o arquivo de cache de reconhecimento" msgid "Error reading recon cache file" msgstr "Erro ao ler o arquivo de cache de reconhecimento" msgid "Error reading ringfile" msgstr "Erro na leitura do ringfile" msgid "Error reading swift.conf" msgstr "Erro ao ler swift.conf" msgid "Error retrieving recon data" msgstr "Erro ao recuperar dados de reconhecimento" msgid "Error syncing handoff partition" msgstr "Erro ao sincronizar a partição de handoff" msgid "Error syncing partition" msgstr "Erro ao sincronizar partição" #, python-format msgid "Error syncing with node: %s" msgstr "Erro ao sincronizar com o nó: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Erro ao tentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Erro: Ocorreu um erro" msgid "Error: missing config path argument" msgstr "Erro: argumento do caminho de configuração ausente" #, python-format msgid "Error: unable to locate %s" msgstr "Erro: não é possível localizar %s" msgid "Exception dumping recon cache" msgstr "Exceção dump de cache de reconhecimento" msgid "Exception in top-level account reaper loop" msgstr "Exceção no loop do removedor da conta de nível superior" msgid "Exception in top-level replication loop" msgstr "Exceção no loop de replicação de nível superior" msgid "Exception in top-levelreconstruction loop" msgstr "Exceção no loop de reconstrução de nível superior" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exceção com %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Exceção com a conta %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exceção com os containers para a conta %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exceção com objetos para o container %(container)s para conta %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Expectativa: 100-continuar em %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Cadeia CNAME a seguir para %(given_domain)s para%(found_domain)s" msgid "Found configs:" msgstr "Localizados arquivos de configuração:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "O primeiro modo de handoffs ainda possui handoffs. Interrompendo a aprovação " "da replicação atual." 
msgid "Host unreachable" msgstr "Destino inalcançável" #, python-format msgid "Incomplete pass on account %s" msgstr "Estapa incompleta nas contas %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato X-Container-Sync-To inválido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host inválido %r em X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendente inválida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Resposta inválida %(resp)s a partir de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Resposta inválida %(resp)s a partir de %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Esquema inválido %r em X-Container-Sync-To, deve ser \" // \", \"http\" ou " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Eliminando a ressincronização de longa execução: %s" msgid "Lockup detected.. killing live coros." msgstr "Bloqueio detectado... eliminando núcleos em tempo real." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s mapeado para %(found_domain)s" #, python-format msgid "No %s running" msgstr "Nenhum %s rodando" #, python-format msgid "No permission to signal PID %d" msgstr "Nenhuma permissão para PID do sinal %d" #, python-format msgid "No policy with index %s" msgstr "Nenhuma política com índice %s" #, python-format msgid "No realm key for %r" msgstr "Nenhuma chave do domínio para %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Erro de nó limitado %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" "Insuficiente número de servidores de objeto confirmaram (%d confirmados)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Não localizado %(sync_from)r => %(sync_to)r – objeto " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nada foi reconstruído durante %s segundos." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nada foi replicado para %s segundos." msgid "Object" msgstr "Objeto" msgid "Object PUT" msgstr "PUT de objeto" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Objeto PUT retornando 202 para a versão 409: %(req_timestamp)s < = " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "PUT de objeto retornando 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Modo \"%(mode)s\" da auditoria de objeto (%(type)s) concluído: " "%(elapsed).02fs. Total em quarentena: %(quars)d, Total de erros: %(errors)d, " "Total de arquivos/seg: %(frate).2f, Total de bytes/seg: %(brate).2f, Tempo " "de auditoria: %(audit).2f, Taxa: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoria de objeto (%(type)s). Desde %(start_time)s: Localmente: %(passes)d " "aprovados, %(quars)d em quarentena, %(errors)d erros, arquivos/seg: " "%(frate).2f, bytes/seg: %(brate).2f, Tempo total: %(total).2f, Tempo de " "auditoria: %(audit).2f, Taxa: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Estatísticas de auditoria do objeto: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Reconstrução do objeto concluída (única). (%.02f minutos)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstrução do objeto concluída. (%.02f minutos)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replicação completa do objeto (única). (%.02f minutos)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Replicação completa do objeto. (%.02f minutos)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Servidores de objeto retornaram %s etags incompatíveis" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Varredura da atualização de objeto concluída: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parâmetros, consultas e fragmentos não permitidos em X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Tempos de partição: máximo %(max).4fs, mínimo %(min).4fs, médio %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Caminho necessário em X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problema ao limpar %s" #, python-format msgid "Profiling Error: %s" msgstr "Erro da Criação de Perfil: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Em quarentena %(hsh_path)s para %(quar_path)s porque ele não é um diretório" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Em quarentena %(object_path)s para %(quar_path)s porque ele não é um " "diretório" #, python-format msgid "Quarantining DB %s" msgstr "Quarentenando BD %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Log de suspensão do limite de taxa: %(sleep)s para %(account)s/%(container)s/" "%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Removidos %(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "Removendo %s objetos" #, python-format msgid "Removing partition: %s" msgstr "Removendo partição: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Removendo arquivo pid %(pid_file)s com pid errado %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Removendo o arquivo pid %s com pid inválido" #, python-format msgid "Removing stale pid file %s" msgstr "Removendo o arquivo pid %s antigo" msgid "Replication run OVER" msgstr "Execução de replicação TERMINADA" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Retornando 497 por causa da lista negra: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . 
Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Retornando 498 para %(meth)s para %(acc)s/%(cont)s/%(obj)s. Limite de taxa " "(Suspensão Máxima) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Mudança no anel detectada. Interrompendo a passagem de reconstrução atual." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Mudança no anel detectada. Interrompendo a passagem de replicação atual." #, python-format msgid "Running %s once" msgstr "Executando %s uma vez" msgid "Running object reconstructor in script mode." msgstr "Executando o reconstrutor do objeto no modo de script." msgid "Running object replicator in script mode." msgstr "Executando o replicador do objeto no modo de script." #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Desde %(time)s: %(sync)s sincronizados [%(delete)s exclusões, %(put)s " "colocações], %(skip)s ignorados, %(fail)s com falha" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Desde %(time)s: Auditoria de contas: %(passed)s auditorias passaram," "%(failed)s auditorias falharam" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Desde %(time)s: Auditorias de contêiner: %(pass)s passaram na auditoria, " "%(fail)s falharam na auditoria" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Pulando %(device)s porque não está montado" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Pulando %s porque não está montado" #, python-format msgid "Starting %s" msgstr "Iniciando %s" msgid "Starting object reconstruction pass." msgstr "Iniciando a passagem de reconstrução de objeto." msgid "Starting object reconstructor in daemon mode." msgstr "Iniciando o reconstrutor do objeto no modo daemon." msgid "Starting object replication pass." msgstr "Iniciando a passagem de replicação de objeto." msgid "Starting object replicator in daemon mode." msgstr "Iniciando o replicador do objeto no modo daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Ressincronização bem-sucedida de %(src)s em %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "O tipo de arquivo é de acesso proibido!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "O total %(key)s para o container (%(total)s) não confere com a soma %(key)s " "entre as políticas (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exceção de tempo limite com %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentando %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentando GET %(full_path)s" msgid "Trying to read during GET" msgstr "Tentando ler durante GET" msgid "Trying to read during GET (retrying)" msgstr "Tentando ler durante GET (tentando novamente)" msgid "Trying to send to client" msgstr "Tentando enviar para o cliente" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentando sincronizar sufixos com %s" #, python-format msgid "Trying to write to %s" msgstr "Tentando escrever para %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEÇÃO NÃO CAPTURADA" #, python-format msgid "Unable to locate %s in libc. 
Leaving as a no-op." msgstr "Não é possível localizar %s em libc. Saindo como um não operacional." #, python-format msgid "Unable to locate config for %s" msgstr "Não é possível localizar configuração para %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Não é possível localizar fallocate, posix_fallocate em libc. Saindo como um " "não operacional." #, python-format msgid "Unable to read config from %s" msgstr "Não é possível ler a configuração a partir de %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Não autorizado %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "Exceção não-tratada" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "Exceção inesperada ao tentar GET: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Atualize o relatório com falha para %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Atualize o relatório enviado para %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVISO: SSL deve ser ativada somente para fins de teste. Use rescisão SSL " "externa para uma implementação de produção." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite do descritor de arquivo. Executar " "como não raiz?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite máximo do processo. Executar como " "não raiz?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVISO: Não é possível modificar o limite de memória. Executar como não raiz?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Aviso: Não é possível um limite de taxa sem um cliente memcached" #, python-format msgid "method %s is not allowed." msgstr "o método %s não é permitido." msgid "no log file found" msgstr "Nenhum arquivo de log encontrado" msgid "odfpy not installed." msgstr "odfpy não está instalado." #, python-format msgid "plotting results failed due to %s" msgstr "plotar resultados falhou devido a %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib não instalado." swift-2.17.1/swift/locale/it/0000775000175000017500000000000013435012120015756 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/it/LC_MESSAGES/0000775000175000017500000000000013435012120017543 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/it/LC_MESSAGES/swift.po0000666000175000017500000007311013435012015021246 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:42+0000\n" "Last-Translator: Copied by Zanata \n" "Language: it\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Italian\n" msgid "" "\n" "user quit" msgstr "" "\n" "l'utente è uscito" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffissi controllati - %(hashed).2f%% con hash, %(synced).2f%% " "sincronizzati" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partizioni replicate in " "%(time).2fs (%(rate).2f/sec, %(remaining)s rimanenti)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s operazioni con esito positivo, %(failure)s errori" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s restituisce 503 per %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s già avviato..." #, python-format msgid "%s does not exist" msgstr "%s non esiste" #, python-format msgid "%s is not mounted" msgstr "%s non è montato" #, python-format msgid "%s responded as unmounted" msgstr "%s ha risposto come smontato" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Connessione reimpostata dal peer" #, python-format msgid ", %s containers deleted" msgstr ", %s contenitori eliminati" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s contenitori probabilmente rimanenti" #, python-format msgid ", %s containers remaining" msgstr ", %s contenitori rimanenti" #, python-format msgid ", %s objects deleted" msgstr ", %s oggetti eliminati" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s oggetti probabilmente rimanenti" #, python-format msgid ", %s objects remaining" msgstr ", %s oggetti rimanenti" #, python-format msgid ", elapsed: %.02fs" msgstr ", trascorso: %.02fs" msgid ", return codes: " msgstr ", codici di ritorno: " msgid "Account" msgstr "Conto" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "Il conto %(account)s non è stato verificato dal %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Modalità \"once\" verifica account completata: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Trasmissione verifica account completata: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "È stato eseguito un tentativo di replicare %(count)d dbs in %(time).5f " "secondi (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Codice di ritorno rsync errato: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Avvio modalità \"once\" verifica account" msgid "Begin account audit pass." 
msgstr "Avvio trasmissione verifica account." msgid "Begin container audit \"once\" mode" msgstr "Avvio modalità \"once\" verifica contenitore" msgid "Begin container audit pass." msgstr "Avvio trasmissione verifica contenitore." msgid "Begin container sync \"once\" mode" msgstr "Avvio della modalità \"once\" di sincronizzazione contenitore" msgid "Begin container update single threaded sweep" msgstr "Avvio pulizia a singolo thread aggiornamento contenitore" msgid "Begin container update sweep" msgstr "Avvio pulizia aggiornamento contenitore" msgid "Begin object update single threaded sweep" msgstr "Avvio pulizia a singolo thread aggiornamento oggetto" msgid "Begin object update sweep" msgstr "Avvio pulizia aggiornamento oggetto" #, python-format msgid "Beginning pass on account %s" msgstr "Avvio della trasmissione sull'account %s" msgid "Beginning replication run" msgstr "Avvio replica" msgid "Broker error trying to rollback locked connection" msgstr "" "Errore del broker durante il tentativo di eseguire il rollback della " "connessione bloccata" #, python-format msgid "Can not access the file %s." msgstr "Impossibile accedere al file %s." #, python-format msgid "Can not load profile data from %s." msgstr "Impossibile caricare i dati del profilo da %s." #, python-format msgid "Client did not read from proxy within %ss" msgstr "Il client non ha eseguito la lettura dal proxy in %ss" msgid "Client disconnected on read" msgstr "Client scollegato alla lettura" msgid "Client disconnected without sending enough data" msgstr "Client disconnesso senza inviare dati sufficienti" msgid "Client disconnected without sending last chunk" msgstr "Client disconnesso senza inviare l'ultima porzione" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Il percorso del client %(client)s non corrisponde al percorso memorizzato " "nei metadati dell'oggetto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Opzione di configurazione internal_client_conf_path non definita. Viene " "utilizzata la configurazione predefinita, vedere l'esempio internal-client." 
"conf-sample per le opzioni" msgid "Connection refused" msgstr "Connessione rifiutata" msgid "Connection timeout" msgstr "Timeout della connessione" msgid "Container" msgstr "Contenitore" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Modalità \"once\" verifica contenitore completata: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Trasmissione verifica contenitore completata: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" "Modalità \"once\" di sincronizzazione del contenitore completata: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Pulizia a singolo thread aggiornamento contenitore completata: " "%(elapsed).02fs, %(success)s operazioni con esito positivo, %(fail)s errori, " "%(no_change)s senza modifiche" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Pulizia aggiornamento contenitore completata: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Pulizia aggiornamento contenitore di %(path)s completata: %(elapsed).02fs, " "%(success)s operazioni con esito positivo, %(fail)s errori, %(no_change)s " "senza modifiche" #, python-format msgid "Data download error: %s" msgstr "Errore di download dei dati: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Trasmissione dei dispositivi completata: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERRORE %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERRORE %(status)d %(body)s dal server %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERRORE %(status)d %(body)s Dal server degli oggetti re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERRORE %(status)d Previsto: 100-continue dal server degli oggetti" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERRORE Aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente): Risposta " "%(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERRORE Risposta errata %(status)s da %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERRORE Timeout di lettura del client (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERRORE Aggiornamento del contenitore non riuscito (salvataggio per " "l'aggiornamento asincrono successivamente): %(status)d risposta da %(ip)s:" "%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERRORE Impossibile ottenere le informazioni sull'account %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERRORE Impossibile ottenere le informazioni sul contenitore %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERRORE Errore di chiusura DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing 
client disconnect" msgstr "ERRORE Eccezione che causa la disconnessione del client" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ERRORE Eccezione durante il trasferimento di dati nel server degli oggetti %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERRORE Impossibile ottenere i propri IP?" msgid "ERROR Insufficient Storage" msgstr "ERRORE Memoria insufficiente" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERRORE L'oggetto %(obj)s non ha superato la verifica ed è stato inserito " "nella quarantena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERRORE Problema relativo a pickle, inserimento di %s nella quarantena" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERRORE Unità remota non montata %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERRORE durante la sincronizzazione di %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERRORE durante la sincronizzazione di %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERRORE durante il tentativo di eseguire la verifica %s" msgid "ERROR Unhandled exception in request" msgstr "ERRORE Eccezione non gestita nella richiesta" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERRORE errore __call__ con %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERRORE aggiornamento dell'account non riuscito con %(ip)s:%(port)s/" "%(device)s (verrà eseguito un nuovo tentativo successivamente): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERRORE file in sospeso asincrono con nome non previsto %s" msgid "ERROR auditing" msgstr "ERRORE durante la verifica" #, python-format msgid "ERROR auditing: %s" msgstr "ERRORE durante la verifica: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERRORE aggiornamento del contenitore non riuscito con %(ip)s:%(port)s/" "%(dev)s (salvataggio per aggiornamento asincrono successivamente)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERRORE durante la lettura della risposta HTTP da %s" #, python-format msgid "ERROR reading db %s" msgstr "ERRORE durante la lettura del db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERRORE rsync non riuscito con %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERRORE durante la sincronizzazione di %(file)s con il nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERRORE durante il tentativo di eseguire la replica" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERRORE durante il tentativo di ripulire %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "ERRORE relativo al server %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERRORE relativo al caricamento delle eliminazioni da %s: " #, python-format msgid "ERROR with remote server 
%(ip)s:%(port)s/%(device)s" msgstr "ERRORE relativo al server remoto %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERRORE: Impossibile ottenere i percorsi per gestire le partizioni: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERRORE: Impossibile accedere a %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERRORE: Impossibile eseguire la verifica: %s" msgid "Error hashing suffix" msgstr "Errore durante l'hash del suffisso" msgid "Error listing devices" msgstr "Errore durante l'elenco dei dispositivi" #, python-format msgid "Error on render profiling results: %s" msgstr "" "Errore durante la visualizzazione dei risultati della creazione dei profili: " "%s" msgid "Error parsing recon cache file" msgstr "Errore durante l'analisi del file della cache di riconoscimento" msgid "Error reading recon cache file" msgstr "Errore durante la lettura del file della cache di riconoscimento" msgid "Error reading ringfile" msgstr "Errore durante la lettura del ringfile" msgid "Error reading swift.conf" msgstr "Errore durante la lettura di swift.conf" msgid "Error retrieving recon data" msgstr "Errore durante il richiamo dei dati di riconoscimento" msgid "Error syncing handoff partition" msgstr "Errore durante la sincronizzazione della partizione di passaggio" msgid "Error syncing partition" msgstr "Errore durante la sincronizzazione della partizione" #, python-format msgid "Error syncing with node: %s" msgstr "Errore durante la sincronizzazione con il nodo: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Errore nel tentativo di ricreare %(path)s policy#%(policy)d frag#" "%(frag_index)s" msgid "Error: An error occurred" msgstr "Errore: Si è verificato un errore" msgid "Error: missing config path argument" msgstr "Errore: Argomento path della configurazione mancante" #, python-format msgid "Error: unable to locate %s" msgstr "Errore: impossibile individuare %s" msgid "Exception dumping recon cache" msgstr "Eccezione durante il dump della cache di recon" msgid "Exception in top-level account reaper loop" msgstr "Eccezione nel loop reaper dell'account di livello superiore" msgid "Exception in top-level replication loop" msgstr "Eccezione nel loop di replica di livello superiore" msgid "Exception in top-levelreconstruction loop" msgstr "Eccezione nel loop di ricostruzione di livello superiore" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Eccezione relativa a %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Eccezione relativa all'account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Eccezione relativa ai contenitori per l'account %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Eccezione relativa agli oggetti per il contenitore %(container)s per " "l'account %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Previsto: 100-continue su %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" "Viene seguita la catena CNAME per %(given_domain)s verso %(found_domain)s" msgid "Found configs:" msgstr "Configurazioni trovate:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." 
msgstr "" "Nella prima modalità di passaggio ci sono ancora passaggi restanti. " "Interruzione del passaggio di replica corrente." msgid "Host unreachable" msgstr "Host non raggiungibile" #, python-format msgid "Incomplete pass on account %s" msgstr "Trasmissione non completa sull'account %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato X-Container-Sync-To non valido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host non valido %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Voce in sospeso non valida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Risposta non valida %(resp)s da %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Risposta non valida %(resp)s da %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Schema non valido %r in X-Container-Sync-To, deve essere \"//\", \"http\" " "oppure \"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Chiusura rsync ad elaborazione prolungata: %s" msgid "Lockup detected.. killing live coros." msgstr "Blocco rilevato... chiusura dei coros attivi." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s associato a %(found_domain)s" #, python-format msgid "No %s running" msgstr "Nessun %s in esecuzione" #, python-format msgid "No permission to signal PID %d" msgstr "Nessuna autorizzazione per la segnalazione del PID %d" #, python-format msgid "No policy with index %s" msgstr "Nessuna politica con indice %s" #, python-format msgid "No realm key for %r" msgstr "Nessuna chiave dell'area di autenticazione per %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Errore del nodo limitato %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Server degli oggetti riconosciuti non sufficienti (got %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "%(sync_from)r => %(sync_to)r non trovato - oggetto %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nessun elemento ricostruito per %s secondi." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nessun elemento replicato per %s secondi." msgid "Object" msgstr "Oggetto" msgid "Object PUT" msgstr "PUT dell'oggetto" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "Il PUT dell'oggetto ha restituito 202 per 409: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Il PUT dell'oggetto ha restituito 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Modalità \"%(mode)s\" (%(type)s) verifica oggetto completata: " "%(elapsed).02fs. Totale in quarantena: %(quars)d, Totale errori: %(errors)d, " "Totale file/sec: %(frate).2f, Totale byte/sec: %(brate).2f, Tempo verifica: " "%(audit).2f, Velocità: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). 
Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Verifica oggetto (%(type)s). A partire da %(start_time)s: In locale: " "%(passes)d passati, %(quars)d in quarantena, %(errors)d errori file/sec: " "%(frate).2f , byte/sec: %(brate).2f, Tempo totale: %(total).2f, Tempo " "verifica: %(audit).2f, Velocità: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Statistiche verifica oggetto: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Ricostruzione dell'oggetto completata (una volta). (%.02f minuti)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Ricostruzione dell'oggetto completata. (%.02f minuti)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Replica dell'oggetto completata (una volta). (%.02f minuti)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Replica dell'oggetto completata. (%.02f minuti)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "I server dell'oggetto hanno restituito %s etag senza corrispondenza" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Pulizia aggiornamento oggetto completata: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Parametri, query e frammenti non consentiti in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "Tempi partizione: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Percorso richiesto in X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problema durante la ripulitura di %s" #, python-format msgid "Profiling Error: %s" msgstr "Errore di creazione dei profili: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s inserito in quarantena in %(quar_path)s perché non è una " "directory" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s inserito in quarantena in %(quar_path)s perché non è una " "directory" #, python-format msgid "Quarantining DB %s" msgstr "Inserimento in quarantena del DB %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Log di sospensione Ratelimit: %(sleep)s per %(account)s/%(container)s/" "%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Rimossi %(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "Rimozione di oggetti %s" #, python-format msgid "Removing partition: %s" msgstr "Rimozione della partizione: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Rimozione del file pid %(pid_file)s con pid non valido %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Rimozione del file pid %s con pid non valido" #, python-format msgid "Removing stale pid file %s" msgstr "Rimozione del file pid %s obsoleto in corso" msgid "Replication run OVER" msgstr "Esecuzione della replica TERMINATA" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Viene restituito il codice 497 a causa della blacklist: %s" #, python-format msgid "" 
"Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Viene restituito 498 per %(meth)s a %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(numero massimo sospensioni) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Modifica ring rilevata. Interruzione della trasmissione della ricostruzione " "corrente." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Modifica ring rilevata. Interruzione della trasmissione della replica " "corrente." #, python-format msgid "Running %s once" msgstr "Esecuzione di %s una volta" msgid "Running object reconstructor in script mode." msgstr "" "Esecuzione del programma di ricostruzione dell'oggetto in modalità script." msgid "Running object replicator in script mode." msgstr "Esecuzione del programma di replica dell'oggetto in modalità script." #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "A partire da %(time)s: %(sync)s sincronizzati [%(delete)s eliminazioni, " "%(put)s inserimenti], %(skip)s ignorati, %(fail)s non riusciti" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "A partire da %(time)s: Verifiche account: %(passed)s verifiche superate, " "%(failed)s verifiche non superate" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "A partire da %(time)s: Verifiche contenitore: %(pass)s verifiche superate, " "%(fail)s verifiche non superate" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s viene ignorato perché non è montato" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s viene ignorato perché non è montato" #, python-format msgid "Starting %s" msgstr "Avvio di %s" msgid "Starting object reconstruction pass." msgstr "Avvio della trasmissione della ricostruzione dell'oggetto." msgid "Starting object reconstructor in daemon mode." msgstr "Avvio del programma di ricostruzione dell'oggetto in modalità daemon." msgid "Starting object replication pass." msgstr "Avvio della trasmissione della replica dell'oggetto." msgid "Starting object replicator in daemon mode." msgstr "Avvio del programma di replica dell'oggetto in modalità daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Rsync di %(src)s eseguito correttamente su %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Non è consentito l'accesso a questo tipo di file!" 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Il numero totale di %(key)s per il contenitore (%(total)s) non corrisponde " "alla somma di %(key)s tra le politiche (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Eccezione di timeout con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentativo di %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentativo di eseguire GET %(full_path)s" msgid "Trying to read during GET" msgstr "Tentativo di lettura durante GET" msgid "Trying to read during GET (retrying)" msgstr "Tentativo di lettura durante GET (nuovo tentativo)" msgid "Trying to send to client" msgstr "Tentativo di invio al client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentativo di sincronizzazione dei suffissi con %s" #, python-format msgid "Trying to write to %s" msgstr "Tentativo di scrittura in %s" msgid "UNCAUGHT EXCEPTION" msgstr "ECCEZIONE NON RILEVATA" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Impossibile individuare %s in libc. Lasciato come no-op." #, python-format msgid "Unable to locate config for %s" msgstr "Impossibile individuare la configurazione per %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossibile individuare fallocate, posix_fallocate in libc. Lasciato come " "no-op." #, python-format msgid "Unable to read config from %s" msgstr "Impossibile leggere la configurazione da %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r non autorizzato" msgid "Unhandled exception" msgstr "Eccezione non gestita" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Eccezione imprevista nel tentativo di eseguire GET: %(account)r " "%(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Report di aggiornamento non riuscito per %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Report di aggiornamento inviato per %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVVERTENZA: SSL deve essere abilitato solo per scopi di test. Utilizzare la " "terminazione SSL esterna per una distribuzione di produzione." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite del descrittore del file. " "Eseguire come non-root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite del numero massimo di processi. " "Eseguire come non-root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVVERTENZA: Impossibile modificare il limite di memoria. Eseguire come non-" "root?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avvertenza: impossibile eseguire ratelimit senza un client memcached" #, python-format msgid "method %s is not allowed." msgstr "il metodo %s non è consentito." msgid "no log file found" msgstr "nessun file di log trovato" msgid "odfpy not installed." msgstr "odfpy non installato." 
#, python-format msgid "plotting results failed due to %s" msgstr "tracciamento dei risultati non riuscito a causa di %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib non installato." swift-2.17.1/swift/locale/es/0000775000175000017500000000000013435012120015751 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/es/LC_MESSAGES/0000775000175000017500000000000013435012120017536 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/es/LC_MESSAGES/swift.po0000666000175000017500000011074613435012015021250 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. #zanata # Pablo Caruana , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-07-18 10:57+0000\n" "Last-Translator: Pablo Caruana \n" "Language: es\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Spanish\n" msgid "" "\n" "user quit" msgstr "" "\n" "salida del usuario" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralelo, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sufijos comprobados - %(hashed).2f%% con hash, %(synced).2f%% " "sincronizados" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) particiones replicadas en " "%(time).2fs (%(rate).2f/segundo, %(remaining)s restantes)" #, python-format msgid "%(server)s #%(number)d not running (%(conf)s)" msgstr "%(server)s #%(number)d not running (%(conf)s)" #, python-format msgid "%(server)s (%(pid)s) appears to have stopped" msgstr "%(server)s (%(pid)s) parece haberse detenido" #, python-format msgid "%(server)s running (%(pid)s - %(conf)s)" msgstr "%(server)s running (%(pid)s - %(conf)s)" #, python-format msgid "%(server)s running (%(pid)s - %(pid_file)s)" msgstr "%(server)s corriendo (%(pid)s - %(pid_file)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s éxitos, %(failure)s fallos" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s devuelve 503 para %(statuses)s" #, python-format msgid "%(type)s: %(value)s" msgstr "%(type)s: %(value)s" #, python-format msgid "%s already started..." msgstr "%s ya está iniciado..." 
#, python-format msgid "%s does not exist" msgstr "%s no existe" #, python-format msgid "%s is not mounted" msgstr "%s no está montado" #, python-format msgid "%s responded as unmounted" msgstr "%s ha respondido como desmontado" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Restablecimiento de conexión por igual" #, python-format msgid ", %s containers deleted" msgstr ", %s contenedores suprimidos" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s contenedores posiblemente restantes" #, python-format msgid ", %s containers remaining" msgstr ", %s contenedores restantes" #, python-format msgid ", %s objects deleted" msgstr ", %s objetos suprimidos" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objetos posiblemente restantes" #, python-format msgid ", %s objects remaining" msgstr ", %s objectos restantes" #, python-format msgid ", elapsed: %.02fs" msgstr ", transcurrido: %.02fs" msgid ", return codes: " msgstr ", códigos de retorno:" msgid "Account" msgstr "Cuenta" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "La cuenta %(account)s no se ha cosechado desde %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Auditoría de cuenta en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Paso de auditoría de cuenta finalizado: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Se han intentado replicar %(count)d bases de datos en %(time).5f segundos " "(%(rate).5f/s)" #, python-format msgid "Audit Failed for %(path)s: %(err)s" msgstr "Ha fallado la auditoría para %(path)s: %(err)s" #, python-format msgid "Bad key for %(name)r: %(err)s" msgstr "Clave errónea para %(name)r: %(err)s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Código de retorno de resincronización erróneo: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Comenzar auditoría de cuenta en modalidad de \"una vez\"" msgid "Begin account audit pass." msgstr "Comenzar a pasar la auditoría de cuenta." msgid "Begin container audit \"once\" mode" msgstr "Comenzar auditoría de contenedor en modalidad de \"una vez\"" msgid "Begin container audit pass." msgstr "Comenzar a pasar la auditoría de contenedor." msgid "Begin container sync \"once\" mode" msgstr "Comenzar sincronización de contenedor en modalidad de \"una vez\"" msgid "Begin container update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del contenedor" msgid "Begin container update sweep" msgstr "Comenzar el barrido de actualización del contenedor" #, python-format msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" msgstr "" "Comenzar auditoría de objetos en modalidad \"%(mode)s\" mode (%(audi_type)s" "%(description)s)" msgid "Begin object update single threaded sweep" msgstr "Comenzar el barrido de hebra única de actualización del objeto" msgid "Begin object update sweep" msgstr "Comenzar el barrido de actualización del objeto" #, python-format msgid "Beginning pass on account %s" msgstr "Iniciando el paso en la cuenta %s" msgid "Beginning replication run" msgstr "Iniciando la ejecución de la replicación" msgid "Broker error trying to rollback locked connection" msgstr "Error de intermediario al intentar retrotraer una conexión bloqueada" #, python-format msgid "Can not access the file %s." 
msgstr "No se puede acceder al archivo %s." #, python-format msgid "Can not load profile data from %s." msgstr "No se pueden cargar los datos de perfil desde %s." #, python-format msgid "Cannot read %(auditor_status)s (%(err)s)" msgstr "No se puede leer %(auditor_status)s (%(err)s)" #, python-format msgid "Cannot write %(auditor_status)s (%(err)s)" msgstr "No se puede escribir %(auditor_status)s (%(err)s)" #, python-format msgid "Client did not read from proxy within %ss" msgstr "El cliente pudo realizar la lectura desde el proxy en %ss" msgid "Client disconnected on read" msgstr "El cliente se ha desconectado de la lectura" msgid "Client disconnected without sending enough data" msgstr "El cliente se ha desconectado sin enviar suficientes datos" msgid "Client disconnected without sending last chunk" msgstr "El cliente se ha desconectado sin enviar el último fragmento" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "La vía de acceso de cliente %(client)s no coincide con la vía de acceso " "almacenada en los metadatos de objeto %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "La opción de configuración internal_client_conf_path no está definida. Se " "utilizará la configuración predeterminada, Consulte internal-client.conf-" "sample para ver las opciones" msgid "Connection refused" msgstr "Conexión rechazada" msgid "Connection timeout" msgstr "Tiempo de espera de conexión agotado" msgid "Container" msgstr "Contenedor" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Auditoría de contenedor en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Paso de auditoría de contenedor finalizado: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "" "Sincronización de contenedor en modalidad de \"una vez\" finalizada: %.02fs" #, python-format msgid "" "Container sync report: %(container)s, time window start: %(start)s, time " "window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_rows: %(total)s" msgstr "" "Informe de sincronización de contenedores: %(container)s, inicio de la " "ventana de tiempo: %(start)s, extremo ventana de tiempo: %(end)s, " "colocaciones: %(puts)s, publicaciones:: %(posts)s, eliminados: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_filas: %(total)s" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Barrido de hebra única de actualización del contenedor finalizado: " "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin " "cambios" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Barrido de actualización del contenedor finalizado: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Barrido de actualización del contenedor de %(path)s finalizado: " "%(elapsed).02fs, %(success)s con éxito, %(fail)s fallos, %(no_change)s sin " "cambios" #, python-format msgid "" "Could not bind to %(addr)s:%(port)s after trying for 
%(timeout)s seconds" msgstr "" "No se ha podido enlazar a %(addr)s:%(port)s después de intentarlo durante " "%(timeout)ssegundos" #, python-format msgid "Could not load %(conf)r: %(error)s" msgstr "No se ha podido cargar %(conf)r: %(error)s" #, python-format msgid "Data download error: %s" msgstr "Error de descarga de datos: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Paso de dispositivos finalizado: %.02fs" msgid "Did not get a keys dict" msgstr "No tuvimos un diccionario de claves" #, python-format msgid "Directory %(directory)r does not map to a valid policy (%(error)s)" msgstr "" "El directorio %(directory)r no está correlacionado con una política válida " "(%(error)s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERROR %(status)d %(body)s Desde el servidor %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERROR %(status)d %(body)s Desde el servidor de objeto re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERROR %(status)d Esperado: 100-continuo Desde el servidor de objeto" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server" msgstr "" "ERROR %(status)d Intentando %(method)s %(path)s Desde %(type)s de " "Servidor" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde): Respuesta %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR La actualización de la cuenta ha fallado: hay números distintos de " "hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERROR Respuesta errónea %(status)s desde %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR Tiempo de espera de lectura de cliente agotado (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERROR La actualización del contenedor ha fallado (guardando para una " "actualización asíncrona posterior): %(status)d respuesta desde %(ip)s:" "%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR La actualización de la cuenta ha fallado: hay números distintos de " "hosts y dispositivos en la solicitud: %(hosts)s\" frente a %(devices)s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR No se ha podido obtener la información de cuenta %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERROR No se ha podido obtener la información de contenedor %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "ERROR Fallo al cerrar el archivo de disco %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERROR Excepción que provoca la desconexión del cliente" #, python-format msgid "ERROR Exception transferring 
data to object servers %s" msgstr "ERROR Excepción al transferir datos a los servidores de objetos %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERROR ¿No puedo obtener mis propias IP?" msgid "ERROR Insufficient Storage" msgstr "ERROR No hay suficiente almacenamiento" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERROR La auditoría del objeto %(obj)s ha fallado y se ha puesto en " "cuarentena: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERROR Problema de desorden, poniendo %s en cuarentena" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERROR Unidad remota no montada %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERROR al sincronizar %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERROR al sincronizar %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERROR al intentar la auditoría de %s" msgid "ERROR Unhandled exception in request" msgstr "ERROR Excepción no controlada en la solicitud" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR Error de __call__ con %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERROR La actualización de la cuenta ha fallado con %(ip)s:%(port)s/" "%(device)s (se volverá a intentar más tarde): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "ERROR Archivo pendiente de sincronización asíncrona con nombre inesperado %s" msgid "ERROR auditing" msgstr "ERROR de auditoría" #, python-format msgid "ERROR auditing: %s" msgstr "ERROR en la auditoría: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERROR La actualización del contenedor ha fallado con %(ip)s:%(port)s/%(dev)s " "(guardando para una actualización asíncrona posterior)" msgid "ERROR get_keys() missing callback" msgstr "ERROR get_keys() No se proporciona devolución de llamada " #, python-format msgid "ERROR get_keys(): from callback: %s" msgstr "ERROR get_keys() No se proporciona devolución de llamada: %s" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERROR al leer la respuesta HTTP desde %s" #, python-format msgid "ERROR reading db %s" msgstr "ERROR al leer la base de datos %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERROR La resincronización ha fallado con %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERROR al sincronizar %(file)s con el nodo %(node)s" msgid "ERROR trying to replicate" msgstr "ERROR al intentar la replicación" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERROR al intentar limpiar %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERROR con el servidor %(type)s %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERROR con las supresiones de carga desde %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERROR con el servidor remoto %(ip)s:%(port)s/%(device)s" 
#, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "ERROR: No se han podido obtener las vías de acceso a las particiones de " "unidad: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERROR: no se ha podido acceder a %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERROR: no se ha podido ejecutar la auditoría: %s" msgid "Error hashing suffix" msgstr "Error en el hash del sufijo" #, python-format msgid "Error in %(conf)r with mtime_check_interval: %(error)s" msgstr "Error en %(conf)r con mtime_check_interval: %(error)s" msgid "Error listing devices" msgstr "Error al mostrar los dispositivos" #, python-format msgid "Error on render profiling results: %s" msgstr "Error al representar los resultados de perfil: %s" msgid "Error parsing recon cache file" msgstr "Error al analizar el archivo de memoria caché de recon" msgid "Error reading recon cache file" msgstr "Error al leer el archivo de memoria caché de recon" msgid "Error reading ringfile" msgstr "Error al leer el ringfile" msgid "Error reading swift.conf" msgstr "Error al leer swift.conf" msgid "Error retrieving recon data" msgstr "Error al recuperar los datos de recon" msgid "Error syncing handoff partition" msgstr "Error al sincronizar la partición de transferencia" msgid "Error syncing partition" msgstr "Error al sincronizar la partición" #, python-format msgid "Error syncing with node: %s" msgstr "Error en la sincronización con el nodo: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Error al intentar reconstruir %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Error: se ha producido un error" msgid "Error: missing config path argument" msgstr "Error: falta el argumento de vía de acceso de configuración" #, python-format msgid "Error: unable to locate %s" msgstr "Error: no se ha podido localizar %s" msgid "Exception dumping recon cache" msgstr "Excepción al volcar la memoria caché de recon" msgid "Exception in top-level account reaper loop" msgstr "Excepción en el bucle cosechador de cuenta de nivel superior" msgid "Exception in top-level replication loop" msgstr "Excepción en el bucle de réplica de nivel superior" msgid "Exception in top-levelreconstruction loop" msgstr "Excepción en el bucle de reconstrucción de nivel superior" #, python-format msgid "Exception while deleting container %(container)s %(err)s" msgstr "Excepción al suprimir el contenedor %(container)s %(err)s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Excepción con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Excepción con la cuenta %s" #, python-format msgid "Exception with containers for account %s" msgstr "Excepción con los contenedores para la cuenta %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Excepción con objetos para el contenedor %(container)s para la cuenta " "%(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Esperado: 100-continuo en %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Siguiente cadena CNAME de %(given_domain)s a %(found_domain)s" msgid "Found configs:" msgstr "Configuraciones encontradas:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." 
msgstr "" "El modo de transferencias primero aún tiene transferencias restantes. " "Abortando el pase de réplica actual." msgid "Host unreachable" msgstr "Host no alcanzable" #, python-format msgid "Incomplete pass on account %s" msgstr "Paso incompleto en la cuenta %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Formato de X-Container-Sync-To no válido %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Host no válido %r en X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrada pendiente no válida %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Respuesta no válida %(resp)s de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Respuesta no válida %(resp)s desde %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Esquema no válido %r en X-Container-Sync-To, debe ser \"//\", \"http\" o " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Interrumpiendo resincronización (rsync) de larga duración: %s" #, python-format msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" msgstr "Error al cargar JSON desde %(auditor_status)s falla (%(err)s)" msgid "Lockup detected.. killing live coros." msgstr "Bloqueo detectado. Interrumpiendo coros activos." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Se ha correlacionado %(given_domain)s con %(found_domain)s" #, python-format msgid "Missing key for %r" msgstr "Falta una clave en %r" #, python-format msgid "No %s running" msgstr "Ningún %s en ejecución" #, python-format msgid "No cluster endpoint for %(realm)r %(cluster)r" msgstr "No hay ningún punto final %(realm)r %(cluster)r" #, python-format msgid "No permission to signal PID %d" msgstr "No hay permiso para señalar el PID %d" #, python-format msgid "No policy with index %s" msgstr "No hay ninguna política que tenga el índice %s" #, python-format msgid "No realm key for %r" msgstr "No hay clave de dominio para %r" #, python-format msgid "No space left on device for %(file)s (%(err)s)" msgstr "No queda espacio libre en el dispositivo para %(file)s (%(err)s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Error de nodo limitado %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "No hay suficientes servidores de objetos reconocidos (constan %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "No se ha encontrado %(sync_from)r => %(sync_to)r - " "objeto %(obj_name)rd" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "No se ha reconstruido nada durante %s segundos." #, python-format msgid "Nothing replicated for %s seconds." msgstr "No se ha replicado nada durante %s segundos." 
msgid "Object" msgstr "Objeto" msgid "Object PUT" msgstr "Objeto PUT" #, python-format msgid "" "Object PUT exceptions after last send, %(conns)s/%(nodes)s required " "connections" msgstr "" "excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s " "conexiones requeridas" #, python-format msgid "" "Object PUT exceptions during send, %(conns)s/%(nodes)s required connections" msgstr "" "excepciones de objeto PUT después de la última emisión, %(conns)s/%(nodes)s " "conexiones requeridas" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "El objeto PUT devuelve 202 para 409: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "El objeto PUT devuelve 412, %(statuses)r" #, python-format msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections" msgstr "Retorno de objecto PUT 503, %(conns)s/%(nodes)s conexiones requeridas" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoría de objetos (%(type)s) en modalidad \"%(mode)s\" finalizada: " "%(elapsed).02fs. Total en cuarentena: %(quars)d, Errores totales: " "%(errors)d, Archivos totales por segundo: %(frate).2f, Bytes totales por " "segundo: %(brate).2f, Tiempo de auditoría: %(audit).2f, Velocidad: " "%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Auditoría de objetos (%(type)s). Desde %(start_time)s: Localmente: " "%(passes)d han pasado, %(quars)d en cuarentena, %(errors)d errores, archivos " "por segundo: %(frate).2f , bytes por segundo: %(brate).2f, Tiempo total: " "%(total).2f, Tiempo de auditoría: %(audit).2f, Velocidad: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Estadísticas de auditoría de objetos: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Reconstrucción de objeto finalizada (una vez). (%.02f minutos)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstrucción de objeto finalizada. (%.02f minutos)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Réplica de objeto finalizada (una vez). (%.02f minutos)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Réplica de objeto finalizada. 
(%.02f minutos)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "" "Los servidores de objeto han devuelvo %s etiquetas (etags) no coincidentes" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Barrido de actualización del objeto finalizado: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parámetros, consultas y fragmentos no permitidos en X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Tiempos de partición: máximo %(max).4fs, mínimo %(min).4fs, medio %(med).4fs" #, python-format msgid "" "Pass beginning; %(containers)s possible containers; %(objects)s possible " "objects" msgstr "" "Inicio del paso; %(containers)s posibles contenedores; %(objects)s posibles " "objetos" #, python-format msgid "Pass completed in %(time)ds; %(objects)d objects expired" msgstr "Paso completado en %(time)ds; %(objects)d objetos caducados" #, python-format msgid "Pass so far %(time)ds; %(objects)d objects expired" msgstr "Paso hasta ahora%(time)ds; %(objects)d objetos caducados" msgid "Path required in X-Container-Sync-To" msgstr "Vía de acceso necesaria en X-Container-Sync-To" #, python-format msgid "Problem cleaning up %(datadir)s (%(err)s)" msgstr "Problema al limpiar %(datadir)s (%(err)s)" #, python-format msgid "Problem cleaning up %s" msgstr "Problema al limpiar %s" #, python-format msgid "Profiling Error: %s" msgstr "Error de perfil: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Se ha puesto en cuarentena %(hsh_path)s en %(quar_path)s debido a que no es " "un directorio" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Se ha puesto en cuarentena %(object_path)s en %(quar_path)s debido a que no " "es un directorio" #, python-format msgid "Quarantining DB %s" msgstr "Poniendo en cuarentena la base de datos %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ajuste de límite de registro de suspensión: %(sleep)s para %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Se han eliminado %(remove)d bases de datos" #, python-format msgid "Removing %s objects" msgstr "Eliminando %s objetos" #, python-format msgid "Removing partition: %s" msgstr "Eliminando partición: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "" "Eliminando el archivo PID %(pid_file)s que tiene el PID no válido %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Eliminando el archivo PID %s, que tiene un PID no válido" #, python-format msgid "Removing stale pid file %s" msgstr "Eliminando fichero de identificación positiva obsoleto %s" msgid "Replication run OVER" msgstr "Ejecución de la replicación finalizada" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Se devuelven 497 debido a las listas negras: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Se devuelven 498 de %(meth)s a %(acc)s/%(cont)s/%(obj)s. Ajuste de límite " "(suspensión máxima) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Cambio de anillo detectado. Abortando el pase de reconstrucción actual." msgid "Ring change detected. Aborting current replication pass." 
msgstr "Cambio de anillo detectado. Abortando el pase de réplica actual." #, python-format msgid "Running %s once" msgstr "Ejecutando %s una vez" msgid "Running object reconstructor in script mode." msgstr "Ejecutando reconstructor de objeto en modo script." msgid "Running object replicator in script mode." msgstr "Ejecutando replicador de objeto en modalidad de script." #, python-format msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "Señal %(server)s pid: %(pid)s Señal : %(signal)s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Desde %(time)s: %(sync)s se han sincronizado [%(delete)s supresiones, " "%(put)s colocaciones], %(skip)s se han omitido, %(fail)s han fallado" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Desde %(time)s: Auditorías de cuenta: %(passed)s han pasado la auditoría," "%(failed)s han fallado la auditoría" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Desde %(time)s: Auditorías de contenedor: %(pass)s han pasado la auditoría," "%(fail)s han fallado la auditoría" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Omitiendo %(device)s, ya que no está montado" #, python-format msgid "Skipping %(dir)s: %(err)s" msgstr "Omitiendo %(dir)s: %(err)s" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Omitiendo %s, ya que no está montado" #, python-format msgid "Starting %s" msgstr "Iniciando %s" msgid "Starting object reconstruction pass." msgstr "Iniciando el paso de reconstrucción de objeto." msgid "Starting object reconstructor in daemon mode." msgstr "Iniciando reconstructor de objeto en modo daemon." msgid "Starting object replication pass." msgstr "Iniciando el paso de réplica de objeto." msgid "Starting object replicator in daemon mode." msgstr "Iniciando replicador de objeto en modalidad de daemon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "" "Resincronización de %(src)s realizada con éxito en %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "El acceso al tipo de archivo está prohibido." 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "El total de %(key)s del contenedor (%(total)s) no coincide con la suma de " "%(key)s en las políticas (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Excepción de tiempo de espera superado con %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Intentando %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Intentando hacer un GET de %(full_path)s" #, python-format msgid "Trying to get %(status_type)s status of PUT to %(path)s" msgstr "Intentando obtener %(status_type)s el estado de PUT a %(path)s" msgid "Trying to read during GET" msgstr "Intentado leer durante GET" msgid "Trying to read during GET (retrying)" msgstr "Intentando leer durante GET (reintento)" msgid "Trying to send to client" msgstr "Intentando enviar al cliente" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Intentando sincronizar los sufijos con %s" #, python-format msgid "Trying to write to %s" msgstr "Intentando escribir en %s" msgid "UNCAUGHT EXCEPTION" msgstr "UNCAUGHT EXCEPTION" #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "No se ha podido encontrar %(section)s de la configuración en %(conf)s" #, python-format msgid "Unable to load internal client from config: %(conf)r (%(error)s)" msgstr "" "No se puede cargar el cliente interno a partir de la configuración: %(conf)r " "(%(error)s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "No se ha podido localizar %s en libc. Se dejará como no operativo." #, python-format msgid "Unable to locate config for %s" msgstr "No se ha podido encontrar el número de configuración de %s" #, python-format msgid "Unable to locate config number %(number)s for %(server)s" msgstr "" "No se ha podido encontrar el número de configuración %(number)s de %(server)s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "No se ha podido localizar fallocate, posix_fallocate en libc. Se dejará como " "no operativo." #, python-format msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "No se puede realizar fsync() en el directorio %(dir)s: %(err)s" #, python-format msgid "Unable to read config from %s" msgstr "No se ha podido leer la configuración de %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r sin autorización" msgid "Unhandled exception" msgstr "Excepción no controlada" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Se ha producido una excepción desconocida al intentar hacer un GET de: " "%(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Informe de actualización fallido para %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Informe de actualización enviado para %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVISO: SSL sólo se debe habilitar con fines de prueba. Utilice la " "terminación de SSL externa para un despliegue de producción." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" 
msgstr "" "AVISO: no se ha podido modificar el límite del descriptor de archivos. ¿Está " "en ejecución como no root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVISO: no se ha podido modificar el límite máximo de procesos. ¿Está en " "ejecución como no root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVISO: no se ha podido modificar el límite de memoria. ¿Está en ejecución " "como no root?" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "" "Se han esperado %(kill_wait)s segundos a que terminara %(server)s; " "abandonando" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "" "Se han esperado %(kill_wait)s segundos a que terminara %(server)s ; " "terminando" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Aviso: no se puede ajustar el límite sin un cliente almacenado en memoria " "caché" #, python-format msgid "method %s is not allowed." msgstr "el método %s no está permitido." msgid "no log file found" msgstr "no se ha encontrado ningún archivo de registro" msgid "odfpy not installed." msgstr "odfpy no está instalado." #, python-format msgid "plotting results failed due to %s" msgstr "error en el trazado de resultados debido a %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib no está instalado." swift-2.17.1/swift/locale/ja/0000775000175000017500000000000013435012120015734 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/ja/LC_MESSAGES/0000775000175000017500000000000013435012120017521 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/ja/LC_MESSAGES/swift.po0000666000175000017500000010550313435012015021226 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Sasuke(Kyohei MORIYAMA) <>, 2015 # Andreas Jaeger , 2016. #zanata # Shu Muto , 2018. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-02-09 05:39+0000\n" "Last-Translator: Shu Muto \n" "Language: ja\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Japanese\n" msgid "" "\n" "user quit" msgstr "" "\n" "ユーザー終了" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - パラレルã€%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d ã‚µãƒ•ã‚£ãƒƒã‚¯ã‚¹ãŒæ¤œæŸ»ã•れã¾ã—㟠- ãƒãƒƒã‚·ãƒ¥æ¸ˆã¿ %(hashed).2f%%ã€åŒæœŸ" "済㿠%(synced).2f%%" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) パーティションãŒ%(time).2fs ã§" "複製ã•れã¾ã—㟠(%(rate).2f/ç§’ã€æ®‹ã‚Š %(remaining)s)" #, python-format msgid "%(server)s #%(number)d not running (%(conf)s)" msgstr "%(server)s #%(number)d ã¯ç¨¼åƒã—ã¦ã„ã¾ã›ã‚“ (%(conf)s)" #, python-format msgid "%(server)s (%(pid)s) appears to have stopped" msgstr "%(server)s (%(pid)s) ãŒåœæ­¢ã•れãŸå¯èƒ½æ€§ãŒã‚りã¾ã™" #, python-format msgid "%(server)s running (%(pid)s - %(conf)s)" msgstr "%(server)s 稼åƒä¸­ (%(pid)s - %(conf)s)" #, python-format msgid "%(server)s running (%(pid)s - %(pid_file)s)" msgstr "%(server)s 稼åƒä¸­ (%(pid)s - %(pid_file)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "æˆåŠŸ %(success)sã€å¤±æ•— %(failure)s" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s ㌠%(statuses)s ã«ã¤ã„㦠503 ã‚’è¿”ã—ã¦ã„ã¾ã™" #, python-format msgid "%(type)s: %(value)s" msgstr "%(type)s: %(value)s" #, python-format msgid "%s already started..." msgstr "%s ã¯æ—¢ã«é–‹å§‹ã•れã¦ã„ã¾ã™..." 
#, python-format msgid "%s does not exist" msgstr "%s ãŒå­˜åœ¨ã—ã¾ã›ã‚“" #, python-format msgid "%s is not mounted" msgstr "%s ãŒãƒžã‚¦ãƒ³ãƒˆã•れã¦ã„ã¾ã›ã‚“" #, python-format msgid "%s responded as unmounted" msgstr "%s ã¯ã‚¢ãƒ³ãƒžã‚¦ãƒ³ãƒˆã¨ã—ã¦å¿œç­”ã—ã¾ã—ãŸ" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: 接続ãŒãƒ”ã‚¢ã«ã‚ˆã£ã¦ãƒªã‚»ãƒƒãƒˆã•れã¾ã—ãŸ" #, python-format msgid ", %s containers deleted" msgstr "ã€%s コンテナーãŒå‰Šé™¤ã•れã¾ã—ãŸ" #, python-format msgid ", %s containers possibly remaining" msgstr "ã€%s ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ãŒæ®‹ã£ã¦ã„ã‚‹ã¨æ€ã‚れã¾ã™" #, python-format msgid ", %s containers remaining" msgstr "ã€%s ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ãŒæ®‹ã£ã¦ã„ã¾ã™" #, python-format msgid ", %s objects deleted" msgstr "ã€%s オブジェクトãŒå‰Šé™¤ã•れã¾ã—ãŸ" #, python-format msgid ", %s objects possibly remaining" msgstr "ã€%s ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆãŒæ®‹ã£ã¦ã„ã‚‹ã¨æ€ã‚れã¾ã™" #, python-format msgid ", %s objects remaining" msgstr "ã€%s ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆãŒæ®‹ã£ã¦ã„ã¾ã™" #, fuzzy, python-format msgid ", elapsed: %.02fs" msgstr "ã€çµŒéŽæ™‚é–“: %.02fs" msgid ", return codes: " msgstr "ã€æˆ»ã‚Šã‚³ãƒ¼ãƒ‰: " msgid "Account" msgstr "アカウント" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "アカウント %(account)s 㯠%(time)s 以é™ãƒªãƒ¼ãƒ—ã•れã¦ã„ã¾ã›ã‚“" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "アカウント監査 \"once\" モードãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "アカウント監査ã®å‡¦ç†ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f ç§’ã§ %(count)d 個㮠DB ã®è¤‡è£½ã‚’試行ã—ã¾ã—㟠(%(rate).5f/s)" #, python-format msgid "Audit Failed for %(path)s: %(err)s" msgstr "%(path)s ã®ç›£æŸ»ãŒå¤±æ•—ã—ã¾ã—ãŸ: %(err)s" #, python-format msgid "Audit passed for %s" msgstr "%s ã®ç›£æŸ»ãŒåˆæ ¼ã—ã¾ã—ãŸã€‚" #, python-format msgid "Bad key for %(name)r: %(err)s" msgstr "%(name)r ã®ã‚­ãƒ¼ãŒä¸æ­£ã§ã™: %(err)s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "æ­£ã—ããªã„å†åŒæœŸæˆ»ã‚Šã‚³ãƒ¼ãƒ‰: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "アカウント監査 \"once\" モードã®é–‹å§‹" msgid "Begin account audit pass." msgstr "アカウント監査パスを開始ã—ã¾ã™ã€‚" msgid "Begin container audit \"once\" mode" msgstr "コンテナー監査「onceã€ãƒ¢ãƒ¼ãƒ‰ã®é–‹å§‹" msgid "Begin container audit pass." msgstr "コンテナー監査パスを開始ã—ã¾ã™ã€‚" msgid "Begin container sync \"once\" mode" msgstr "ã‚³ãƒ³ãƒ†ãƒŠãƒ¼åŒæœŸã€Œonceã€ãƒ¢ãƒ¼ãƒ‰ã®é–‹å§‹" msgid "Begin container update single threaded sweep" msgstr "コンテナー更新å˜ä¸€ã‚¹ãƒ¬ãƒƒãƒ‰åŒ–スイープã®é–‹å§‹" msgid "Begin container update sweep" msgstr "コンテナー更新スイープã®é–‹å§‹" msgid "Begin object update single threaded sweep" msgstr "オブジェクト更新å˜ä¸€ã‚¹ãƒ¬ãƒƒãƒ‰åŒ–スイープã®é–‹å§‹" msgid "Begin object update sweep" msgstr "オブジェクト更新スイープã®é–‹å§‹" #, python-format msgid "Beginning pass on account %s" msgstr "アカウント %s ã§ãƒ‘スを開始中" msgid "Beginning replication run" msgstr "複製ã®å®Ÿè¡Œã‚’開始中" msgid "Broker error trying to rollback locked connection" msgstr "ãƒ­ãƒƒã‚¯æ¸ˆã¿æŽ¥ç¶šã®ãƒ­ãƒ¼ãƒ«ãƒãƒƒã‚¯ã‚’試行中ã®ãƒ–ローカーエラー" #, python-format msgid "Can not access the file %s." msgstr "ファイル %s ã«ã‚¢ã‚¯ã‚»ã‚¹ã§ãã¾ã›ã‚“。" #, python-format msgid "Can not load profile data from %s." 
msgstr "プロファイルデータを %s ã‹ã‚‰ãƒ­ãƒ¼ãƒ‰ã§ãã¾ã›ã‚“。" #, python-format msgid "Client did not read from proxy within %ss" msgstr "クライアント㯠%s 内ã®ãƒ—ロキシーã‹ã‚‰ã®èª­ã¿å–りを行ã„ã¾ã›ã‚“ã§ã—ãŸ" msgid "Client disconnected on read" msgstr "クライアントãŒèª­ã¿å–り時ã«åˆ‡æ–­ã•れã¾ã—ãŸ" msgid "Client disconnected without sending enough data" msgstr "å分ãªãƒ‡ãƒ¼ã‚¿ã‚’é€ä¿¡ã›ãšã«ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆãŒåˆ‡æ–­ã•れã¾ã—ãŸ" msgid "Client disconnected without sending last chunk" msgstr "最後ã®ãƒãƒ£ãƒ³ã‚¯ã‚’é€ä¿¡ã›ãšã«ã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆãŒåˆ‡æ–­ã•れã¾ã—ãŸ" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "クライアントパス %(client)s ã¯ã‚ªãƒ–ジェクトメタデータ %(meta)s ã«ä¿ç®¡ã•れãŸãƒ‘" "スã«ä¸€è‡´ã—ã¾ã›ã‚“" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "設定オプション internal_client_conf_path ãŒå®šç¾©ã•れã¦ã„ã¾ã›ã‚“。デフォルト設定" "を使用ã—ã¦ã„ã¾ã™ã€‚オプションã«ã¤ã„ã¦ã¯ internal-client.conf-sample ã‚’å‚ç…§ã—ã¦" "ãã ã•ã„" msgid "Connection refused" msgstr "æŽ¥ç¶šãŒæ‹’å¦ã•れã¾ã—ãŸ" msgid "Connection timeout" msgstr "接続ãŒã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—ãŸ" msgid "Container" msgstr "コンテナー" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "コンテナー監査「onceã€ãƒ¢ãƒ¼ãƒ‰ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "コンテナー監査ã®å‡¦ç†ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "ã‚³ãƒ³ãƒ†ãƒŠãƒ¼åŒæœŸã€Œonceã€ãƒ¢ãƒ¼ãƒ‰ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "コンテナー更新å˜ä¸€ã‚¹ãƒ¬ãƒƒãƒ‰åŒ–スイープãŒå®Œäº†ã—ã¾ã—ãŸ: %(elapsed).02fsã€æˆåŠŸ " "%(success)sã€å¤±æ•— %(fail)sã€æœªå¤‰æ›´ %(no_change)s" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "コンテナー更新スイープãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼æ›´æ–°ã‚¹ã‚¤ãƒ¼ãƒ—ãŒå®Œäº†ã—ã¾ã—ãŸ: %(elapsed).02fsã€æˆåŠŸ " "%(success)sã€å¤±æ•— %(fail)sã€æœªå¤‰æ›´ %(no_change)s" #, python-format msgid "Could not load %(conf)r: %(error)s" msgstr "%(conf)r をロードã§ãã¾ã›ã‚“ã§ã—ãŸ: %(error)s" #, python-format msgid "Data download error: %s" msgstr "データダウンロードエラー: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "デãƒã‚¤ã‚¹ã®å‡¦ç†ãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" msgid "Did not get a keys dict" msgstr "キーã®è¾žæ›¸ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸã€‚" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "エラー %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "エラー %(status)d: %(type)s サーãƒãƒ¼ã‹ã‚‰ã® %(body)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "エラー %(status)d: オブジェクトサーãƒãƒ¼ã‹ã‚‰ã® %(body)sã€re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "エラー %(status)d: 予期: オブジェクトサーãƒãƒ¼ã‹ã‚‰ã® 100-continue" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "エラー: アカウント更新㌠%(ip)s:%(port)s/%(device)s ã§å¤±æ•—ã—ã¾ã—ãŸ(後ã§å†è©¦è¡Œ" "ã•れã¾ã™): 応答 %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from 
%(host)s" msgstr "エラー: ホスト %(host)s ã‹ã‚‰ã®å¿œç­” %(status)s ãŒæ­£ã—ãã‚りã¾ã›ã‚“" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "エラー: クライアント読ã¿å–りãŒã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆã«ãªã‚Šã¾ã—㟠(%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "エラー: コンテナー更新ã«å¤±æ•—ã—ã¾ã—㟠(後ã®éžåŒæœŸæ›´æ–°ã®ãŸã‚ã«ä¿å­˜ä¸­): %(ip)s:" "%(port)s/%(dev)s ã‹ã‚‰ã® %(status)d 応答" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR アカウント情報 %s ãŒå–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" #, python-format msgid "ERROR Could not get container info %s" msgstr "エラー: コンテナー情報 %s ã‚’å–å¾—ã§ãã¾ã›ã‚“ã§ã—ãŸ" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "エラー: DiskFile %(data_file)s ã‚’é–‰ã˜ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "エラー: 例外ã«ã‚ˆã‚Šã‚¯ãƒ©ã‚¤ã‚¢ãƒ³ãƒˆãŒåˆ‡æ–­ã•れã¦ã„ã¾ã™" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "エラー: オブジェクトサーãƒãƒ¼ %s ã¸ã®ãƒ‡ãƒ¼ã‚¿è»¢é€ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "ERROR Failed to get my own IPs?" msgstr "エラー: 自分㮠IP ã®å–å¾—ã«å¤±æ•—?" msgid "ERROR Insufficient Storage" msgstr "エラー: ストレージãŒä¸è¶³ã—ã¦ã„ã¾ã™" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "エラー: オブジェクト %(obj)s ã¯ç›£æŸ»ã«å¤±æ•—ã—ã€æ¤œç–«ã•れã¾ã—ãŸ: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "エラー: ピックルã®å•題ã€%s を検疫ã—ã¾ã™" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "エラー: リモートドライブ㫠%s ãŒãƒžã‚¦ãƒ³ãƒˆã•れã¦ã„ã¾ã›ã‚“" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "%(db_file)s %(row)s ã®åŒæœŸã‚¨ãƒ©ãƒ¼" #, python-format msgid "ERROR Syncing %s" msgstr "%s ã®åŒæœŸã‚¨ãƒ©ãƒ¼" #, python-format msgid "" "ERROR There are not enough handoff nodes to reach replica count for " "partition %s" msgstr "" "エラー パーティション %s ã®ãƒ¬ãƒ—リカ数ã«é”ã™ã‚‹ãŸã‚ã® handoff ノードãŒä¸è¶³ã—ã¦" "ã„ã¾ã™ã€‚" #, python-format msgid "ERROR Trying to audit %s" msgstr "%s ã®ç›£æŸ»ã‚’試行中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "ERROR Unhandled exception in request" msgstr "エラー: è¦æ±‚ã§æœªå‡¦ç†ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "エラー: %(method)s %(path)s ã§ã® __call__ エラー" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "エラー: アカウント更新㌠%(ip)s:%(port)s/%(device)s ã§å¤±æ•—ã—ã¾ã—ãŸ(後ã§å†è©¦è¡Œ" "ã•れã¾ã™)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "エラー: アカウント更新㌠%(ip)s:%(port)s/%(device)s ã§å¤±æ•—ã—ã¾ã—ãŸ(後ã§å†è©¦è¡Œ" "ã•れã¾ã™): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "エラー: 予期ã—ãªã„åå‰ %s ã‚’æŒã¤ãƒ•ァイルをéžåŒæœŸä¿ç•™ä¸­" msgid "ERROR auditing" msgstr "監査エラー" #, python-format msgid "ERROR auditing: %s" msgstr "監査エラー: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "エラー: コンテナー更新㌠%(ip)s:%(port)s/%(dev)s ã§å¤±æ•—ã—ã¾ã—㟠(後ã®éžåŒæœŸæ›´" "æ–°ã®ãŸã‚ã«ä¿å­˜ä¸­)" msgid "ERROR get_keys() missing callback" msgstr "エラー get_keys() コールãƒãƒƒã‚¯ãŒã‚りã¾ã›ã‚“" #, python-format msgid "ERROR get_keys(): from callback: %s" msgstr "エラー get_keys(): コールãƒãƒƒã‚¯: %s" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "%s ã‹ã‚‰ã® HTTP 応答ã®èª­ã¿å–りエラー" #, python-format msgid "ERROR 
reading db %s" msgstr "DB %s ã®èª­ã¿å–りエラー" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "エラー: %(code)s ã¨ã®å†åŒæœŸã«å¤±æ•—ã—ã¾ã—ãŸ: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ノード %(node)s ã¨ã® %(file)s ã®åŒæœŸã‚¨ãƒ©ãƒ¼" msgid "ERROR trying to replicate" msgstr "複製ã®è©¦è¡Œã‚¨ãƒ©ãƒ¼" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s ã®ã‚¯ãƒªãƒ¼ãƒ³ã‚¢ãƒƒãƒ—を試行中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "%(type)s サーãƒãƒ¼ %(ip)s:%(port)s/%(device)s ã§ã®ã‚¨ãƒ©ãƒ¼ã€è¿”ã•れãŸå€¤: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "%s ã‹ã‚‰ã®æŠ‘æ­¢ã®ãƒ­ãƒ¼ãƒ‰ã§ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸ: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "リモートサーãƒãƒ¼ %(ip)s:%(port)s/%(device)s ã§ã®ã‚¨ãƒ©ãƒ¼" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "エラー: ドライブパーティションã«å¯¾ã™ã‚‹ãƒ‘スã®å–å¾—ã«å¤±æ•—ã—ã¾ã—ãŸ: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "エラー: %(path)s ã«ã‚¢ã‚¯ã‚»ã‚¹ã§ãã¾ã›ã‚“: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "エラー: 監査を実行ã§ãã¾ã›ã‚“: %s" msgid "Error hashing suffix" msgstr "サフィックスã®ãƒãƒƒã‚·ãƒ¥ã‚¨ãƒ©ãƒ¼" #, python-format msgid "Error in %(conf)r with mtime_check_interval: %(error)s" msgstr "mtime_check_interval ã§ %(conf)r ã«ã‚¨ãƒ©ãƒ¼ãŒã‚りã¾ã™: %(error)s" msgid "Error listing devices" msgstr "デãƒã‚¤ã‚¹ã®ãƒªã‚¹ãƒˆã‚¨ãƒ©ãƒ¼" #, python-format msgid "Error on render profiling results: %s" msgstr "ãƒ¬ãƒ³ãƒ€ãƒªãƒ³ã‚°ãƒ—ãƒ­ãƒ•ã‚¡ã‚¤ãƒ«çµæžœã§ã®ã‚¨ãƒ©ãƒ¼: %s" msgid "Error parsing recon cache file" msgstr "冿§‹æˆã‚­ãƒ£ãƒƒã‚·ãƒ¥ãƒ•ã‚¡ã‚¤ãƒ«ã®æ§‹æ–‡è§£æžã‚¨ãƒ©ãƒ¼" msgid "Error reading recon cache file" msgstr "冿§‹æˆã‚­ãƒ£ãƒƒã‚·ãƒ¥ãƒ•ァイルã®èª­ã¿å–りエラー" msgid "Error reading ringfile" msgstr "リングファイルã®èª­ã¿å–りエラー" msgid "Error reading swift.conf" msgstr "swift.conf ã®èª­ã¿å–りエラー" msgid "Error retrieving recon data" msgstr "冿§‹æˆãƒ‡ãƒ¼ã‚¿ã®å–得エラー" #, python-format msgid "Error sending UDP message to %(target)r: %(err)s" msgstr "%(target)r ã¸ã® UDP メッセージé€ä¿¡ã‚¨ãƒ©ãƒ¼: %(err)s" msgid "Error syncing handoff partition" msgstr "ãƒãƒ³ãƒ‰ã‚ªãƒ•パーティションã®åŒæœŸã‚¨ãƒ©ãƒ¼" msgid "Error syncing partition" msgstr "パーティションã¨ã®åŒæœŸã‚¨ãƒ©ãƒ¼" #, python-format msgid "Error syncing with node: %s" msgstr "ノードã¨ã®åŒæœŸã‚¨ãƒ©ãƒ¼: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "%(path)s ã®å†æ§‹ç¯‰ã‚’試行中ã«ã‚¨ãƒ©ãƒ¼ãŒç™ºç”Ÿã—ã¾ã—ãŸã€‚ãƒãƒªã‚·ãƒ¼ #%(policy)d フラグ" "メント #%(frag_index)s" msgid "Error: An error occurred" msgstr "エラー: エラーãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Error: missing config path argument" msgstr "エラー: æ§‹æˆãƒ‘ス引数ãŒã‚りã¾ã›ã‚“" #, python-format msgid "Error: unable to locate %s" msgstr "エラー: %s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" msgid "Exception dumping recon cache" msgstr "冿§‹æˆã‚­ãƒ£ãƒƒã‚·ãƒ¥ã®ãƒ€ãƒ³ãƒ—ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Exception in top-level account reaper loop" msgstr "最上ä½ã‚¢ã‚«ã‚¦ãƒ³ãƒˆãƒªãƒ¼ãƒ‘ーループã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Exception in top-level replication loop" msgstr "最上ä½è¤‡è£½ãƒ«ãƒ¼ãƒ—ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" msgid "Exception in top-levelreconstruction loop" msgstr "最上ä½å†æ§‹æˆãƒ«ãƒ¼ãƒ—ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Exception with account %s" msgstr "アカウント 
%s ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Exception with containers for account %s" msgstr "アカウント %s ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ã§ä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "アカウント %(account)s ã®ã‚³ãƒ³ãƒ†ãƒŠãƒ¼ %(container)s ã®ã‚ªãƒ–ジェクトã§ä¾‹å¤–ãŒç™ºç”Ÿ" "ã—ã¾ã—ãŸ" #, python-format msgid "Expect: 100-continue on %s" msgstr "予期: %s ã§ã® 100-continue" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s ã‹ã‚‰ %(found_domain)s 㸠CNAME ãƒã‚§ãƒ¼ãƒ³ã‚’フォロー中" msgid "Found configs:" msgstr "æ§‹æˆãŒè¦‹ã¤ã‹ã‚Šã¾ã—ãŸ:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "ãƒãƒ³ãƒ‰ã‚ªãƒ•ã®ãƒ•ァーストモードã«ãƒãƒ³ãƒ‰ã‚ªãƒ•ãŒæ®‹ã£ã¦ã„ã¾ã™ã€‚ç¾è¡Œè¤‡è£½ãƒ‘スを打ã¡åˆ‡" "りã¾ã™ã€‚" msgid "Host unreachable" msgstr "ホストãŒåˆ°é”ä¸èƒ½ã§ã™" #, python-format msgid "Incomplete pass on account %s" msgstr "アカウント %s ã§ã®ä¸å®Œå…¨ãªãƒ‘ス" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "X-Container-Sync-To å½¢å¼ %r ãŒç„¡åйã§ã™" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "無効ãªãƒ›ã‚¹ãƒˆ %r ㌠X-Container-Sync-To ã«ã‚りã¾ã™" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無効ãªä¿ç•™ä¸­é …ç›® %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "%(full_path)s ã‹ã‚‰ã®å¿œç­” %(resp)s ãŒç„¡åйã§ã™" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s ã‹ã‚‰ã®å¿œç­” %(resp)s ãŒç„¡åйã§ã™" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "無効ãªã‚¹ã‚­ãƒ¼ãƒ  %r ㌠X-Container-Sync-To ã«ã‚りã¾ã™ã€‚「//ã€ã€ã€Œhttpã€ã€" "「httpsã€ã®ã„ãšã‚Œã‹ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。" msgid "Invalid swift_bytes" msgstr "無効㪠swift_bytes" #, python-format msgid "Killing long-running rsync: %s" msgstr "長期実行ã®å†åŒæœŸã‚’強制終了中: %s" msgid "Lockup detected.. killing live coros." msgstr "ãƒ­ãƒƒã‚¯ãŒæ¤œå‡ºã•れã¾ã—ãŸ.. ライブ coros を強制終了中" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s ㌠%(found_domain)s ã«ãƒžãƒƒãƒ—ã•れã¾ã—ãŸ" #, python-format msgid "Missing key for %r" msgstr "%r ã«ã‚­ãƒ¼ãŒã‚りã¾ã›ã‚“。" #, python-format msgid "No %s running" msgstr "%s ãŒå®Ÿè¡Œã•れã¦ã„ã¾ã›ã‚“" #, python-format msgid "No cluster endpoint for %(realm)r %(cluster)r" msgstr "%(realm)r %(cluster)r ã®ã‚¨ãƒ³ãƒ‰ãƒã‚¤ãƒ³ãƒˆã‚¯ãƒ©ã‚¹ã‚¿ãƒ¼ãŒã‚りã¾ã›ã‚“" #, python-format msgid "No permission to signal PID %d" msgstr "PID %d ã«ã‚·ã‚°ãƒŠãƒ«é€šçŸ¥ã™ã‚‹è¨±å¯ãŒã‚りã¾ã›ã‚“" #, python-format msgid "No policy with index %s" msgstr "インデックス %s ã®ãƒãƒªã‚·ãƒ¼ã¯ã‚りã¾ã›ã‚“" #, python-format msgid "No realm key for %r" msgstr "%r ã®ãƒ¬ãƒ«ãƒ ã‚­ãƒ¼ãŒã‚りã¾ã›ã‚“" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "ãƒŽãƒ¼ãƒ‰ã‚¨ãƒ©ãƒ¼åˆ¶é™ %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "肯定応答を返ã—ãŸã‚ªãƒ–ジェクト・サーãƒãƒ¼ãŒä¸å分ã§ã™ (%d å–å¾—)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "䏿¤œå‡º %(sync_from)r => %(sync_to)r - オブジェクト " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%s ç§’é–“ã§ä½•ã‚‚å†æ§‹æˆã•れã¾ã›ã‚“ã§ã—ãŸã€‚" #, python-format msgid "Nothing replicated for %s seconds." 
msgstr "%s ç§’é–“ã§ä½•も複製ã•れã¾ã›ã‚“ã§ã—ãŸã€‚" msgid "Object" msgstr "オブジェクト" msgid "Object PUT" msgstr "オブジェクト PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "オブジェクト PUT ㌠409 ã«å¯¾ã—㦠202 ã‚’è¿”ã—ã¦ã„ã¾ã™: %(req_timestamp)s<= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "オブジェクト PUT ㌠412 ã‚’è¿”ã—ã¦ã„ã¾ã™ã€‚%(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "オブジェクト監査 (%(type)s) 「%(mode)sã€ãƒ¢ãƒ¼ãƒ‰å®Œäº†: %(elapsed).02fs。åˆè¨ˆæ¤œç–«" "済ã¿: %(quars)dã€åˆè¨ˆã‚¨ãƒ©ãƒ¼: %(errors)dã€åˆè¨ˆãƒ•ァイル/ç§’: %(frate).2fã€åˆè¨ˆãƒ" "イト/ç§’: %(brate).2fã€ç›£æŸ»æ™‚é–“: %(audit).2fã€çއ: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "オブジェクト監査 (%(type)s)。%(start_time)s 以é™: ローカル: åˆæ ¼ã—ãŸç›£æŸ» " "%(passes)dã€æ¤œç–«æ¸ˆã¿ %(quars)dã€ã‚¨ãƒ©ãƒ¼ %(errors)dã€ãƒ•ァイル/ç§’: %(frate).2fã€" "ãƒã‚¤ãƒˆ/ç§’: %(brate).2fã€åˆè¨ˆæ™‚é–“: %(total).2fã€ç›£æŸ»æ™‚é–“: %(audit).2fã€çއ: " "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "オブジェクト監査統計: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆå†æ§‹æˆãŒå®Œäº†ã—ã¾ã—㟠(1 回)。(%.02f 分)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆå†æ§‹æˆãŒå®Œäº†ã—ã¾ã—ãŸã€‚(%.02f 分)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "オブジェクト複製ãŒå®Œäº†ã—ã¾ã—㟠(1 回)。(%.02f 分)" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "オブジェクト複製ãŒå®Œäº†ã—ã¾ã—ãŸã€‚(%.02f 分)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "オブジェクトサーãƒãƒ¼ãŒ %s 個ã®ä¸ä¸€è‡´ etag ã‚’è¿”ã—ã¾ã—ãŸ" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "オブジェクト更新スイープãŒå®Œäº†ã—ã¾ã—ãŸ: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "パラメーターã€ç…§ä¼šã€ãŠã‚ˆã³ãƒ•ラグメント㯠X-Container-Sync-To ã§è¨±å¯ã•れã¦ã„ã¾" "ã›ã‚“" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "パーティション時間: 最大 %(max).4fsã€æœ€å° %(min).4fsã€ä¸­é–“ %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To ã«ãƒ‘スãŒå¿…è¦ã§ã™" #, python-format msgid "Problem cleaning up %s" msgstr "%s ã®ã‚¯ãƒªãƒ¼ãƒ³ã‚¢ãƒƒãƒ—中ã«å•題ãŒç™ºç”Ÿã—ã¾ã—ãŸ" #, python-format msgid "Profiling Error: %s" msgstr "プロファイル作æˆã‚¨ãƒ©ãƒ¼: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "ディレクトリーã§ã¯ãªã„ãŸã‚ã€%(hsh_path)s 㯠%(quar_path)s ã¸æ¤œç–«ã•れã¾ã—ãŸ" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "ディレクトリーã§ã¯ãªã„ãŸã‚ã€%(object_path)s 㯠%(quar_path)s ã¸æ¤œç–«ã•れã¾ã—ãŸ" #, python-format msgid "Quarantining DB %s" msgstr "DB %s ã®æ¤œç–«ä¸­" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ratelimit スリープログ: %(account)s/%(container)s/%(object)s ã® %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d 個㮠DB ãŒå‰Šé™¤ã•れã¾ã—ãŸ" #, python-format msgid "Removing %s objects" msgstr "%s オブジェクトã®å‰Šé™¤ä¸­" #, python-format msgid "Removing partition: %s" msgstr "パーティションã®å‰Šé™¤ä¸­: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "æ­£ã—ããªã„ pid %(pid)d ã® pid ファイル %(pid_file)s を削除中" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "無効㪠pid ã® pid ファイル %s を削除中" #, python-format msgid "Removing stale pid file %s" msgstr "失効ã—㟠pid ファイル %s を削除中" msgid "Replication run OVER" msgstr "複製ã®å®Ÿè¡ŒãŒçµ‚了ã—ã¾ã—ãŸ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "ブラックリスティングã®ãŸã‚ 497 ã‚’è¿”ã—ã¦ã„ã¾ã™: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s ã«å¯¾ã™ã‚‹ %(meth)s ã«é–¢ã—㦠498 ã‚’è¿”ã—ã¦ã„ã¾ã™ã€‚" "Ratelimit (最大スリープ) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "ãƒªãƒ³ã‚°å¤‰æ›´ãŒæ¤œå‡ºã•れã¾ã—ãŸã€‚ç¾è¡Œå†æ§‹æˆãƒ‘スを打ã¡åˆ‡ã‚Šã¾ã™ã€‚" msgid "Ring change detected. Aborting current replication pass." msgstr "ãƒªãƒ³ã‚°å¤‰æ›´ãŒæ¤œå‡ºã•れã¾ã—ãŸã€‚ç¾è¡Œè¤‡è£½ãƒ‘スを打ã¡åˆ‡ã‚Šã¾ã™ã€‚" #, python-format msgid "Running %s once" msgstr "%s ã‚’ 1 回実行中" msgid "Running object reconstructor in script mode." msgstr "スクリプトモードã§ã‚ªãƒ–ジェクトリコンストラクターを実行中ã§ã™ã€‚" msgid "Running object replicator in script mode." 
msgstr "スクリプトモードã§ã‚ªãƒ–ジェクトレプリケーターを実行中ã§ã™ã€‚" #, python-format msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "%(server)s pid: %(pid)s ã¸ã®ã‚·ã‚°ãƒŠãƒ«: %(signal)s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s 以é™: åŒæœŸæ¸ˆã¿ %(sync)s [削除 %(delete)sã€æ›¸ã込㿠%(put)s]ã€ã‚¹ã‚­ãƒƒ" "プ %(skip)sã€å¤±æ•— %(fail)s" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "%(time)s 以é™: アカウント監査: åˆæ ¼ã—ãŸç›£æŸ» %(passed)sã€ä¸åˆæ ¼ã®ç›£" "査%(failed)s" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "%(time)s 以é™: コンテナー監査: åˆæ ¼ã—ãŸç›£æŸ» %(pass)sã€ä¸åˆæ ¼ã®ç›£æŸ»%(fail)s" #, python-format msgid "Skipping %(datadir)s because %(err)s" msgstr "%(err)s ã®ãŸã‚ %(datadir)s をスキップã—ã¾ã™" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s ã¯ãƒžã‚¦ãƒ³ãƒˆã•れã¦ã„ãªã„ãŸã‚ã€ã‚¹ã‚­ãƒƒãƒ—ã•れã¾ã™" #, python-format msgid "Skipping %s as it is not mounted" msgstr "マウントã•れã¦ã„ãªã„ãŸã‚〠%s をスキップã—ã¾ã™" #, python-format msgid "Starting %s" msgstr "%s ã‚’é–‹å§‹ã—ã¦ã„ã¾ã™" msgid "Starting object reconstruction pass." msgstr "ã‚ªãƒ–ã‚¸ã‚§ã‚¯ãƒˆå†æ§‹æˆãƒ‘スを開始中ã§ã™ã€‚" msgid "Starting object reconstructor in daemon mode." msgstr "オブジェクトリコンストラクターをデーモンモードã§é–‹å§‹ä¸­ã§ã™ã€‚" msgid "Starting object replication pass." msgstr "オブジェクト複製パスを開始中ã§ã™ã€‚" msgid "Starting object replicator in daemon mode." msgstr "オブジェクトレプリケーターをデーモンモードã§é–‹å§‹ä¸­ã§ã™ã€‚" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s ã§ã® %(src)s ã®å†åŒæœŸãŒæˆåŠŸã—ã¾ã—㟠(%(time).03f)" msgid "The file type are forbidden to access!" msgstr "ã“ã®ãƒ•ァイルタイプã«ã¯ã‚¢ã‚¯ã‚»ã‚¹ãŒç¦æ­¢ã•れã¦ã„ã¾ã™" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "コンテナーã®åˆè¨ˆ %(key)s (%(total)s) ãŒãƒãƒªã‚·ãƒ¼å…¨ä½“ã®åˆè¨ˆ %(key)s(%(sum)s) ã«" "一致ã—ã¾ã›ã‚“" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ã®ã‚¿ã‚¤ãƒ ã‚¢ã‚¦ãƒˆä¾‹å¤–" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s を試行中" #, python-format msgid "Trying to GET %(full_path)s" msgstr "GET %(full_path)s を試行中" msgid "Trying to read during GET" msgstr "GET 時ã«èª­ã¿å–りを試行中" msgid "Trying to read during GET (retrying)" msgstr "GET 時ã«èª­ã¿å–りを試行中 (å†è©¦è¡Œä¸­)" msgid "Trying to send to client" msgstr "クライアントã¸ã®é€ä¿¡ã‚’試行中" #, python-format msgid "Trying to sync suffixes with %s" msgstr "%s ã§ã‚µãƒ•ィックスã®åŒæœŸã‚’試行中" #, python-format msgid "Trying to write to %s" msgstr "%s ã¸ã®æ›¸ãè¾¼ã¿ã‚’試行中" msgid "UNCAUGHT EXCEPTION" msgstr "キャッãƒã•れã¦ã„ãªã„例外" #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "%(section)s æ§‹æˆã‚»ã‚¯ã‚·ãƒ§ãƒ³ãŒ %(conf)s ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "%s ㌠libc ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。no-op ã¨ã—ã¦çµ‚了ã—ã¾ã™ã€‚" #, python-format msgid "Unable to locate config for %s" msgstr "%s ã®è¨­å®šãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" #, python-format msgid "Unable to locate config number %(number)s for %(server)s" msgstr "%(server)s ã®è¨­å®šç•ªå· %(number)s ãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." 
msgstr "" "fallocateã€posix_fallocate ㌠libc ã«è¦‹ã¤ã‹ã‚Šã¾ã›ã‚“。no-op ã¨ã—ã¦çµ‚了ã—ã¾ã™ã€‚" #, python-format msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "ディレクトリー %(dir)s ã§ fsync() を実行ã§ãã¾ã›ã‚“: %(err)s" #, python-format msgid "Unable to read config from %s" msgstr "æ§‹æˆã‚’ %s ã‹ã‚‰èª­ã¿å–ã‚‹ã“ã¨ãŒã§ãã¾ã›ã‚“" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "éžèªè¨¼ %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "未処ç†ä¾‹å¤–" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "GET を試行中ã«ä¸æ˜Žãªä¾‹å¤–ãŒç™ºç”Ÿã—ã¾ã—ãŸ: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s ã«é–¢ã™ã‚‹æ›´æ–°ãƒ¬ãƒãƒ¼ãƒˆãŒå¤±æ•—ã—ã¾ã—ãŸ" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s ã«é–¢ã™ã‚‹æ›´æ–°ãƒ¬ãƒãƒ¼ãƒˆãŒé€ä¿¡ã•れã¾ã—ãŸ" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "警告: SSL を有効ã«ã™ã‚‹ã®ã¯ãƒ†ã‚¹ãƒˆç›®çš„ã®ã¿ã§ãªã‘れã°ãªã‚Šã¾ã›ã‚“。製å“ã®ãƒ‡ãƒ—ロイ" "ã«ã¯å¤–部 SSL 終端を使用ã—ã¦ãã ã•ã„。" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告: ファイル記述å­åˆ¶é™ã‚’変更ã§ãã¾ã›ã‚“。éžãƒ«ãƒ¼ãƒˆã¨ã—ã¦å®Ÿè¡Œã—ã¾ã™ã‹?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告: 最大処ç†é™ç•Œã‚’変更ã§ãã¾ã›ã‚“。éžãƒ«ãƒ¼ãƒˆã¨ã—ã¦å®Ÿè¡Œã—ã¾ã™ã‹?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告: メモリー制é™ã‚’変更ã§ãã¾ã›ã‚“。éžãƒ«ãƒ¼ãƒˆã¨ã—ã¦å®Ÿè¡Œã—ã¾ã™ã‹?" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "%(kill_wait)s ç§’é–“ã€%(server)s ã®åœæ­¢ã‚’待機ã—ã¾ã—ãŸã€‚中止ã—ã¾ã™" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "%(kill_wait)s ç§’é–“ã€%(server)s ã®åœæ­¢ã‚’待機ã—ã¾ã—ãŸã€‚強制終了ã—ã¾ã™" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告: memcached クライアントãªã—ã§ ratelimit を行ã†ã“ã¨ã¯ã§ãã¾ã›ã‚“" #, python-format msgid "method %s is not allowed." msgstr "メソッド %s ã¯è¨±å¯ã•れã¦ã„ã¾ã›ã‚“。" msgid "no log file found" msgstr "ログファイルãŒè¦‹ã¤ã‹ã‚Šã¾ã›ã‚“" msgid "odfpy not installed." msgstr "odfpy ãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れã¦ã„ã¾ã›ã‚“。" #, python-format msgid "plotting results failed due to %s" msgstr "%s ãŒåŽŸå› ã§çµæžœã®ãƒ—ロットã«å¤±æ•—ã—ã¾ã—ãŸ" msgid "python-matplotlib not installed." msgstr "python-matplotlib ãŒã‚¤ãƒ³ã‚¹ãƒˆãƒ¼ãƒ«ã•れã¦ã„ã¾ã›ã‚“。" swift-2.17.1/swift/locale/tr_TR/0000775000175000017500000000000013435012120016374 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/tr_TR/LC_MESSAGES/0000775000175000017500000000000013435012120020161 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/tr_TR/LC_MESSAGES/swift.po0000666000175000017500000006540013435012015021667 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # İşbaran Akçayır , 2015 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: tr_TR\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Turkish (Turkey)\n" msgid "" "\n" "user quit" msgstr "" "\n" "kullanıcı çıktı" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - paralel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d sonek kontrol edildi - %(hashed).2f%% özetlenen, %(synced).2f%% " "eÅŸzamanlanan" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) bölüm %(time).2fs (%(rate).2f/" "sn, %(remaining)s kalan) içinde çoÄŸaltıldı" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s baÅŸarı, %(failure)s baÅŸarısızlık" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s %(statuses)s için 503 döndürüyor" #, python-format msgid "%s already started..." msgstr "%s zaten baÅŸlatıldı..." #, python-format msgid "%s does not exist" msgstr "%s mevcut deÄŸil" #, python-format msgid "%s is not mounted" msgstr "%s baÄŸlı deÄŸil" #, python-format msgid "%s responded as unmounted" msgstr "%s baÄŸlı deÄŸil olarak yanıt verdi" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: BaÄŸlantı eÅŸ tarafından sıfırlandı" #, python-format msgid ", %s containers deleted" msgstr ", %s kap silindi" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s kap kaldı muhtemelen" #, python-format msgid ", %s containers remaining" msgstr ", %s kap kaldı" #, python-format msgid ", %s objects deleted" msgstr ", %s nesne silindi" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s nesne kaldı muhtemelen" #, python-format msgid ", %s objects remaining" msgstr ", %s nesne kaldı" #, python-format msgid ", elapsed: %.02fs" msgstr ", geçen süre: %.02fs" msgid ", return codes: " msgstr ", dönen kodlar: " msgid "Account" msgstr "Hesap" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Hesap denetimi \"bir kere\" kipi tamamlandı: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Hesap denetimi geçiÅŸi tamamlandı: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(count)d db %(time).5f saniyede çoÄŸaltılmaya çalışıldı (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Kötü rsync dönüş kodu: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Hesap denetimi \"bir kere\" kipini baÅŸlat" msgid "Begin account audit pass." msgstr "Hesap denetimi baÅŸlatma geçildi." msgid "Begin container audit \"once\" mode" msgstr "Kap denetimine \"bir kere\" kipinde baÅŸla" msgid "Begin container audit pass." msgstr "Kap denetimi geçiÅŸini baÅŸlat." 
msgid "Begin container sync \"once\" mode" msgstr "Kap eÅŸzamanlamayı \"bir kere\" kipinde baÅŸlat" msgid "Begin container update single threaded sweep" msgstr "Kap güncelleme tek iÅŸ iplikli süpürmeye baÅŸla" msgid "Begin container update sweep" msgstr "Kap güncelleme süpürmesine baÅŸla" msgid "Begin object update single threaded sweep" msgstr "Nesne güncelleme tek iÅŸ iplikli süpürmeye baÅŸla" msgid "Begin object update sweep" msgstr "Nesne güncelleme süpürmesine baÅŸla" #, python-format msgid "Beginning pass on account %s" msgstr "%s hesabı üzerinde geçiÅŸ baÅŸlatılıyor" msgid "Beginning replication run" msgstr "ÇoÄŸaltmanın çalıştırılmasına baÅŸlanıyor" msgid "Broker error trying to rollback locked connection" msgstr "Kilitli baÄŸlantı geri alınmaya çalışılırken vekil hatası" #, python-format msgid "Can not access the file %s." msgstr "%s dosyasına eriÅŸilemiyor." #, python-format msgid "Can not load profile data from %s." msgstr "%s'den profil verisi yüklenemiyor." #, python-format msgid "Client did not read from proxy within %ss" msgstr "İstemci %ss içinde vekilden okumadı" msgid "Client disconnected on read" msgstr "İstemci okuma sırasında baÄŸlantıyı kesti" msgid "Client disconnected without sending enough data" msgstr "İstemci yeterli veri göndermeden baÄŸlantıyı kesti" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "İstemci yolu %(client)s nesne metadata'sında kayıtlı yol ile eÅŸleÅŸmiyor " "%(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Yapılandırma seçeneÄŸi internal_client_conf_path belirtilmemiÅŸ. Varsayılan " "yapılandırma kullanılıyor, seçenekleri çin internal-client.conf-sample'a " "bakın" msgid "Connection refused" msgstr "BaÄŸlantı reddedildi" msgid "Connection timeout" msgstr "BaÄŸlantı zaman aşımına uÄŸradı" msgid "Container" msgstr "Kap" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Kap denetimi \"bir kere\" kipinde tamamlandı: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Kap denetim geçiÅŸi tamamlandı: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Kap eÅŸzamanlama \"bir kere\" kipinde tamamlandı: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Kap güncelleme tek iÅŸ iplikli süpürme tamamlandı: %(elapsed).02fs, " "%(success)s baÅŸarılı, %(fail)s baÅŸarısız, %(no_change)s deÄŸiÅŸiklik yok" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Kap güncelleme süpürme tamamlandı: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s in kap güncelleme süpürmesi tamamlandı: %(elapsed).02fs, " "%(success)s baÅŸarılı, %(fail)s baÅŸarısız, %(no_change)s deÄŸiÅŸiklik yok" #, python-format msgid "Data download error: %s" msgstr "Veri indirme hatası: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Aygıtlar geçiÅŸi tamamlandı: %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "HATA %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "HATA %(status)d %(body)s %(type)s 
Sunucudan" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "HATA %(status)d %(body)s Nesne Sunucu re'den: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "HATA %(status)d Beklenen: 100-Nesne Sunucusundan devam et" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme baÅŸarısız (sonra tekrar " "denenecek): Yanıt %(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "HATA %(host)s dan kötü yanıt %(status)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "HATA İstemci okuma zaman aşımına uÄŸradı (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "HATA Kap güncelleme baÅŸarısız (daha sonraki async güncellemesi için " "kaydediliyor): %(ip)s:%(port)s/%(dev)s den %(status)d yanıtı" #, python-format msgid "ERROR Could not get account info %s" msgstr "HATA hesap bilgisi %s alınamadı" #, python-format msgid "ERROR Could not get container info %s" msgstr "HATA %s kap bilgisi alınamadı" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "HATA %(data_file)s disk dosyası kapatma baÅŸarısız: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "HATA İstisna istemci baÄŸlantısının kesilmesine neden oluyor" msgid "ERROR Failed to get my own IPs?" msgstr "Kendi IP'lerimi alırken HATA?" msgid "ERROR Insufficient Storage" msgstr "HATA Yetersiz Depolama" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "HATA Nesne %(obj)s denetimde baÅŸarısız oldu ve karantinaya alındı: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "HATA Picke problemi, %s karantinaya alınıyor" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "HATA Uzak sürücü baÄŸlı deÄŸil %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "HATA %(db_file)s %(row)s eÅŸzamanlamada" #, python-format msgid "ERROR Syncing %s" msgstr "HATA %s EÅŸzamanlama" #, python-format msgid "ERROR Trying to audit %s" msgstr "HATA %s denetimi denemesinde" msgid "ERROR Unhandled exception in request" msgstr "HATA İstekte ele alınmayan istisna var" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ hatası %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "HATA %(ip)s:%(port)s/%(device)s ile hesap güncelleme baÅŸarısız (sonra " "yeniden denenecek)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "HATA hesap güncelleme baÅŸarısız %(ip)s:%(port)s/%(device)s (sonra tekrar " "denenecek):" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "HATA beklenmeyen isimli async bekleyen dosya %s" msgid "ERROR auditing" msgstr "denetlemede HATA" #, python-format msgid "ERROR auditing: %s" msgstr "HATA denetim: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "HATA kap güncelleme %(ip)s:%(port)s/%(dev)s ile baÅŸarısız oldu (sonraki " "async güncellemesi için kaydediliyor)" #, 
python-format msgid "ERROR reading HTTP response from %s" msgstr "%s'den HTTP yanıtı okumada HATA" #, python-format msgid "ERROR reading db %s" msgstr "%s veri tabanı okumada HATA" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "HATA rsync %(code)s ile baÅŸarısız oldu: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "%(node)s düğümlü %(file)s eÅŸ zamanlamada HATA" msgid "ERROR trying to replicate" msgstr "ÇoÄŸaltmaya çalışmada HATA" #, python-format msgid "ERROR while trying to clean up %s" msgstr "%s temizlenmeye çalışırken HATA" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "HATA %(type)s sunucusu %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "HATA %s den baskılamaların yüklenmesinde: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "HATA uzuk sunucuda %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "HATA: Sürücü bölümlerine olan yollar alınamadı: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "HATA: %(path)s e eriÅŸilemiyor: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "HATA: Denetim çalıştırılamıyor: %s" msgid "Error hashing suffix" msgstr "Sonek özetini çıkarmada hata" msgid "Error listing devices" msgstr "Aygıtları listelemede hata" #, python-format msgid "Error on render profiling results: %s" msgstr "Profilleme sonuçlarının gerçeklenmesinde hata: %s" msgid "Error parsing recon cache file" msgstr "Recon zula dosyasını ayrıştırmada hata" msgid "Error reading recon cache file" msgstr "Recon zula dosyası okumada hata" msgid "Error reading ringfile" msgstr "Halka dosyası okunurken hata" msgid "Error reading swift.conf" msgstr "swift.conf okunurken hata" msgid "Error retrieving recon data" msgstr "Recon verisini almada hata" msgid "Error syncing handoff partition" msgstr "Devir bölümünü eÅŸ zamanlamada hata" msgid "Error syncing partition" msgstr "Bölüm eÅŸzamanlamada hata" #, python-format msgid "Error syncing with node: %s" msgstr "Düğüm ile eÅŸ zamanlamada hata: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Yeniden inÅŸa denenirken hata %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Hata: Bir hata oluÅŸtu" msgid "Error: missing config path argument" msgstr "Hata: yapılandırma yolu deÄŸiÅŸkeni eksik" #, python-format msgid "Error: unable to locate %s" msgstr "Hata: %s bulunamıyor" msgid "Exception dumping recon cache" msgstr "Yeniden baÄŸlanma zulasının dökümünde istisna" msgid "Exception in top-level account reaper loop" msgstr "Üst seviye hesap biçme döngüsünde istisna" msgid "Exception in top-level replication loop" msgstr "Üst seviye çoÄŸaltma döngüsünde istisna" msgid "Exception in top-levelreconstruction loop" msgstr "Üst seviye yeniden oluÅŸturma döngüsünde istisna" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ile istisna" #, python-format msgid "Exception with account %s" msgstr "%s hesabında istisna" #, python-format msgid "Exception with containers for account %s" msgstr "%s hesabı için kaplarla ilgili istisna" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "%(account)s hesabı için %(container)s kabı için 
nesneler için istisna" #, python-format msgid "Expect: 100-continue on %s" msgstr "Beklenen: 100-%s üzerinden devam et" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s den %(found_domain)s e CNAME zinciri takip ediliyor" msgid "Found configs:" msgstr "Yapılandırmalar bulundu:" msgid "Host unreachable" msgstr "İstemci eriÅŸilebilir deÄŸil" #, python-format msgid "Incomplete pass on account %s" msgstr "%s hesabından tamamlanmamış geçiÅŸ" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Geçersix X-Container-Sync-To biçimi %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To'da geçersiz istemci %r" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Geçersiz bekleyen girdi %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "%(full_path)s den geçersiz yanıt %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "%(ip)s den geçersiz yanıt %(resp)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To'da geçersiz ÅŸema %r, \"//\", \"http\", veya \"https\" " "olmalı." #, python-format msgid "Killing long-running rsync: %s" msgstr "Uzun süre çalışan rsync öldürülüyor: %s" msgid "Lockup detected.. killing live coros." msgstr "Kilitleme algılandı.. canlı co-rutinler öldürülüyor." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s %(found_domain)s eÅŸleÅŸtirildi" #, python-format msgid "No %s running" msgstr "Çalışan %s yok" #, python-format msgid "No permission to signal PID %d" msgstr "%d PID'ine sinyalleme izni yok" #, python-format msgid "No policy with index %s" msgstr "%s indisine sahip ilke yok" #, python-format msgid "No realm key for %r" msgstr "%r için realm anahtarı yok" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Düğüm hatası sınırlandı %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Yeterince nesne sunucu ack'lenmedi (%d alındı)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Bulunamadı %(sync_from)r => %(sync_to)r - nesne %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "%s saniye boyunca hiçbir ÅŸey yeniden oluÅŸturulmadı." #, python-format msgid "Nothing replicated for %s seconds." msgstr "%s saniyedir hiçbir ÅŸey çoÄŸaltılmadı." msgid "Object" msgstr "Nesne" msgid "Object PUT" msgstr "Nesne PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "Nesne PUT 409 için 202 döndürüyor: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Nesne PUT 412 döndürüyor, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Nesne denetimi (%(type)s) \"%(mode)s\" kipinde tamamlandı: %(elapsed).02fs. 
" "Toplam karantina: %(quars)d, Toplam hata: %(errors)d, Toplam dosya/sn: " "%(frate).2f, Toplam bayt/sn: %(brate).2f, Denetleme zamanı: %(audit).2f, " "Oran: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Nesne denetim istatistikleri: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Nesne yeniden oluÅŸturma tamamlandı (bir kere). (%.02f dakika)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Nesne yeniden oluÅŸturma tamamlandı. (%.02f dakika)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Nesne çoÄŸaltma tamamlandı (bir kere). (%.02f dakika)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Nesne çoÄŸaltma tamamlandı. (%.02f dakika)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Nesne sunucuları %s eÅŸleÅŸmeyen etag döndürdü" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Nesne güncelleme süpürmesi tamamlandı: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-To'da parametre, sorgular, ve parçalara izin verilmez" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Bölüm zamanları: azami %(max).4fs, asgari %(min).4fs, ortalama %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To'de yol gerekli" #, python-format msgid "Problem cleaning up %s" msgstr "%s temizliÄŸinde problem" #, python-format msgid "Profiling Error: %s" msgstr "Profilleme Hatası: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "%(hsh_path)s %(quar_path)s karantinasına alındı çünkü bir dizin deÄŸil" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Bir dizin olmadığından %(object_path)s %(quar_path)s e karantinaya alındı" #, python-format msgid "Quarantining DB %s" msgstr "DB %s karantinaya alınıyor" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Oran sınırı uyku kaydı: %(account)s/%(container)s/%(object)s için %(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d db silindi" #, python-format msgid "Removing %s objects" msgstr "%s nesne kaldırılıyor" #, python-format msgid "Removing partition: %s" msgstr "Bölüm kaldırılıyor: %s" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Geçersiz pid'e sahip pid dosyası %s siliniyor" #, python-format msgid "Removing stale pid file %s" msgstr "Askıdaki pid dosyası siliniyor %s" msgid "Replication run OVER" msgstr "ÇoÄŸaltma çalışması BİTTİ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Kara listeleme yüzünden 497 döndürülüyor: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "%(acc)s/%(cont)s/%(obj)s ye %(meth)s için 498 döndürülüyor. Oran sınırı " "(Azami uyku) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Zincir deÄŸiÅŸikliÄŸi algılandı. Mevcut yeniden oluÅŸturma geçiÅŸi iptal ediliyor." msgid "Ring change detected. Aborting current replication pass." msgstr "Zincir deÄŸiÅŸimi algılandı. Mevcut çoÄŸaltma geçiÅŸi iptal ediliyor." 
#, python-format msgid "Running %s once" msgstr "%s bir kere çalıştırılıyor" msgid "Running object reconstructor in script mode." msgstr "Nesne yeniden oluÅŸturma betik kipinde çalıştırılıyor." msgid "Running object replicator in script mode." msgstr "Nesne çoÄŸaltıcı betik kipinde çalıştırılıyor." #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "%(time)s den beri: %(sync)s eÅŸzamanlandı [%(delete)s silme, %(put)s koyma], " "%(skip)s atlama, %(fail)s baÅŸarısız" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "%(time)s den beri: Hesap denetimleri: %(passed)s denetimi geçti, %(failed)s " "denetimi geçemedi" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "%(time)s den beri: Kap denetimleri: %(pass)s denetimi geçti, %(fail)s " "denetimde baÅŸarısız" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "BaÄŸlı olmadığından %(device)s atlanıyor" #, python-format msgid "Skipping %s as it is not mounted" msgstr "BaÄŸlı olmadığından %s atlanıyor" #, python-format msgid "Starting %s" msgstr "%s baÅŸlatılıyor" msgid "Starting object reconstruction pass." msgstr "Nesne yeniden oluÅŸturma geçiÅŸi baÅŸlatılıyor." msgid "Starting object reconstructor in daemon mode." msgstr "Nesne yeniden oluÅŸturma artalan iÅŸlemi kipinde baÅŸlatılıyor." msgid "Starting object replication pass." msgstr "Nesne çoÄŸaltma geçiÅŸi baÅŸlatılıyor." msgid "Starting object replicator in daemon mode." msgstr "Nesne çoÄŸaltıcı artalan iÅŸlemi kipinde baÅŸlatılıyor." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "%(dst)s (%(time).03f) de %(src)s baÅŸarılı rsync'i" msgid "The file type are forbidden to access!" msgstr "Dosya türüne eriÅŸim yasaklanmış!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "(%(total)s) kabı için %(key)s toplamı ilkeler arasındaki %(key)s toplamıyla " "eÅŸleÅŸmiyor (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s ile zaman aşımı istisnası" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "%(method)s %(path)s deneniyor" #, python-format msgid "Trying to GET %(full_path)s" msgstr "%(full_path)s GET deneniyor" msgid "Trying to read during GET" msgstr "GET sırasında okuma deneniyor" msgid "Trying to read during GET (retrying)" msgstr "GET sırasında okuma deneniyor (yeniden deneniyor)" msgid "Trying to send to client" msgstr "İstemciye gönderilmeye çalışılıyor" #, python-format msgid "Trying to sync suffixes with %s" msgstr "%s e sahip son ekler eÅŸzamanlanmaya çalışılıyor" #, python-format msgid "Trying to write to %s" msgstr "%s'e yazmaya çalışılıyor" msgid "UNCAUGHT EXCEPTION" msgstr "YAKALANMAYAN İSTİSNA" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "%s libc'de bulunamadı. No-op olarak çıkılıyor." #, python-format msgid "Unable to locate config for %s" msgstr "%s için yapılandırma bulunamıyor" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate libc'de bulunamadı. No-op olarak çıkılıyor." 
#, python-format msgid "Unable to read config from %s" msgstr "%s'den yapılandırma okunamıyor" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "%(sync_from)r => %(sync_to)r yetki al" msgid "Unhandled exception" msgstr "Yakalanmamış istisna" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "GET sırasında bilinmeyen istisna: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s için güncelleme raporu baÅŸarısız" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s için güncelleme raporu gönderildi" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "UYARI: SSL yalnızca test amaçlı etkinleÅŸtirilmelidir. Üretim için kurulumda " "harici SSL sonlandırma kullanın." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "UYARI: Dosya göstericisi sınırı deÄŸiÅŸtirilemiyor. Root deÄŸil misiniz?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "UYARI: Azami süreç limiti deÄŸiÅŸtirilemiyor. Root deÄŸil misiniz?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "UYARI: Hafıza sınırı deÄŸiÅŸtirilemiyor. Root deÄŸil misiniz?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Uyarı: Memcached istemcisi olmadan oran sınırlama yapılamaz" #, python-format msgid "method %s is not allowed." msgstr "%s metoduna izin verilmez." msgid "no log file found" msgstr "kayıt dosyası bulunamadı" msgid "odfpy not installed." msgstr "odfpy kurulu deÄŸil." #, python-format msgid "plotting results failed due to %s" msgstr "çizdirme sonuçlaru %s sebebiyle baÅŸarısız" msgid "python-matplotlib not installed." msgstr "python-matplotlib kurulu deÄŸil." swift-2.17.1/swift/locale/en_GB/0000775000175000017500000000000013435012120016314 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000013435012120020101 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/en_GB/LC_MESSAGES/swift.po0000666000175000017500000011326713435012015021614 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andi Chandler , 2016. #zanata # Andreas Jaeger , 2016. #zanata # Andi Chandler , 2018. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2018-01-27 09:17+0000\n" "Last-Translator: Andi Chandler \n" "Language: en_GB\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: English (United Kingdom)\n" msgid "" "\n" "user quit" msgstr "" "\n" "user quit" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(reconstructed)d/%(total)d (%(percentage).2f%%) partitions reconstructed in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "%(replication_ip)s/%(device)s responded as unmounted" msgstr "%(replication_ip)s/%(device)s responded as unmounted" #, python-format msgid "%(server)s #%(number)d not running (%(conf)s)" msgstr "%(server)s #%(number)d not running (%(conf)s)" #, python-format msgid "%(server)s (%(pid)s) appears to have stopped" msgstr "%(server)s (%(pid)s) appears to have stopped" #, python-format msgid "%(server)s running (%(pid)s - %(conf)s)" msgstr "%(server)s running (%(pid)s - %(conf)s)" #, python-format msgid "%(server)s running (%(pid)s - %(pid_file)s)" msgstr "%(server)s running (%(pid)s - %(pid_file)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s successes, %(failure)s failures" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s returning 503 for %(statuses)s" #, python-format msgid "%(type)s: %(value)s" msgstr "%(type)s: %(value)s" #, python-format msgid "%s already started..." msgstr "%s already started..." 
#, python-format msgid "%s does not exist" msgstr "%s does not exist" #, python-format msgid "%s is not mounted" msgstr "%s is not mounted" #, python-format msgid "%s responded as unmounted" msgstr "%s responded as unmounted" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Connection reset by peer" #, python-format msgid ", %s containers deleted" msgstr ", %s containers deleted" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s containers possibly remaining" #, python-format msgid ", %s containers remaining" msgstr ", %s containers remaining" #, python-format msgid ", %s objects deleted" msgstr ", %s objects deleted" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objects possibly remaining" #, python-format msgid ", %s objects remaining" msgstr ", %s objects remaining" #, python-format msgid ", elapsed: %.02fs" msgstr ", elapsed: %.02fs" msgid ", return codes: " msgstr ", return codes: " msgid "Account" msgstr "Account" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "Account %(account)s has not been reaped since %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Account audit \"once\" mode completed: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Account audit pass completed: %.02fs" #, python-format msgid "" "Adding required filter %(filter_name)s to pipeline at position %(insert_at)d" msgstr "" "Adding required filter %(filter_name)s to pipeline at position %(insert_at)d" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" #, python-format msgid "Audit Failed for %(path)s: %(err)s" msgstr "Audit Failed for %(path)s: %(err)s" #, python-format msgid "Audit passed for %s" msgstr "Audit passed for %s" #, python-format msgid "Bad key for %(name)r: %(err)s" msgstr "Bad key for %(name)r: %(err)s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Bad rsync return code: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Begin account audit \"once\" mode" msgid "Begin account audit pass." msgstr "Begin account audit pass." msgid "Begin container audit \"once\" mode" msgstr "Begin container audit \"once\" mode" msgid "Begin container audit pass." msgstr "Begin container audit pass." msgid "Begin container sync \"once\" mode" msgstr "Begin container sync \"once\" mode" msgid "Begin container update single threaded sweep" msgstr "Begin container update single threaded sweep" msgid "Begin container update sweep" msgstr "Begin container update sweep" #, python-format msgid "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" msgstr "Begin object audit \"%(mode)s\" mode (%(audi_type)s%(description)s)" msgid "Begin object update single threaded sweep" msgstr "Begin object update single threaded sweep" msgid "Begin object update sweep" msgstr "Begin object update sweep" #, python-format msgid "Beginning pass on account %s" msgstr "Beginning pass on account %s" msgid "Beginning replication run" msgstr "Beginning replication run" msgid "Broker error trying to rollback locked connection" msgstr "Broker error trying to rollback locked connection" #, python-format msgid "Can not access the file %s." msgstr "Can not access the file %s." #, python-format msgid "Can not load profile data from %s." msgstr "Can not load profile data from %s." 
#, python-format msgid "Cannot read %(auditor_status)s (%(err)s)" msgstr "Cannot read %(auditor_status)s (%(err)s)" #, python-format msgid "Cannot write %(auditor_status)s (%(err)s)" msgstr "Cannot write %(auditor_status)s (%(err)s)" #, python-format msgid "Client did not read from proxy within %ss" msgstr "Client did not read from proxy within %ss" msgid "Client disconnected on read" msgstr "Client disconnected on read" msgid "Client disconnected without sending enough data" msgstr "Client disconnected without sending enough data" msgid "Client disconnected without sending last chunk" msgstr "Client disconnected without sending last chunk" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgid "Connection refused" msgstr "Connection refused" msgid "Connection timeout" msgstr "Connection timeout" msgid "Container" msgstr "Container" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Container audit \"once\" mode completed: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Container audit pass completed: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Container sync \"once\" mode completed: %.02fs" #, python-format msgid "" "Container sync report: %(container)s, time window start: %(start)s, time " "window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_rows: %(total)s" msgstr "" "Container sync report: %(container)s, time window start: %(start)s, time " "window end: %(end)s, puts: %(puts)s, posts: %(posts)s, deletes: %(deletes)s, " "bytes: %(bytes)s, sync_point1: %(point1)s, sync_point2: %(point2)s, " "total_rows: %(total)s" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Container update sweep completed: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" #, python-format msgid "Could not autocreate account %r" msgstr "Could not autocreate account %r" #, python-format msgid "" "Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds" msgstr "" "Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds" #, python-format msgid "Could not load %(conf)r: %(error)s" msgstr "Could not load %(conf)r: %(error)s" #, python-format msgid "Data download error: %s" msgstr "Data download error: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Devices pass completed: %.02fs" msgid "Did not get a keys dict" 
msgstr "Did not get a keys dict" #, python-format msgid "Directory %(directory)r does not map to a valid policy (%(error)s)" msgstr "Directory %(directory)r does not map to a valid policy (%(error)s)" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERROR %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERROR %(status)d %(body)s From %(type)s Server" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERROR %(status)d %(body)s From Object Server re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "ERROR %(status)d Expect: 100-continue From Object Server" #, python-format msgid "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server" msgstr "ERROR %(status)d Trying to %(method)s %(path)s From %(type)s Server" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERROR Bad response %(status)s from %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERROR Client read timeout (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" #, python-format msgid "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "ERROR Container update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERROR Could not get account info %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERROR Could not get container info %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERROR Exception causing client disconnect" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "ERROR Exception transferring data to object servers %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERROR Failed to get my own IPs?" 
msgid "ERROR Insufficient Storage" msgstr "ERROR Insufficient Storage" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERROR Pickle problem, quarantining %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERROR Remote drive not mounted %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERROR Syncing %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERROR Syncing %s" #, python-format msgid "" "ERROR There are not enough handoff nodes to reach replica count for " "partition %s" msgstr "" "ERROR There are not enough hand-off nodes to reach replica count for " "partition %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERROR Trying to audit %s" msgid "ERROR Unhandled exception in request" msgstr "ERROR Unhandled exception in request" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ error with %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "ERROR async pending file with unexpected name %s" msgid "ERROR auditing" msgstr "ERROR auditing" #, python-format msgid "ERROR auditing: %s" msgstr "ERROR auditing: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgid "ERROR get_keys() missing callback" msgstr "ERROR get_keys() missing callback" #, python-format msgid "ERROR get_keys(): from callback: %s" msgstr "ERROR get_keys(): from callback: %s" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "ERROR reading HTTP response from %s" #, python-format msgid "ERROR reading db %s" msgstr "ERROR reading db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERROR rsync failed with %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERROR syncing %(file)s with node %(node)s" msgid "ERROR trying to replicate" msgstr "ERROR trying to replicate" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERROR while trying to clean up %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERROR with loading suppressions from %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERROR with remote server %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "ERROR: Failed to get paths to drive partitions: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERROR: Unable to access %(path)s: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERROR: 
Unable to run auditing: %s" #, python-format msgid "" "Error code %(status)d is returned from remote server %(ip)s: %(port)s / " "%(device)s" msgstr "" "Error code %(status)d is returned from remote server %(ip)s: %(port)s / " "%(device)s" #, python-format msgid "Error decoding fragments for %r" msgstr "Error decoding fragments for %r" #, python-format msgid "Error decrypting %(resp_type)s: %(reason)s" msgstr "Error decrypting %(resp_type)s: %(reason)s" #, python-format msgid "Error decrypting %(resp_type)s: Missing %(key)s" msgstr "Error decrypting %(resp_type)s: Missing %(key)s" #, python-format msgid "Error decrypting container listing: %s" msgstr "Error decrypting container listing: %s" #, python-format msgid "Error decrypting header %(header)s: %(error)s" msgstr "Error decrypting header %(header)s: %(error)s" #, python-format msgid "Error decrypting object: %s" msgstr "Error decrypting object: %s" msgid "Error hashing suffix" msgstr "Error hashing suffix" #, python-format msgid "Error in %(conf)r with mtime_check_interval: %(error)s" msgstr "Error in %(conf)r with mtime_check_interval: %(error)s" msgid "Error listing devices" msgstr "Error listing devices" #, python-format msgid "Error on render profiling results: %s" msgstr "Error on render profiling results: %s" msgid "Error parsing recon cache file" msgstr "Error parsing recon cache file" msgid "Error reading recon cache file" msgstr "Error reading recon cache file" msgid "Error reading ringfile" msgstr "Error reading ringfile" msgid "Error reading swift.conf" msgstr "Error reading swift.conf" msgid "Error retrieving recon data" msgstr "Error retrieving recon data" #, python-format msgid "Error sending UDP message to %(target)r: %(err)s" msgstr "Error sending UDP message to %(target)r: %(err)s" msgid "Error syncing handoff partition" msgstr "Error syncing hand-off partition" msgid "Error syncing partition" msgstr "Error syncing partition" #, python-format msgid "Error syncing with node: %s" msgstr "Error syncing with node: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Error: An error occurred" msgid "Error: missing config path argument" msgstr "Error: missing config path argument" #, python-format msgid "Error: unable to locate %s" msgstr "Error: unable to locate %s" msgid "Exception dumping recon cache" msgstr "Exception dumping recon cache" #, python-format msgid "Exception fetching fragments for %r" msgstr "Exception fetching fragments for %r" msgid "Exception in top-level account reaper loop" msgstr "Exception in top-level account reaper loop" msgid "Exception in top-level replication loop" msgstr "Exception in top-level replication loop" msgid "Exception in top-levelreconstruction loop" msgstr "Exception in top-level reconstruction loop" #, python-format msgid "Exception while deleting container %(container)s %(err)s" msgstr "Exception while deleting container %(container)s %(err)s" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exception with %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Exception with account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exception with containers for account %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exception with objects for container 
%(container)s for account %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Expect: 100-continue on %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgid "Found configs:" msgstr "Found configs:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Hand-offs first mode still has hand-offs remaining. Aborting current " "replication pass." msgid "" "Handoffs only mode found no handoffs remaining. You should disable " "handoffs_only once all nodes are reporting no handoffs remaining." msgstr "" "Hand-offs only mode found no hand-offs remaining. You should disable " "handoffs_only once all nodes are reporting no hand-offs remaining." msgid "" "Handoffs only mode still has handoffs remaining. Next pass will continue to " "revert handoffs." msgstr "" "Hand-offs only mode still has hand-offs remaining. Next pass will continue " "to revert hand-offs." msgid "Host unreachable" msgstr "Host unreachable" #, python-format msgid "Incomplete pass on account %s" msgstr "Incomplete pass on account %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Invalid X-Container-Sync-To format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Invalid host %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Invalid pending entry %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Invalid response %(resp)s from %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Invalid response %(resp)s from %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgid "Invalid swift_bytes" msgstr "Invalid swift_bytes" #, python-format msgid "Killing long-running rsync: %s" msgstr "Killing long-running rsync: %s" #, python-format msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" msgstr "Loading JSON from %(auditor_status)s failed (%(err)s)" msgid "Lockup detected.. killing live coros." msgstr "Lockup detected.. killing live coros." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Mapped %(given_domain)s to %(found_domain)s" #, python-format msgid "Missing key for %r" msgstr "Missing key for %r" msgid "More than one part in a single-part response?" msgstr "More than one part in a single-part response?" 
#, python-format msgid "No %s running" msgstr "No %s running" #, python-format msgid "No cluster endpoint for %(realm)r %(cluster)r" msgstr "No cluster endpoint for %(realm)r %(cluster)r" #, python-format msgid "No permission to signal PID %d" msgstr "No permission to signal PID %d" #, python-format msgid "No policy with index %s" msgstr "No policy with index %s" #, python-format msgid "No realm key for %r" msgstr "No realm key for %r" #, python-format msgid "No space left on device for %(file)s (%(err)s)" msgstr "No space left on device for %(file)s (%(err)s)" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Node error limited %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Not enough object servers ack'ed (got %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Nothing reconstructed for %s seconds." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Nothing replicated for %s seconds." msgid "Object" msgstr "Object" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "" "Object PUT exceptions after last send, %(conns)s/%(nodes)s required " "connections" msgstr "" "Object PUT exceptions after last send, %(conns)s/%(nodes)s required " "connections" #, python-format msgid "" "Object PUT exceptions during send, %(conns)s/%(nodes)s required connections" msgstr "" "Object PUT exceptions during send, %(conns)s/%(nodes)s required connections" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Object PUT returning 412, %(statuses)r" #, python-format msgid "Object PUT returning 503, %(conns)s/%(nodes)s required connections" msgstr "Object PUT returning 503, %(conns)s/%(nodes)s required connections" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Object audit stats: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Object reconstruction complete (once). (%.02f minutes)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Object reconstruction complete. 
(%.02f minutes)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Object replication complete (once). (%.02f minutes)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Object replication complete. (%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Object servers returned %s mismatched etags" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Object update sweep completed: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Params, queries, and fragments not allowed in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" #, python-format msgid "" "Pass beginning; %(containers)s possible containers; %(objects)s possible " "objects" msgstr "" "Pass beginning; %(containers)s possible containers; %(objects)s possible " "objects" #, python-format msgid "Pass completed in %(time)ds; %(objects)d objects expired" msgstr "Pass completed in %(time)ds; %(objects)d objects expired" #, python-format msgid "Pass so far %(time)ds; %(objects)d objects expired" msgstr "Pass so far %(time)ds; %(objects)d objects expired" msgid "Path required in X-Container-Sync-To" msgstr "Path required in X-Container-Sync-To" #, python-format msgid "Pipeline is \"%s\"" msgstr "Pipeline is \"%s\"" #, python-format msgid "Pipeline was modified. New pipeline is \"%s\"." msgstr "Pipeline was modified. New pipeline is \"%s\"." #, python-format msgid "Problem checking EC fragment %(datadir)s: %(err)s" msgstr "Problem checking EC fragment %(datadir)s: %(err)s" #, python-format msgid "Problem cleaning up %(datadir)s (%(err)s)" msgstr "Problem cleaning up %(datadir)s (%(err)s)" #, python-format msgid "Problem cleaning up %s" msgstr "Problem cleaning up %s" #, python-format msgid "Problem making data file durable %(file)s (%(err)s)" msgstr "Problem making data file durable %(file)s (%(err)s)" #, python-format msgid "Problem with fragment response: %s" msgstr "Problem with fragment response: %s" #, python-format msgid "Profiling Error: %s" msgstr "Profiling Error: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" #, python-format msgid "Quarantining DB %s" msgstr "Quarantining DB %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "Removed %(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "Removing %s objects" #, python-format msgid "Removing partition: %s" msgstr "Removing partition: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Removing pid file %(pid_file)s with wrong pid %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Removing pid file %s with invalid pid" #, python-format msgid "Removing stale pid file %s" msgstr "Removing stale pid file %s" msgid "Replication run OVER" msgstr "Replication run OVER" #, python-format msgid "Returning 497 
because of blacklisting: %s" msgstr "Returning 497 because of blacklisting: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "Ring change detected. Aborting current reconstruction pass." msgid "Ring change detected. Aborting current replication pass." msgstr "Ring change detected. Aborting current replication pass." #, python-format msgid "Running %s once" msgstr "Running %s once" msgid "Running object reconstructor in script mode." msgstr "Running object reconstructor in script mode." msgid "Running object replicator in script mode." msgstr "Running object replicator in script mode." #, python-format msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "Signal %(server)s pid: %(pid)s signal: %(signal)s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" #, python-format msgid "Skipping %(datadir)s because %(err)s" msgstr "Skipping %(datadir)s because %(err)s" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "Skipping %(device)s as it is not mounted" #, python-format msgid "Skipping %(dir)s: %(err)s" msgstr "Skipping %(dir)s: %(err)s" #, python-format msgid "Skipping %s as it is not mounted" msgstr "Skipping %s as it is not mounted" #, python-format msgid "Starting %s" msgstr "Starting %s" msgid "Starting object reconstruction pass." msgstr "Starting object reconstruction pass." msgid "Starting object reconstructor in daemon mode." msgstr "Starting object reconstructor in daemon mode." msgid "Starting object replication pass." msgstr "Starting object replication pass." msgid "Starting object replicator in daemon mode." msgstr "Starting object replicator in daemon mode." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "The file type are forbidden to access!" 
#, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Timeout Exception with %(ip)s:%(port)s/%(device)s" #, python-format msgid "Timeout fetching fragments for %r" msgstr "Timeout fetching fragments for %r" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Trying to %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Trying to GET %(full_path)s" #, python-format msgid "Trying to get %(status_type)s status of PUT to %(path)s" msgstr "Trying to get %(status_type)s status of PUT to %(path)s" msgid "Trying to read during GET" msgstr "Trying to read during GET" msgid "Trying to read during GET (retrying)" msgstr "Trying to read during GET (retrying)" msgid "Trying to send to client" msgstr "Trying to send to client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Trying to sync suffixes with %s" #, python-format msgid "Trying to write to %s" msgstr "Trying to write to %s" msgid "UNCAUGHT EXCEPTION" msgstr "UNCAUGHT EXCEPTION" #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "Unable to find %(section)s config section in %(conf)s" #, python-format msgid "Unable to load internal client from config: %(conf)r (%(error)s)" msgstr "Unable to load internal client from config: %(conf)r (%(error)s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Unable to locate %s in libc. Leaving as a no-op." #, python-format msgid "Unable to locate config for %s" msgstr "Unable to locate config for %s" #, python-format msgid "Unable to locate config number %(number)s for %(server)s" msgstr "Unable to locate config number %(number)s for %(server)s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." #, python-format msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "Unable to perform fsync() on directory %(dir)s: %(err)s" #, python-format msgid "Unable to read config from %s" msgstr "Unable to read config from %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Unauth %(sync_from)r => %(sync_to)r" #, python-format msgid "" "Unexpected fragment data type (not quarantined)%(datadir)s: %(type)s at " "offset 0x%(offset)x" msgstr "" "Unexpected fragment data type (not quarantined)%(datadir)s: %(type)s at " "offset 0x%(offset)x" msgid "Unhandled exception" msgstr "Unhandled exception" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "Unknown exception trying to GET: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Update report failed for %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Update report sent for %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgid "" "WARNING: Unable to modify I/O scheduling class and priority of process. 
" "Keeping unchanged! Check logs for more info." msgstr "" "WARNING: Unable to modify I/O scheduling class and priority of process. " "Keeping unchanged! Check logs for more info." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "WARNING: Unable to modify max process limit. Running as non-root?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "WARNING: Unable to modify memory limit. Running as non-root?" msgid "" "WARNING: Unable to modify scheduling priority of process. Keeping unchanged! " "Check logs for more info. " msgstr "" "WARNING: Unable to modify scheduling priority of process. Keeping unchanged! " "Check logs for more info. " #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "Waited %(kill_wait)s seconds for %(server)s to die; giving up" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Warning: Cannot ratelimit without a memcached client" #, python-format msgid "autocreate account %r" msgstr "autocreate account %r" #, python-format msgid "method %s is not allowed." msgstr "method %s is not allowed." #, python-format msgid "next_part_power set in policy '%s'. Skipping" msgstr "next_part_power set in policy '%s'. Skipping" msgid "no log file found" msgstr "no log file found" msgid "odfpy not installed." msgstr "odfpy not installed." #, python-format msgid "plotting results failed due to %s" msgstr "plotting results failed due to %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib not installed." #, python-format msgid "" "sorting_method is set to '%(method)s', not 'affinity'; %(label)s " "read_affinity setting will have no effect." msgstr "" "sorting_method is set to '%(method)s', not 'affinity'; %(label)s " "read_affinity setting will have no effect." swift-2.17.1/swift/locale/fr/0000775000175000017500000000000013435012120015751 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/fr/LC_MESSAGES/0000775000175000017500000000000013435012120017536 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/fr/LC_MESSAGES/swift.po0000666000175000017500000007365513435012015021257 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Maxime COQUEREL , 2014 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:42+0000\n" "Last-Translator: Copied by Zanata \n" "Language: fr\n" "Plural-Forms: nplurals=2; plural=(n > 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: French\n" msgid "" "\n" "user quit" msgstr "" "\n" "l'utilisateur quitte le programme" #, python-format msgid " - %s" msgstr "- %s" #, python-format msgid " - parallel, %s" msgstr "- parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d suffixe(s) vérifié(s) - %(hashed).2f%% haché(s), %(synced).2f%% " "synchronisé(s)" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions répliquées en " "%(time).2fs (%(rate).2f/sec ; %(remaining)s restante(s))" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s succès, %(failure)s échec(s)" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s : renvoi de l'erreur 503 pour %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s déjà démarré..." #, python-format msgid "%s does not exist" msgstr "%s n'existe pas" #, python-format msgid "%s is not mounted" msgstr "%s n'est pas monté" #, python-format msgid "%s responded as unmounted" msgstr "%s ont été identifié(es) comme étant démonté(es)" #, python-format msgid "%s: Connection reset by peer" msgstr "%s : Connexion réinitialisée par l'homologue" #, python-format msgid ", %s containers deleted" msgstr ", %s containers supprimés" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s conteneur(s) restant(s), le cas échéant" #, python-format msgid ", %s containers remaining" msgstr ", %s conteneur(s) restant(s)" #, python-format msgid ", %s objects deleted" msgstr ", %s objets supprimés" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s objet(s) restant(s), le cas échéant" #, python-format msgid ", %s objects remaining" msgstr ", %s objet(s) restant(s)" #, python-format msgid ", elapsed: %.02fs" msgstr ", temps écoulé : %.02fs" msgid ", return codes: " msgstr ", return codes: " msgid "Account" msgstr "Compte" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Audit de compte en mode \"Once\" terminé : %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Session d'audit de compte terminée : %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Tentative de réplication de %(count)d bases de données en %(time).5f " "secondes (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Code retour Rsync non valide : %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Démarrer l'audit de compte en mode \"Once\" (une fois)" msgid "Begin account audit pass." msgstr "Démarrer la session d'audit de compte." 
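# Illustrative aside (not part of the upstream catalog): this French header declares
# "Plural-Forms: nplurals=2; plural=(n > 1);" where the en_GB catalogue used
# "plural=(n != 1)" -- both have two forms, but n == 0 selects the singular in
# French and the plural in English. Transcribed into Python, these are the
# expressions gettext evaluates when ngettext() picks a form:
#
#     plural_en = lambda n: int(n != 1)  # en_GB rule
#     plural_fr = lambda n: int(n > 1)   # fr rule from the header above
#     print(plural_en(0), plural_fr(0))  # -> 1 0: "0 objects" but "0 objet"
#
# which is why a translation can never assume the English plural rule carries over.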
msgid "Begin container audit \"once\" mode" msgstr "Démarrer l'audit de conteneur en mode \"Once\" (une fois)" msgid "Begin container audit pass." msgstr "Démarrer la session d'audit de conteneur." msgid "Begin container sync \"once\" mode" msgstr "Démarrer la synchronisation de conteneurs en mode \"Once\" (une fois)" msgid "Begin container update single threaded sweep" msgstr "" "Démarrer le balayage des mises à jour du conteneur (unité d'exécution unique)" msgid "Begin container update sweep" msgstr "Démarrer le balayage des mises à jour du conteneur" msgid "Begin object update single threaded sweep" msgstr "" "Démarrer le balayage des mises à jour d'objet (unité d'exécution unique)" msgid "Begin object update sweep" msgstr "Démarrer le balayage des mises à jour d'objet" #, python-format msgid "Beginning pass on account %s" msgstr "Démarrage de la session d'audit sur le compte %s" msgid "Beginning replication run" msgstr "Démarrage du cycle de réplication" msgid "Broker error trying to rollback locked connection" msgstr "" "Erreur de courtier lors d'une tentative d'annulation d'une connexion " "verrouillée" #, python-format msgid "Can not access the file %s." msgstr "Ne peut pas accéder au fichier %s." #, python-format msgid "Can not load profile data from %s." msgstr "Impossible de charger des données de profil depuis %s." #, python-format msgid "Client did not read from proxy within %ss" msgstr "Le client n'a pas lu les données du proxy en %s s" msgid "Client disconnected on read" msgstr "Client déconnecté lors de la lecture" msgid "Client disconnected without sending enough data" msgstr "Client déconnecté avant l'envoi de toutes les données requises" msgid "Client disconnected without sending last chunk" msgstr "Le client a été déconnecté avant l'envoi du dernier bloc" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Le chemin d'accès au client %(client)s ne correspond pas au chemin stocké " "dans les métadonnées d'objet %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "L'option de configuration internal_client_conf_path n'a pas été définie. La " "configuration par défaut est utilisée. Consultez les options dans internal-" "client.conf-sample." 
msgid "Connection refused" msgstr "Connexion refusée" msgid "Connection timeout" msgstr "Dépassement du délai d'attente de connexion" msgid "Container" msgstr "Conteneur" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Audit de conteneur en mode \"Once\" terminé : %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Session d'audit de conteneur terminée : %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Synchronisation de conteneurs en mode \"Once\" terminée : %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Le balayage des mises à jour du conteneur (unité d'exécution unique) est " "terminé : %(elapsed).02fs, %(success)s succès, %(fail)s échec(s), " "%(no_change)s inchangé(s)" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Le balayage des mises à jour du conteneur est terminé : %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Le balayage des mises à jour du conteneur (%(path)s) est terminé : " "%(elapsed).02fs, %(success)s succès, %(fail)s échec(s), %(no_change)s " "inchangé(s)" #, python-format msgid "Data download error: %s" msgstr "Erreur de téléchargement des données: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Session d'audit d'unité terminée : %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "ERREUR %(db_file)s : %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "ERREUR %(status)d %(body)s depuis le serveur %(type)s" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "ERREUR %(status)d %(body)s depuis le serveur d'objets. Réf. : %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "" "ERREUR %(status)d Attendu(s) : 100 - poursuivre depuis le serveur d'objets" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement). 
Réponse %(status)s " "%(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "ERREUR Réponse incorrecte %(status)s de %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "ERREUR Dépassement du délai de lecture du client (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "ERREUR Echec de la mise à jour du conteneur (sauvegarde pour mise à jour " "asynchrone ultérieure) : réponse %(status)d renvoyée par %(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "ERREUR Impossible d'obtenir les infos de compte %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "ERREUR Impossible d'obtenir les infos de conteneur %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "ERREUR Incident de fermeture du fichier disque %(data_file)s : %(exc)s : " "%(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "ERREUR Exception entraînant la déconnexion du client" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "" "ERREUR Exception lors du transfert de données vers des serveurs d'objets %s" msgid "ERROR Failed to get my own IPs?" msgstr "ERREUR Obtention impossible de mes propres adresses IP ?" msgid "ERROR Insufficient Storage" msgstr "ERREUR Stockage insuffisant" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "ERREUR L'objet %(obj)s a échoué à l'audit et a été en quarantaine : %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "ERREUR Problème lié à Pickle. Mise en quarantaine de %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "ERREUR Unité distante %s non montée" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "ERREUR lors de la synchronisation de %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "ERREUR lors de la synchronisation de %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "ERREUR lors de la tentative d'audit de %s" msgid "ERROR Unhandled exception in request" msgstr "ERREUR Exception non gérée dans la demande" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "ERROR __call__ error sur %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "ERREUR Echec de la mise à jour du compte avec %(ip)s:%(port)s/%(device)s " "(une nouvelle tentative sera effectuée ultérieurement) : " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "" "ERREUR Le fichier des mises à jour asynchrones en attente porte un nom " "inattendu %s" msgid "ERROR auditing" msgstr "Erreur d'audit" #, python-format msgid "ERROR auditing: %s" msgstr "ERREUR d'audit : %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "ERREUR Echec de la mise à jour du conteneur avec %(ip)s:%(port)s/%(dev)s " "(sauvegarde pour mise à jour asynchrone ultérieure)" #, python-format msgid "ERROR reading HTTP 
response from %s" msgstr "Erreur de lecture de la réponse HTTP depuis %s" #, python-format msgid "ERROR reading db %s" msgstr "ERREUR de lecture de db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "ERREUR Echec de Rsync avec %(code)s : %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "ERREUR de synchronisation de %(file)s avec le noeud %(node)s" msgid "ERROR trying to replicate" msgstr "ERREUR lors de la tentative de réplication" #, python-format msgid "ERROR while trying to clean up %s" msgstr "ERREUR pendant le nettoyage %s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "" "ERREUR liée au serveur %(type)s %(ip)s:%(port)s/%(device)s. Réf. : %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "ERREUR de chargement des suppressions de %s : " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "ERREUR liée au serveur distant %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "ERREUR : Echec de l'obtention des chemins d'accès aux partitions d'unité : %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "ERREUR : Impossible d'accéder à %(path)s : %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "ERREUR : Impossible d'exécuter l'audit : %s" msgid "Error hashing suffix" msgstr "Erreur suffixe hashing" msgid "Error listing devices" msgstr "Erreur lors du listage des unités" #, python-format msgid "Error on render profiling results: %s" msgstr "Erreur de rendu des résultats de profilage : %s" msgid "Error parsing recon cache file" msgstr "Erreur lors de l'analyse syntaxique du fichier cache Recon" msgid "Error reading recon cache file" msgstr "Erreur de lecture du fichier cache Recon" msgid "Error reading ringfile" msgstr "Erreur de lecture du fichier Ring" msgid "Error reading swift.conf" msgstr "Erreur de lecture de swift.conf" msgid "Error retrieving recon data" msgstr "Erreur lors de l'extraction des données Recon" msgid "Error syncing handoff partition" msgstr "Erreur lors de la synchronisation de la partition de transfert" msgid "Error syncing partition" msgstr "Erreur de synchronisation de la partition" #, python-format msgid "Error syncing with node: %s" msgstr "Erreur de synchronisation avec le noeud : %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Une erreur est survenue lors de la tentative de régénération de %(path)s " "policy#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Erreur : une erreur s'est produite" msgid "Error: missing config path argument" msgstr "Erreur: Manque argument de configuration du chemin" #, python-format msgid "Error: unable to locate %s" msgstr "Erreur: impossible de localiser %s" msgid "Exception dumping recon cache" msgstr "Exception lors du vidage de cache Recon" msgid "Exception in top-level account reaper loop" msgstr "Exception dans la boucle de collecteur de compte de niveau supérieur" msgid "Exception in top-level replication loop" msgstr "Exception dans la boucle de réplication de niveau supérieur" msgid "Exception in top-levelreconstruction loop" msgstr "Exception dans la boucle de reconstruction de niveau supérieur" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Exception liée à %(ip)s:%(port)s/%(device)s" #, python-format msgid 
"Exception with account %s" msgstr "Exception avec le compte %s" #, python-format msgid "Exception with containers for account %s" msgstr "Exception avec les containers pour le compte %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Exception liée aux objets pour le conteneur %(container)s et le compte " "%(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Attendus(s) : 100 - poursuivre sur %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "" "Suivi de la chaîne CNAME pour %(given_domain)s jusqu'à %(found_domain)s" msgid "Found configs:" msgstr "Configurations trouvées :" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Le premier mode de transferts contient d'autres transferts. Abandon de la " "session de réplication en cours." msgid "Host unreachable" msgstr "Hôte inaccessible" #, python-format msgid "Incomplete pass on account %s" msgstr "Session d'audit incomplète sur le compte %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Non valide X-Container-Sync-To format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Hôte %r non valide dans X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Entrée en attente non valide %(file)s : %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Réponse %(resp)s non valide de %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Réponse %(resp)s non valide de %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Schéma %r non valide dans X-Container-Sync-To. Doit être \"//\", \"http\" ou " "\"https\"." #, python-format msgid "Killing long-running rsync: %s" msgstr "Arrêt de l'opération Rsync à exécution longue : %s" msgid "Lockup detected.. killing live coros." msgstr "Blocage détecté. Arrêt des coroutines actives." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s mappé avec %(found_domain)s" #, python-format msgid "No %s running" msgstr "Non démarré %s" #, python-format msgid "No permission to signal PID %d" msgstr "Aucun droit pour signaler le PID %d" #, python-format msgid "No policy with index %s" msgstr "Aucune statégie avec un index de type %s" #, python-format msgid "No realm key for %r" msgstr "Aucune clé de domaine pour %r" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "" "Noeud marqué avec limite d'erreurs (error_limited) %(ip)s:%(port)s " "(%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "" "Le nombre de serveurs d'objets reconnus n'est pas suffisant (%d obtenus)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Introuvable : %(sync_from)r => %(sync_to)r - objet " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Aucun élément reconstruit pendant %s secondes." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Aucun élément répliqué pendant %s secondes." 
msgid "Object" msgstr "Objet" msgid "Object PUT" msgstr "Object PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 202 pour 409 : " "%(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "" "L'opération d'insertion (PUT) d'objet a renvoyé l'erreur 412. %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "L'audit d'objet (%(type)s) en mode \"%(mode)s\" est terminé : " "%(elapsed).02fs. Nombre total mis en quarantaine : %(quars)d. Nombre total " "d'erreurs : %(errors)d. Nombre total de fichiers/sec : %(frate).2f. Nombre " "total d'octets/sec : %(brate).2f. Durée d'audit : %(audit).2f. Taux : " "%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Audit d'objet (%(type)s). Depuis %(start_time)s, localement : %(passes)d " "succès. %(quars)d en quarantaine. %(errors)d erreurs. Fichiers/sec : " "%(frate).2f. Octets/sec : %(brate).2f. Durée totale : %(total).2f. Durée " "d'audit : %(audit).2f. Taux : %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Statistiques de l'audit d'objet : %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "" "La reconstruction d'objet en mode Once (une fois) est terminée. (%.02f " "minutes)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Reconstruction d'objet terminée. (%.02f minutes)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "" "La réplication d'objet en mode Once (une fois) est terminée. (%.02f minutes)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Réplication d'objet terminée. 
(%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Des serveurs d'objets ont renvoyé %s en-têtes Etag non concordants" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Le balayage des mises à jour d'objet est terminé : %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "Paramètres, requêtes et fragments interdits dans X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Temps de partition : maximum %(max).4fs, minimum %(min).4fs, moyenne " "%(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Chemin requis dans X-Container-Sync-To" #, python-format msgid "Problem cleaning up %s" msgstr "Problème lors du nettoyage de %s" #, python-format msgid "Profiling Error: %s" msgstr "Erreur de profilage : %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s n'est pas un répertoire et a donc été mis en quarantaine dans " "%(quar_path)s" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s n'est pas un répertoire et a donc été mis en quarantaine " "dans %(quar_path)s" #, python-format msgid "Quarantining DB %s" msgstr "Mise en quarantaine de la base de données %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Journal de mise en veille Ratelimit : %(sleep)s pour %(account)s/" "%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d bases de données ont été retirées" #, python-format msgid "Removing %s objects" msgstr "Suppression de %s objets" #, python-format msgid "Removing partition: %s" msgstr "Suppression partition: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "" "Supression du fichier PID %(pid_file)s, comportant un PID incorrect %(pid)d" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "Suppression du fichier pid %s comportant un pid non valide" #, python-format msgid "Removing stale pid file %s" msgstr "Suppression du fichier PID %s périmé" msgid "Replication run OVER" msgstr "Le cycle de réplication est terminé" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Renvoi de 497 en raison du placement sur liste noire : %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "Renvoi de 498 pour %(meth)s jusqu'à %(acc)s/%(cont)s/%(obj)s . Ratelimit " "(Max Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Changement d'anneau détecté. Abandon de la session de reconstruction en " "cours." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Changement d'anneau détecté. Abandon de la session de réplication en cours." #, python-format msgid "Running %s once" msgstr "Exécution unique de %s" msgid "Running object reconstructor in script mode." msgstr "Exécution du reconstructeur d'objet en mode script." msgid "Running object replicator in script mode." msgstr "Exécution du réplicateur d'objet en mode script." 
#, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Depuis %(time)s : %(sync)s synchronisé(s) [%(delete)s suppression(s), " "%(put)s insertion(s)], %(skip)s ignoré(s), %(fail)s échec(s)" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Depuis %(time)s : audits de compte : %(passed)s succès, %(failed)s échec(s)" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Depuis %(time)s : audits de conteneur : %(pass)s succès, %(fail)s échec(s)" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s est ignoré car il n'est pas monté" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s est ignoré car il n'est pas monté" #, python-format msgid "Starting %s" msgstr "Démarrage %s" msgid "Starting object reconstruction pass." msgstr "Démarrage de la session de reconstruction d'objet." msgid "Starting object reconstructor in daemon mode." msgstr "Démarrage du reconstructeur d'objet en mode démon." msgid "Starting object replication pass." msgstr "Démarrage de la session de réplication d'objet." msgid "Starting object replicator in daemon mode." msgstr "Démarrage du réplicateur d'objet en mode démon." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Succès de Rsync pour %(src)s dans %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Accès interdit au type de fichier" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Le total %(key)s du conteneur (%(total)s) ne correspond pas à la somme des " "clés %(key)s des différentes règles (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "" "Exception liée à un dépassement de délai concernant %(ip)s:%(port)s/" "%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Tentative d'exécution de %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Tentative de lecture de %(full_path)s" msgid "Trying to read during GET" msgstr "Tentative de lecture pendant une opération GET" msgid "Trying to read during GET (retrying)" msgstr "Tentative de lecture pendant une opération GET (nouvelle tentative)" msgid "Trying to send to client" msgstr "Tentative d'envoi au client" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Tentative de synchronisation de suffixes à l'aide de %s" #, python-format msgid "Trying to write to %s" msgstr "Tentative d'écriture sur %s" msgid "UNCAUGHT EXCEPTION" msgstr "EXCEPTION NON INTERCEPTEE" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" "Impossible de localiser %s dans libc. Laissé comme action nulle (no-op)." #, python-format msgid "Unable to locate config for %s" msgstr "Impossible de trouver la configuration pour %s" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "Impossible de localiser fallocate, posix_fallocate dans libc. Laissé comme " "action nulle (no-op)." 
#, python-format msgid "Unable to read config from %s" msgstr "Impossible de lire le fichier de configuration depuis %s" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Non autorisé : %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "Exception non prise en charge" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Une exception inconnue s'est produite pendant une opération GET: %(account)r " "%(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Echec du rapport de mise à jour pour %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Rapport de mise à jour envoyé pour %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "AVERTISSEMENT : SSL ne doit être activé qu'à des fins de test. Utilisez la " "terminaison SSL externe pour un déploiement en production." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite de descripteur de fichier. " "Exécution en tant que non root ?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite maximale de processus. " "Exécution en tant que non root ?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "AVERTISSEMENT : Impossible de modifier la limite de mémoire. Exécution en " "tant que non root ?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Avertissement : impossible d'appliquer Ratelimit sans client memcached" #, python-format msgid "method %s is not allowed." msgstr "Méthode %s interdite." msgid "no log file found" msgstr "Pas de fichier log trouvé" msgid "odfpy not installed." msgstr "odfpy n'est pas installé." #, python-format msgid "plotting results failed due to %s" msgstr "Echec du traçage des résultats. Cause : %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib non installé." swift-2.17.1/swift/locale/zh_CN/0000775000175000017500000000000013435012120016343 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/zh_CN/LC_MESSAGES/0000775000175000017500000000000013435012120020130 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/zh_CN/LC_MESSAGES/swift.po0000666000175000017500000006364413435012015021646 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Pearl Yajing Tan(Seagate Tech) , 2014 # Andreas Jaeger , 2016. 
#zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_CN\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (China)\n" msgid "" "\n" "user quit" msgstr "" "\n" "用户退出" #, python-format msgid " - %s" msgstr "- %s" #, python-format msgid " - parallel, %s" msgstr "ï¼å¹³è¡Œï¼Œ%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "%(checked)dåŽç¼€å·²è¢«æ£€æŸ¥ %(hashed).2f%% hashed, %(synced).2f%% synced" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) 分区被å¤åˆ¶ æŒç»­æ—¶é—´ä¸º \"\n" "\"%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)sæˆåŠŸï¼Œ%(failure)s失败" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s 返回 503 在 %(statuses)s" #, python-format msgid "%s already started..." msgstr "%så·²å¯åЍ..." #, python-format msgid "%s does not exist" msgstr "%sä¸å­˜åœ¨" #, python-format msgid "%s is not mounted" msgstr "%s未挂载" #, python-format msgid "%s responded as unmounted" msgstr "%s å“应为未安装" #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由åŒçº§é‡ç½®è¿žæŽ¥" #, python-format msgid ", %s containers deleted" msgstr ",删除容器%s" #, python-format msgid ", %s containers possibly remaining" msgstr ",å¯èƒ½å‰©ä½™å®¹å™¨%s" #, python-format msgid ", %s containers remaining" msgstr ",剩余容器%s" #, python-format msgid ", %s objects deleted" msgstr ",删除对象%s" #, python-format msgid ", %s objects possibly remaining" msgstr ",å¯èƒ½å‰©ä½™å¯¹è±¡%s" #, python-format msgid ", %s objects remaining" msgstr ",剩余对象%s" #, python-format msgid ", elapsed: %.02fs" msgstr ",耗时:%.02fs" msgid ", return codes: " msgstr ",返回代ç ï¼š" msgid "Account" msgstr "è´¦å·" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "è´¦å·å®¡è®¡\"once\"模å¼å®Œæˆï¼š %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "è´¦å·å®¡è®¡å®Œæˆï¼š%.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "%(time).5f seconds (%(rate).5f/s)å°è¯•å¤åˆ¶%(count)d dbs" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Bad rsync返还代ç ï¼š%(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "开始账å·å®¡è®¡\"once\"模å¼" msgid "Begin account audit pass." msgstr "开始账å·å®¡è®¡é€šè¿‡" msgid "Begin container audit \"once\" mode" msgstr "开始容器审计\"once\" 模å¼" msgid "Begin container audit pass." 
msgstr "开始通过容器审计" msgid "Begin container sync \"once\" mode" msgstr "å¼€å§‹å®¹å™¨åŒæ­¥\"once\"模å¼" msgid "Begin container update single threaded sweep" msgstr "开始容器更新å•线程扫除" msgid "Begin container update sweep" msgstr "开始容器更新扫除" msgid "Begin object update single threaded sweep" msgstr "开始对象更新å•线程扫除" msgid "Begin object update sweep" msgstr "开始对象更新扫除" #, python-format msgid "Beginning pass on account %s" msgstr "è´¦å·%s开始通过" msgid "Beginning replication run" msgstr "开始è¿è¡Œå¤åˆ¶" msgid "Broker error trying to rollback locked connection" msgstr "æœåŠ¡å™¨é”™è¯¯å¹¶å°è¯•去回滚已ç»é”ä½çš„链接" #, python-format msgid "Can not access the file %s." msgstr "无法访问文件%s" #, python-format msgid "Can not load profile data from %s." msgstr "无法从%sä¸‹è½½åˆ†æžæ•°æ®" #, python-format msgid "Client did not read from proxy within %ss" msgstr "客户尚未从代ç†å¤„读å–%ss" msgid "Client disconnected on read" msgstr "å®¢æˆ·è¯»å–æ—¶ä¸­æ–­" msgid "Client disconnected without sending enough data" msgstr "客户中断 尚未å‘é€è¶³å¤Ÿ" msgid "Client disconnected without sending last chunk" msgstr "客户机已断开连接而未å‘逿œ€åŽä¸€ä¸ªæ•°æ®å—" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "客户路径%(client)s与对象元数æ®ä¸­å­˜å‚¨çš„路径%(meta)sä¸ç¬¦" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "未定义é…置选项 internal_client_conf_path。正在使用缺çœé…置。请å‚阅 internal-" "client.conf-sample 以了解å„个选项" msgid "Connection refused" msgstr "连接被拒ç»" msgid "Connection timeout" msgstr "连接超时" msgid "Container" msgstr "容器" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "容器审计\"once\"模å¼å®Œæˆï¼š%.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "容器审计通过完æˆï¼š %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "å®¹å™¨åŒæ­¥\"once\"模å¼å®Œæˆï¼š%.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "容器更新å•线程扫除完æˆï¼š%(elapsed).02fs, %(success)s æˆåŠŸ, %(fail)s 失败, " "%(no_change)s 无更改" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "容器更新扫除完æˆï¼š%.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "通过路径%(path)s容器更新扫除完æˆï¼š%(elapsed).02fs, %(success)s æˆåŠŸ, " "%(fail)s 失败, %(no_change)s 无更改" #, python-format msgid "Data download error: %s" msgstr "æ•°æ®ä¸‹è½½é”™è¯¯ï¼š%s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "设备通过完æˆï¼š %.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "错误 %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "错误 %(status)d %(body)s æ¥è‡ª %(type)s æœåС噍" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "错误 %(status)d %(body)s æ¥è‡ª 对象æœåС噍 re: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "å‘生 %(status)d é”™è¯¯ï¼Œéœ€è¦ 100 - 从对象æœåŠ¡å™¨ç»§ç»­" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "出现错误 è´¦å·æ›´æ–°å¤±è´¥ï¼š %(ip)s:%(port)s/%(device)s (ç¨åŽå°è¯•): 回应 " "%(status)s %(reason)s" #, python-format msgid "ERROR Bad 
response %(status)s from %(host)s" msgstr "失败响应错误%(status)s来自%(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "错误 客户读取超时(%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "错误 容器更新失败(正在保存 稍后同步更新):%(status)d回应来自%(ip)s:%(port)s/" "%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "错误:无法获取账号信息%s" #, python-format msgid "ERROR Could not get container info %s" msgstr "错误:无法获取容器%s信息" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "磁盘文件错误%(data_file)s关闭失败: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "错误 异常导致客户端中断连接" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "错误:向对象服务器 %s 传输数据时发生异常" msgid "ERROR Failed to get my own IPs?" msgstr "错误 无法获得我方IPs?" msgid "ERROR Insufficient Storage" msgstr "错误 存储空间不足" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "错误 对象%(obj)s审计失败并被隔离:%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "错误 Pickle问题 隔离%s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "错误 远程驱动器无法挂载 %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "同步错误 %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "同步时发生错误%s" #, python-format msgid "ERROR Trying to audit %s" msgstr "错误 尝试开始审计%s" msgid "ERROR Unhandled exception in request" msgstr "错误 未处理的异常发出请求" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "%(method)s %(path)s出现错误__call__ error" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "错误 账号更新失败 %(ip)s:%(port)s/%(device)s (稍后尝试)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "错误 账号更新失败%(ip)s:%(port)s/%(device)s (稍后尝试):" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "执行同步等待文件 文件名不可知%s" msgid "ERROR auditing" msgstr "错误 审计" #, python-format msgid "ERROR auditing: %s" msgstr "审计错误:%s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "错误 容器更新失败%(ip)s:%(port)s/%(dev)s(正在保存 稍后同步更新)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "读取HTTP错误 响应来源%s" #, python-format msgid "ERROR reading db %s" msgstr "错误 读取db %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "错误 rsync失败 %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "错误 同步 %(file)s 和 节点%(node)s" msgid "ERROR trying to replicate" msgstr "尝试复制时发生错误" #, python-format msgid "ERROR while trying to clean up %s" msgstr "清理时出现错误%s" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s服务器发生错误 %(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "执行下载压缩时发生错误%s" #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "远程服务器发生错误 %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "%s未挂载" #, 
python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "出错,无法访问 %(path)s:%(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "错误:无法执行审计:%s" msgid "Error hashing suffix" msgstr "执行HashingåŽç¼€æ—¶å‘生错误" msgid "Error listing devices" msgstr "设备列表时出现错误" #, python-format msgid "Error on render profiling results: %s" msgstr "给予分æžç»“果时å‘生错误:%s" msgid "Error parsing recon cache file" msgstr "è§£æžrecon cache file时出现错误" msgid "Error reading recon cache file" msgstr "读å–recon cache file时出现错误" msgid "Error reading ringfile" msgstr "读å–ringfile时出现错误" msgid "Error reading swift.conf" msgstr "读å–swift.conf时出现错误" msgid "Error retrieving recon data" msgstr "检索recon data时出现错误" msgid "Error syncing handoff partition" msgstr "æ‰§è¡ŒåŒæ­¥åˆ‡æ¢åˆ†åŒºæ—¶å‘生错误" msgid "Error syncing partition" msgstr "æ‰§è¡ŒåŒæ­¥åˆ†åŒºæ—¶å‘生错误" #, python-format msgid "Error syncing with node: %s" msgstr "æ‰§è¡ŒåŒæ­¥æ—¶èŠ‚ç‚¹%så‘生错误" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "å°è¯•é‡å»º %(path)s 策略时出错:#%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "错误:一个错误å‘生了" msgid "Error: missing config path argument" msgstr "错误:设置路径信æ¯ä¸¢å¤±" #, python-format msgid "Error: unable to locate %s" msgstr "错误:无法查询到 %s" msgid "Exception dumping recon cache" msgstr "执行dump recon的时候出现异常" msgid "Exception in top-level account reaper loop" msgstr "异常出现在top-levelè´¦å·reaper环" msgid "Exception in top-level replication loop" msgstr "top-levelå¤åˆ¶åœˆå‡ºçް异叏" msgid "Exception in top-levelreconstruction loop" msgstr " top-levelreconstruction 环中å‘生异常" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s出现异常" #, python-format msgid "Exception with account %s" msgstr "è´¦å·%s出现异常" #, python-format msgid "Exception with containers for account %s" msgstr "è´¦å·%s内容器出现异常" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "è´¦å·%(account)s容器%(container)s的对象出现异常" #, python-format msgid "Expect: 100-continue on %s" msgstr "已知:100-continue on %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "è·ŸéšCNAME链从%(given_domain)s到%(found_domain)s" msgid "Found configs:" msgstr "找到é…ç½®" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "Handoffs 优先方å¼ä»æœ‰ handoffs。正在中止当å‰å¤åˆ¶è¿‡ç¨‹ã€‚" msgid "Host unreachable" msgstr "无法连接到主机" #, python-format msgid "Incomplete pass on account %s" msgstr "è´¦å·%s未完æˆé€šè¿‡" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "无效的X-Container-Sync-Toæ ¼å¼%r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To中无效主机%r" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "ä¸å¯ç”¨çš„等待输入%(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "从 %(full_path)s 返回了无效å“应 %(resp)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "无效的回应%(resp)sæ¥è‡ª%(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "在X-Container-Sync-To中%r是无效的方案,须为\"//\", \"http\", or \"https\"。" #, python-format msgid "Killing long-running rsync: %s" msgstr "终止long-runningåŒæ­¥: %s" msgid "Lockup detected.. killing live coros." 
msgstr "检测到lockup。终止正在执行的coros" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "集åˆ%(given_domain)s到%(found_domain)s" #, python-format msgid "No %s running" msgstr "æ— %sè´¦å·è¿è¡Œ" #, python-format msgid "No permission to signal PID %d" msgstr "æ— æƒé™å‘é€ä¿¡å·PID%d" #, python-format msgid "No policy with index %s" msgstr "没有具备索引 %s 的策略" #, python-format msgid "No realm key for %r" msgstr "%ræƒé™keyä¸å­˜åœ¨" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "节点错误æžé™ %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "没有足够的对象æœåŠ¡å™¨åº”ç­”ï¼ˆæ”¶åˆ° %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "未找到: %(sync_from)r => %(sync_to)r - object %(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "过去 %s ç§’æœªé‡æž„任何对象。" #, python-format msgid "Nothing replicated for %s seconds." msgstr "%sç§’æ— å¤åˆ¶" msgid "Object" msgstr "对象" msgid "Object PUT" msgstr "对象上传" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "对象 PUT 正在返回 202(对于 409):%(req_timestamp)s å°äºŽæˆ–等于 " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "对象PUT返还 412,%(statuses)r " #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "对象审计 (%(type)s) \\\"%(mode)s\\\"模å¼å®Œæˆ: %(elapsed).02fs 隔离总数: " "%(quars)d, 错误总数: %(errors)d, 文件ï¼ç§’总和:%(frate).2f, bytes/sec总和: " "%(brate).2f, 审计时间: %(audit).2f, 速率: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "对象审计 (%(type)s). 自 %(start_time)s 开始: 本地:%(passes)d 通" "过,%(quars)d 隔离,%(errors)d 错误,文件/秒:%(frate).2f,字节/秒:" "%(brate).2f,总时间:%(total).2f,审计时间:%(audit).2f,速率:" "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "对象审计统计:%s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "å¯¹è±¡é‡æž„完æˆï¼ˆä¸€æ¬¡ï¼‰ã€‚(%.02f 分钟)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "å¯¹è±¡é‡æž„完æˆã€‚(%.02f 分钟)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "对象å¤åˆ¶å®Œæˆ(一次)。(%.02f minutes)" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "对象å¤åˆ¶å®Œæˆã€‚(%.02f minutes)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "对象æœåŠ¡å™¨è¿”è¿˜%sä¸åŒ¹é…etags" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "对象更新扫除完æˆï¼š%.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "在X-Container-Sync-To中,å˜é‡ï¼ŒæŸ¥è¯¢å’Œç¢Žç‰‡ä¸è¢«å…许" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "分区时间: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "在X-Container-Sync-To中路径是必须的" #, python-format msgid "Problem cleaning up %s" msgstr "问题清除%s" #, python-format msgid "Profiling Error: %s" msgstr "分æžä»£ç æ—¶å‡ºçŽ°é”™è¯¯ï¼š%s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(hsh_path)så’Œ%(quar_path)s因为éžç›®å½•" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "隔离%(object_path)så’Œ%(quar_path)s因为éžç›®å½•" #, python-format msgid "Quarantining DB %s" msgstr "隔离DB%s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "æµé‡æŽ§åˆ¶ä¼‘眠日志:%(sleep)s for %(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "删除%(remove)d dbs" #, python-format msgid "Removing %s objects" msgstr "正在移除 %s 个对象" #, python-format msgid "Removing partition: %s" msgstr "移除分区:%s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "移除 pid 文件 %(pid_file)s 失败,pid %(pid)d 䏿­£ç¡®" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "正在移除带有无效 pid çš„ pid 文件 %s" #, python-format msgid "Removing stale pid file %s" msgstr "移除原有pid文件%s" msgid "Replication run OVER" msgstr "å¤åˆ¶è¿è¡Œç»“æŸ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "返回497因为黑åå•:%s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "返还498从%(meth)s到%(acc)s/%(cont)s/%(obj)s,æµé‡æŽ§åˆ¶(Max \"\n" "\"Sleep) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "检测到环更改。正在中止当å‰é‡æž„过程。" msgid "Ring change detected. Aborting current replication pass." msgstr "Ring改å˜è¢«æ£€æµ‹åˆ°ã€‚退出现有的å¤åˆ¶é€šè¿‡" #, python-format msgid "Running %s once" msgstr "è¿è¡Œ%s一次" msgid "Running object reconstructor in script mode." msgstr "正以脚本方å¼è¿è¡Œå¯¹è±¡é‡æž„程åºã€‚" msgid "Running object replicator in script mode." msgstr "在加密模å¼ä¸‹æ‰§è¡Œå¯¹è±¡å¤åˆ¶" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "自%(time)s起:%(sync)s完æˆåŒæ­¥ [%(delete)s 删除, %(put)s 上传], \"\n" "\"%(skip)s 跳过, %(fail)s 失败" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "自%(time)s开始:账å·å®¡è®¡ï¼š%(passed)s 通过审计,%(failed)s 失败" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "自%(time)s起:容器审计:%(pass)s通过审计, %(fail)s失败" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "因无法挂载跳过%(device)s" #, python-format msgid "Skipping %s as it is not mounted" msgstr "挂载失败 跳过%s" #, python-format msgid "Starting %s" msgstr "å¯åЍ%s" msgid "Starting object reconstruction pass." msgstr "正在å¯åŠ¨å¯¹è±¡é‡æž„过程。" msgid "Starting object reconstructor in daemon mode." 
msgstr "æ­£ä»¥å®ˆæŠ¤ç¨‹åºæ–¹å¼å¯åŠ¨å¯¹è±¡é‡æž„程åºã€‚" msgid "Starting object replication pass." msgstr "开始通过对象å¤åˆ¶" msgid "Starting object replicator in daemon mode." msgstr "在守护模å¼ä¸‹å¼€å§‹å¯¹è±¡å¤åˆ¶" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "æˆåŠŸçš„rsync %(src)s at %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "è¯¥æ–‡ä»¶ç±»åž‹è¢«ç¦æ­¢è®¿é—®ï¼" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "容器(%(total)s)内%(key)s总数ä¸ç¬¦åˆåè®®%(key)s总数(%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s å‘生超时异常" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "å°è¯•执行%(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "æ­£å°è¯•èŽ·å– %(full_path)s" msgid "Trying to read during GET" msgstr "执行GETæ—¶å°è¯•读å–" msgid "Trying to read during GET (retrying)" msgstr "执行GETæ—¶å°è¯•读å–(釿–°å°è¯•)" msgid "Trying to send to client" msgstr "å°è¯•å‘é€åˆ°å®¢æˆ·ç«¯" #, python-format msgid "Trying to sync suffixes with %s" msgstr "æ­£å°è¯•使åŽç¼€ä¸Ž %s åŒæ­¥" #, python-format msgid "Trying to write to %s" msgstr "å°è¯•执行书写%s" msgid "UNCAUGHT EXCEPTION" msgstr "未æ•获的异常" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "无法查询到%s ä¿ç•™ä¸ºno-op" #, python-format msgid "Unable to locate config for %s" msgstr "找ä¸åˆ° %s çš„é…ç½®" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "无法查询到fallocate, posix_fallocate。ä¿å­˜ä¸ºno-op" #, python-format msgid "Unable to read config from %s" msgstr "无法从%s读å–设置" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未授æƒ%(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "未处ç†çš„异常" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "å°è¯•èŽ·å– %(account)r %(container)r %(object)r æ—¶å‘生未知异常" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s更新报告失败" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "更新报告å‘至%(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "警告:SSLä»…å¯ä»¥åšæµ‹è¯•ä½¿ç”¨ã€‚äº§å“部署时请使用外连SSL终端" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:无法修改文件æè¿°é™åˆ¶ã€‚æ˜¯å¦æŒ‰éžrootè¿è¡Œï¼Ÿ" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:无法修改最大è¿è¡Œæžé™ï¼Œæ˜¯å¦æŒ‰éžrootè¿è¡Œï¼Ÿ" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:无法修改内存æžé™ï¼Œæ˜¯å¦æŒ‰éžrootè¿è¡Œï¼Ÿ" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:缺失缓存客户端 无法控制æµé‡ " #, python-format msgid "method %s is not allowed." msgstr "方法%sä¸è¢«å…许" msgid "no log file found" msgstr "日志文件丢失" msgid "odfpy not installed." msgstr "odfpy未安装" #, python-format msgid "plotting results failed due to %s" msgstr "绘制结果图标时失败因为%s" msgid "python-matplotlib not installed." 
msgstr "python-matplotlib未安装" swift-2.17.1/swift/locale/de/0000775000175000017500000000000013435012120015732 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/de/LC_MESSAGES/0000775000175000017500000000000013435012120017517 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/de/LC_MESSAGES/swift.po0000666000175000017500000010167013435012015021225 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2014 # Ettore Atalan , 2014-2015 # Jonas John , 2015 # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-06-02 07:02+0000\n" "Last-Translator: Andreas Jaeger \n" "Language: de\n" "Plural-Forms: nplurals=2; plural=(n != 1);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: German\n" msgid "" "\n" "user quit" msgstr "" "\n" "Durch Benutzer beendet" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - parallel, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "%(checked)d Suffixe überprüft - %(hashed).2f%% hashverschlüsselt, " "%(synced).2f%% synchronisiert" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "%(replicated)d/%(total)d (%(percentage).2f%%) Partitionen repliziert in " "%(time).2fs (%(rate).2f/s, %(remaining)s verbleibend)" #, python-format msgid "%(server)s (%(pid)s) appears to have stopped" msgstr "%(server)s (%(pid)s) scheinbar gestoppt" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s Erfolge, %(failure)s Fehlschläge" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s gab 503 für %(statuses)s zurück" #, python-format msgid "%(type)s: %(value)s" msgstr "%(type)s: %(value)s" #, python-format msgid "%s already started..." msgstr "%s bereits gestartet..." 
#, python-format msgid "%s does not exist" msgstr "%s existiert nicht" #, python-format msgid "%s is not mounted" msgstr "%s ist nicht eingehängt" #, python-format msgid "%s responded as unmounted" msgstr "%s zurückgemeldet als ausgehängt" #, python-format msgid "%s: Connection reset by peer" msgstr "%s: Verbindung zurückgesetzt durch Peer" #, python-format msgid ", %s containers deleted" msgstr ", %s Container gelöscht" #, python-format msgid ", %s containers possibly remaining" msgstr ", %s Container möglicherweise verbleibend" #, python-format msgid ", %s containers remaining" msgstr ", %s Container verbleibend" #, python-format msgid ", %s objects deleted" msgstr ", %s Objekte gelöscht" #, python-format msgid ", %s objects possibly remaining" msgstr ", %s Objekte möglicherweise verbleibend" #, python-format msgid ", %s objects remaining" msgstr ", %s Objekte verbleibend" #, python-format msgid ", elapsed: %.02fs" msgstr ", vergangen: %.02fs" msgid ", return codes: " msgstr ", Rückgabecodes: " msgid "Account" msgstr "Konto" #, python-format msgid "Account %(account)s has not been reaped since %(time)s" msgstr "Konto %(account)s wurde nicht aufgeräumt seit %(time)s" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Kontoprüfungsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "Kontoprüfungsdurchlauf abgeschlossen: %.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "" "Versuch, %(count)d Datenbanken in %(time).5f Sekunden zu replizieren " "(%(rate).5f/s)" #, python-format msgid "Audit Failed for %(path)s: %(err)s" msgstr "Prüfung fehlgeschlagen für %(path)s: %(err)s" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Falscher rsync-Rückgabecode: %(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "Kontoprüfungsmodus \"once\" wird gestartet" msgid "Begin account audit pass." msgstr "Kontoprüfungsdurchlauf wird gestartet." msgid "Begin container audit \"once\" mode" msgstr "Containerprüfungsmodus \"once\" wird gestartet" msgid "Begin container audit pass." msgstr "Containerprüfungsdurchlauf wird gestartet." msgid "Begin container sync \"once\" mode" msgstr "Containersynchronisationsmodus \"once\" wird gestartet" msgid "Begin container update single threaded sweep" msgstr "Einzelthread-Scanvorgang für Containeraktualisierung wird gestartet" msgid "Begin container update sweep" msgstr "Scanvorgang für Containeraktualisierung wird gestartet" msgid "Begin object update single threaded sweep" msgstr "Einzelthread-Scanvorgang für Objektaktualisierung wird gestartet" msgid "Begin object update sweep" msgstr "Scanvorgang für Objektaktualisierung wird gestartet" #, python-format msgid "Beginning pass on account %s" msgstr "Durchlauf für Konto %s wird gestartet" msgid "Beginning replication run" msgstr "Replizierungsdurchlauf wird gestartet" msgid "Broker error trying to rollback locked connection" msgstr "" "Brokerfehler beim Versuch, für eine gesperrte Verbindung ein Rollback " "durchzuführen" #, python-format msgid "Can not access the file %s." msgstr "Kann nicht auf die Datei %s zugreifen." #, python-format msgid "Can not load profile data from %s." msgstr "Die Profildaten von %s können nicht geladen werden." #, python-format msgid "Cannot read %(auditor_status)s (%(err)s)" msgstr "%(auditor_status)s (%(err)s) kann nicht gelesen werden." 
#, python-format msgid "Cannot write %(auditor_status)s (%(err)s)" msgstr "Schreiben von %(auditor_status)s (%(err)s) nicht möglich." #, python-format msgid "Client did not read from proxy within %ss" msgstr "Client konnte nicht innerhalb von %ss vom Proxy lesen" msgid "Client disconnected on read" msgstr "Client beim Lesen getrennt" msgid "Client disconnected without sending enough data" msgstr "Client getrennt ohne dem Senden von genügend Daten" msgid "Client disconnected without sending last chunk" msgstr "" "Die Verbindung zum Client wurde getrennt, bevor der letzte Chunk gesendet " "wurde. " #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "" "Clientpfad %(client)s entspricht nicht dem in den Objektmetadaten " "gespeicherten Pfad %(meta)s" msgid "" "Configuration option internal_client_conf_path not defined. Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "Konfigurationsoption internal_client_conf_path nicht definiert. " "Standardkonfiguration wird verwendet. Informationen zu den Optionen finden " "Sie in internal-client.conf-sample." msgid "Connection refused" msgstr "Verbindung abgelehnt" msgid "Connection timeout" msgstr "Verbindungszeitüberschreitung" msgid "Container" msgstr "Container" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Containerprüfungsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "Containerprüfungsdurchlauf abgeschlossen: %.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Containersynchronisationsmodus \"once\" abgeschlossen: %.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Einzelthread-Scanvorgang für Containeraktualisierung abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne " "Änderungen" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "Scanvorgang für Containeraktualisierung abgeschlossen: %.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "Scanvorgang für Containeraktualisierung von %(path)s abgeschlossen: " "%(elapsed).02fs, %(success)s Erfolge, %(fail)s Fehler, %(no_change)s ohne " "Änderungen" #, python-format msgid "" "Could not bind to %(addr)s:%(port)s after trying for %(timeout)s seconds" msgstr "" "Keine Bindung an %(addr)s:%(port)s möglich nach Versuch über %(timeout)s " "Sekunden" #, python-format msgid "Could not load %(conf)r: %(error)s" msgstr "%(conf)r konnte nicht geladen werden: %(error)s" #, python-format msgid "Data download error: %s" msgstr "Fehler beim Downloaden von Daten: %s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "Gerätedurchgang abgeschlossen: %.02fs" #, python-format msgid "Directory %(directory)r does not map to a valid policy (%(error)s)" msgstr "" "Das Verzeichnis %(directory)r kann keiner gültigen Richtlinie (%(error)s) " "zugeordnet werden." 
#, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "FEHLER %(db_file)s: %(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "FEHLER %(status)d %(body)s von %(type)s Server" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "FEHLER %(status)d %(body)s Vom Objektserver bezüglich: %(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "FEHLER %(status)d Erwartet: 100-continue von Objektserver" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird zu einem späteren Zeitpunkt erneut versucht): Antwort %(status)s " "%(reason)s" #, python-format msgid "" "ERROR Account update failed: different numbers of hosts and devices in " "request: \"%(hosts)s\" vs \"%(devices)s\"" msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen: Unterschiedliche Anzahl von Hosts " "und Einheiten in der Anforderung: \"%(hosts)s\" contra \"%(devices)s\"" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "FEHLER Falsche Rückmeldung %(status)s von %(host)s" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "FEHLER Client-Lesezeitüberschreitung (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen (wird für asynchrone " "Aktualisierung zu einem späteren Zeitpunkt gespeichert): %(status)d Antwort " "von %(ip)s:%(port)s/%(dev)s" #, python-format msgid "ERROR Could not get account info %s" msgstr "FEHLER Kontoinfo %s konnte nicht abgerufen werden" #, python-format msgid "ERROR Could not get container info %s" msgstr "FEHLER Containerinformation %s konnte nicht geholt werden" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "" "FEHLER Fehler beim Schließen von DiskFile %(data_file)s: %(exc)s : %(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "" "FEHLER Ausnahme, die zu einer Unterbrechung der Verbindung zum Client führt" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "FEHLER: Ausnahme bei der Übertragung von Daten an die Ojektserver %s" msgid "ERROR Failed to get my own IPs?" msgstr "FEHLER Eigene IPs konnten nicht abgerufen werden?" 
msgid "ERROR Insufficient Storage" msgstr "FEHLER Nicht genügend Speicher" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "" "FEHLER Objekt %(obj)s hat die Prüfung nicht bestanden und wurde unter " "Quarantäne gestellt: %(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "FEHLER Pickle-Problem, %s wird unter Quarantäne gestellt" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "FEHLER Entferntes Laufwerk nicht eingehängt %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "FEHLER beim Synchronisieren %(db_file)s %(row)s" #, python-format msgid "ERROR Syncing %s" msgstr "FEHLER beim Synchronisieren %s" #, python-format msgid "ERROR Trying to audit %s" msgstr "FEHLER beim Versuch, %s zu prüfen" msgid "ERROR Unhandled exception in request" msgstr "FEHLER Nicht behandelte Ausnahme in Anforderung" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "FEHLER __call__-Fehler mit %(method)s %(path)s " #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird zu einem späteren Zeitpunkt erneut versucht)" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "" "FEHLER Kontoaktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(device)s " "(wird später erneut versucht): " #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "FEHLER asynchrone anstehende Datei mit unerwartetem Namen %s" msgid "ERROR auditing" msgstr "FEHLER bei der Prüfung" #, python-format msgid "ERROR auditing: %s" msgstr "FEHLER bei der Prüfung: %s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "FEHLER Containeraktualisierung fehlgeschlagen mit %(ip)s:%(port)s/%(dev)s " "(wird für asynchrone Aktualisierung zu einem späteren Zeitpunkt gespeichert)" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "FEHLER beim Lesen der HTTP-Antwort von %s" #, python-format msgid "ERROR reading db %s" msgstr "FEHLER beim Lesen der Datenbank %s" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "FEHLER rsync fehlgeschlagen mit %(code)s: %(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "" "FEHLER beim Synchronisieren von %(file)s Dateien mit dem Knoten %(node)s" msgid "ERROR trying to replicate" msgstr "FEHLER beim Versuch zu replizieren" #, python-format msgid "ERROR while trying to clean up %s" msgstr "FEHLER beim Versuch, %s zu bereinigen" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "FEHLER mit %(type)s Server %(ip)s:%(port)s/%(device)s AW: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "FEHLER beim Laden von Unterdrückungen von %s: " #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "FEHLER mit entferntem Server %(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "" "FEHLER: Pfade zu Laufwerkpartitionen konnten nicht abgerufen werden: %s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "FEHLER: Auf %(path)s kann nicht zugegriffen werden: %(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" 
msgstr "FEHLER: Prüfung konnte nicht durchgeführt werden: %s" msgid "Error hashing suffix" msgstr "Fehler beim Hashing des Suffix" #, python-format msgid "Error in %(conf)r with mtime_check_interval: %(error)s" msgstr "Fehler in %(conf)r mit mtime_check_interval: %(error)s" msgid "Error listing devices" msgstr "Fehler beim Auflisten der Geräte" #, python-format msgid "Error on render profiling results: %s" msgstr "Fehler beim Wiedergeben der Profilerstellungsergebnisse: %s" msgid "Error parsing recon cache file" msgstr "Fehler beim Analysieren von recon-Zwischenspeicherdatei" msgid "Error reading recon cache file" msgstr "Fehler beim Lesen von recon-Zwischenspeicherdatei" msgid "Error reading ringfile" msgstr "Fehler beim Lesen der Ringdatei" msgid "Error reading swift.conf" msgstr "Fehler beim Lesen der swift.conf" msgid "Error retrieving recon data" msgstr "Fehler beim Abrufen der recon-Daten" msgid "Error syncing handoff partition" msgstr "Fehler bei der Synchronisierung der Übergabepartition" msgid "Error syncing partition" msgstr "Fehler beim Syncen der Partition" #, python-format msgid "Error syncing with node: %s" msgstr "Fehler beim Synchronisieren mit Knoten: %s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "" "Fehler bei Versuch, erneuten Build zu erstellen für %(path)s policy#" "%(policy)d frag#%(frag_index)s" msgid "Error: An error occurred" msgstr "Fehler: Ein Fehler ist aufgetreten" msgid "Error: missing config path argument" msgstr "Fehler: fehlendes Konfigurationspfadargument" #, python-format msgid "Error: unable to locate %s" msgstr "Fehler: %s kann nicht lokalisiert werden" msgid "Exception dumping recon cache" msgstr "Ausnahme beim Löschen von recon-Cache" msgid "Exception in top-level account reaper loop" msgstr "Ausnahme in Reaper-Loop für Konto der höchsten Ebene" msgid "Exception in top-level replication loop" msgstr "Ausnahme in Replizierungsloop der höchsten Ebene" msgid "Exception in top-levelreconstruction loop" msgstr "Ausnahme in Rekonstruktionsloop der höchsten Ebene" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Ausnahme bei %(ip)s:%(port)s/%(device)s" #, python-format msgid "Exception with account %s" msgstr "Ausnahme mit Account %s" #, python-format msgid "Exception with containers for account %s" msgstr "Ausnahme bei Containern für Konto %s" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "" "Ausnahme bei Objekten für Container %(container)s für Konto %(account)s" #, python-format msgid "Expect: 100-continue on %s" msgstr "Erwartet: 100-continue auf %s" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "CNAME-Kette für %(given_domain)s bis %(found_domain)s wird gefolgt" msgid "Found configs:" msgstr "Gefundene Konfigurationen:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "" "Der Modus 'handoffs_first' ist noch nicht abgeschlossen. Der aktuelle " "Replikationsdurchgang wird abgebrochen." 
msgid "Host unreachable" msgstr "Host nicht erreichbar" #, python-format msgid "Incomplete pass on account %s" msgstr "Unvollständiger Durchgang auf Konto %s" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Ungültiges X-Container-Sync-To-Format %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Ungültiger Host %r in X-Container-Sync-To" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Ungültiger ausstehender Eintrag %(file)s: %(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Ungültige Rückmeldung %(resp)s von %(full_path)s" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Ungültige Rückmeldung %(resp)s von %(ip)s" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "Ungültiges Schema %r in X-Container-Sync-To, muss \"//\", \"http\" oder " "\"https\" sein." #, python-format msgid "Killing long-running rsync: %s" msgstr "Lange laufendes rsync wird gekillt: %s" #, python-format msgid "Loading JSON from %(auditor_status)s failed (%(err)s)" msgstr "Laden von JSON aus %(auditor_status)s fehlgeschlagen: (%(err)s)" msgid "Lockup detected.. killing live coros." msgstr "Suche erkannt. Live-Coros werden gelöscht." #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "%(given_domain)s zugeordnet zu %(found_domain)s" #, python-format msgid "No %s running" msgstr "Kein %s läuft" #, python-format msgid "No cluster endpoint for %(realm)r %(cluster)r" msgstr "Kein Cluster-Endpunkt für %(realm)r %(cluster)r" #, python-format msgid "No permission to signal PID %d" msgstr "Keine Berechtigung zu Signal-Programmkennung %d" #, python-format msgid "No policy with index %s" msgstr "Keine Richtlinie mit Index %s" #, python-format msgid "No realm key for %r" msgstr "Kein Bereichsschlüssel für %r" #, python-format msgid "No space left on device for %(file)s (%(err)s)" msgstr "Kein freier Speicherplatz im Gerät für %(file)s (%(err)s) vorhanden." #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Knotenfehler begrenzt %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Es wurden nicht genügend Objektserver bestätigt (got %d)." #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "Nicht gefunden %(sync_from)r => %(sync_to)r - Objekt " "%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Für %s Sekunden nichts rekonstruiert." #, python-format msgid "Nothing replicated for %s seconds." msgstr "Für %s Sekunden nichts repliziert." msgid "Object" msgstr "Objekt" msgid "Object PUT" msgstr "Objekt PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "" "PUT-Operation für ein Objekt gibt 202 für 409 zurück: %(req_timestamp)s <= " "%(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Objekt PUT Rückgabe 412, %(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Objektprüfung (%(type)s) \"%(mode)s\" Modus abgeschlossen: %(elapsed).02fs. 
" "Unter Quarantäne gestellt insgesamt: %(quars)d, Fehler insgesamt: " "%(errors)d, Dateien/s insgesamt: %(frate).2f, Bytes/s insgesamt: " "%(brate).2f, Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "Objektprüfung (%(type)s). Seit %(start_time)s: Lokal: %(passes)d übergeben, " "%(quars)d unter Quarantäne gestellt, %(errors)d Fehler, Dateien/s: " "%(frate).2f, Bytes/s: %(brate).2f, Zeit insgesamt: %(total).2f, " "Prüfungszeit: %(audit).2f, Geschwindigkeit: %(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "Objektprüfungsstatistik: %s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Objektrekonstruktion vollständig (einmal). (%.02f Minuten)" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Objektrekonstruktion vollständig. (%.02f Minuten)" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Objektreplizierung abgeschlossen (einmal). (%.02f Minuten)" #, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Objektreplikation vollständig. (%.02f Minuten)" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "Objektserver haben %s nicht übereinstimmende Etags zurückgegeben" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "Scanvorgang für Objektaktualisierung abgeschlossen: %.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "" "Parameter, Abfragen und Fragmente nicht zulässig in X-Container-Sync-To" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "" "Partitionszeiten: max. %(max).4fs, min. %(min).4fs, durchschnittl. 
%(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "Pfad in X-Container-Sync-To ist erforderlich" #, python-format msgid "Problem cleaning up %(datadir)s (%(err)s)" msgstr "Problem bei der Bereinigung von %(datadir)s (%(err)s)" #, python-format msgid "Problem cleaning up %s" msgstr "Problem bei der Bereinigung von %s" #, python-format msgid "Profiling Error: %s" msgstr "Fehler bei der Profilerstellung: %s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(hsh_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es sich " "nicht um ein Verzeichnis handelt" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "" "%(object_path)s bis %(quar_path)s wurden unter Quarantäne gestellt, da es " "sich nicht um ein Verzeichnis handelt" #, python-format msgid "Quarantining DB %s" msgstr "Datenbank %s wird unter Quarantäne gestellt" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "" "Inaktivitätsprotokoll für Geschwindigkeitsbegrenzung: %(sleep)s für " "%(account)s/%(container)s/%(object)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "%(remove)d Datenbanken entfernt" #, python-format msgid "Removing %s objects" msgstr "%s Objekte werden entfernt" #, python-format msgid "Removing partition: %s" msgstr "Partition wird entfernt: %s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "PID-Datei %(pid_file)s mit falscher PID %(pid)d wird entfernt" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "PID-Datei %s mit ungültiger PID wird entfernt." #, python-format msgid "Removing stale pid file %s" msgstr "Veraltete PID-Datei %s wird entfernt" msgid "Replication run OVER" msgstr "Replizierungsdurchlauf ABGESCHLOSSEN" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "497 wird aufgrund von Blacklisting zurückgegeben: %s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "498 wird für %(meth)s auf %(acc)s/%(cont)s/%(obj)s zurückgegeben. " "Geschwindigkeitsbegrenzung (Max. Inaktivität) %(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "" "Ringänderung erkannt. Aktueller Rekonstruktionsdurchgang wird abgebrochen." msgid "Ring change detected. Aborting current replication pass." msgstr "" "Ringänderung erkannt. Aktueller Replizierungsdurchlauf wird abgebrochen." #, python-format msgid "Running %s once" msgstr "%s läuft einmal" msgid "Running object reconstructor in script mode." msgstr "Objektrekonstruktor läuft im Skriptmodus." msgid "Running object replicator in script mode." msgstr "Objektreplikator läuft im Skriptmodus." 
#, python-format msgid "Signal %(server)s pid: %(pid)s signal: %(signal)s" msgstr "Signal %(server)s PID: %(pid)s Signal: %(signal)s" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "Seit %(time)s: %(sync)s synchronisiert [%(delete)s Löschungen, %(put)s " "Puts], %(skip)s übersprungen, %(fail)s fehlgeschlagen" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "Seit %(time)s: Kontoprüfungen: %(passed)s bestandene Prüfung,%(failed)s " "nicht bestandene Prüfung" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "Seit %(time)s: Containerprüfungen: %(pass)s bestandene Prüfung, %(fail)s " "nicht bestandene Prüfung" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s wird übersprungen, da nicht angehängt" #, python-format msgid "Skipping %s as it is not mounted" msgstr "%s wird übersprungen, weil es nicht eingehängt ist" #, python-format msgid "Starting %s" msgstr "%s wird gestartet" msgid "Starting object reconstruction pass." msgstr "Objektrekonstruktionsdurchgang wird gestartet." msgid "Starting object reconstructor in daemon mode." msgstr "Objektrekonstruktor wird im Daemon-Modus gestartet." msgid "Starting object replication pass." msgstr "Objektreplikationsdurchgang wird gestartet." msgid "Starting object replicator in daemon mode." msgstr "Objektreplikator wird im Dämonmodus gestartet." #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Erfolgreiches rsync von %(src)s um %(dst)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "Auf den Dateityp darf nicht zugegriffen werden!" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "Die Gesamtsumme an %(key)s für den Container (%(total)s) entspricht nicht " "der Summe der %(key)s für alle Richtlinien (%(sum)s)" #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Zeitüberschreitungsausnahme bei %(ip)s:%(port)s/%(device)s" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "Versuch, %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "Versuch, %(full_path)s mit GET abzurufen" msgid "Trying to read during GET" msgstr "Versuch, während des GET-Vorgangs zu lesen" msgid "Trying to read during GET (retrying)" msgstr "Versuch, während des GET-Vorgangs zu lesen (Wiederholung)" msgid "Trying to send to client" msgstr "Versuch, an den Client zu senden" #, python-format msgid "Trying to sync suffixes with %s" msgstr "Es wird versucht, Suffixe mit %s zu synchronisieren." #, python-format msgid "Trying to write to %s" msgstr "Versuch, an %s zu schreiben" msgid "UNCAUGHT EXCEPTION" msgstr "NICHT ABGEFANGENE AUSNAHME" #, python-format msgid "Unable to find %(section)s config section in %(conf)s" msgstr "" "%(section)s-Konfigurationsabschnitt in %(conf)s kann nicht gefunden werden" #, python-format msgid "Unable to load internal client from config: %(conf)r (%(error)s)" msgstr "" "Interner Client konnte nicht aus der Konfiguration geladen werden: %(conf)r " "(%(error)s)" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "" "%s konnte nicht in libc gefunden werden. Wird als Nullbefehl verlassen." 
#, python-format msgid "Unable to locate config for %s" msgstr "Konfiguration für %s wurde nicht gefunden." #, python-format msgid "Unable to locate config number %(number)s for %(server)s" msgstr "Konfigurationsnummer %(number)s für %(server)s wurde nicht gefunden." msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "" "fallocate, posix_fallocate konnte nicht in libc gefunden werden. Wird als " "Nullbefehl verlassen." #, python-format msgid "Unable to perform fsync() on directory %(dir)s: %(err)s" msgstr "fsync() kann für Verzeichnis %(dir)s nicht ausgeführt werden: %(err)s" #, python-format msgid "Unable to read config from %s" msgstr "Konfiguration aus %s kann nicht gelesen werden" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Nicht genehmigte %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "Nicht behandelte Exception" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "Unbekannte Ausnahme bei GET-Versuch: %(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Aktualisierungsbericht fehlgeschlagen für %(container)s %(dbfile)s" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Aktualisierungsbericht gesendet für %(container)s %(dbfile)s" msgid "" "WARNING: SSL should only be enabled for testing purposes. Use external SSL " "termination for a production deployment." msgstr "" "WARNUNG: SSL sollte nur zu Testzwecken aktiviert werden. Verwenden Sie die " "externe SSL-Beendigung für eine Implementierung in der Produktionsumgebung." msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für Dateideskriptoren kann nicht geändert werden. Wird " "nicht als Root ausgeführt?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für maximale Verarbeitung kann nicht geändert werden. " "Wird nicht als Root ausgeführt?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "" "WARNUNG: Grenzwert für Speicher kann nicht geändert werden. Wird nicht als " "Root ausgeführt?" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; giving up" msgstr "" "Hat %(kill_wait)s Sekunden für %(server)s zum Erlöschen gewartet; Gibt auf" #, python-format msgid "Waited %(kill_wait)s seconds for %(server)s to die; killing" msgstr "" "Hat %(kill_wait)s Sekunden für %(server)s zum Erlöschen gewartet. Wird " "abgebrochen." msgid "Warning: Cannot ratelimit without a memcached client" msgstr "" "Warnung: Geschwindigkeitsbegrenzung kann nicht ohne memcached-Client " "durchgeführt werden" #, python-format msgid "method %s is not allowed." msgstr "Methode %s ist nicht erlaubt." msgid "no log file found" msgstr "keine Protokolldatei gefunden" msgid "odfpy not installed." msgstr "odfpy ist nicht installiert." #, python-format msgid "plotting results failed due to %s" msgstr "" "Die grafische Darstellung der Ergebnisse ist fehlgeschlagen aufgrund von %s" msgid "python-matplotlib not installed." msgstr "python-matplotlib ist nicht installiert." 
swift-2.17.1/swift/locale/zh_TW/0000775000175000017500000000000013435012120016375 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/zh_TW/LC_MESSAGES/0000775000175000017500000000000013435012120020162 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/zh_TW/LC_MESSAGES/swift.po0000666000175000017500000006640713435012015021700 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-03-08 01:25+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: zh_TW\n" "Plural-Forms: nplurals=1; plural=0;\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 4.3.3\n" "Language-Team: Chinese (Taiwan)\n" msgid "" "\n" "user quit" msgstr "" "\n" "使用者退出" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - 平行,%s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "已檢查 %(checked)d 個字尾 - %(hashed).2f%% 個已雜湊,%(synced).2f%% å€‹å·²åŒæ­¥" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "已抄寫 %(replicated)d/%(total)d (%(percentage).2f%%) 個分割å€ï¼ˆåœ¨ " "%(time).2fs 內,%(rate).2f/秒,剩餘 %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s 個æˆåŠŸï¼Œ%(failure)s 個失敗" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s é‡å° %(statuses)s 正在傳回 503" #, python-format msgid "%s already started..." msgstr "%s 已啟動..." 
#, python-format msgid "%s does not exist" msgstr "%s ä¸å­˜åœ¨" #, python-format msgid "%s is not mounted" msgstr "未è£è¼‰ %s" #, python-format msgid "%s responded as unmounted" msgstr "%s 已回應為未è£è¼‰" #, python-format msgid "%s: Connection reset by peer" msgstr "%s:已由å°ç­‰é …ç›®é‡è¨­é€£ç·š" #, python-format msgid ", %s containers deleted" msgstr ",已刪除 %s 個儲存器" #, python-format msgid ", %s containers possibly remaining" msgstr ",å¯èƒ½å‰©é¤˜ %s 個儲存器" #, python-format msgid ", %s containers remaining" msgstr ",剩餘 %s 個儲存器" #, python-format msgid ", %s objects deleted" msgstr ",已刪除 %s 個物件" #, python-format msgid ", %s objects possibly remaining" msgstr ",å¯èƒ½å‰©é¤˜ %s 個物件" #, python-format msgid ", %s objects remaining" msgstr ",剩餘 %s 個物件" #, python-format msgid ", elapsed: %.02fs" msgstr ",經歷時間:%.02fs" msgid ", return codes: " msgstr ",回覆碼:" msgid "Account" msgstr "帳戶" #, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "å¸³æˆ¶å¯©æ ¸ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼å·²å®Œæˆï¼š%.02fs" #, python-format msgid "Account audit pass completed: %.02fs" msgstr "帳戶審核通éŽå·²å®Œæˆï¼š%.02fs" #, python-format msgid "" "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "已嘗試在 %(time).5f 秒內抄寫 %(count)d 個資料庫 (%(rate).5f/s)" #, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "ä¸ç•¶çš„é ç«¯åŒæ­¥å›žè¦†ç¢¼ï¼š%(ret)d <- %(args)s" msgid "Begin account audit \"once\" mode" msgstr "é–‹å§‹å¸³æˆ¶å¯©æ ¸ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼" msgid "Begin account audit pass." msgstr "開始帳戶審核通éŽã€‚" msgid "Begin container audit \"once\" mode" msgstr "é–‹å§‹å„²å­˜å™¨å¯©æ ¸ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼" msgid "Begin container audit pass." msgstr "開始儲存器審核通éŽã€‚" msgid "Begin container sync \"once\" mode" msgstr "é–‹å§‹å„²å­˜å™¨åŒæ­¥ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼" msgid "Begin container update single threaded sweep" msgstr "開始儲存器更新單一執行緒清ç†" msgid "Begin container update sweep" msgstr "開始儲存器更新清ç†" msgid "Begin object update single threaded sweep" msgstr "開始物件更新單一執行緒清ç†" msgid "Begin object update sweep" msgstr "開始物件更新清ç†" #, python-format msgid "Beginning pass on account %s" msgstr "正在開始帳戶 %s 上的通éŽ" msgid "Beginning replication run" msgstr "正在開始抄寫執行" msgid "Broker error trying to rollback locked connection" msgstr "嘗試回復已鎖定的連線時發生分é…管ç†ç³»çµ±éŒ¯èª¤" #, python-format msgid "Can not access the file %s." msgstr "ç„¡æ³•å­˜å–æª”案 %s。" #, python-format msgid "Can not load profile data from %s." msgstr "無法從 %s 中載入設定檔資料。" #, python-format msgid "Client did not read from proxy within %ss" msgstr "用戶端未在 %s 秒內從 Proxy 中讀å–" msgid "Client disconnected on read" msgstr "ç”¨æˆ¶ç«¯åœ¨è®€å–æ™‚中斷連線" msgid "Client disconnected without sending enough data" msgstr "用戶端已中斷連線,未傳é€è¶³å¤ çš„資料" msgid "Client disconnected without sending last chunk" msgstr "ç”¨æˆ¶ç«¯å·²ä¸­æ–·é€£ç·šï¼Œæœªå‚³é€æœ€å¾Œä¸€å€‹ç‰‡æ®µ" #, python-format msgid "" "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "用戶端路徑 %(client)s ä¸ç¬¦åˆç‰©ä»¶ meta 資料%(meta)s 中儲存的路徑" msgid "" "Configuration option internal_client_conf_path not defined. 
Using default " "configuration, See internal-client.conf-sample for options" msgstr "" "為定義é…ç½®é¸é … internal_client_conf_path。將使用é è¨­é…置,請åƒé–± internal-" "client.conf-sample 以å–å¾—é¸é …" msgid "Connection refused" msgstr "é€£ç·šé­æ‹’" msgid "Connection timeout" msgstr "連線逾時" msgid "Container" msgstr "容器" #, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "å„²å­˜å™¨å¯©æ ¸ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼å·²å®Œæˆï¼š%.02fs" #, python-format msgid "Container audit pass completed: %.02fs" msgstr "儲存器審核通éŽå·²å®Œæˆï¼š%.02fs" #, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "å„²å­˜å™¨åŒæ­¥ã€Œä¸€æ¬¡æ€§ã€æ¨¡å¼å·²å®Œæˆï¼š%.02fs" #, python-format msgid "" "Container update single threaded sweep completed: %(elapsed).02fs, " "%(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "儲存器更新單一執行緒清ç†å·²å®Œæˆï¼š%(elapsed).02fs,%(success)s 個æˆ" "功,%(fail)s 個失敗,%(no_change)s 個無變更" #, python-format msgid "Container update sweep completed: %.02fs" msgstr "儲存器更新清ç†å·²å®Œæˆï¼š%.02fs" #, python-format msgid "" "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s " "successes, %(fail)s failures, %(no_change)s with no changes" msgstr "" "%(path)s 的儲存器更新清ç†å·²å®Œæˆï¼š%(elapsed).02fs,%(success)s 個æˆ" "功,%(fail)s 個失敗,%(no_change)s 個無變更" #, python-format msgid "Data download error: %s" msgstr "資料下載錯誤:%s" #, python-format msgid "Devices pass completed: %.02fs" msgstr "è£ç½®é€šéŽå·²å®Œæˆï¼š%.02fs" #, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "錯誤:%(db_file)s:%(validate_sync_to_err)s" #, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "錯誤:%(status)d %(body)s 來自 %(type)s 伺æœå™¨" #, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "錯誤:%(status)d %(body)s 來自物件伺æœå™¨ re:%(path)s" #, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "錯誤:%(status)d é æœŸï¼š100 繼續自物件伺æœå™¨" #, python-format msgid "" "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): Response %(status)s %(reason)s" msgstr "" "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將ç¨å¾Œé‡è©¦ï¼‰ï¼šå›žæ‡‰ " "%(status)s %(reason)s" #, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "錯誤:來自 %(host)s 的回應 %(status)s ä¸ç•¶" #, python-format msgid "ERROR Client read timeout (%ss)" msgstr "錯誤:用戶端讀å–逾時 (%ss)" #, python-format msgid "" "ERROR Container update failed (saving for async update later): %(status)d " "response from %(ip)s:%(port)s/%(dev)s" msgstr "" "錯誤:儲存器更新失敗(儲存以ç¨å¾Œé€²è¡ŒéžåŒæ­¥æ›´æ–°ï¼‰ï¼š%(status)d回應(來自 " "%(ip)s:%(port)s/%(dev)s)" #, python-format msgid "ERROR Could not get account info %s" msgstr "錯誤:無法å–得帳戶資訊 %s" #, python-format msgid "ERROR Could not get container info %s" msgstr "錯誤:無法å–得儲存器資訊 %s" #, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "錯誤:ç£ç¢Ÿæª” %(data_file)s 關閉失敗:%(exc)s:%(stack)s" msgid "ERROR Exception causing client disconnect" msgstr "錯誤:異常狀æ³é€ æˆç”¨æˆ¶ç«¯ä¸­æ–·é€£ç·š" #, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "錯誤:將資料轉é€è‡³ç‰©ä»¶ä¼ºæœå™¨ %s 時發生異常狀æ³" msgid "ERROR Failed to get my own IPs?" msgstr "錯誤:無法å–得我自己的 IP?" 
msgid "ERROR Insufficient Storage" msgstr "錯誤:儲存體ä¸è¶³" #, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "錯誤:物件 %(obj)s 審核失敗,已隔離:%(err)s" #, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "錯誤:挑é¸å•題,正在隔離 %s" #, python-format msgid "ERROR Remote drive not mounted %s" msgstr "錯誤:未è£è¼‰é ç«¯ç£ç¢Ÿæ©Ÿ %s" #, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "åŒæ­¥ %(db_file)s %(row)s 時發生錯誤" #, python-format msgid "ERROR Syncing %s" msgstr "åŒæ­¥ %s 時發生錯誤" #, python-format msgid "ERROR Trying to audit %s" msgstr "嘗試審核 %s 時發生錯誤" msgid "ERROR Unhandled exception in request" msgstr "éŒ¯èª¤ï¼šè¦æ±‚中有無法處ç†çš„異常狀æ³" #, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "錯誤:%(method)s %(path)s 發生呼å«éŒ¯èª¤" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later)" msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將ç¨å¾Œé‡è©¦ï¼‰" #, python-format msgid "" "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry " "later): " msgstr "錯誤:%(ip)s:%(port)s/%(device)s 的帳戶更新失敗(將ç¨å¾Œé‡è©¦ï¼‰ï¼š" #, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "錯誤:具有éžé æœŸå稱 %s çš„éžåŒæ­¥æ“±ç½®æª”案" msgid "ERROR auditing" msgstr "審核時發生錯誤" #, python-format msgid "ERROR auditing: %s" msgstr "審核時發生錯誤:%s" #, python-format msgid "" "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async " "update later)" msgstr "" "錯誤:%(ip)s:%(port)s/%(dev)s 的儲存器更新失敗(儲存以ç¨å¾Œé€²è¡ŒéžåŒæ­¥æ›´æ–°ï¼‰" #, python-format msgid "ERROR reading HTTP response from %s" msgstr "從 %s è®€å– HTTP 回應時發生錯誤" #, python-format msgid "ERROR reading db %s" msgstr "讀å–資料庫 %s 時發生錯誤" #, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "錯誤:é ç«¯åŒæ­¥å¤±æ•—,%(code)s:%(args)s" #, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "åŒæ­¥ %(file)s 與節點 %(node)s 時發生錯誤" msgid "ERROR trying to replicate" msgstr "嘗試抄寫時發生錯誤" #, python-format msgid "ERROR while trying to clean up %s" msgstr "嘗試清除 %s 時發生錯誤" #, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "%(type)s 伺æœå™¨ç™¼ç”ŸéŒ¯èª¤ï¼š%(ip)s:%(port)s/%(device)s re: %(info)s" #, python-format msgid "ERROR with loading suppressions from %s: " msgstr "從 %s 載入抑制時發生錯誤:" #, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "é ç«¯ä¼ºæœå™¨ç™¼ç”ŸéŒ¯èª¤ï¼š%(ip)s:%(port)s/%(device)s" #, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "錯誤:無法å–å¾—ç£ç¢Ÿæ©Ÿåˆ†å‰²å€çš„路徑:%s" #, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "éŒ¯èª¤ï¼šç„¡æ³•å­˜å– %(path)s:%(error)s" #, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "錯誤:無法執行審核:%s" msgid "Error hashing suffix" msgstr "æ··åˆå­—尾時發生錯誤" msgid "Error listing devices" msgstr "列出è£ç½®æ™‚發生錯誤" #, python-format msgid "Error on render profiling results: %s" msgstr "呈ç¾å´å¯«çµæžœæ™‚發生錯誤:%s" msgid "Error parsing recon cache file" msgstr "å‰–æž recon å¿«å–æª”案時發生錯誤" msgid "Error reading recon cache file" msgstr "è®€å– recon å¿«å–æª”案時發生錯誤" msgid "Error reading ringfile" msgstr "è®€å– ringfile 時發生錯誤" msgid "Error reading swift.conf" msgstr "è®€å– swift.conf 時發生錯誤" msgid "Error retrieving recon data" msgstr "æ“·å– recon 資料時發生錯誤" msgid "Error syncing handoff partition" msgstr "åŒæ­¥éžäº¤åˆ†å‰²å€æ™‚發生錯誤" msgid "Error syncing partition" msgstr "åŒæ­¥åˆ†å‰²å€æ™‚發生錯誤" #, python-format msgid "Error syncing with 
node: %s" msgstr "èˆ‡ç¯€é»žåŒæ­¥æ™‚發生錯誤:%s" #, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "嘗試é‡å»º %(path)s 原則 #%(policy)d 分段 #%(frag_index)s 時發生錯誤" msgid "Error: An error occurred" msgstr "錯誤:發生錯誤" msgid "Error: missing config path argument" msgstr "éŒ¯èª¤ï¼šéºæ¼é…置路徑引數" #, python-format msgid "Error: unable to locate %s" msgstr "錯誤:找ä¸åˆ° %s" msgid "Exception dumping recon cache" msgstr "傾出 recon å¿«å–æ™‚發生異常狀æ³" msgid "Exception in top-level account reaper loop" msgstr "最上層帳戶 Reaper 迴圈發生異常狀æ³" msgid "Exception in top-level replication loop" msgstr "最上層抄寫迴圈中發生異常狀æ³" msgid "Exception in top-levelreconstruction loop" msgstr "æœ€ä¸Šå±¤é‡æ–°å»ºæ§‹è¿´åœˆä¸­ç™¼ç”Ÿç•°å¸¸ç‹€æ³" #, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 發生異常狀æ³" #, python-format msgid "Exception with account %s" msgstr "帳戶 %s 發生異常狀æ³" #, python-format msgid "Exception with containers for account %s" msgstr "帳戶 %s 的儲存器發生異常狀æ³" #, python-format msgid "" "Exception with objects for container %(container)s for account %(account)s" msgstr "帳戶 %(account)s 儲存器 %(container)s 的物件發生異常狀æ³" #, python-format msgid "Expect: 100-continue on %s" msgstr "é æœŸ 100 - 在 %s 上繼續" #, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "éµå¾ª %(given_domain)s 到 %(found_domain)s çš„ CNAME éˆ" msgid "Found configs:" msgstr "找到é…置:" msgid "" "Handoffs first mode still has handoffs remaining. Aborting current " "replication pass." msgstr "「éžäº¤ä½œæ¥­æœ€å…ˆã€æ¨¡å¼ä»æœ‰å‰©é¤˜çš„éžäº¤ä½œæ¥­ã€‚正在中斷ç¾è¡ŒæŠ„寫傳éžã€‚" msgid "Host unreachable" msgstr "無法抵é”主機" #, python-format msgid "Incomplete pass on account %s" msgstr "帳戶 %s ä¸Šçš„é€šéŽæœªå®Œæˆ" #, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "無效的 X-Container-Sync-To æ ¼å¼ %r" #, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "X-Container-Sync-To 中的主機 %r 無效" #, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "無效的擱置項目 %(file)s:%(entry)s" #, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "來自 %(full_path)s 的回應 %(resp)s 無效" #, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "來自 %(ip)s 的回應 %(resp)s 無效" #, python-format msgid "" "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or " "\"https\"." msgstr "" "X-Container-Sync-To 中的架構 %r 無效,必須是 \"//\"ã€\"http\" 或\"https\"。" #, python-format msgid "Killing long-running rsync: %s" msgstr "æ­£åœ¨çµæŸé•·æ™‚間執行的é ç«¯åŒæ­¥ï¼š%s" msgid "Lockup detected.. killing live coros." msgstr "嵿¸¬åˆ°éŽ–å®šã€‚æ­£åœ¨çµæŸå³æ™‚ coro。" #, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "已將 %(given_domain)s å°æ˜ è‡³ %(found_domain)s" #, python-format msgid "No %s running" msgstr "沒有 %s 在執行中" #, python-format msgid "No permission to signal PID %d" msgstr "沒有信號 PID %d çš„è¨±å¯æ¬Š" #, python-format msgid "No policy with index %s" msgstr "沒有具有索引 %s 的原則" #, python-format msgid "No realm key for %r" msgstr "沒有 %r 的範åœé‡‘é‘°" #, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "節點錯誤é™åˆ¶ %(ip)s:%(port)s (%(device)s)" #, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "未確èªè¶³å¤ çš„物件伺æœå™¨ï¼ˆå·²å–å¾— %d)" #, python-format msgid "" "Not found %(sync_from)r => %(sync_to)r - object " "%(obj_name)r" msgstr "" "找ä¸åˆ° %(sync_from)r => %(sync_to)r - 物件%(obj_name)r" #, python-format msgid "Nothing reconstructed for %s seconds." 
msgstr "%s ç§’æœªé‡æ–°å»ºæ§‹ä»»ä½•內容。" #, python-format msgid "Nothing replicated for %s seconds." msgstr "æœªæŠ„å¯«ä»»ä½•é …ç›®é” %s 秒。" msgid "Object" msgstr "物件" msgid "Object PUT" msgstr "物件 PUT" #, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "物件 PUT é‡å° 409 正在傳回 202:%(req_timestamp)s <= %(timestamps)r" #, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "物件 PUT 正在傳回 412,%(statuses)r" #, python-format msgid "" "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total " "quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: " "%(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "物件審核 (%(type)s) \"%(mode)s\" 模å¼å·²å®Œæˆï¼š%(elapsed).02fs。已隔離總計:" "%(quars)d,錯誤總計:%(errors)d,檔案/秒總計:%(frate).2f,ä½å…ƒçµ„/秒總計:" "%(brate).2f,審核時間:%(audit).2f,速率:%(audit_rate).2f" #, python-format msgid "" "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, " "%(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: " "%(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: " "%(audit_rate).2f" msgstr "" "物件審核 (%(type)s)。自 %(start_time)s 以來:本端:%(passes)d 個已通" "éŽï¼Œ%(quars)d 個已隔離,%(errors)d 個錯誤,檔案/秒:%(frate).2f,ä½å…ƒçµ„數/" "秒:%(brate).2f,時間總計:%(total).2f,審核時間:%(audit).2f,速率:" "%(audit_rate).2f" #, python-format msgid "Object audit stats: %s" msgstr "物件審核統計資料:%s" #, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "ç‰©ä»¶é‡æ–°å»ºæ§‹å®Œæˆï¼ˆä¸€æ¬¡æ€§ï¼‰ã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "ç‰©ä»¶é‡æ–°å»ºæ§‹å®Œæˆã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "物件抄寫完æˆï¼ˆä¸€æ¬¡æ€§ï¼‰ã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object replication complete. 
(%.02f minutes)" msgstr "物件抄寫完æˆã€‚(%.02f 分é˜ï¼‰" #, python-format msgid "Object servers returned %s mismatched etags" msgstr "物件伺æœå™¨å·²å‚³å›ž %s 個ä¸ç¬¦ etag" #, python-format msgid "Object update sweep completed: %.02fs" msgstr "物件更新清ç†å·²å®Œæˆï¼š%.02fs" msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "X-Container-Sync-To 中ä¸å®¹è¨±åƒæ•¸ã€æŸ¥è©¢åŠç‰‡æ®µ" #, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "åˆ†å‰²å€æ™‚間:最大 %(max).4fsï¼Œæœ€å° %(min).4fs,中間 %(med).4fs" msgid "Path required in X-Container-Sync-To" msgstr "X-Container-Sync-To 中需è¦è·¯å¾‘" #, python-format msgid "Problem cleaning up %s" msgstr "清除 %s 時發生å•題" #, python-format msgid "Profiling Error: %s" msgstr "å´å¯«éŒ¯èª¤ï¼š%s" #, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "已將 %(hsh_path)s 隔離至 %(quar_path)sï¼ŒåŽŸå› æ˜¯å®ƒä¸æ˜¯ç›®éŒ„" #, python-format msgid "" "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "已將 %(object_path)s 隔離至 %(quar_path)sï¼ŒåŽŸå› æ˜¯å®ƒä¸æ˜¯ç›®éŒ„" #, python-format msgid "Quarantining DB %s" msgstr "正在隔離資料庫 %s" #, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "%(account)s/%(container)s/%(object)s çš„ ratelimit 休眠日誌:%(sleep)s" #, python-format msgid "Removed %(remove)d dbs" msgstr "已移除 %(remove)d 個資料庫" #, python-format msgid "Removing %s objects" msgstr "正在移除 %s 物件" #, python-format msgid "Removing partition: %s" msgstr "正在移除分割å€ï¼š%s" #, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "正在移除具有錯誤 PID %(pid)d çš„ PID 檔 %(pid_file)s" #, python-format msgid "Removing pid file %s with invalid pid" msgstr "正在移除具有無效 PID çš„ PID 檔 %s" #, python-format msgid "Removing stale pid file %s" msgstr "æ­£åœ¨ç§»é™¤éŽæ™‚ PID 檔案 %s" msgid "Replication run OVER" msgstr "æŠ„å¯«åŸ·è¡ŒçµæŸ" #, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "由於黑å單,正在傳回 497:%s" #, python-format msgid "" "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max " "Sleep) %(e)s" msgstr "" "正在將 %(meth)s çš„ 498 傳回至 %(acc)s/%(cont)s/%(obj)s。Ratelimit(休眠上" "é™ï¼‰%(e)s" msgid "Ring change detected. Aborting current reconstruction pass." msgstr "嵿¸¬åˆ°ç’°è®Šæ›´ã€‚正在中斷ç¾è¡Œé‡æ–°å»ºæ§‹å‚³éžã€‚" msgid "Ring change detected. Aborting current replication pass." msgstr "嵿¸¬åˆ°ç’°è®Šæ›´ã€‚正在中斷ç¾è¡ŒæŠ„寫傳éžã€‚" #, python-format msgid "Running %s once" msgstr "正在執行 %s 一次" msgid "Running object reconstructor in script mode." msgstr "正在 Script 模å¼ä¸‹åŸ·è¡Œç‰©ä»¶é‡æ–°å»ºæ§‹å™¨ã€‚" msgid "Running object replicator in script mode." 
msgstr "正在 Script 模å¼ä¸‹åŸ·è¡Œç‰©ä»¶æŠ„寫器" #, python-format msgid "" "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s " "skipped, %(fail)s failed" msgstr "" "自 %(time)s ä»¥ä¾†ï¼šå·²åŒæ­¥ %(sync)s 個 [已刪除 [%(delete)s 個,已放置 %(put)s " "個]ï¼Œå·²è·³éŽ %(skip)s 個,%(fail)s 個失敗" #, python-format msgid "" "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed " "audit" msgstr "" "自 %(time)s 以來:帳戶審核:%(passed)s 個已通éŽå¯©æ ¸ï¼Œ%(failed)s 個失敗審核" #, python-format msgid "" "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed " "audit" msgstr "" "自 %(time)s 以來:儲存器審核:%(pass)s 個已通éŽå¯©æ ¸ï¼Œ%(fail)s 個失敗審核" #, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "æ­£åœ¨è·³éŽ %(device)s,原因是它未è£è¼‰" #, python-format msgid "Skipping %s as it is not mounted" msgstr "æ­£åœ¨è·³éŽ %s,原因是它未è£è¼‰" #, python-format msgid "Starting %s" msgstr "正在啟動 %s" msgid "Starting object reconstruction pass." msgstr "æ­£åœ¨å•Ÿå‹•ç‰©ä»¶é‡æ–°å»ºæ§‹å‚³éžã€‚" msgid "Starting object reconstructor in daemon mode." msgstr "正在常é§ç¨‹å¼æ¨¡å¼ä¸‹å•Ÿå‹•ç‰©ä»¶é‡æ–°å»ºæ§‹å™¨ã€‚" msgid "Starting object replication pass." msgstr "正在啟動物件抄寫傳éžã€‚" msgid "Starting object replicator in daemon mode." msgstr "正在常é§ç¨‹å¼æ¨¡å¼ä¸‹å•Ÿå‹•物件抄寫器。" #, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "已順利é ç«¯åŒæ­¥ %(dst)s 中的 %(src)s (%(time).03f)" msgid "The file type are forbidden to access!" msgstr "æ­¤æª”æ¡ˆé¡žåž‹ç¦æ­¢å­˜å–ï¼" #, python-format msgid "" "The total %(key)s for the container (%(total)s) does not match the sum of " "%(key)s across policies (%(sum)s)" msgstr "" "儲存器的 %(key)s 總計 (%(total)s) ä¸ç¬¦åˆåŽŸå‰‡ä¸­çš„ %(key)s 總和 (%(sum)s) " #, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "%(ip)s:%(port)s/%(device)s 發生逾時異常狀æ³" #, python-format msgid "Trying to %(method)s %(path)s" msgstr "正在嘗試 %(method)s %(path)s" #, python-format msgid "Trying to GET %(full_path)s" msgstr "æ­£åœ¨å˜—è©¦å° %(full_path)s 執行 GET 動作" msgid "Trying to read during GET" msgstr "正在嘗試於 GET 期間讀å–" msgid "Trying to read during GET (retrying)" msgstr "正在嘗試於 GET 期間讀å–(正在é‡è©¦ï¼‰" msgid "Trying to send to client" msgstr "正在嘗試傳é€è‡³ç”¨æˆ¶ç«¯" #, python-format msgid "Trying to sync suffixes with %s" msgstr "正在嘗試與 %s åŒæ­¥å­—å°¾" #, python-format msgid "Trying to write to %s" msgstr "正在嘗試寫入至 %s" msgid "UNCAUGHT EXCEPTION" msgstr "æœªæ•æ‰çš„異常狀æ³" #, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "在 libc 中找ä¸åˆ° %s。ä¿ç•™ç‚º no-op。" #, python-format msgid "Unable to locate config for %s" msgstr "找ä¸åˆ° %s çš„é…ç½®" msgid "" "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "在 libc 中找ä¸åˆ° fallocateã€posix_fallocate。ä¿ç•™ç‚º no-op。" #, python-format msgid "Unable to read config from %s" msgstr "無法從 %s 讀å–é…ç½®" #, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "未鑑別 %(sync_from)r => %(sync_to)r" msgid "Unhandled exception" msgstr "無法處ç†çš„異常狀æ³" #, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "" "嘗試執行 GET å‹•ä½œæ™‚ç™¼ç”Ÿä¸æ˜Žç•°å¸¸ç‹€æ³ï¼š%(account)r %(container)r %(object)r" #, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "%(container)s %(dbfile)s 的更新報告失敗" #, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "å·²å‚³é€ %(container)s %(dbfile)s 的更新報告" msgid "" "WARNING: SSL should only be enabled for testing purposes. 
Use external SSL " "termination for a production deployment." msgstr "" "警告:應該僅啟用 SSL 以用於測試目的。使用外部SSL 終止以進行正å¼ä½œæ¥­éƒ¨ç½²ã€‚" msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "警告:無法修改檔案æè¿°å­é™åˆ¶ã€‚ä»¥éž root 使用者身分執行?" msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "警告:無法修改處ç†ç¨‹åºæ•¸ä¸Šé™é™åˆ¶ã€‚ä»¥éž root 使用者身分執行?" msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "警告:無法修改記憶體é™åˆ¶ã€‚ä»¥éž root 使用者身分執行?" msgid "Warning: Cannot ratelimit without a memcached client" msgstr "警告:無法在沒有 memcached 用戶端的情æ³ä¸‹é™åˆ¶é€Ÿçއ" #, python-format msgid "method %s is not allowed." msgstr "ä¸å®¹è¨±æ–¹æ³• %s。" msgid "no log file found" msgstr "找ä¸åˆ°æ—¥èªŒæª”" msgid "odfpy not installed." msgstr "æœªå®‰è£ odfpy。" #, python-format msgid "plotting results failed due to %s" msgstr "由於 %sï¼Œç¹ªè£½çµæžœå¤±æ•—" msgid "python-matplotlib not installed." msgstr "æœªå®‰è£ python-matplotlib。" swift-2.17.1/swift/locale/ru/0000775000175000017500000000000013435012120015770 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/ru/LC_MESSAGES/0000775000175000017500000000000013435012120017555 5ustar zuulzuul00000000000000swift-2.17.1/swift/locale/ru/LC_MESSAGES/swift.po0000666000175000017500000011031713435012015021261 0ustar zuulzuul00000000000000# Translations template for swift. # Copyright (C) 2015 ORGANIZATION # This file is distributed under the same license as the swift project. # # Translators: # Andreas Jaeger , 2016. #zanata msgid "" msgstr "" "Project-Id-Version: swift VERSION\n" "Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n" "POT-Creation-Date: 2018-01-31 06:08+0000\n" "MIME-Version: 1.0\n" "Content-Type: text/plain; charset=UTF-8\n" "Content-Transfer-Encoding: 8bit\n" "PO-Revision-Date: 2016-04-12 06:43+0000\n" "Last-Translator: Copied by Zanata \n" "Language: ru\n" "Plural-Forms: nplurals=4; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n" "%10<=4 && (n%100<12 || n%100>14) ? 1 : n%10==0 || (n%10>=5 && n%10<=9) || (n" "%100>=11 && n%100<=14)? 2 : 3);\n" "Generated-By: Babel 2.0\n" "X-Generator: Zanata 3.9.6\n" "Language-Team: Russian\n" msgid "" "\n" "user quit" msgstr "" "\n" "Завершение работы пользователÑ" #, python-format msgid " - %s" msgstr " - %s" #, python-format msgid " - parallel, %s" msgstr " - параллельно, %s" #, python-format msgid "" "%(checked)d suffixes checked - %(hashed).2f%% hashed, %(synced).2f%% synced" msgstr "" "Проверено ÑуффикÑов: %(checked)d - Ñ…Ñшировано: %(hashed).2f%%, " "Ñинхронизировано: %(synced).2f%%" #, python-format msgid "%(msg)s %(ip)s:%(port)s/%(device)s" msgstr "%(msg)s %(ip)s:%(port)s/%(device)s" #, python-format msgid "" "%(replicated)d/%(total)d (%(percentage).2f%%) partitions replicated in " "%(time).2fs (%(rate).2f/sec, %(remaining)s remaining)" msgstr "" "Реплицировано разделов: %(replicated)d/%(total)d (%(percentage).2f%%) за " "Ð²Ñ€ÐµÐ¼Ñ %(time).2f Ñ (%(rate).2f/Ñ, оÑталоÑÑŒ: %(remaining)s)" #, python-format msgid "%(success)s successes, %(failure)s failures" msgstr "%(success)s уÑпешно, %(failure)s Ñ Ð¾ÑˆÐ¸Ð±ÐºÐ°Ð¼Ð¸" #, python-format msgid "%(type)s returning 503 for %(statuses)s" msgstr "%(type)s возвратил 503 Ð´Ð»Ñ %(statuses)s" #, python-format msgid "%s already started..." msgstr "%s уже запущен..." 
#, python-format msgid "%s does not exist" msgstr "%s не существует"
#, python-format msgid "%s is not mounted" msgstr "%s не смонтирован"
#, python-format msgid "%s responded as unmounted" msgstr "%s ответил как размонтированный"
#, python-format msgid "%s: Connection reset by peer" msgstr "%s: соединение сброшено на другой стороне"
#, python-format msgid ", %s containers deleted" msgstr ", удалено контейнеров: %s"
#, python-format msgid ", %s containers possibly remaining" msgstr ", осталось контейнеров (возможно): %s"
#, python-format msgid ", %s containers remaining" msgstr ", осталось контейнеров: %s"
#, python-format msgid ", %s objects deleted" msgstr ", удалено объектов: %s"
#, python-format msgid ", %s objects possibly remaining" msgstr ", осталось объектов (возможно): %s"
#, python-format msgid ", %s objects remaining" msgstr ", осталось объектов: %s"
#, python-format msgid ", elapsed: %.02fs" msgstr ", прошло: %.02fs"
msgid ", return codes: " msgstr ", коды возврата: "
msgid "Account" msgstr "Учетная запись"
#, python-format msgid "Account audit \"once\" mode completed: %.02fs" msgstr "Проверка учетной записи в \"однократном\" режиме завершена: %.02fs"
#, python-format msgid "Account audit pass completed: %.02fs" msgstr "Проход контроля учетной записи выполнен: %.02fs"
#, python-format msgid "Attempted to replicate %(count)d dbs in %(time).5f seconds (%(rate).5f/s)" msgstr "Попытка репликации %(count)d баз данных за %(time).5f секунд (%(rate).5f/s)"
#, python-format msgid "Bad rsync return code: %(ret)d <- %(args)s" msgstr "Неправильный код возврата rsync: %(ret)d <- %(args)s"
msgid "Begin account audit \"once\" mode" msgstr "Начать проверку учетной записи в \"однократном\" режиме"
msgid "Begin account audit pass." msgstr "Начать проход проверки учетной записи."
msgid "Begin container audit \"once\" mode" msgstr "Начать проверку контейнера в \"однократном\" режиме"
msgid "Begin container audit pass." msgstr "Начать проход проверки контейнера."
msgid "Begin container sync \"once\" mode" msgstr "Начать синхронизацию контейнера в \"однократном\" режиме"
msgid "Begin container update single threaded sweep" msgstr "Начать однонитевую сплошную проверку обновлений контейнера"
msgid "Begin container update sweep" msgstr "Начать сплошную проверку обновлений контейнера"
msgid "Begin object update single threaded sweep" msgstr "Начать однонитевую сплошную проверку обновлений объекта"
msgid "Begin object update sweep" msgstr "Начать сплошную проверку обновлений объекта"
#, python-format msgid "Beginning pass on account %s" msgstr "Начинается проход для учетной записи %s"
msgid "Beginning replication run" msgstr "Запуск репликации"
msgid "Broker error trying to rollback locked connection" msgstr "Ошибка посредника при попытке отката заблокированного соединения"
#, python-format msgid "Can not access the file %s." msgstr "Отсутствует доступ к файлу %s."
#, python-format msgid "Can not load profile data from %s." msgstr "Не удается загрузить данные профайла из %s."
#, python-format msgid "Client did not read from proxy within %ss" msgstr "Клиент не прочитал данные из proxy в %ss"
msgid "Client disconnected on read" msgstr "Клиент отключен во время чтения"
msgid "Client disconnected without sending enough data" msgstr "Клиент отключен без отправки данных"
msgid "Client disconnected without sending last chunk" msgstr "Клиент отключился, не отправив последний фрагмент данных"
#, python-format msgid "Client path %(client)s does not match path stored in object metadata %(meta)s" msgstr "Путь клиента %(client)s не соответствует пути в метаданных объекта %(meta)s"
msgid "Configuration option internal_client_conf_path not defined. Using default configuration, See internal-client.conf-sample for options" msgstr "Опция internal_client_conf_path конфигурации не определена. Используется конфигурация по умолчанию. Используйте internal-client.conf-sample для информации об опциях"
msgid "Connection refused" msgstr "Соединение отклонено"
msgid "Connection timeout" msgstr "Тайм-аут соединения"
msgid "Container" msgstr "контейнер"
#, python-format msgid "Container audit \"once\" mode completed: %.02fs" msgstr "Проверка контейнера в \"однократном\" режиме завершена: %.02fs"
#, python-format msgid "Container audit pass completed: %.02fs" msgstr "Проход проверки контейнера завершен: %.02fs"
#, python-format msgid "Container sync \"once\" mode completed: %.02fs" msgstr "Синхронизация контейнера в \"однократном\" режиме завершена: %.02fs"
#, python-format msgid "Container update single threaded sweep completed: %(elapsed).02fs, %(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "Сплошная однонитевая проверка обновлений контейнера завершена: %(elapsed).02fs, успешно: %(success)s, сбоев: %(fail)s, без изменений: %(no_change)s"
#, python-format msgid "Container update sweep completed: %.02fs" msgstr "Сплошная проверка обновлений контейнера завершена: %.02fs"
#, python-format msgid "Container update sweep of %(path)s completed: %(elapsed).02fs, %(success)s successes, %(fail)s failures, %(no_change)s with no changes" msgstr "Сплошная проверка обновлений контейнера в %(path)s завершена: %(elapsed).02fs, успешно: %(success)s, сбоев: %(fail)s, без изменений: %(no_change)s"
#, python-format msgid "Data download error: %s" msgstr "Ошибка загрузки данных: %s"
#, python-format msgid "Devices pass completed: %.02fs" msgstr "Проход устройств выполнен: %.02fs"
#, python-format msgid "ERROR %(db_file)s: %(validate_sync_to_err)s" msgstr "Ошибка %(db_file)s: %(validate_sync_to_err)s"
#, python-format msgid "ERROR %(status)d %(body)s From %(type)s Server" msgstr "Ошибка %(status)d %(body)s из сервера %(type)s"
#, python-format msgid "ERROR %(status)d %(body)s From Object Server re: %(path)s" msgstr "Ошибка %(status)d %(body)s, ответ от сервера объекта: %(path)s"
#, python-format msgid "ERROR %(status)d Expect: 100-continue From Object Server" msgstr "Ошибка %(status)d. Ожидаемое значение от сервера объекта: 100-continue"
#, python-format msgid "ERROR Account update failed with %(ip)s:%(port)s/%(device)s (will retry later): Response %(status)s %(reason)s" msgstr "Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/%(device)s (операция будет повторена позднее): Ответ: %(status)s %(reason)s"
#, python-format msgid "ERROR Bad response %(status)s from %(host)s" msgstr "Ошибка: неправильный ответ %(status)s от %(host)s"
#, python-format msgid "ERROR Client read timeout (%ss)" msgstr "Ошибка: тайм-аут чтения клиента (%ss)"
#, python-format msgid "ERROR Container update failed (saving for async update later): %(status)d response from %(ip)s:%(port)s/%(dev)s" msgstr "Ошибка. Обновление контейнера не выполнено (сохранение асинхронных обновлений будет выполнено позднее): ответ %(status)d от %(ip)s:%(port)s/%(dev)s"
#, python-format msgid "ERROR Could not get account info %s" msgstr "Ошибка: не удалось получить сведения об учетной записи %s"
#, python-format msgid "ERROR Could not get container info %s" msgstr "Ошибка: не удалось получить информацию о контейнере %s"
#, python-format msgid "ERROR DiskFile %(data_file)s close failure: %(exc)s : %(stack)s" msgstr "Ошибка: ошибка закрытия DiskFile %(data_file)s: %(exc)s : %(stack)s"
msgid "ERROR Exception causing client disconnect" msgstr "Ошибка. Исключительная ситуация при отключении клиента"
#, python-format msgid "ERROR Exception transferring data to object servers %s" msgstr "ОШИБКА. Исключительная ситуация при передаче данных на серверы объектов %s"
msgid "ERROR Failed to get my own IPs?" msgstr "Ошибка: не удалось получить собственные IP-адреса?"
msgid "ERROR Insufficient Storage" msgstr "Ошибка: недостаточно места в хранилище"
#, python-format msgid "ERROR Object %(obj)s failed audit and was quarantined: %(err)s" msgstr "Ошибка: контроль объекта %(obj)s не выполнен, объект помещен в карантин: %(err)s"
#, python-format msgid "ERROR Pickle problem, quarantining %s" msgstr "Ошибка Pickle, %s помещается в карантин"
#, python-format msgid "ERROR Remote drive not mounted %s" msgstr "Ошибка: удаленный накопитель не смонтирован %s"
#, python-format msgid "ERROR Syncing %(db_file)s %(row)s" msgstr "Ошибка синхронизации %(db_file)s %(row)s"
#, python-format msgid "ERROR Syncing %s" msgstr "Ошибка синхронизации %s"
#, python-format msgid "ERROR Trying to audit %s" msgstr "Ошибка при попытке контроля %s"
msgid "ERROR Unhandled exception in request" msgstr "Ошибка. Необрабатываемая исключительная ситуация в запросе"
#, python-format msgid "ERROR __call__ error with %(method)s %(path)s " msgstr "Ошибка: ошибка __call__ в %(method)s %(path)s "
#, python-format msgid "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry later)" msgstr "Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/%(device)s (операция будет повторена позднее)"
#, python-format msgid "ERROR account update failed with %(ip)s:%(port)s/%(device)s (will retry later): " msgstr "Ошибка: обновление учетной записи не выполнено для %(ip)s:%(port)s/%(device)s (операция будет повторена позднее): "
#, python-format msgid "ERROR async pending file with unexpected name %s" msgstr "Ошибка выполнения асинхронной передачи ожидающего файла с непредвиденным именем %s"
msgid "ERROR auditing" msgstr "ОШИБКА контроля"
#, python-format msgid "ERROR auditing: %s" msgstr "Ошибка контроля: %s"
#, python-format msgid "ERROR container update failed with %(ip)s:%(port)s/%(dev)s (saving for async update later)" msgstr "Ошибка. Обновление контейнера не выполнено с %(ip)s:%(port)s/%(dev)s (сохранение асинхронного обновления будет выполнено позднее)"
#, python-format msgid "ERROR reading HTTP response from %s" msgstr "Ошибка чтения ответа HTTP из %s"
#, python-format msgid "ERROR reading db %s" msgstr "Ошибка чтения базы данных %s"
#, python-format msgid "ERROR rsync failed with %(code)s: %(args)s" msgstr "Ошибка: команда rsync не выполнена с кодом %(code)s: %(args)s"
#, python-format msgid "ERROR syncing %(file)s with node %(node)s" msgstr "Ошибка синхронизации %(file)s с узлом %(node)s"
msgid "ERROR trying to replicate" msgstr "Ошибка при попытке репликации"
#, python-format msgid "ERROR while trying to clean up %s" msgstr "Ошибка при попытке очистки %s"
#, python-format msgid "ERROR with %(type)s server %(ip)s:%(port)s/%(device)s re: %(info)s" msgstr "Ошибка с сервером %(type)s %(ip)s:%(port)s/%(device)s, возврат: %(info)s"
#, python-format msgid "ERROR with loading suppressions from %s: " msgstr "Ошибка при загрузке скрытых объектов из %s: "
#, python-format msgid "ERROR with remote server %(ip)s:%(port)s/%(device)s" msgstr "Ошибка с удаленным сервером %(ip)s:%(port)s/%(device)s"
#, python-format msgid "ERROR: Failed to get paths to drive partitions: %s" msgstr "Ошибка: не удалось получить пути к разделам накопителей: %s"
#, python-format msgid "ERROR: Unable to access %(path)s: %(error)s" msgstr "Ошибка: не удалось получить доступ к %(path)s: %(error)s"
#, python-format msgid "ERROR: Unable to run auditing: %s" msgstr "Ошибка: не удалось запустить процесс контроля: %s"
msgid "Error hashing suffix" msgstr "Ошибка хэширования суффикса"
msgid "Error listing devices" msgstr "Ошибка при выводе списка устройств"
#, python-format msgid "Error on render profiling results: %s" msgstr "Ошибка при выводе результатов профилирования: %s"
msgid "Error parsing recon cache file" msgstr "Ошибка анализа файла кэша recon"
msgid "Error reading recon cache file" msgstr "Ошибка чтения файла кэша recon"
msgid "Error reading ringfile" msgstr "Ошибка при чтении ringfile"
msgid "Error reading swift.conf" msgstr "Ошибка чтения swift.conf"
msgid "Error retrieving recon data" msgstr "Ошибка при получении данных recon"
msgid "Error syncing handoff partition" msgstr "Ошибка при синхронизации раздела передачи управления"
msgid "Error syncing partition" msgstr "Ошибка синхронизации раздела"
#, python-format msgid "Error syncing with node: %s" msgstr "Ошибка синхронизации с узлом %s"
#, python-format msgid "Error trying to rebuild %(path)s policy#%(policy)d frag#%(frag_index)s" msgstr "Ошибка при попытке перекомпоновки стратегии %(path)s: номер#%(policy)d фрагмент#%(frag_index)s"
msgid "Error: An error occurred" msgstr "Ошибка: произошла ошибка"
msgid "Error: missing config path argument" msgstr "Ошибка: отсутствует аргумент пути конфигурации"
#, python-format msgid "Error: unable to locate %s" msgstr "Ошибка: не удалось найти %s"
msgid "Exception dumping recon cache" msgstr "Исключительная ситуация при создании кэша recon"
msgid "Exception in top-level account reaper loop" msgstr "Исключительная ситуация в цикле чистильщика учетных записей верхнего уровня"
msgid "Exception in top-level replication loop" msgstr "Исключительная ситуация в цикле репликации верхнего уровня"
msgid "Exception in top-levelreconstruction loop" msgstr "Исключение в цикле реконструкции верхнего уровня"
#, python-format msgid "Exception with %(ip)s:%(port)s/%(device)s" msgstr "Исключительная ситуация в %(ip)s:%(port)s/%(device)s"
#, python-format msgid "Exception with account %s" msgstr "Исключительная ситуация в учетной записи %s"
#, python-format msgid "Exception with containers for account %s" msgstr "Исключительная ситуация в контейнерах для учетной записи %s"
#, python-format msgid "Exception with objects for container %(container)s for account %(account)s" msgstr "Исключительная ситуация в объектах для контейнера %(container)s для учетной записи %(account)s"
#, python-format msgid "Expect: 100-continue on %s" msgstr "Ожидаемое значение: 100-continue в %s"
#, python-format msgid "Following CNAME chain for %(given_domain)s to %(found_domain)s" msgstr "Переход по цепочке CNAME от %(given_domain)s к %(found_domain)s"
msgid "Found configs:" msgstr "Обнаружены конфигурации:"
msgid "Handoffs first mode still has handoffs remaining. Aborting current replication pass." msgstr "В режиме передачи управления не все операции завершены. Принудительное завершение текущего прохода репликации."
msgid "Host unreachable" msgstr "Хост недоступен"
#, python-format msgid "Incomplete pass on account %s" msgstr "Не завершен проход для учетной записи %s"
#, python-format msgid "Invalid X-Container-Sync-To format %r" msgstr "Недопустимый формат X-Container-Sync-To %r"
#, python-format msgid "Invalid host %r in X-Container-Sync-To" msgstr "Недопустимый хост %r в X-Container-Sync-To"
#, python-format msgid "Invalid pending entry %(file)s: %(entry)s" msgstr "Недопустимая ожидающая запись %(file)s: %(entry)s"
#, python-format msgid "Invalid response %(resp)s from %(full_path)s" msgstr "Недопустимый ответ %(resp)s от %(full_path)s"
#, python-format msgid "Invalid response %(resp)s from %(ip)s" msgstr "Недопустимый ответ %(resp)s от %(ip)s"
#, python-format msgid "Invalid scheme %r in X-Container-Sync-To, must be \"//\", \"http\", or \"https\"." msgstr "Недопустимая схема %r в X-Container-Sync-To, допустимые значения: \"//\", \"http\" или \"https\"."
#, python-format msgid "Killing long-running rsync: %s" msgstr "Принудительное завершение долго выполняющегося rsync: %s"
msgid "Lockup detected.. killing live coros." msgstr "Обнаружена блокировка.. принудительное завершение работающих сопрограмм."
#, python-format msgid "Mapped %(given_domain)s to %(found_domain)s" msgstr "Преобразовано %(given_domain)s в %(found_domain)s"
#, python-format msgid "No %s running" msgstr "%s не выполняется"
#, python-format msgid "No permission to signal PID %d" msgstr "Нет прав доступа для отправки сигнала в PID %d"
#, python-format msgid "No policy with index %s" msgstr "Не найдено стратегии с индексом %s"
#, python-format msgid "No realm key for %r" msgstr "Отсутствует ключ области для %r"
#, python-format msgid "Node error limited %(ip)s:%(port)s (%(device)s)" msgstr "Узел %(ip)s:%(port)s (%(device)s) ограничен из-за ошибок"
#, python-format msgid "Not enough object servers ack'ed (got %d)" msgstr "Недостаточное число подтверждений с серверов объектов (получено %d)"
#, python-format msgid "Not found %(sync_from)r => %(sync_to)r - object %(obj_name)r" msgstr "Не найдено: %(sync_from)r => %(sync_to)r - объект %(obj_name)r"
#, python-format msgid "Nothing reconstructed for %s seconds." msgstr "Ничего не реконструировано за %s с."
#, python-format msgid "Nothing replicated for %s seconds." msgstr "Ничего не реплицировано за %s с."
msgid "Object" msgstr "Объект"
msgid "Object PUT" msgstr "Функция PUT объекта"
#, python-format msgid "Object PUT returning 202 for 409: %(req_timestamp)s <= %(timestamps)r" msgstr "Функция PUT объекта возвратила 202 для 409: %(req_timestamp)s <= %(timestamps)r"
#, python-format msgid "Object PUT returning 412, %(statuses)r" msgstr "Функция PUT объекта возвратила 412, %(statuses)r"
#, python-format msgid "Object audit (%(type)s) \"%(mode)s\" mode completed: %(elapsed).02fs. Total quarantined: %(quars)d, Total errors: %(errors)d, Total files/sec: %(frate).2f, Total bytes/sec: %(brate).2f, Auditing time: %(audit).2f, Rate: %(audit_rate).2f" msgstr "Контроль объекта (%(type)s) в режиме \"%(mode)s\" завершен: %(elapsed).02fs. Всего в карантине: %(quars)d, всего ошибок: %(errors)d, всего файлов/с: %(frate).2f, всего байт/с: %(brate).2f, время контроля: %(audit).2f, скорость: %(audit_rate).2f"
#, python-format msgid "Object audit (%(type)s). Since %(start_time)s: Locally: %(passes)d passed, %(quars)d quarantined, %(errors)d errors, files/sec: %(frate).2f, bytes/sec: %(brate).2f, Total time: %(total).2f, Auditing time: %(audit).2f, Rate: %(audit_rate).2f" msgstr "Проверка объекта (%(type)s). После %(start_time)s: локально: успешно - %(passes)d, в карантине - %(quars)d, ошибок - %(errors)d, файлов/с: %(frate).2f, байт/с: %(brate).2f, общее время: %(total).2f, время контроля: %(audit).2f, скорость: %(audit_rate).2f"
#, python-format msgid "Object audit stats: %s" msgstr "Состояние контроля объекта: %s"
#, python-format msgid "Object reconstruction complete (once). (%.02f minutes)" msgstr "Реконструкция объекта выполнена (однократно). (%.02f мин.)"
#, python-format msgid "Object reconstruction complete. (%.02f minutes)" msgstr "Реконструкция объекта выполнена. (%.02f мин.)"
#, python-format msgid "Object replication complete (once). (%.02f minutes)" msgstr "Репликация объекта выполнена (однократно). (%.02f мин.)"
#, python-format msgid "Object replication complete. (%.02f minutes)" msgstr "Репликация объекта выполнена. (%.02f мин.)"
#, python-format msgid "Object servers returned %s mismatched etags" msgstr "Серверы объектов вернули несоответствующие etag: %s"
#, python-format msgid "Object update sweep completed: %.02fs" msgstr "Сплошная проверка обновлений объекта завершена: %.02fs"
msgid "Params, queries, and fragments not allowed in X-Container-Sync-To" msgstr "В X-Container-Sync-To не разрешены параметры, запросы и фрагменты"
#, python-format msgid "Partition times: max %(max).4fs, min %(min).4fs, med %(med).4fs" msgstr "Время раздела: максимум: %(max).4fs, минимум: %(min).4fs, среднее: %(med).4fs"
msgid "Path required in X-Container-Sync-To" msgstr "Требуется путь в X-Container-Sync-To"
#, python-format msgid "Problem cleaning up %s" msgstr "Неполадка при очистке %s"
#, python-format msgid "Profiling Error: %s" msgstr "Ошибка профилирования: %s"
#, python-format msgid "Quarantined %(hsh_path)s to %(quar_path)s because it is not a directory" msgstr "%(hsh_path)s помещен в карантин в %(quar_path)s, так как не является каталогом"
#, python-format msgid "Quarantined %(object_path)s to %(quar_path)s because it is not a directory" msgstr "%(object_path)s помещен в карантин в %(quar_path)s, так как не является каталогом"
#, python-format msgid "Quarantining DB %s" msgstr "БД %s помещена в карантин"
#, python-format msgid "Ratelimit sleep log: %(sleep)s for %(account)s/%(container)s/%(object)s" msgstr "Протокол тайм-аута при ограничении скорости %(sleep)s для %(account)s/%(container)s/%(object)s"
#, python-format msgid "Removed %(remove)d dbs" msgstr "Удалено баз данных: %(remove)d"
#, python-format msgid "Removing %s objects" msgstr "Удаление объектов %s"
#, python-format msgid "Removing partition: %s" msgstr "Удаление раздела: %s"
#, python-format msgid "Removing pid file %(pid_file)s with wrong pid %(pid)d" msgstr "Удаление файла pid %(pid_file)s с ошибочным pid %(pid)d"
#, python-format msgid "Removing pid file %s with invalid pid" msgstr "Удаление файла pid %s с неверным pid"
#, python-format msgid "Removing stale pid file %s" msgstr "Удаление устаревшего файла pid %s"
msgid "Replication run OVER" msgstr "Прогон репликации завершен"
#, python-format msgid "Returning 497 because of blacklisting: %s" msgstr "Возвращено 497 из-за черного списка: %s"
#, python-format msgid "Returning 498 for %(meth)s to %(acc)s/%(cont)s/%(obj)s . Ratelimit (Max Sleep) %(e)s" msgstr "Возвращено 498 для %(meth)s в %(acc)s/%(cont)s/%(obj)s . Ratelimit (максимальная задержка): %(e)s"
msgid "Ring change detected. Aborting current reconstruction pass." msgstr "Обнаружено изменение кольца. Принудительное завершение текущего прохода реконструкции."
msgid "Ring change detected. Aborting current replication pass." msgstr "Обнаружено изменение кольца. Принудительное завершение текущего прохода репликации."
#, python-format msgid "Running %s once" msgstr "Однократное выполнение %s"
msgid "Running object reconstructor in script mode." msgstr "Запуск утилиты реконструкции объектов в режиме скрипта."
msgid "Running object replicator in script mode." msgstr "Запуск утилиты репликации объектов в режиме скрипта."
#, python-format msgid "Since %(time)s: %(sync)s synced [%(delete)s deletes, %(put)s puts], %(skip)s skipped, %(fail)s failed" msgstr "С %(time)s: синхронизировано %(sync)s [удалено: %(delete)s, добавлено: %(put)s], пропущено: %(skip)s, сбоев: %(fail)s"
#, python-format msgid "Since %(time)s: Account audits: %(passed)s passed audit,%(failed)s failed audit" msgstr "С %(time)s: проверки учетной записи: успешно: %(passed)s, с ошибками: %(failed)s"
#, python-format msgid "Since %(time)s: Container audits: %(pass)s passed audit, %(fail)s failed audit" msgstr "С %(time)s: проверки контейнера: успешно: %(pass)s, с ошибками: %(fail)s"
#, python-format msgid "Skipping %(device)s as it is not mounted" msgstr "%(device)s будет пропущен, так как он не смонтирован"
#, python-format msgid "Skipping %s as it is not mounted" msgstr "%s будет пропущен, так как он не смонтирован"
#, python-format msgid "Starting %s" msgstr "Запуск %s"
msgid "Starting object reconstruction pass." msgstr "Запуск прохода реконструкции объектов."
msgid "Starting object reconstructor in daemon mode." msgstr "Запуск утилиты реконструкции объектов в режиме демона."
msgid "Starting object replication pass." msgstr "Запуск прохода репликации объектов."
msgid "Starting object replicator in daemon mode." msgstr "Запуск утилиты репликации объектов в режиме демона."
#, python-format msgid "Successful rsync of %(src)s at %(dst)s (%(time).03f)" msgstr "Успешное выполнение rsync для %(src)s на %(dst)s (%(time).03f)"
msgid "The file type are forbidden to access!" msgstr "Запрещен доступ к этому типу файла!"
#, python-format msgid "The total %(key)s for the container (%(total)s) does not match the sum of %(key)s across policies (%(sum)s)" msgstr "Общее число %(key)s для контейнера (%(total)s) не соответствует сумме %(key)s в стратегиях (%(sum)s)"
#, python-format msgid "Timeout Exception with %(ip)s:%(port)s/%(device)s" msgstr "Исключение по таймауту %(ip)s:%(port)s/%(device)s"
#, python-format msgid "Trying to %(method)s %(path)s" msgstr "Попытка выполнения метода %(method)s %(path)s"
#, python-format msgid "Trying to GET %(full_path)s" msgstr "Попытка GET-запроса %(full_path)s"
msgid "Trying to read during GET" msgstr "Попытка чтения во время операции GET"
msgid "Trying to read during GET (retrying)" msgstr "Попытка чтения во время операции GET (выполняется повтор)"
msgid "Trying to send to client" msgstr "Попытка отправки клиенту"
#, python-format msgid "Trying to sync suffixes with %s" msgstr "Попытка синхронизации суффиксов с %s"
#, python-format msgid "Trying to write to %s" msgstr "Попытка записи в %s"
msgid "UNCAUGHT EXCEPTION" msgstr "Необрабатываемая исключительная ситуация"
#, python-format msgid "Unable to locate %s in libc. Leaving as a no-op." msgstr "Не удалось найти %s в libc. Оставлено как no-op."
#, python-format msgid "Unable to locate config for %s" msgstr "Не удалось найти конфигурационный файл для %s"
msgid "Unable to locate fallocate, posix_fallocate in libc. Leaving as a no-op." msgstr "Не удалось найти fallocate, posix_fallocate в libc. Оставлено как no-op."
#, python-format msgid "Unable to read config from %s" msgstr "Не удалось прочитать конфигурацию из %s"
#, python-format msgid "Unauth %(sync_from)r => %(sync_to)r" msgstr "Синхронизация %(sync_from)r => %(sync_to)r без прав доступа"
msgid "Unhandled exception" msgstr "Необработанная исключительная ситуация"
#, python-format msgid "Unknown exception trying to GET: %(account)r %(container)r %(object)r" msgstr "Неизвестное исключение в GET-запросе: %(account)r %(container)r %(object)r"
#, python-format msgid "Update report failed for %(container)s %(dbfile)s" msgstr "Отчет об обновлении для %(container)s %(dbfile)s не выполнен"
#, python-format msgid "Update report sent for %(container)s %(dbfile)s" msgstr "Отчет об обновлении отправлен для %(container)s %(dbfile)s"
msgid "WARNING: SSL should only be enabled for testing purposes. Use external SSL termination for a production deployment." msgstr "Предупреждение: SSL должен быть включен только в целях тестирования. Используйте внешнее завершение SSL для развертывания в рабочем режиме."
msgid "WARNING: Unable to modify file descriptor limit. Running as non-root?" msgstr "Предупреждение: не удалось изменить предельное значение для дескриптора файла. Запущен без прав доступа root?"
msgid "WARNING: Unable to modify max process limit. Running as non-root?" msgstr "Предупреждение: не удалось изменить предельное значение для числа процессов. Запущен без прав доступа root?"
msgid "WARNING: Unable to modify memory limit. Running as non-root?" msgstr "Предупреждение: не удалось изменить предельное значение для памяти. Запущен без прав доступа root?"
msgid "Warning: Cannot ratelimit without a memcached client" msgstr "Предупреждение: не удается ограничить скорость без клиента memcached"
#, python-format msgid "method %s is not allowed." msgstr "Метод %s не разрешен."
msgid "no log file found" msgstr "Не найден файл протокола"
msgid "odfpy not installed." msgstr "Библиотека odfpy не установлена."
#, python-format msgid "plotting results failed due to %s" msgstr "Не удалось построить график результатов из-за %s"
msgid "python-matplotlib not installed." msgstr "Библиотека python-matplotlib не установлена."
swift-2.17.1/swift/__init__.py0000666000175000017500000000305313435012015016222 0ustar zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import gettext import pkg_resources try: # First, try to get our version out of PKG-INFO. If we're installed, # this'll let us find our version without pulling in pbr. After all, if # we're installed on a system, we're not in a Git-managed source tree, so # pbr doesn't really buy us anything. __version__ = __canonical_version__ = pkg_resources.get_provider( pkg_resources.Requirement.parse('swift')).version except pkg_resources.DistributionNotFound: # No PKG-INFO? We're probably running from a checkout, then.
Let pbr do # its thing to figure out a version number. import pbr.version _version_info = pbr.version.VersionInfo('swift') __version__ = _version_info.release_string() __canonical_version__ = _version_info.version_string() _localedir = os.environ.get('SWIFT_LOCALEDIR') _t = gettext.translation('swift', localedir=_localedir, fallback=True) def gettext_(msg): return _t.gettext(msg) swift-2.17.1/swift/cli/0000775000175000017500000000000013435012120014652 5ustar zuulzuul00000000000000swift-2.17.1/swift/cli/ring_builder_analyzer.py0000666000175000017500000002731113435012003021604 0ustar zuulzuul00000000000000# Copyright (c) 2015 Samuel Merritt # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ This is a tool for analyzing how well the ring builder performs its job in a particular scenario. It is intended to help developers quantify any improvements or regressions in the ring builder; it is probably not useful to others. The ring builder analyzer takes a scenario file containing some initial parameters for a ring builder plus a certain number of rounds. In each round, some modifications are made to the builder, e.g. add a device, remove a device, change a device's weight. Then, the builder is repeatedly rebalanced until it settles down. Data about that round is printed, and the next round begins. Scenarios are specified in JSON. 
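A scenario can be validated without being executed by passing ``--check`` to this tool. For example (the console-script name shown here is an assumption about how main() gets wired up by packaging, not something defined in this module)::

    swift-ring-builder-analyzer --check scenario.json
    swift-ring-builder-analyzer scenario.json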
Example scenario for a gradual device addition:: { "part_power": 12, "replicas": 3, "overload": 0.1, "random_seed": 203488, "rounds": [ [ ["add", "r1z2-10.20.30.40:6200/sda", 8000], ["add", "r1z2-10.20.30.40:6200/sdb", 8000], ["add", "r1z2-10.20.30.40:6200/sdc", 8000], ["add", "r1z2-10.20.30.40:6200/sdd", 8000], ["add", "r1z2-10.20.30.41:6200/sda", 8000], ["add", "r1z2-10.20.30.41:6200/sdb", 8000], ["add", "r1z2-10.20.30.41:6200/sdc", 8000], ["add", "r1z2-10.20.30.41:6200/sdd", 8000], ["add", "r1z2-10.20.30.43:6200/sda", 8000], ["add", "r1z2-10.20.30.43:6200/sdb", 8000], ["add", "r1z2-10.20.30.43:6200/sdc", 8000], ["add", "r1z2-10.20.30.43:6200/sdd", 8000], ["add", "r1z2-10.20.30.44:6200/sda", 8000], ["add", "r1z2-10.20.30.44:6200/sdb", 8000], ["add", "r1z2-10.20.30.44:6200/sdc", 8000] ], [ ["add", "r1z2-10.20.30.44:6200/sdd", 1000] ], [ ["set_weight", 15, 2000] ], [ ["remove", 3], ["set_weight", 15, 3000] ], [ ["set_weight", 15, 4000] ], [ ["set_weight", 15, 5000] ], [ ["set_weight", 15, 6000] ], [ ["set_weight", 15, 7000] ], [ ["set_weight", 15, 8000] ]] } """ import argparse import json import sys from swift.common.ring import builder from swift.common.ring.utils import parse_add_value ARG_PARSER = argparse.ArgumentParser( description='Put the ring builder through its paces') ARG_PARSER.add_argument( '--check', '-c', action='store_true', help="Just check the scenario, don't execute it.") ARG_PARSER.add_argument( 'scenario_path', help="Path to the scenario file") class ParseCommandError(ValueError): def __init__(self, name, round_index, command_index, msg): msg = "Invalid %s (round %s, command %s): %s" % ( name, round_index, command_index, msg) super(ParseCommandError, self).__init__(msg) def _parse_weight(round_index, command_index, weight_str): try: weight = float(weight_str) except ValueError as err: raise ParseCommandError('weight', round_index, command_index, err) if weight < 0: raise ParseCommandError('weight', round_index, command_index, 'cannot be negative') return weight def _parse_add_command(round_index, command_index, command): if len(command) != 3: raise ParseCommandError( 'add command', round_index, command_index, 'expected array of length 3, but got %r' % command) dev_str = command[1] weight_str = command[2] try: dev = parse_add_value(dev_str) except ValueError as err: raise ParseCommandError('device specifier', round_index, command_index, err) dev['weight'] = _parse_weight(round_index, command_index, weight_str) if dev['region'] is None: dev['region'] = 1 default_key_map = { 'replication_ip': 'ip', 'replication_port': 'port', } for empty_key, default_key in default_key_map.items(): if dev[empty_key] is None: dev[empty_key] = dev[default_key] return ['add', dev] def _parse_remove_command(round_index, command_index, command): if len(command) != 2: raise ParseCommandError('remove commnd', round_index, command_index, "expected array of length 2, but got %r" % (command,)) dev_str = command[1] try: dev_id = int(dev_str) except ValueError as err: raise ParseCommandError('device ID in remove', round_index, command_index, err) return ['remove', dev_id] def _parse_set_weight_command(round_index, command_index, command): if len(command) != 3: raise ParseCommandError('remove command', round_index, command_index, "expected array of length 3, but got %r" % (command,)) dev_str = command[1] weight_str = command[2] try: dev_id = int(dev_str) except ValueError as err: raise ParseCommandError('device ID in set_weight', round_index, command_index, err) weight = _parse_weight(round_index, 
command_index, weight_str) return ['set_weight', dev_id, weight] def _parse_save_command(round_index, command_index, command): if len(command) != 2: raise ParseCommandError( command, round_index, command_index, "expected array of length 2 but got %r" % (command,)) return ['save', command[1]] def parse_scenario(scenario_data): """ Takes a serialized scenario and turns it into a data structure suitable for feeding to run_scenario(). :returns: scenario :raises ValueError: on invalid scenario """ parsed_scenario = {} try: raw_scenario = json.loads(scenario_data) except ValueError as err: raise ValueError("Invalid JSON in scenario file: %s" % err) if not isinstance(raw_scenario, dict): raise ValueError("Scenario must be a JSON object, not array or string") if 'part_power' not in raw_scenario: raise ValueError("part_power missing") try: parsed_scenario['part_power'] = int(raw_scenario['part_power']) except ValueError as err: raise ValueError("part_power not an integer: %s" % err) if not 1 <= parsed_scenario['part_power'] <= 32: raise ValueError("part_power must be between 1 and 32, but was %d" % raw_scenario['part_power']) if 'replicas' not in raw_scenario: raise ValueError("replicas missing") try: parsed_scenario['replicas'] = float(raw_scenario['replicas']) except ValueError as err: raise ValueError("replicas not a float: %s" % err) if parsed_scenario['replicas'] < 1: raise ValueError("replicas must be at least 1, but is %f" % parsed_scenario['replicas']) if 'overload' not in raw_scenario: raise ValueError("overload missing") try: parsed_scenario['overload'] = float(raw_scenario['overload']) except ValueError as err: raise ValueError("overload not a float: %s" % err) if parsed_scenario['overload'] < 0: raise ValueError("overload must be non-negative, but is %f" % parsed_scenario['overload']) if 'random_seed' not in raw_scenario: raise ValueError("random_seed missing") try: parsed_scenario['random_seed'] = int(raw_scenario['random_seed']) except ValueError as err: raise ValueError("replicas not an integer: %s" % err) if 'rounds' not in raw_scenario: raise ValueError("rounds missing") if not isinstance(raw_scenario['rounds'], list): raise ValueError("rounds must be an array") parser_for_command = { 'add': _parse_add_command, 'remove': _parse_remove_command, 'set_weight': _parse_set_weight_command, 'save': _parse_save_command, } parsed_scenario['rounds'] = [] for round_index, raw_round in enumerate(raw_scenario['rounds']): if not isinstance(raw_round, list): raise ValueError("round %d not an array" % round_index) parsed_round = [] for command_index, command in enumerate(raw_round): if command[0] not in parser_for_command: raise ValueError( "Unknown command (round %d, command %d): " "'%s' should be one of %s" % (round_index, command_index, command[0], parser_for_command.keys())) parsed_round.append( parser_for_command[command[0]]( round_index, command_index, command)) parsed_scenario['rounds'].append(parsed_round) return parsed_scenario def run_scenario(scenario): """ Takes a parsed scenario (like from parse_scenario()) and runs it. 
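It builds a RingBuilder with the scenario's part_power and replicas, applies the overload, then for each round applies that round's commands and rebalances repeatedly (calling pretend_min_part_hours_passed() between attempts) until no parts move or the balance settles. A minimal sketch of driving this from Python, assuming a valid scenario file exists on disk (parse_scenario() raises ValueError otherwise)::

    with open('scenario.json') as fh:
        run_scenario(parse_scenario(fh.read()))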
""" seed = scenario['random_seed'] rb = builder.RingBuilder(scenario['part_power'], scenario['replicas'], 1) rb.set_overload(scenario['overload']) command_map = { 'add': rb.add_dev, 'remove': rb.remove_dev, 'set_weight': rb.set_dev_weight, 'save': rb.save, } for round_index, commands in enumerate(scenario['rounds']): print("Round %d" % (round_index + 1)) for command in commands: key = command.pop(0) try: command_f = command_map[key] except KeyError: raise ValueError("unknown command %r" % key) command_f(*command) rebalance_number = 1 parts_moved, old_balance, removed_devs = rb.rebalance(seed=seed) rb.pretend_min_part_hours_passed() print("\tRebalance 1: moved %d parts, balance is %.6f, %d removed " "devs" % (parts_moved, old_balance, removed_devs)) while True: rebalance_number += 1 parts_moved, new_balance, removed_devs = rb.rebalance(seed=seed) rb.pretend_min_part_hours_passed() print("\tRebalance %d: moved %d parts, balance is %.6f, " "%d removed devs" % (rebalance_number, parts_moved, new_balance, removed_devs)) if parts_moved == 0 and removed_devs == 0: break if abs(new_balance - old_balance) < 1 and not ( old_balance == builder.MAX_BALANCE and new_balance == builder.MAX_BALANCE): break old_balance = new_balance def main(argv=None): args = ARG_PARSER.parse_args(argv) try: with open(args.scenario_path) as sfh: scenario_data = sfh.read() except OSError as err: sys.stderr.write("Error opening scenario %s: %s\n" % (args.scenario_path, err)) return 1 try: scenario = parse_scenario(scenario_data) except ValueError as err: sys.stderr.write("Invalid scenario %s: %s\n" % (args.scenario_path, err)) return 1 if not args.check: run_scenario(scenario) return 0 swift-2.17.1/swift/cli/ringbuilder.py0000666000175000017500000015765713435012015017564 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
from __future__ import print_function import logging from collections import defaultdict from errno import EEXIST from itertools import islice from operator import itemgetter from os import mkdir from os.path import basename, abspath, dirname, exists, join as pathjoin from sys import argv as sys_argv, exit, stderr, stdout from textwrap import wrap from time import time from datetime import timedelta import optparse import math from six.moves import zip as izip from six.moves import input from swift.common import exceptions from swift.common.ring import RingBuilder, Ring, RingData from swift.common.ring.builder import MAX_BALANCE from swift.common.ring.utils import validate_args, \ validate_and_normalize_ip, build_dev_from_opts, \ parse_builder_ring_filename_args, parse_search_value, \ parse_search_values_from_opts, parse_change_values_from_opts, \ dispersion_report, parse_add_value from swift.common.utils import lock_parent_directory, is_valid_ipv6 MAJOR_VERSION = 1 MINOR_VERSION = 3 EXIT_SUCCESS = 0 EXIT_WARNING = 1 EXIT_ERROR = 2 global argv, backup_dir, builder, builder_file, ring_file argv = backup_dir = builder = builder_file = ring_file = None def format_device(dev): """ Format a device for display. """ copy_dev = dev.copy() for key in ('ip', 'replication_ip'): if ':' in copy_dev[key]: copy_dev[key] = '[' + copy_dev[key] + ']' return ('d%(id)sr%(region)sz%(zone)s-%(ip)s:%(port)sR' '%(replication_ip)s:%(replication_port)s/%(device)s_' '"%(meta)s"' % copy_dev) def _parse_search_values(argvish): new_cmd_format, opts, args = validate_args(argvish) # We'll either parse the all-in-one-string format or the # --options format, # but not both. If both are specified, raise an error. try: search_values = {} if len(args) > 0: if new_cmd_format or len(args) != 1: print(Commands.search.__doc__.strip()) exit(EXIT_ERROR) search_values = parse_search_value(args[0]) else: search_values = parse_search_values_from_opts(opts) return search_values except ValueError as e: print(e) exit(EXIT_ERROR) def _find_parts(devs): devs = [d['id'] for d in devs] if not devs or not builder._replica2part2dev: return None partition_count = {} for replica in builder._replica2part2dev: for partition, device in enumerate(replica): if device in devs: if partition not in partition_count: partition_count[partition] = 0 partition_count[partition] += 1 # Sort by number of found replicas to keep the output format sorted_partition_count = sorted( partition_count.items(), key=itemgetter(1), reverse=True) return sorted_partition_count def _parse_list_parts_values(argvish): new_cmd_format, opts, args = validate_args(argvish) # We'll either parse the all-in-one-string format or the # --options format, # but not both. If both are specified, raise an error. try: devs = [] if len(args) > 0: if new_cmd_format: print(Commands.list_parts.__doc__.strip()) exit(EXIT_ERROR) for arg in args: devs.extend( builder.search_devs(parse_search_value(arg)) or []) else: devs.extend(builder.search_devs( parse_search_values_from_opts(opts)) or []) return devs except ValueError as e: print(e) exit(EXIT_ERROR) def _parse_add_values(argvish): """ Parse devices to add as specified on the command line. Will exit on error and spew warnings. :returns: array of device dicts """ new_cmd_format, opts, args = validate_args(argvish) # We'll either parse the all-in-one-string format or the # --options format, # but not both. If both are specified, raise an error. 
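# In the positional form the arguments arrive as (device, weight) pairs,
# e.g. an illustrative pair: "r1z2-10.20.30.40:6200/sda" "8000"; the
# izip/islice below walks them two at a time.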
parsed_devs = [] if len(args) > 0: if new_cmd_format or len(args) % 2 != 0: print(Commands.add.__doc__.strip()) exit(EXIT_ERROR) devs_and_weights = izip(islice(args, 0, len(args), 2), islice(args, 1, len(args), 2)) for devstr, weightstr in devs_and_weights: dev_dict = parse_add_value(devstr) if dev_dict['region'] is None: stderr.write('WARNING: No region specified for %s. ' 'Defaulting to region 1.\n' % devstr) dev_dict['region'] = 1 if dev_dict['replication_ip'] is None: dev_dict['replication_ip'] = dev_dict['ip'] if dev_dict['replication_port'] is None: dev_dict['replication_port'] = dev_dict['port'] weight = float(weightstr) if weight < 0: raise ValueError('Invalid weight value: %s' % devstr) dev_dict['weight'] = weight parsed_devs.append(dev_dict) else: parsed_devs.append(build_dev_from_opts(opts)) return parsed_devs def check_devs(devs, input_question, opts, abort_msg): if not devs: print('Search value matched 0 devices.\n' 'The on-disk ring builder is unchanged.') exit(EXIT_ERROR) if len(devs) > 1: print('Matched more than one device:') for dev in devs: print(' %s' % format_device(dev)) if not opts.yes and input(input_question) != 'y': print(abort_msg) exit(EXIT_ERROR) def _set_weight_values(devs, weight, opts): input_question = 'Are you sure you want to update the weight for these ' \ '%s devices? (y/N) ' % len(devs) abort_msg = 'Aborting device modifications' check_devs(devs, input_question, opts, abort_msg) for dev in devs: builder.set_dev_weight(dev['id'], weight) print('%s weight set to %s' % (format_device(dev), dev['weight'])) def _parse_set_weight_values(argvish): new_cmd_format, opts, args = validate_args(argvish) # We'll either parse the all-in-one-string format or the # --options format, # but not both. If both are specified, raise an error. try: if not new_cmd_format: if len(args) % 2 != 0: print(Commands.set_weight.__doc__.strip()) exit(EXIT_ERROR) devs_and_weights = izip(islice(argvish, 0, len(argvish), 2), islice(argvish, 1, len(argvish), 2)) for devstr, weightstr in devs_and_weights: devs = (builder.search_devs( parse_search_value(devstr)) or []) weight = float(weightstr) _set_weight_values(devs, weight, opts) else: if len(args) != 1: print(Commands.set_weight.__doc__.strip()) exit(EXIT_ERROR) devs = (builder.search_devs( parse_search_values_from_opts(opts)) or []) weight = float(args[0]) _set_weight_values(devs, weight, opts) except ValueError as e: print(e) exit(EXIT_ERROR) def _set_info_values(devs, change, opts): input_question = 'Are you sure you want to update the info for these ' \ '%s devices? (y/N) ' % len(devs) abort_msg = 'Aborting device modifications' check_devs(devs, input_question, opts, abort_msg) for dev in devs: orig_dev_string = format_device(dev) test_dev = dict(dev) for key in change: test_dev[key] = change[key] for check_dev in builder.devs: if not check_dev or check_dev['id'] == test_dev['id']: continue if check_dev['ip'] == test_dev['ip'] and \ check_dev['port'] == test_dev['port'] and \ check_dev['device'] == test_dev['device']: print('Device %d already uses %s:%d/%s.' 
% (check_dev['id'], check_dev['ip'], check_dev['port'], check_dev['device'])) exit(EXIT_ERROR) for key in change: dev[key] = change[key] print('Device %s is now %s' % (orig_dev_string, format_device(dev))) def calculate_change_value(change_value, change, v_name, v_name_port): ip = '' if change_value and change_value[0].isdigit(): i = 1 while (i < len(change_value) and change_value[i] in '0123456789.'): i += 1 ip = change_value[:i] change_value = change_value[i:] elif change_value and change_value.startswith('['): i = 1 while i < len(change_value) and change_value[i] != ']': i += 1 i += 1 ip = change_value[:i].lstrip('[').rstrip(']') change_value = change_value[i:] if ip: change[v_name] = validate_and_normalize_ip(ip) if change_value.startswith(':'): i = 1 while i < len(change_value) and change_value[i].isdigit(): i += 1 change[v_name_port] = int(change_value[1:i]) change_value = change_value[i:] return change_value def _parse_set_info_values(argvish): new_cmd_format, opts, args = validate_args(argvish) # We'll either parse the all-in-one-string format or the # --options format, # but not both. If both are specified, raise an error. if not new_cmd_format: if len(args) % 2 != 0: print(Commands.search.__doc__.strip()) exit(EXIT_ERROR) searches_and_changes = izip(islice(argvish, 0, len(argvish), 2), islice(argvish, 1, len(argvish), 2)) for search_value, change_value in searches_and_changes: devs = builder.search_devs(parse_search_value(search_value)) change = {} change_value = calculate_change_value(change_value, change, 'ip', 'port') if change_value.startswith('R'): change_value = change_value[1:] change_value = calculate_change_value(change_value, change, 'replication_ip', 'replication_port') if change_value.startswith('/'): i = 1 while i < len(change_value) and change_value[i] != '_': i += 1 change['device'] = change_value[1:i] change_value = change_value[i:] if change_value.startswith('_'): change['meta'] = change_value[1:] change_value = '' if change_value or not change: raise ValueError('Invalid set info change value: %s' % repr(argvish[1])) _set_info_values(devs, change, opts) else: devs = builder.search_devs(parse_search_values_from_opts(opts)) change = parse_change_values_from_opts(opts) _set_info_values(devs, change, opts) def _parse_remove_values(argvish): new_cmd_format, opts, args = validate_args(argvish) # We'll either parse the all-in-one-string format or the # --options format, # but not both. If both are specified, raise an error. 
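# A positional <search-value> here uses the compact syntax documented on
# parse_search_value, e.g. "d74" to match device id 74 (value shown for
# illustration only).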
try: devs = [] if len(args) > 0: if new_cmd_format: print(Commands.remove.__doc__.strip()) exit(EXIT_ERROR) for arg in args: devs.extend(builder.search_devs( parse_search_value(arg)) or []) else: devs.extend(builder.search_devs( parse_search_values_from_opts(opts))) return (devs, opts) except ValueError as e: print(e) exit(EXIT_ERROR)
def _make_display_device_table(builder): ip_width = 10 port_width = 4 rep_ip_width = 14 rep_port_width = 4 ip_ipv6 = rep_ipv6 = False for dev in builder._iter_devs(): if is_valid_ipv6(dev['ip']): ip_ipv6 = True if is_valid_ipv6(dev['replication_ip']): rep_ipv6 = True ip_width = max(len(dev['ip']), ip_width) rep_ip_width = max(len(dev['replication_ip']), rep_ip_width) port_width = max(len(str(dev['port'])), port_width) rep_port_width = max(len(str(dev['replication_port'])), rep_port_width) if ip_ipv6: ip_width += 2 if rep_ipv6: rep_ip_width += 2 header_line = ('Devices:%5s %6s %4s %' + str(ip_width) + 's:%-' + str(port_width) + 's %' + str(rep_ip_width) + 's:%-' + str(rep_port_width) + 's %5s %6s %10s %7s %5s %s') % ( 'id', 'region', 'zone', 'ip address', 'port', 'replication ip', 'port', 'name', 'weight', 'partitions', 'balance', 'flags', 'meta') def print_dev_f(dev, balance_per_dev=0.00, flags=''): def get_formated_ip(key): value = dev[key] if ':' in value: value = '[%s]' % value return value dev_ip = get_formated_ip('ip') dev_replication_ip = get_formated_ip('replication_ip') format_string = ''.join(['%13d %6d %4d ', '%', str(ip_width), 's:%-', str(port_width), 'd ', '%', str(rep_ip_width), 's', ':%-', str(rep_port_width), 'd %5s %6.02f' ' %10s %7.02f %5s %s']) args = (dev['id'], dev['region'], dev['zone'], dev_ip, dev['port'], dev_replication_ip, dev['replication_port'], dev['device'], dev['weight'], dev['parts'], balance_per_dev, flags, dev['meta']) print(format_string % args) return header_line, print_dev_f
class Commands(object): @staticmethod def unknown(): print('Unknown command: %s' % argv[2]) exit(EXIT_ERROR)
@staticmethod def create(): """ swift-ring-builder <builder_file> create <part_power> <replicas> <min_part_hours> Creates <builder_file> with 2^<part_power> partitions and <replicas>. <min_part_hours> is the number of hours to restrict moving a partition more than once. """ if len(argv) < 6: print(Commands.create.__doc__.strip()) exit(EXIT_ERROR) builder = RingBuilder(int(argv[3]), float(argv[4]), int(argv[5])) backup_dir = pathjoin(dirname(builder_file), 'backups') try: mkdir(backup_dir) except OSError as err: if err.errno != EEXIST: raise builder.save(pathjoin(backup_dir, '%d.' % time() + basename(builder_file))) builder.save(builder_file) exit(EXIT_SUCCESS)
@staticmethod def default(): """ swift-ring-builder <builder_file> Shows information about the ring and the devices within. Output includes a table that describes the report parameters (id, region, port, flags, etc). flags: possible values are 'DEL' and '' DEL - indicates that the device is marked for removal from ring and will be removed in next rebalance. """ try: builder_id = builder.id except AttributeError: builder_id = "(not assigned)" print('%s, build version %d, id %s' % (builder_file, builder.version, builder_id)) balance = 0 ring_empty_error = None regions = len(set(d['region'] for d in builder.devs if d is not None)) zones = len(set((d['region'], d['zone']) for d in builder.devs if d is not None)) dev_count = len([dev for dev in builder.devs if dev is not None]) try: balance = builder.get_balance() except exceptions.EmptyRingError as e: ring_empty_error = str(e) dispersion_trailer = '' if builder.dispersion is None else ( ', %.02f dispersion' % (builder.dispersion)) print('%d partitions, %.6f replicas, %d regions, %d zones, ' '%d devices, %.02f balance%s' % ( builder.parts, builder.replicas, regions, zones, dev_count, balance, dispersion_trailer)) print('The minimum number of hours before a partition can be ' 'reassigned is %s (%s remaining)' % ( builder.min_part_hours, timedelta(seconds=builder.min_part_seconds_left))) print('The overload factor is %0.2f%% (%.6f)' % ( builder.overload * 100, builder.overload)) ring_dict = None builder_dict = builder.get_ring().to_dict() # compare ring file against builder file if not exists(ring_file): print('Ring file %s not found, ' 'probably it hasn\'t been written yet' % ring_file) else: try: ring_dict = RingData.load(ring_file).to_dict() except Exception as exc: print('Ring file %s is invalid: %r' % (ring_file, exc)) else: if builder_dict == ring_dict: print('Ring file %s is up-to-date' % ring_file) else: print('Ring file %s is obsolete' % ring_file) if ring_empty_error: balance_per_dev = defaultdict(int) else: balance_per_dev = builder._build_balance_per_dev() header_line, print_dev_f = _make_display_device_table(builder) print(header_line) for dev in sorted( builder._iter_devs(), key=lambda x: (x['region'], x['zone'], x['ip'], x['device']) ): flags = 'DEL' if dev in builder._remove_devs else '' print_dev_f(dev, balance_per_dev[dev['id']], flags) # Print some helpful info if partition power increase in progress if (builder.next_part_power and builder.next_part_power == (builder.part_power + 1)): print('\nPreparing increase of partition power (%d -> %d)' % ( builder.part_power, builder.next_part_power)) print('Run "swift-object-relinker relink" on all nodes before ' 'moving on to increase_partition_power.') if (builder.next_part_power and builder.part_power == builder.next_part_power): print('\nIncreased partition power (%d -> %d)' % ( builder.part_power, builder.next_part_power)) if builder_dict != ring_dict: print('First run "swift-ring-builder write_ring"' ' now and copy the updated .ring.gz file to all nodes.') print('Run "swift-object-relinker cleanup" on all nodes before ' 'moving on to finish_increase_partition_power.') if ring_empty_error: print(ring_empty_error) exit(EXIT_SUCCESS)
@staticmethod def search(): """ swift-ring-builder <builder_file> search <search-value> or swift-ring-builder <builder_file> search --region <region> --zone <zone> --ip <ip or hostname> --port <port> --replication-ip <r_ip or r_hostname> --replication-port <r_port> --device <device_name> --meta <meta> --weight <weight> Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname and port. Any of the options are optional in both cases. Shows information about matching devices. """ if len(argv) < 4: print(Commands.search.__doc__.strip()) print() print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) devs = builder.search_devs(_parse_search_values(argv[3:])) if not devs: print('No matching devices found') exit(EXIT_ERROR) print('Devices: id region zone ip address port ' 'replication ip replication port name weight partitions ' 'balance meta') weighted_parts = builder.parts * builder.replicas / \ sum(d['weight'] for d in builder.devs if d is not None) for dev in devs: if not dev['weight']: if dev['parts']: balance = MAX_BALANCE else: balance = 0 else: balance = 100.0 * dev['parts'] / \ (dev['weight'] * weighted_parts) - 100.0 print(' %5d %7d %5d %15s %5d %15s %17d %9s %6.02f %10s ' '%7.02f %s' % (dev['id'], dev['region'], dev['zone'], dev['ip'], dev['port'], dev['replication_ip'], dev['replication_port'], dev['device'], dev['weight'], dev['parts'], balance, dev['meta'])) exit(EXIT_SUCCESS)
@staticmethod def list_parts(): """ swift-ring-builder <builder_file> list_parts <search-value> [<search-value>] .. or swift-ring-builder <builder_file> list_parts --region <region> --zone <zone> --ip <ip or hostname> --port <port> --replication-ip <r_ip or r_hostname> --replication-port <r_port> --device <device_name> --meta <meta> --weight <weight> Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname and port. Any of the options are optional in both cases. Returns a 2 column list of all the partitions that are assigned to any of the devices matching the search values given. The first column is the assigned partition number and the second column is the number of device matches for that partition. The list is ordered from most number of matches to least. If there are a lot of devices to match against, this command could take a while to run. """ if len(argv) < 4: print(Commands.list_parts.__doc__.strip()) print() print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) if not builder._replica2part2dev: print('Specified builder file \"%s\" is not rebalanced yet. ' 'Please rebalance first.' % builder_file) exit(EXIT_ERROR) devs = _parse_list_parts_values(argv[3:]) if not devs: print('No matching devices found') exit(EXIT_ERROR) sorted_partition_count = _find_parts(devs) if not sorted_partition_count: print('No matching devices found') exit(EXIT_ERROR) print('Partition Matches') for partition, count in sorted_partition_count: print('%9d %7d' % (partition, count)) exit(EXIT_SUCCESS)
@staticmethod def add(): """ swift-ring-builder <builder_file> add [r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta> <weight> [[r<region>]z<zone>-<ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta> <weight>] ... Where <r_ip> and <r_port> are replication ip and port. or swift-ring-builder <builder_file> add --region <region> --zone <zone> --ip <ip or hostname> --port <port> [--replication-ip <r_ip or r_hostname>] [--replication-port <r_port>] --device <device_name> --weight <weight> [--meta <meta>] Adds devices to the ring with the given information. No partitions will be assigned to the new device until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. """ if len(argv) < 5: print(Commands.add.__doc__.strip()) exit(EXIT_ERROR) if builder.next_part_power: print('Partition power increase in progress. You need ') print('to finish the increase first before adding devices.') exit(EXIT_WARNING) try: for new_dev in _parse_add_values(argv[3:]): for dev in builder.devs: if dev is None: continue if dev['ip'] == new_dev['ip'] and \ dev['port'] == new_dev['port'] and \ dev['device'] == new_dev['device']: print('Device %d already uses %s:%d/%s.'
% (dev['id'], dev['ip'], dev['port'], dev['device'])) print("The on-disk ring builder is unchanged.\n") exit(EXIT_ERROR) dev_id = builder.add_dev(new_dev) print('Device %s with %s weight got id %s' % (format_device(new_dev), new_dev['weight'], dev_id)) except ValueError as err: print(err) print('The on-disk ring builder is unchanged.') exit(EXIT_ERROR) builder.save(builder_file) exit(EXIT_SUCCESS)
@staticmethod def set_weight(): """ swift-ring-builder <builder_file> set_weight <search-value> <new_weight> [<search-value> <new_weight>] ... [--yes] or swift-ring-builder <builder_file> set_weight --region <region> --zone <zone> --ip <ip or hostname> --port <port> --replication-ip <r_ip or r_hostname> --replication-port <r_port> --device <device_name> --meta <meta> --weight <weight> <new_weight> [--yes] Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname and port. <weight> and <new_weight> are the search weight and new weight values respectively. Any of the options are optional in both cases. Resets the devices' weights. No partitions will be reassigned to or from the device until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. Option --yes assumes a yes response to all questions. """ # if len(argv) < 5 or len(argv) % 2 != 1: if len(argv) < 5: print(Commands.set_weight.__doc__.strip()) print() print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) _parse_set_weight_values(argv[3:]) builder.save(builder_file) exit(EXIT_SUCCESS)
@staticmethod def set_info(): """ swift-ring-builder <builder_file> set_info <search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta> [<search-value> <ip>:<port>[R<r_ip>:<r_port>]/<device_name>_<meta>] ... [--yes] or swift-ring-builder <builder_file> set_info --ip <ip or hostname> --port <port> --replication-ip <r_ip or r_hostname> --replication-port <r_port> --device <device_name> --meta <meta> --change-ip <ip or hostname> --change-port <port> --change-replication-ip <r_ip or r_hostname> --change-replication-port <r_port> --change-device <device_name> --change-meta <meta> [--yes] Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname and port. Any of the options are optional in both cases. For each search-value, resets the matched device's information. This information isn't used to assign partitions, so you can use 'write_ring' afterward to rewrite the current ring with the newer device information. Any of the parts are optional in the final <ip>:<port>/<device_name>_<meta> parameter; just give what you want to change. For instance set_info d74 _"snet: 5.6.7.8" would just update the meta data for device id 74. Option --yes assumes a yes response to all questions. """ if len(argv) < 5: print(Commands.set_info.__doc__.strip()) print() print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) try: _parse_set_info_values(argv[3:]) except ValueError as err: print(err) exit(EXIT_ERROR) builder.save(builder_file) exit(EXIT_SUCCESS)
@staticmethod def remove(): """ swift-ring-builder <builder_file> remove <search-value> [search-value ...] [--yes] or swift-ring-builder <builder_file> remove --region <region> --zone <zone> --ip <ip or hostname> --port <port> --replication-ip <r_ip or r_hostname> --replication-port <r_port> --device <device_name> --meta <meta> --weight <weight> [--yes] Where <r_ip>, <r_hostname> and <r_port> are replication ip, hostname and port. Any of the options are optional in both cases. Removes the device(s) from the ring. This should normally just be used for a device that has failed. For a device you wish to decommission, it's best to set its weight to 0, wait for it to drain all its data, then use this remove command. This will not take effect until after running 'rebalance'. This is so you can make multiple device changes and rebalance them all just once. Option --yes assumes a yes response to all questions. """ if len(argv) < 4: print(Commands.remove.__doc__.strip()) print() print(parse_search_value.__doc__.strip()) exit(EXIT_ERROR) if builder.next_part_power: print('Partition power increase in progress. You need ')
print('to finish the increase first before removing devices.') exit(EXIT_WARNING) devs, opts = _parse_remove_values(argv[3:]) input_question = 'Are you sure you want to remove these ' \ '%s devices? (y/N) ' % len(devs) abort_msg = 'Aborting device removals' check_devs(devs, input_question, opts, abort_msg) for dev in devs: try: builder.remove_dev(dev['id']) except exceptions.RingBuilderError as e: print('-' * 79) print( 'An error occurred while removing device with id %d\n' 'This usually means that you attempted to remove\n' 'the last device in a ring. If this is the case,\n' 'consider creating a new ring instead.\n' 'The on-disk ring builder is unchanged.\n' 'Original exception message: %s' % (dev['id'], e)) print('-' * 79) exit(EXIT_ERROR) print('%s marked for removal and will ' 'be removed next rebalance.' % format_device(dev)) builder.save(builder_file) exit(EXIT_SUCCESS)
@staticmethod def rebalance(): """ swift-ring-builder <builder_file> rebalance [options] Attempts to rebalance the ring by reassigning partitions that haven't been recently reassigned. """ usage = Commands.rebalance.__doc__.strip() parser = optparse.OptionParser(usage) parser.add_option('-f', '--force', action='store_true', help='Force a rebalanced ring to save even ' 'if < 1% of parts changed') parser.add_option('-s', '--seed', help="seed to use for rebalance") parser.add_option('-d', '--debug', action='store_true', help="print debug information") options, args = parser.parse_args(argv) def get_seed(index): if options.seed: return options.seed try: return args[index] except IndexError: pass if options.debug: logger = logging.getLogger("swift.ring.builder") logger.disabled = False logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(stdout) formatter = logging.Formatter("%(levelname)s: %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) if builder.next_part_power: print('Partition power increase in progress.') print('You need to finish the increase first before rebalancing.') exit(EXIT_WARNING) devs_changed = builder.devs_changed min_part_seconds_left = builder.min_part_seconds_left try: last_balance = builder.get_balance() last_dispersion = builder.dispersion parts, balance, removed_devs = builder.rebalance(seed=get_seed(3)) dispersion = builder.dispersion except exceptions.RingBuilderError as e: print('-' * 79) print("An error has occurred during ring validation. Common\n" "causes of failure are rings that are empty or do not\n" "have enough devices to accommodate the replica count.\n" "Original exception message:\n %s" % (e,)) print('-' * 79) exit(EXIT_ERROR) if not (parts or options.force or removed_devs): print('No partitions could be reassigned.') if min_part_seconds_left > 0: print('The time between rebalances must be at least ' 'min_part_hours: %s hours (%s remaining)' % ( builder.min_part_hours, timedelta(seconds=builder.min_part_seconds_left))) else: print('There is no need to do so at this time') exit(EXIT_WARNING) # If a device's weight was set to zero, the balance is reported as the # special value (MAX_BALANCE) until the zero-weighted device has given # up all of its partitions, so we cannot rely on the balance alone # having changed; we also need to check whether balance or # last_balance is that special value. be_cowardly = True if options.force: # User said save it, so we save it. be_cowardly = False elif devs_changed: # We must save if a device changed; this could be something like # a changed IP address.
be_cowardly = False else: # If balance or dispersion changed (presumably improved), then # we should save to get the improvement. balance_changed = ( abs(last_balance - balance) >= 1 or (last_balance == MAX_BALANCE and balance == MAX_BALANCE)) dispersion_changed = last_dispersion is None or ( abs(last_dispersion - dispersion) >= 1) if balance_changed or dispersion_changed: be_cowardly = False if be_cowardly: print('Cowardly refusing to save rebalance as it did not change ' 'at least 1%.') exit(EXIT_WARNING) try: builder.validate() except exceptions.RingValidationError as e: print('-' * 79) print("An error has occurred during ring validation. Common\n" "causes of failure are rings that are empty or do not\n" "have enough devices to accommodate the replica count.\n" "Original exception message:\n %s" % (e,)) print('-' * 79) exit(EXIT_ERROR) print('Reassigned %d (%.02f%%) partitions. ' 'Balance is now %.02f. ' 'Dispersion is now %.02f' % ( parts, 100.0 * parts / builder.parts, balance, builder.dispersion)) status = EXIT_SUCCESS if builder.dispersion > 0: print('-' * 79) print( 'NOTE: Dispersion of %.06f indicates some parts are not\n' ' optimally dispersed.\n\n' ' You may want to adjust some device weights, increase\n' ' the overload or review the dispersion report.' % builder.dispersion) status = EXIT_WARNING print('-' * 79) elif balance > 5 and balance / 100.0 > builder.overload: print('-' * 79) print('NOTE: Balance of %.02f indicates you should push this ' % balance) print(' ring, wait at least %d hours, and rebalance/repush.' % builder.min_part_hours) print('-' * 79) status = EXIT_WARNING ts = time() builder.get_ring().save( pathjoin(backup_dir, '%d.' % ts + basename(ring_file))) builder.save(pathjoin(backup_dir, '%d.' % ts + basename(builder_file))) builder.get_ring().save(ring_file) builder.save(builder_file) exit(status) @staticmethod def dispersion(): """ swift-ring-builder dispersion [options] Output report on dispersion. --recalculate option will rebuild cached dispersion info and save builder --verbose option will display dispersion graph broken down by tier You can filter which tiers are evaluated to drill down using a regex in the optional search_filter argument. i.e. swift-ring-builder dispersion "r\d+z\d+$" -v ... would only display rows for the zone tiers swift-ring-builder dispersion ".*\-[^/]*$" -v ... would only display rows for the server tiers The reports columns are: Tier : the name of the tier parts : the total number of partitions with assignment in the tier % : the percentage of parts in the tier with replicas over assigned max : maximum replicas a part should have assigned at the tier 0 - N : the number of parts with that many replicas assigned e.g. Tier: parts % max 0 1 2 3 r1z1 1022 79.45 1 2 210 784 28 r1z1 has 1022 total parts assigned, 79% of them have more than the recommend max replica count of 1 assigned. Only 2 parts in the ring are *not* assigned in this tier (0 replica count), 210 parts have the recommend replica count of 1, 784 have 2 replicas, and 28 sadly have all three replicas in this tier. """ status = EXIT_SUCCESS if not builder._replica2part2dev: print('Specified builder file \"%s\" is not rebalanced yet. ' 'Please rebalance first.' 
% builder_file) exit(EXIT_ERROR) usage = Commands.dispersion.__doc__.strip() parser = optparse.OptionParser(usage) parser.add_option('--recalculate', action='store_true', help='Rebuild cached dispersion info and save') parser.add_option('-v', '--verbose', action='store_true', help='Display dispersion report for tiers') options, args = parser.parse_args(argv) if args[3:]: search_filter = args[3] else: search_filter = None orig_version = builder.version report = dispersion_report(builder, search_filter=search_filter, verbose=options.verbose, recalculate=options.recalculate) if builder.version != orig_version: # we've already done the work, better go ahead and save it! builder.save(builder_file) print('Dispersion is %.06f, Balance is %.06f, Overload is %0.2f%%' % ( builder.dispersion, builder.get_balance(), builder.overload * 100)) print('Required overload is %.6f%%' % ( builder.get_required_overload() * 100)) if report['worst_tier']: status = EXIT_WARNING print('Worst tier is %.06f (%s)' % (report['max_dispersion'], report['worst_tier'])) if report['graph']: replica_range = range(int(math.ceil(builder.replicas + 1))) part_count_width = '%%%ds' % max(len(str(builder.parts)), 5) replica_counts_tmpl = ' '.join(part_count_width for i in replica_range) tiers = (tier for tier, _junk in report['graph']) tier_width = max(max(map(len, tiers)), 30) header_line = ('%-' + str(tier_width) + 's ' + part_count_width + ' %6s %6s ' + replica_counts_tmpl) % tuple( ['Tier', 'Parts', '%', 'Max'] + replica_range) underline = '-' * len(header_line) print(underline) print(header_line) print(underline) for tier_name, dispersion in report['graph']: replica_counts_repr = replica_counts_tmpl % tuple( dispersion['replicas']) template = ''.join([ '%-', str(tier_width), 's ', part_count_width, ' %6.02f %6d %s', ]) args = ( tier_name, dispersion['placed_parts'], dispersion['dispersion'], dispersion['max_replicas'], replica_counts_repr, ) print(template % args) exit(status) @staticmethod def validate(): """ swift-ring-builder validate Just runs the validation routines on the ring. """ builder.validate() exit(EXIT_SUCCESS) @staticmethod def write_ring(): """ swift-ring-builder write_ring Just rewrites the distributable ring file. This is done automatically after a successful rebalance, so really this is only useful after one or more 'set_info' calls when no rebalance is needed but you want to send out the new device information. """ if not builder.devs: print('Unable to write empty ring.') exit(EXIT_ERROR) ring_data = builder.get_ring() if not ring_data._replica2part2dev_id: if ring_data.devs: print('Warning: Writing a ring with no partition ' 'assignments but with devices; did you forget to run ' '"rebalance"?') ring_data.save( pathjoin(backup_dir, '%d.' % time() + basename(ring_file))) ring_data.save(ring_file) exit(EXIT_SUCCESS) @staticmethod def write_builder(): """ swift-ring-builder write_builder [min_part_hours] Recreate a builder from a ring file (lossy) if you lost your builder backups. (Protip: don't lose your builder backups). [min_part_hours] is one of those numbers lost to the builder, you can change it with set_min_part_hours. 
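
    An illustrative invocation (editor's sketch; the file names are made up):

        swift-ring-builder object.ring.gz write_builder 24

    This recreates object.builder next to object.ring.gz, supplying 24 as
    the min_part_hours value that the ring file itself cannot preserve.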
""" if exists(builder_file): print('Cowardly refusing to overwrite existing ' 'Ring Builder file: %s' % builder_file) exit(EXIT_ERROR) if len(argv) > 3: min_part_hours = int(argv[3]) else: stderr.write("WARNING: default min_part_hours may not match " "the value in the lost builder.\n") min_part_hours = 24 ring = Ring(ring_file) for dev in ring.devs: if dev is None: continue dev.update({ 'parts': 0, 'parts_wanted': 0, }) builder_dict = { 'part_power': 32 - ring._part_shift, 'replicas': float(ring.replica_count), 'min_part_hours': min_part_hours, 'parts': ring.partition_count, 'devs': ring.devs, 'devs_changed': False, 'version': 0, '_replica2part2dev': ring._replica2part2dev_id, '_last_part_moves_epoch': None, '_last_part_moves': None, '_last_part_gather_start': 0, '_remove_devs': [], } builder = RingBuilder.from_dict(builder_dict) for parts in builder._replica2part2dev: for dev_id in parts: builder.devs[dev_id]['parts'] += 1 builder.save(builder_file) @staticmethod def pretend_min_part_hours_passed(): """ swift-ring-builder pretend_min_part_hours_passed Resets the clock on the last time a rebalance happened, thus circumventing the min_part_hours check. ***************************** USE THIS WITH EXTREME CAUTION ***************************** If you run this command and deploy rebalanced rings before a replication pass completes, you may introduce unavailability in your cluster. This has an end-user impact. """ builder.pretend_min_part_hours_passed() builder.save(builder_file) exit(EXIT_SUCCESS) @staticmethod def set_min_part_hours(): """ swift-ring-builder set_min_part_hours Changes the to the given . This should be set to however long a full replication/update cycle takes. We're working on a way to determine this more easily than scanning logs. """ if len(argv) < 4: print(Commands.set_min_part_hours.__doc__.strip()) exit(EXIT_ERROR) builder.change_min_part_hours(int(argv[3])) print('The minimum number of hours before a partition can be ' 'reassigned is now set to %s' % argv[3]) builder.save(builder_file) exit(EXIT_SUCCESS) @staticmethod def set_replicas(): """ swift-ring-builder set_replicas Changes the replica count to the given . may be a floating-point value, in which case some partitions will have floor() replicas and some will have ceiling() in the correct proportions. A rebalance is needed to make the change take effect. """ if len(argv) < 4: print(Commands.set_replicas.__doc__.strip()) exit(EXIT_ERROR) new_replicas = argv[3] try: new_replicas = float(new_replicas) except ValueError: print(Commands.set_replicas.__doc__.strip()) print("\"%s\" is not a valid number." % new_replicas) exit(EXIT_ERROR) if new_replicas < 1: print("Replica count must be at least 1.") exit(EXIT_ERROR) builder.set_replicas(new_replicas) print('The replica count is now %.6f.' % builder.replicas) print('The change will take effect after the next rebalance.') builder.save(builder_file) exit(EXIT_SUCCESS) @staticmethod def set_overload(): """ swift-ring-builder set_overload [%] Changes the overload factor to the given . A rebalance is needed to make the change take effect. """ if len(argv) < 4: print(Commands.set_overload.__doc__.strip()) exit(EXIT_ERROR) new_overload = argv[3] if new_overload.endswith('%'): percent = True new_overload = new_overload.rstrip('%') else: percent = False try: new_overload = float(new_overload) except ValueError: print(Commands.set_overload.__doc__.strip()) print("%r is not a valid number." 
% new_overload) exit(EXIT_ERROR) if percent: new_overload *= 0.01 if new_overload < 0: print("Overload must be non-negative.") exit(EXIT_ERROR) if new_overload > 1 and not percent: print("!?! Warning overload is greater than 100% !?!") status = EXIT_WARNING else: status = EXIT_SUCCESS builder.set_overload(new_overload) print('The overload factor is now %0.2f%% (%.6f)' % ( builder.overload * 100, builder.overload)) print('The change will take effect after the next rebalance.') builder.save(builder_file) exit(status) @staticmethod def prepare_increase_partition_power(): """ swift-ring-builder prepare_increase_partition_power Prepare the ring to increase the partition power by one. A write_ring command is needed to make the change take effect. Once the updated rings have been deployed to all servers you need to run the swift-object-relinker tool to relink existing data. ***************************** USE THIS WITH EXTREME CAUTION ***************************** If you increase the partition power and deploy changed rings, you may introduce unavailability in your cluster. This has an end-user impact. Make sure you execute required operations to increase the partition power accurately. """ if len(argv) < 3: print(Commands.prepare_increase_partition_power.__doc__.strip()) exit(EXIT_ERROR) if "object" not in basename(builder_file): print( 'Partition power increase is only supported for object rings.') exit(EXIT_ERROR) if not builder.prepare_increase_partition_power(): print('Ring is already prepared for partition power increase.') exit(EXIT_ERROR) builder.save(builder_file) print('The next partition power is now %d.' % builder.next_part_power) print('The change will take effect after the next write_ring.') print('Ensure your proxy-servers, object-replicators and ') print('reconstructors are using the changed rings and relink ') print('(using swift-object-relinker) your existing data') print('before the partition power increase') exit(EXIT_SUCCESS) @staticmethod def increase_partition_power(): """ swift-ring-builder increase_partition_power Increases the partition power by one. Needs to be run after prepare_increase_partition_power has been run and all existing data has been relinked using the swift-object-relinker tool. A write_ring command is needed to make the change take effect. Once the updated rings have been deployed to all servers you need to run the swift-object-relinker tool to cleanup old data. ***************************** USE THIS WITH EXTREME CAUTION ***************************** If you increase the partition power and deploy changed rings, you may introduce unavailability in your cluster. This has an end-user impact. Make sure you execute required operations to increase the partition power accurately. """ if len(argv) < 3: print(Commands.increase_partition_power.__doc__.strip()) exit(EXIT_ERROR) if builder.increase_partition_power(): print('The partition power is now %d.' % builder.part_power) print('The change will take effect after the next write_ring.') builder._update_last_part_moves() builder.save(builder_file) exit(EXIT_SUCCESS) else: print('Ring partition power cannot be increased. Either the ring') print('was not prepared yet, or this operation has already run.') exit(EXIT_ERROR) @staticmethod def cancel_increase_partition_power(): """ swift-ring-builder cancel_increase_partition_power Cancel the increase of the partition power. A write_ring command is needed to make the change take effect. 
Once the updated rings have been deployed to all servers you need to run the swift-object-relinker tool to cleanup unneeded links. ***************************** USE THIS WITH EXTREME CAUTION ***************************** If you increase the partition power and deploy changed rings, you may introduce unavailability in your cluster. This has an end-user impact. Make sure you execute required operations to increase the partition power accurately. """ if len(argv) < 3: print(Commands.cancel_increase_partition_power.__doc__.strip()) exit(EXIT_ERROR) if not builder.cancel_increase_partition_power(): print('Ring partition power increase cannot be canceled.') exit(EXIT_ERROR) builder.save(builder_file) print('The next partition power is now %d.' % builder.next_part_power) print('The change will take effect after the next write_ring.') print('Ensure your object-servers are using the changed rings and') print('cleanup (using swift-object-relinker) the hard links') exit(EXIT_SUCCESS) @staticmethod def finish_increase_partition_power(): """ swift-ring-builder finish_increase_partition_power Finally removes the next_part_power flag. Has to be run after the swift-object-relinker tool has been used to cleanup old existing data. A write_ring command is needed to make the change take effect. ***************************** USE THIS WITH EXTREME CAUTION ***************************** If you increase the partition power and deploy changed rings, you may introduce unavailability in your cluster. This has an end-user impact. Make sure you execute required operations to increase the partition power accurately. """ if len(argv) < 3: print(Commands.finish_increase_partition_power.__doc__.strip()) exit(EXIT_ERROR) if not builder.finish_increase_partition_power(): print('Ring partition power increase cannot be finished.') exit(EXIT_ERROR) print('The change will take effect after the next write_ring.') builder.save(builder_file) exit(EXIT_SUCCESS) def main(arguments=None): global argv, backup_dir, builder, builder_file, ring_file if arguments is not None: argv = arguments else: argv = sys_argv if len(argv) < 2: print("swift-ring-builder %(MAJOR_VERSION)s.%(MINOR_VERSION)s\n" % globals()) print(Commands.default.__doc__.strip()) print() cmds = [c for c in dir(Commands) if getattr(Commands, c).__doc__ and not c.startswith('_') and c != 'default'] cmds.sort() for cmd in cmds: print(getattr(Commands, cmd).__doc__.strip()) print() print(parse_search_value.__doc__.strip()) print() for line in wrap(' '.join(cmds), 79, initial_indent='Quick list: ', subsequent_indent=' '): print(line) print('Exit codes: 0 = operation successful\n' ' 1 = operation completed with warnings\n' ' 2 = error') exit(EXIT_SUCCESS) builder_file, ring_file = parse_builder_ring_filename_args(argv) if builder_file != argv[1]: print('Note: using %s instead of %s as builder file' % ( builder_file, argv[1])) try: builder = RingBuilder.load(builder_file) except exceptions.UnPicklingError as e: print(e) exit(EXIT_ERROR) except (exceptions.FileNotFoundError, exceptions.PermissionError) as e: if len(argv) < 3 or argv[2] not in('create', 'write_builder'): print(e) exit(EXIT_ERROR) except Exception as e: print('Problem occurred while reading builder file: %s. 
              '%s' % (builder_file, e))
        exit(EXIT_ERROR)

    backup_dir = pathjoin(dirname(builder_file), 'backups')
    try:
        mkdir(backup_dir)
    except OSError as err:
        if err.errno != EEXIST:
            raise

    if len(argv) == 2:
        command = "default"
    else:
        command = argv[2]
    if argv[0].endswith('-safe'):
        try:
            with lock_parent_directory(abspath(builder_file), 15):
                getattr(Commands, command, Commands.unknown)()
        except exceptions.LockTimeout:
            print("Ring/builder dir currently locked.")
            exit(2)
    else:
        getattr(Commands, command, Commands.unknown)()
swift-2.17.1/swift/cli/__init__.py0000666000175000017500000000000013435012003016753 0ustar zuulzuul00000000000000swift-2.17.1/swift/cli/form_signature.py0000666000175000017500000001233413435012015020260 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Script for generating a form signature for use with FormPost middleware.
"""
from __future__ import print_function
import hmac
from hashlib import sha1
from os.path import basename
from time import time


def main(argv):
    if len(argv) != 7:
        prog = basename(argv[0])
        print('Syntax: %s <path> <redirect> <max_file_size> '
              '<max_file_count> <seconds> <key>' % prog)
        print()
        print('Where:')
        print('  <path>            The prefix to use for form uploaded')
        print('                    objects. For example:')
        print('                    /v1/account/container/object_prefix_ would')
        print('                    ensure all form uploads have that path')
        print('                    prepended to the browser-given file name.')
        print('  <redirect>        The URL to redirect the browser to after')
        print('                    the uploads have completed.')
        print('  <max_file_size>   The maximum file size per file uploaded.')
        print('  <max_file_count>  The maximum number of uploaded files')
        print('                    allowed.')
        print('  <seconds>         The number of seconds from now to allow')
        print('                    the form post to begin.')
        print('  <key>             The X-Account-Meta-Temp-URL-Key for the')
        print('                    account.')
        print()
        print('Example output:')
        print('    Expires: 1323842228')
        print('  Signature: 18de97e47345a82c4dbfb3b06a640dbb')
        print()
        print('Sample form:')
        print()
        print('NOTE: the <form> tag\'s "action" attribute does not contain '
              'the Swift cluster\'s hostname.')
        print('You should manually add it before using the form.')
        print()
        print('<form action="<path>" method="POST" '
              'enctype="multipart/form-data">')
        print('  <input type="hidden" name="redirect" value="<redirect>" />')
        print('  ... more HTML ...')
        print('  <input type="submit" />')
        print('</form>')
        return 1
    path, redirect, max_file_size, max_file_count, seconds, key = argv[1:]
    try:
        max_file_size = int(max_file_size)
    except ValueError:
        max_file_size = -1
    if max_file_size < 0:
        print('Please use a <max_file_size> value greater than or equal '
              'to 0.')
        return 1
    try:
        max_file_count = int(max_file_count)
    except ValueError:
        max_file_count = 0
    if max_file_count < 1:
        print('Please use a positive <max_file_count> value.')
        return 1
    try:
        expires = int(time() + int(seconds))
    except ValueError:
        expires = 0
    if expires < 1:
        print('Please use a positive <seconds> value.')
        return 1
    parts = path.split('/', 4)
    # Must be four parts, ['', 'v1', 'a', 'c'], must be a v1 request, have
    # account and container values, and optionally have an object prefix.
    if len(parts) < 4 or parts[0] or parts[1] != 'v1' or not parts[2] or \
            not parts[3]:
        print('<path> must point to a container at least.')
        print('For example: /v1/account/container')
        print('         Or: /v1/account/container/object_prefix')
        return 1
    sig = hmac.new(key, '%s\n%s\n%s\n%s\n%s' % (path, redirect, max_file_size,
                                                max_file_count, expires),
                   sha1).hexdigest()
    print('  Expires:', expires)
    print('Signature:', sig)
    print('')
    print('Sample form:\n')
    print('NOTE: the <form> tag\'s "action" attribute does not '
          'contain the Swift cluster\'s hostname.')
    print('You should manually add it before using the form.\n')
    print('<form action="%s" method="POST" '
          'enctype="multipart/form-data">' % path)
    if redirect:
        print('  <input type="hidden" name="redirect" value="%s" />'
              % redirect)
    print('  <input type="hidden" name="max_file_size" value="%d" />'
          % max_file_size)
    print('  <input type="hidden" name="max_file_count" value="%d" />'
          % max_file_count)
    print('  <input type="hidden" name="expires" value="%d" />' % expires)
    print('  <input type="hidden" name="signature" value="%s" />' % sig)
    print('  <!-- This signature allows for at most %d files, -->'
          % max_file_count)
    print('  <!-- but it may also have any smaller number. -->')
    print('  <!-- Remove file inputs as needed. -->')
    for i in range(max_file_count):
        print('  <input type="file" name="file%d" />' % i)
        print('  <br />')
    print('  <input type="submit" />')
    print('</form>
') return 0 swift-2.17.1/swift/cli/info.py0000666000175000017500000005230713435012015016173 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy # of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import print_function import itertools import os import sqlite3 from hashlib import md5 from six.moves import urllib from swift.common.utils import hash_path, storage_directory, \ Timestamp, is_valid_ipv6 from swift.common.ring import Ring from swift.common.request_helpers import is_sys_meta, is_user_meta, \ strip_sys_meta_prefix, strip_user_meta_prefix, \ is_object_transient_sysmeta from swift.account.backend import AccountBroker, DATADIR as ABDATADIR from swift.container.backend import ContainerBroker, DATADIR as CBDATADIR from swift.obj.diskfile import get_data_dir, read_metadata, DATADIR_BASE, \ extract_policy from swift.common.storage_policy import POLICIES class InfoSystemExit(Exception): """ Indicates to the caller that a sys.exit(1) should be performed. """ pass def parse_get_node_args(options, args): """ Parse the get_nodes commandline args :returns: a tuple, (ring_path, args) """ ring_path = None if options.policy_name: if POLICIES.get_by_name(options.policy_name) is None: raise InfoSystemExit('No policy named %r' % options.policy_name) elif args and args[0].endswith('.ring.gz'): if os.path.exists(args[0]): ring_path = args.pop(0) else: raise InfoSystemExit('Ring file does not exist') if len(args) == 1: args = args[0].strip('/').split('/', 2) if not ring_path and not options.policy_name: raise InfoSystemExit('Need to specify policy_name or ') if not (args or options.partition): raise InfoSystemExit('No target specified') if len(args) > 3: raise InfoSystemExit('Invalid arguments') return ring_path, args def curl_head_command(ip, port, device, part, target, policy_index): """ Provide a string that is a well formatted curl command to HEAD an object on a storage node. :param ip: the ip of the node :param port: the port of the node :param device: the device of the node :param target: the path of the target resource :param policy_index: the policy_index of the target resource (can be None) :returns: a string, a well formatted curl command """ if is_valid_ipv6(ip): formatted_ip = '[%s]' % ip else: formatted_ip = ip cmd = 'curl -g -I -XHEAD "http://%s:%s/%s/%s/%s"' % ( formatted_ip, port, device, part, urllib.parse.quote(target)) if policy_index is not None: cmd += ' -H "%s: %s"' % ('X-Backend-Storage-Policy-Index', policy_index) return cmd def print_ring_locations(ring, datadir, account, container=None, obj=None, tpart=None, all_nodes=False, policy_index=None): """ print out ring locations of specified type :param ring: ring instance :param datadir: name of directory where things are stored. Usually one of "accounts", "containers", "objects", or "objects-N". :param account: account name :param container: container name :param obj: object name :param tpart: target partition in ring :param all_nodes: include all handoff nodes. If false, only the N primary nodes and first N handoffs will be printed. 
:param policy_index: include policy_index in curl headers """ if not ring: raise ValueError("No ring specified") if not datadir: raise ValueError("No datadir specified") if tpart is None and not account: raise ValueError("No partition or account/container/object specified") if not account and (container or obj): raise ValueError("Container/object specified without account") if obj and not container: raise ValueError('Object specified without container') if obj: target = '%s/%s/%s' % (account, container, obj) elif container: target = '%s/%s' % (account, container) else: target = '%s' % (account) if tpart: part = int(tpart) else: part = ring.get_part(account, container, obj) primary_nodes = ring.get_part_nodes(part) handoff_nodes = ring.get_more_nodes(part) if not all_nodes: handoff_nodes = itertools.islice(handoff_nodes, len(primary_nodes)) handoff_nodes = list(handoff_nodes) if account and not tpart: path_hash = hash_path(account, container, obj) else: path_hash = None print('Partition\t%s' % part) print('Hash \t%s\n' % path_hash) for node in primary_nodes: print('Server:Port Device\t%s:%s %s' % (node['ip'], node['port'], node['device'])) for node in handoff_nodes: print('Server:Port Device\t%s:%s %s\t [Handoff]' % ( node['ip'], node['port'], node['device'])) print("\n") for node in primary_nodes: cmd = curl_head_command(node['ip'], node['port'], node['device'], part, target, policy_index) print(cmd) for node in handoff_nodes: cmd = curl_head_command(node['ip'], node['port'], node['device'], part, target, policy_index) cmd += ' # [Handoff]' print(cmd) print("\n\nUse your own device location of servers:") print("such as \"export DEVICE=/srv/node\"") if path_hash: for node in primary_nodes: print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s"' % (node['ip'], node['device'], storage_directory(datadir, part, path_hash))) for node in handoff_nodes: print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s" # [Handoff]' % (node['ip'], node['device'], storage_directory(datadir, part, path_hash))) else: for node in primary_nodes: print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' % (node['ip'], node['device'], datadir, part)) for node in handoff_nodes: print('ssh %s "ls -lah ${DEVICE:-/srv/node*}/%s/%s/%d"' ' # [Handoff]' % (node['ip'], node['device'], datadir, part)) print('\nnote: `/srv/node*` is used as default value of `devices`, the ' 'real value is set in the config file on each storage node.') def print_db_info_metadata(db_type, info, metadata): """ print out data base info/metadata based on its type :param db_type: database type, account or container :param info: dict of data base info :param metadata: dict of data base metadata """ if info is None: raise ValueError('DB info is None') if db_type not in ['container', 'account']: raise ValueError('Wrong DB type') try: account = info['account'] container = None if db_type == 'container': container = info['container'] path = '/%s/%s' % (account, container) else: path = '/%s' % account print('Path: %s' % path) print(' Account: %s' % account) if db_type == 'container': print(' Container: %s' % container) path_hash = hash_path(account, container) if db_type == 'container': print(' Container Hash: %s' % path_hash) else: print(' Account Hash: %s' % path_hash) print('Metadata:') print(' Created at: %s (%s)' % (Timestamp(info['created_at']).isoformat, info['created_at'])) print(' Put Timestamp: %s (%s)' % (Timestamp(info['put_timestamp']).isoformat, info['put_timestamp'])) print(' Delete Timestamp: %s (%s)' % 
(Timestamp(info['delete_timestamp']).isoformat, info['delete_timestamp'])) print(' Status Timestamp: %s (%s)' % (Timestamp(info['status_changed_at']).isoformat, info['status_changed_at'])) if db_type == 'account': print(' Container Count: %s' % info['container_count']) print(' Object Count: %s' % info['object_count']) print(' Bytes Used: %s' % info['bytes_used']) if db_type == 'container': try: policy_name = POLICIES[info['storage_policy_index']].name except KeyError: policy_name = 'Unknown' print(' Storage Policy: %s (%s)' % ( policy_name, info['storage_policy_index'])) print(' Reported Put Timestamp: %s (%s)' % (Timestamp(info['reported_put_timestamp']).isoformat, info['reported_put_timestamp'])) print(' Reported Delete Timestamp: %s (%s)' % (Timestamp(info['reported_delete_timestamp']).isoformat, info['reported_delete_timestamp'])) print(' Reported Object Count: %s' % info['reported_object_count']) print(' Reported Bytes Used: %s' % info['reported_bytes_used']) print(' Chexor: %s' % info['hash']) print(' UUID: %s' % info['id']) except KeyError as e: raise ValueError('Info is incomplete: %s' % e) meta_prefix = 'x_' + db_type + '_' for key, value in info.items(): if key.lower().startswith(meta_prefix): title = key.replace('_', '-').title() print(' %s: %s' % (title, value)) user_metadata = {} sys_metadata = {} for key, (value, timestamp) in metadata.items(): if is_user_meta(db_type, key): user_metadata[strip_user_meta_prefix(db_type, key)] = value elif is_sys_meta(db_type, key): sys_metadata[strip_sys_meta_prefix(db_type, key)] = value else: title = key.replace('_', '-').title() print(' %s: %s' % (title, value)) if sys_metadata: print(' System Metadata: %s' % sys_metadata) else: print('No system metadata found in db file') if user_metadata: print(' User Metadata: %s' % user_metadata) else: print('No user metadata found in db file') def print_obj_metadata(metadata): """ Print out basic info and metadata from object, as returned from :func:`swift.obj.diskfile.read_metadata`. Metadata should include the keys: name, Content-Type, and X-Timestamp. Additional metadata is displayed unmodified. 
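
    Example input (editor's sketch; the values are invented):

        {'name': '/AUTH_test/c/o',
         'Content-Type': 'application/octet-stream',
         'X-Timestamp': '1402020043.50713',
         'X-Object-Meta-Mtime': '107452.421330'}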
:param metadata: dict of object metadata :raises ValueError: """ user_metadata = {} sys_metadata = {} transient_sys_metadata = {} other_metadata = {} if not metadata: raise ValueError('Metadata is None') path = metadata.pop('name', '') content_type = metadata.pop('Content-Type', '') ts = Timestamp(metadata.pop('X-Timestamp', 0)) account = container = obj = obj_hash = None if path: try: account, container, obj = path.split('/', 3)[1:] except ValueError: raise ValueError('Path is invalid for object %r' % path) else: obj_hash = hash_path(account, container, obj) print('Path: %s' % path) print(' Account: %s' % account) print(' Container: %s' % container) print(' Object: %s' % obj) print(' Object hash: %s' % obj_hash) else: print('Path: Not found in metadata') if content_type: print('Content-Type: %s' % content_type) else: print('Content-Type: Not found in metadata') if ts: print('Timestamp: %s (%s)' % (ts.isoformat, ts.internal)) else: print('Timestamp: Not found in metadata') for key, value in metadata.items(): if is_user_meta('Object', key): user_metadata[key] = value elif is_sys_meta('Object', key): sys_metadata[key] = value elif is_object_transient_sysmeta(key): transient_sys_metadata[key] = value else: other_metadata[key] = value def print_metadata(title, items): print(title) if items: for meta_key in sorted(items): print(' %s: %s' % (meta_key, items[meta_key])) else: print(' No metadata found') print_metadata('System Metadata:', sys_metadata) print_metadata('Transient System Metadata:', transient_sys_metadata) print_metadata('User Metadata:', user_metadata) print_metadata('Other Metadata:', other_metadata) def print_info(db_type, db_file, swift_dir='/etc/swift', stale_reads_ok=False): if db_type not in ('account', 'container'): print("Unrecognized DB type: internal error") raise InfoSystemExit() if not os.path.exists(db_file) or not db_file.endswith('.db'): print("DB file doesn't exist") raise InfoSystemExit() if not db_file.startswith(('/', './')): db_file = './' + db_file # don't break if the bare db file is given if db_type == 'account': broker = AccountBroker(db_file, stale_reads_ok=stale_reads_ok) datadir = ABDATADIR else: broker = ContainerBroker(db_file, stale_reads_ok=stale_reads_ok) datadir = CBDATADIR try: info = broker.get_info() except sqlite3.OperationalError as err: if 'no such table' in str(err): print("Does not appear to be a DB of type \"%s\": %s" % (db_type, db_file)) raise InfoSystemExit() raise account = info['account'] container = info['container'] if db_type == 'container' else None print_db_info_metadata(db_type, info, broker.metadata) try: ring = Ring(swift_dir, ring_name=db_type) except Exception: ring = None else: print_ring_locations(ring, datadir, account, container) def print_obj(datafile, check_etag=True, swift_dir='/etc/swift', policy_name=''): """ Display information about an object read from the datafile. Optionally verify the datafile content matches the ETag metadata. :param datafile: path on disk to object file :param check_etag: boolean, will read datafile content and verify computed checksum matches value stored in metadata. 
:param swift_dir: the path on disk to rings :param policy_name: optionally the name to use when finding the ring """ if not os.path.exists(datafile): print("Data file doesn't exist") raise InfoSystemExit() if not datafile.startswith(('/', './')): datafile = './' + datafile policy_index = None ring = None datadir = DATADIR_BASE # try to extract policy index from datafile disk path fullpath = os.path.abspath(datafile) policy_index = int(extract_policy(fullpath) or POLICIES.legacy) try: if policy_index: datadir += '-' + str(policy_index) ring = Ring(swift_dir, ring_name='object-' + str(policy_index)) elif policy_index == 0: ring = Ring(swift_dir, ring_name='object') except IOError: # no such ring pass if policy_name: policy = POLICIES.get_by_name(policy_name) if policy: policy_index_for_name = policy.idx if (policy_index is not None and policy_index_for_name is not None and policy_index != policy_index_for_name): print('Warning: Ring does not match policy!') print('Double check your policy name!') if not ring and policy_index_for_name: ring = POLICIES.get_object_ring(policy_index_for_name, swift_dir) datadir = get_data_dir(policy_index_for_name) with open(datafile, 'rb') as fp: try: metadata = read_metadata(fp) except EOFError: print("Invalid metadata") raise InfoSystemExit() etag = metadata.pop('ETag', '') length = metadata.pop('Content-Length', '') path = metadata.get('name', '') print_obj_metadata(metadata) # Optional integrity check; it's useful, but slow. file_len = None if check_etag: h = md5() file_len = 0 while True: data = fp.read(64 * 1024) if not data: break h.update(data) file_len += len(data) h = h.hexdigest() if etag: if h == etag: print('ETag: %s (valid)' % etag) else: print("ETag: %s doesn't match file hash of %s!" % (etag, h)) else: print('ETag: Not found in metadata') else: print('ETag: %s (not checked)' % etag) file_len = os.fstat(fp.fileno()).st_size if length: if file_len == int(length): print('Content-Length: %s (valid)' % length) else: print("Content-Length: %s doesn't match file length of %s" % (length, file_len)) else: print('Content-Length: Not found in metadata') account, container, obj = path.split('/', 3)[1:] if ring: print_ring_locations(ring, datadir, account, container, obj, policy_index=policy_index) def print_item_locations(ring, ring_name=None, account=None, container=None, obj=None, **kwargs): """ Display placement information for an item based on ring lookup. If a ring is provided it always takes precedence, but warnings will be emitted if it doesn't match other optional arguments like the policy_name or ring_name. If no ring is provided the ring_name and/or policy_name will be used to lookup the ring. :param ring: a ring instance :param ring_name: server type, or storage policy ring name if object ring :param account: account name :param container: container name :param obj: object name :param partition: part number for non path lookups :param policy_name: name of storage policy to use to lookup the ring :param all_nodes: include all handoff nodes. If false, only the N primary nodes and first N handoffs will be printed. 
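
    Example (editor's sketch; the names are invented): calling
    print_item_locations(None, account='AUTH_test', container='c', obj='o',
    policy_name='gold') loads the 'gold' policy's object ring from swift_dir
    and prints the primary and handoff locations for /AUTH_test/c/o.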
""" policy_name = kwargs.get('policy_name', None) part = kwargs.get('partition', None) all_nodes = kwargs.get('all', False) swift_dir = kwargs.get('swift_dir', '/etc/swift') if ring and policy_name: policy = POLICIES.get_by_name(policy_name) if policy: if ring_name != policy.ring_name: print('Warning: mismatch between ring and policy name!') else: print('Warning: Policy %s is not valid' % policy_name) policy_index = None if ring is None and (obj or part): if not policy_name: print('Need a ring or policy') raise InfoSystemExit() policy = POLICIES.get_by_name(policy_name) if not policy: print('No policy named %r' % policy_name) raise InfoSystemExit() policy_index = int(policy) ring = POLICIES.get_object_ring(policy_index, swift_dir) ring_name = (POLICIES.get_by_name(policy_name)).ring_name if account is None and (container is not None or obj is not None): print('No account specified') raise InfoSystemExit() if container is None and obj is not None: print('No container specified') raise InfoSystemExit() if account is None and part is None: print('No target specified') raise InfoSystemExit() loc = '' if part and ring_name: if '-' in ring_name and ring_name.startswith('object'): loc = 'objects-' + ring_name.split('-', 1)[1] else: loc = ring_name + 's' if account and container and obj: loc = 'objects' if '-' in ring_name and ring_name.startswith('object'): policy_index = int(ring_name.rsplit('-', 1)[1]) loc = 'objects-%d' % policy_index if account and container and not obj: loc = 'containers' if not any([ring, ring_name]): ring = Ring(swift_dir, ring_name='container') else: if ring_name != 'container': print('Warning: account/container specified ' + 'but ring not named "container"') if account and not container and not obj: loc = 'accounts' if not any([ring, ring_name]): ring = Ring(swift_dir, ring_name='account') else: if ring_name != 'account': print('Warning: account specified ' + 'but ring not named "account"') print('\nAccount \t%s' % account) print('Container\t%s' % container) print('Object \t%s\n\n' % obj) print_ring_locations(ring, loc, account, container, obj, part, all_nodes, policy_index=policy_index) swift-2.17.1/swift/cli/relinker.py0000666000175000017500000001363613435012003017052 0ustar zuulzuul00000000000000#!/usr/bin/env python # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import os from swift.common.storage_policy import POLICIES from swift.common.exceptions import DiskFileDeleted, DiskFileNotExist, \ DiskFileQuarantined from swift.common.utils import replace_partition_in_path, \ audit_location_generator, get_logger from swift.obj import diskfile def relink(swift_dir='/etc/swift', devices='/srv/node', skip_mount_check=False, logger=logging.getLogger()): mount_check = not skip_mount_check run = False relinked = errors = 0 for policy in POLICIES: policy.object_ring = None # Ensure it will be reloaded policy.load_ring(swift_dir) part_power = policy.object_ring.part_power next_part_power = policy.object_ring.next_part_power if not next_part_power or next_part_power == part_power: continue logging.info('Relinking files for policy %s under %s', policy.name, devices) run = True locations = audit_location_generator( devices, diskfile.get_data_dir(policy), mount_check=mount_check) for fname, _, _ in locations: newfname = replace_partition_in_path(fname, next_part_power) try: diskfile.relink_paths(fname, newfname, check_existing=True) relinked += 1 except OSError as exc: errors += 1 logger.warning("Relinking %s to %s failed: %s", fname, newfname, exc) if not run: logger.warning("No policy found to increase the partition power.") return 2 logging.info('Relinked %d diskfiles (%d errors)', relinked, errors) if errors > 0: return 1 return 0 def cleanup(swift_dir='/etc/swift', devices='/srv/node', skip_mount_check=False, logger=logging.getLogger()): mount_check = not skip_mount_check conf = {'devices': devices, 'mount_check': mount_check} diskfile_router = diskfile.DiskFileRouter(conf, get_logger(conf)) errors = cleaned_up = 0 run = False for policy in POLICIES: policy.object_ring = None # Ensure it will be reloaded policy.load_ring(swift_dir) part_power = policy.object_ring.part_power next_part_power = policy.object_ring.next_part_power if not next_part_power or next_part_power != part_power: continue logging.info('Cleaning up files for policy %s under %s', policy.name, devices) run = True locations = audit_location_generator( devices, diskfile.get_data_dir(policy), mount_check=mount_check) for fname, device, partition in locations: expected_fname = replace_partition_in_path(fname, part_power) if fname == expected_fname: continue # Make sure there is a valid object file in the expected new # location. Note that this could be newer than the original one # (which happens if there is another PUT after partition power # has been increased, but cleanup did not yet run) loc = diskfile.AuditLocation( os.path.dirname(expected_fname), device, partition, policy) diskfile_mgr = diskfile_router[policy] df = diskfile_mgr.get_diskfile_from_audit_location(loc) try: with df.open(): pass except DiskFileQuarantined as exc: logger.warning('ERROR Object %(obj)s failed audit and was' ' quarantined: %(err)r', {'obj': loc, 'err': exc}) errors += 1 continue except DiskFileDeleted: pass except DiskFileNotExist as exc: err = False if policy.policy_type == 'erasure_coding': # Might be a non-durable fragment - check that there is # a fragment in the new path. 
Will be fixed by the # reconstructor then if not os.path.isfile(expected_fname): err = True else: err = True if err: logger.warning( 'Error cleaning up %s: %r', fname, exc) errors += 1 continue try: os.remove(fname) cleaned_up += 1 logging.debug("Removed %s", fname) except OSError as exc: logger.warning('Error cleaning up %s: %r', fname, exc) errors += 1 if not run: logger.warning("No policy found to increase the partition power.") return 2 logging.info('Cleaned up %d diskfiles (%d errors)', cleaned_up, errors) if errors > 0: return 1 return 0 def main(args): logging.basicConfig( format='%(message)s', level=logging.DEBUG if args.debug else logging.INFO, filename=args.logfile) logger = logging.getLogger() if args.action == 'relink': return relink( args.swift_dir, args.devices, args.skip_mount_check, logger) if args.action == 'cleanup': return cleanup( args.swift_dir, args.devices, args.skip_mount_check, logger) swift-2.17.1/swift/cli/recon.py0000666000175000017500000013745513435012015016356 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ cmdline utility to perform cluster reconnaissance """ from __future__ import print_function from eventlet.green import socket from six import string_types from six.moves.urllib.parse import urlparse from swift.common.utils import ( SWIFT_CONF_FILE, md5_hash_for_file, set_swift_dir) from swift.common.ring import Ring from swift.common.storage_policy import POLICIES, reload_storage_policies import eventlet import json import optparse import time import sys import six import os if six.PY3: from eventlet.green.urllib import request as urllib2 else: from eventlet.green import urllib2 def seconds2timeunit(seconds): elapsed = seconds unit = 'seconds' if elapsed >= 60: elapsed = elapsed / 60.0 unit = 'minutes' if elapsed >= 60: elapsed = elapsed / 60.0 unit = 'hours' if elapsed >= 24: elapsed = elapsed / 24.0 unit = 'days' return elapsed, unit def size_suffix(size): suffixes = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'] for suffix in suffixes: if size < 1000: return "%s %s" % (size, suffix) size = size // 1000 return "%s %s" % (size, suffix) class Scout(object): """ Obtain swift recon information """ def __init__(self, recon_type, verbose=False, suppress_errors=False, timeout=5): self.recon_type = recon_type self.verbose = verbose self.suppress_errors = suppress_errors self.timeout = timeout def scout_host(self, base_url, recon_type): """ Perform the actual HTTP request to obtain swift recon telemetry. :param base_url: the base url of the host you wish to check. str of the format 'http://127.0.0.1:6200/recon/' :param recon_type: the swift recon check to request. 
:returns: tuple of (recon url used, response body, and status) """ url = base_url + recon_type try: body = urllib2.urlopen(url, timeout=self.timeout).read() content = json.loads(body) if self.verbose: print("-> %s: %s" % (url, content)) status = 200 except urllib2.HTTPError as err: if not self.suppress_errors or self.verbose: print("-> %s: %s" % (url, err)) content = err status = err.code except (urllib2.URLError, socket.timeout) as err: if not self.suppress_errors or self.verbose: print("-> %s: %s" % (url, err)) content = err status = -1 return url, content, status def scout(self, host): """ Obtain telemetry from a host running the swift recon middleware. :param host: host to check :returns: tuple of (recon url used, response body, status, time start and time end) """ base_url = "http://%s:%s/recon/" % (host[0], host[1]) ts_start = time.time() url, content, status = self.scout_host(base_url, self.recon_type) ts_end = time.time() return url, content, status, ts_start, ts_end def scout_server_type(self, host): """ Obtain Server header by calling OPTIONS. :param host: host to check :returns: Server type, status """ try: url = "http://%s:%s/" % (host[0], host[1]) req = urllib2.Request(url) req.get_method = lambda: 'OPTIONS' conn = urllib2.urlopen(req) header = conn.info().getheader('Server') server_header = header.split('/') content = server_header[0] status = 200 except urllib2.HTTPError as err: if not self.suppress_errors or self.verbose: print("-> %s: %s" % (url, err)) content = err status = err.code except (urllib2.URLError, socket.timeout) as err: if not self.suppress_errors or self.verbose: print("-> %s: %s" % (url, err)) content = err status = -1 return url, content, status class SwiftRecon(object): """ Retrieve and report cluster info from hosts running recon middleware. """ def __init__(self): self.verbose = False self.suppress_errors = False self.timeout = 5 self.pool_size = 30 self.pool = eventlet.GreenPool(self.pool_size) self.check_types = ['account', 'container', 'object'] self.server_type = 'object' def _gen_stats(self, stats, name=None): """Compute various stats from a list of values.""" cstats = [x for x in stats if x is not None] if len(cstats) > 0: ret_dict = {'low': min(cstats), 'high': max(cstats), 'total': sum(cstats), 'reported': len(cstats), 'number_none': len(stats) - len(cstats), 'name': name} ret_dict['average'] = \ ret_dict['total'] / float(len(cstats)) ret_dict['perc_none'] = \ ret_dict['number_none'] * 100.0 / len(stats) else: ret_dict = {'reported': 0} return ret_dict def _print_stats(self, stats): """ print out formatted stats to console :param stats: dict of stats generated by _gen_stats """ print('[%(name)s] low: %(low)d, high: %(high)d, avg: ' '%(average).1f, total: %(total)d, ' 'Failed: %(perc_none).1f%%, no_result: %(number_none)d, ' 'reported: %(reported)d' % stats) def _ptime(self, timev=None): """ :param timev: a unix timestamp or None :returns: a pretty string of the current time or provided time in UTC """ if timev: return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(timev)) else: return time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime()) def get_hosts(self, region_filter, zone_filter, swift_dir, ring_names): """ Get a list of hosts in the rings. 
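
        For example (editor's sketch with made-up devices): with
        ring_names=['object', 'object-1'] and devices on 127.0.0.1:6200
        and 127.0.0.2:6200, the returned value would be
        set([('127.0.0.1', 6200), ('127.0.0.2', 6200)]).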
:param region_filter: Only list regions matching given filter :param zone_filter: Only list zones matching given filter :param swift_dir: Directory of swift config, usually /etc/swift :param ring_names: Collection of ring names, such as ['object', 'object-2'] :returns: a set of tuples containing the ip and port of hosts """ rings = [Ring(swift_dir, ring_name=n) for n in ring_names] devs = [d for r in rings for d in r.devs if d] if region_filter is not None: devs = [d for d in devs if d['region'] == region_filter] if zone_filter is not None: devs = [d for d in devs if d['zone'] == zone_filter] return set((d['ip'], d['port']) for d in devs) def get_ringmd5(self, hosts, swift_dir): """ Compare ring md5sum's with those on remote host :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) :param swift_dir: The local directory with the ring files. """ matches = 0 errors = 0 ring_names = set() if self.server_type == 'object': for ring_name in os.listdir(swift_dir): if ring_name.startswith('object') and \ ring_name.endswith('.ring.gz'): ring_names.add(ring_name) else: ring_name = '%s.ring.gz' % self.server_type ring_names.add(ring_name) rings = {} for ring_name in ring_names: rings[ring_name] = md5_hash_for_file( os.path.join(swift_dir, ring_name)) recon = Scout("ringmd5", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking ring md5sums" % self._ptime()) if self.verbose: for ring_file, ring_sum in rings.items(): print("-> On disk %s md5sum: %s" % (ring_file, ring_sum)) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status != 200: errors = errors + 1 continue success = True for remote_ring_file, remote_ring_sum in response.items(): remote_ring_name = os.path.basename(remote_ring_file) if not remote_ring_name.startswith(self.server_type): continue ring_sum = rings.get(remote_ring_name, None) if remote_ring_sum != ring_sum: success = False print("!! %s (%s => %s) doesn't match on disk md5sum" % ( url, remote_ring_name, remote_ring_sum)) if not success: errors += 1 continue matches += 1 if self.verbose: print("-> %s matches." % url) print("%s/%s hosts matched, %s error[s] while checking hosts." % ( matches, len(hosts), errors)) print("=" * 79) def get_swiftconfmd5(self, hosts, printfn=print): """ Compare swift.conf md5sum with that on remote hosts :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) :param printfn: function to print text; defaults to print() """ matches = 0 errors = 0 conf_sum = md5_hash_for_file(SWIFT_CONF_FILE) recon = Scout("swiftconfmd5", self.verbose, self.suppress_errors, self.timeout) printfn("[%s] Checking swift.conf md5sum" % self._ptime()) if self.verbose: printfn("-> On disk swift.conf md5sum: %s" % (conf_sum,)) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: if response[SWIFT_CONF_FILE] != conf_sum: printfn("!! %s (%s) doesn't match on disk md5sum" % (url, response[SWIFT_CONF_FILE])) else: matches = matches + 1 if self.verbose: printfn("-> %s matches." % url) else: errors = errors + 1 printfn("%s/%s hosts matched, %s error[s] while checking hosts." % (matches, len(hosts), errors)) printfn("=" * 79) def async_check(self, hosts): """ Obtain and print async pending statistics :param hosts: set of hosts to check. 
in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ scan = {} recon = Scout("async", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking async pendings" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: scan[url] = response['async_pending'] stats = self._gen_stats(scan.values(), 'async_pending') if stats['reported'] > 0: self._print_stats(stats) else: print("[async_pending] - No hosts returned valid data.") print("=" * 79) def driveaudit_check(self, hosts): """ Obtain and print drive audit error statistics :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)] """ scan = {} recon = Scout("driveaudit", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking drive-audit errors" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: scan[url] = response['drive_audit_errors'] stats = self._gen_stats(scan.values(), 'drive_audit_errors') if stats['reported'] > 0: self._print_stats(stats) else: print("[drive_audit_errors] - No hosts returned valid data.") print("=" * 79) def umount_check(self, hosts): """ Check for and print unmounted drives :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ unmounted = {} errors = {} recon = Scout("unmounted", self.verbose, self.suppress_errors, self.timeout) print("[%s] Getting unmounted drives from %s hosts..." % (self._ptime(), len(hosts))) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: unmounted[url] = [] errors[url] = [] for i in response: if not isinstance(i['mounted'], bool): errors[url].append(i['device']) else: unmounted[url].append(i['device']) for host in unmounted: node = urlparse(host).netloc for entry in unmounted[host]: print("Not mounted: %s on %s" % (entry, node)) for host in errors: node = urlparse(host).netloc for entry in errors[host]: print("Device errors: %s on %s" % (entry, node)) print("=" * 79) def server_type_check(self, hosts): """ Check for server types on the ring :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ errors = {} recon = Scout("server_type_check", self.verbose, self.suppress_errors, self.timeout) print("[%s] Validating server type '%s' on %s hosts..." % (self._ptime(), self.server_type, len(hosts))) for url, response, status in self.pool.imap( recon.scout_server_type, hosts): if status == 200: if response != self.server_type + '-server': errors[url] = response print("%s/%s hosts ok, %s error[s] while checking hosts." % ( len(hosts) - len(errors), len(hosts), len(errors))) for host in errors: print("Invalid: %s is %s" % (host, errors[host])) print("=" * 79) def expirer_check(self, hosts): """ Obtain and print expirer statistics :param hosts: set of hosts to check. 
in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ stats = {'object_expiration_pass': [], 'expired_last_pass': []} recon = Scout("expirer/%s" % self.server_type, self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking on expirers" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: stats['object_expiration_pass'].append( response.get('object_expiration_pass')) stats['expired_last_pass'].append( response.get('expired_last_pass')) for k in stats: if stats[k]: computed = self._gen_stats(stats[k], name=k) if computed['reported'] > 0: self._print_stats(computed) else: print("[%s] - No hosts returned valid data." % k) else: print("[%s] - No hosts returned valid data." % k) print("=" * 79) def replication_check(self, hosts): """ Obtain and print replication statistics :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ stats = {'replication_time': [], 'failure': [], 'success': [], 'attempted': []} recon = Scout("replication/%s" % self.server_type, self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking on replication" % self._ptime()) least_recent_time = 9999999999 least_recent_url = None most_recent_time = 0 most_recent_url = None for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: stats['replication_time'].append( response.get('replication_time', response.get('object_replication_time', 0))) repl_stats = response.get('replication_stats') if repl_stats: for stat_key in ['attempted', 'failure', 'success']: stats[stat_key].append(repl_stats.get(stat_key)) last = response.get('replication_last', response.get('object_replication_last', 0)) if last < least_recent_time: least_recent_time = last least_recent_url = url if last > most_recent_time: most_recent_time = last most_recent_url = url for k in stats: if stats[k]: if k != 'replication_time': computed = self._gen_stats(stats[k], name='replication_%s' % k) else: computed = self._gen_stats(stats[k], name=k) if computed['reported'] > 0: self._print_stats(computed) else: print("[%s] - No hosts returned valid data." % k) else: print("[%s] - No hosts returned valid data." % k) if least_recent_url is not None: host = urlparse(least_recent_url).netloc if not least_recent_time: print('Oldest completion was NEVER by %s.' % host) else: elapsed = time.time() - least_recent_time elapsed, elapsed_unit = seconds2timeunit(elapsed) print('Oldest completion was %s (%d %s ago) by %s.' % ( self._ptime(least_recent_time), elapsed, elapsed_unit, host)) if most_recent_url is not None: host = urlparse(most_recent_url).netloc elapsed = time.time() - most_recent_time elapsed, elapsed_unit = seconds2timeunit(elapsed) print('Most recent completion was %s (%d %s ago) by %s.' % ( self._ptime(most_recent_time), elapsed, elapsed_unit, host)) print("=" * 79) def updater_check(self, hosts): """ Obtain and print updater statistics :param hosts: set of hosts to check. 
in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ stats = [] recon = Scout("updater/%s" % self.server_type, self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking updater times" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: if response['%s_updater_sweep' % self.server_type]: stats.append(response['%s_updater_sweep' % self.server_type]) if len(stats) > 0: computed = self._gen_stats(stats, name='updater_last_sweep') if computed['reported'] > 0: self._print_stats(computed) else: print("[updater_last_sweep] - No hosts returned valid data.") else: print("[updater_last_sweep] - No hosts returned valid data.") print("=" * 79) def auditor_check(self, hosts): """ Obtain and print obj auditor statistics :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ scan = {} adone = '%s_auditor_pass_completed' % self.server_type afail = '%s_audits_failed' % self.server_type apass = '%s_audits_passed' % self.server_type asince = '%s_audits_since' % self.server_type recon = Scout("auditor/%s" % self.server_type, self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking auditor stats" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: scan[url] = response if len(scan) < 1: print("Error: No hosts available") return stats = {} stats[adone] = [scan[i][adone] for i in scan if scan[i][adone] is not None] stats[afail] = [scan[i][afail] for i in scan if scan[i][afail] is not None] stats[apass] = [scan[i][apass] for i in scan if scan[i][apass] is not None] stats[asince] = [scan[i][asince] for i in scan if scan[i][asince] is not None] for k in stats: if len(stats[k]) < 1: print("[%s] - No hosts returned valid data." % k) else: if k != asince: computed = self._gen_stats(stats[k], k) if computed['reported'] > 0: self._print_stats(computed) if len(stats[asince]) >= 1: low = min(stats[asince]) high = max(stats[asince]) total = sum(stats[asince]) average = total / len(stats[asince]) print('[last_pass] oldest: %s, newest: %s, avg: %s' % (self._ptime(low), self._ptime(high), self._ptime(average))) print("=" * 79) def nested_get_value(self, key, recon_entry): """ Generator that yields all values for given key in a recon cache entry. This is for use with object auditor recon cache entries. If the object auditor has run in parallel, the recon cache will have entries of the form: {'object_auditor_stats_ALL': { 'disk1': {..}, 'disk2': {..}, 'disk3': {..}, ...}} If the object auditor hasn't run in parallel, the recon cache will have entries of the form: {'object_auditor_stats_ALL': {...}}. The ZBF auditor doesn't run in parallel. However, if a subset of devices is selected for auditing, the recon cache will have an entry of the form: {'object_auditor_stats_ZBF': { 'disk1disk2..diskN': {}} We use this generator to find all instances of a particular key in these multi-level dictionaries. """ for k, v in recon_entry.items(): if isinstance(v, dict): for value in self.nested_get_value(key, v): yield value if k == key: yield v def object_auditor_check(self, hosts): """ Obtain and print obj auditor statistics :param hosts: set of hosts to check. 
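(An illustrative aside with assumed sample data, not upstream text: for a parallel-auditor cache entry such as

    entry = {'object_auditor_stats_ALL': {'disk1': {'errors': 1},
                                          'disk2': {'errors': 2}}}

sum(self.nested_get_value('errors', entry)) evaluates to 3; that is how the per-disk figures are folded into one number per host below.)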
in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ all_scan = {} zbf_scan = {} atime = 'audit_time' bprocessed = 'bytes_processed' passes = 'passes' errors = 'errors' quarantined = 'quarantined' recon = Scout("auditor/object", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking auditor stats " % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: if response['object_auditor_stats_ALL']: all_scan[url] = response['object_auditor_stats_ALL'] if response['object_auditor_stats_ZBF']: zbf_scan[url] = response['object_auditor_stats_ZBF'] if len(all_scan) > 0: stats = {} stats[atime] = [sum(self.nested_get_value(atime, all_scan[i])) for i in all_scan] stats[bprocessed] = [sum(self.nested_get_value(bprocessed, all_scan[i])) for i in all_scan] stats[passes] = [sum(self.nested_get_value(passes, all_scan[i])) for i in all_scan] stats[errors] = [sum(self.nested_get_value(errors, all_scan[i])) for i in all_scan] stats[quarantined] = [sum(self.nested_get_value(quarantined, all_scan[i])) for i in all_scan] for k in stats: if None in stats[k]: stats[k] = [x for x in stats[k] if x is not None] if len(stats[k]) < 1: print("[Auditor %s] - No hosts returned valid data." % k) else: computed = self._gen_stats(stats[k], name='ALL_%s_last_path' % k) if computed['reported'] > 0: self._print_stats(computed) else: print("[ALL_auditor] - No hosts returned valid data.") else: print("[ALL_auditor] - No hosts returned valid data.") if len(zbf_scan) > 0: stats = {} stats[atime] = [sum(self.nested_get_value(atime, zbf_scan[i])) for i in zbf_scan] stats[bprocessed] = [sum(self.nested_get_value(bprocessed, zbf_scan[i])) for i in zbf_scan] stats[errors] = [sum(self.nested_get_value(errors, zbf_scan[i])) for i in zbf_scan] stats[quarantined] = [sum(self.nested_get_value(quarantined, zbf_scan[i])) for i in zbf_scan] for k in stats: if None in stats[k]: stats[k] = [x for x in stats[k] if x is not None] if len(stats[k]) < 1: print("[Auditor %s] - No hosts returned valid data." % k) else: computed = self._gen_stats(stats[k], name='ZBF_%s_last_path' % k) if computed['reported'] > 0: self._print_stats(computed) else: print("[ZBF_auditor] - No hosts returned valid data.") else: print("[ZBF_auditor] - No hosts returned valid data.") print("=" * 79) def load_check(self, hosts): """ Obtain and print load average statistics :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ load1 = {} load5 = {} load15 = {} recon = Scout("load", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking load averages" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: load1[url] = response['1m'] load5[url] = response['5m'] load15[url] = response['15m'] stats = {"1m": load1, "5m": load5, "15m": load15} for item in stats: if len(stats[item]) > 0: computed = self._gen_stats(stats[item].values(), name='%s_load_avg' % item) self._print_stats(computed) else: print("[%s_load_avg] - No hosts returned valid data." % item) print("=" * 79) def quarantine_check(self, hosts): """ Obtain and print quarantine statistics :param hosts: set of hosts to check. 
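(An illustrative aside with assumed sample data, not upstream text: a response like {'objects': 2, 'containers': 0, 'accounts': 0, 'policies': {'1': {'objects': 5}}} produces a 'quarantined_objects_1' stats line in addition to the three base categories.)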
in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ objq = {} conq = {} acctq = {} stats = {} recon = Scout("quarantined", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking quarantine" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: objq[url] = response['objects'] conq[url] = response['containers'] acctq[url] = response['accounts'] for key in response.get('policies', {}): pkey = "objects_%s" % key stats.setdefault(pkey, {}) stats[pkey][url] = response['policies'][key]['objects'] stats.update({"objects": objq, "containers": conq, "accounts": acctq}) for item in stats: if len(stats[item]) > 0: computed = self._gen_stats(stats[item].values(), name='quarantined_%s' % item) self._print_stats(computed) else: print("No hosts returned valid data.") print("=" * 79) def socket_usage(self, hosts): """ Obtain and print /proc/net/sockstat statistics :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ inuse4 = {} mem = {} inuse6 = {} timewait = {} orphan = {} recon = Scout("sockstat", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking socket usage" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: inuse4[url] = response['tcp_in_use'] mem[url] = response['tcp_mem_allocated_bytes'] inuse6[url] = response.get('tcp6_in_use', 0) timewait[url] = response['time_wait'] orphan[url] = response['orphan'] stats = {"tcp_in_use": inuse4, "tcp_mem_allocated_bytes": mem, "tcp6_in_use": inuse6, "time_wait": timewait, "orphan": orphan} for item in stats: if len(stats[item]) > 0: computed = self._gen_stats(stats[item].values(), item) self._print_stats(computed) else: print("No hosts returned valid data.") print("=" * 79) def disk_usage(self, hosts, top=0, lowest=0, human_readable=False): """ Obtain and print disk usage statistics :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ stats = {} highs = [] lows = [] raw_total_used = [] raw_total_avail = [] percents = {} top_percents = [(None, 0)] * top low_percents = [(None, 100)] * lowest recon = Scout("diskusage", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking disk usage now" % self._ptime()) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status == 200: hostusage = [] for entry in response: if not isinstance(entry['mounted'], bool): print("-> %s/%s: Error: %s" % (url, entry['device'], entry['mounted'])) elif entry['mounted']: used = float(entry['used']) / float(entry['size']) \ * 100.0 raw_total_used.append(entry['used']) raw_total_avail.append(entry['avail']) hostusage.append(round(used, 2)) for ident, oused in top_percents: if oused < used: top_percents.append( (url + ' ' + entry['device'], used)) top_percents.sort(key=lambda x: -x[1]) top_percents.pop() break for ident, oused in low_percents: if oused > used: low_percents.append( (url + ' ' + entry['device'], used)) low_percents.sort(key=lambda x: x[1]) low_percents.pop() break stats[url] = hostusage for url in stats: if len(stats[url]) > 0: # get per host hi/los for another day low = min(stats[url]) high = max(stats[url]) highs.append(high) lows.append(low) for percent in stats[url]: percents[int(percent)] = percents.get(int(percent), 0) + 1 else: print("-> %s: Error. No drive info available." 
% url) if len(lows) > 0: low = min(lows) high = max(highs) # dist graph shamelessly stolen from https://github.com/gholt/tcod print("Distribution Graph:") mul = 69.0 / max(percents.values()) for percent in sorted(percents): print('% 3d%%%5d %s' % (percent, percents[percent], '*' * int(percents[percent] * mul))) raw_used = sum(raw_total_used) raw_avail = sum(raw_total_avail) raw_total = raw_used + raw_avail avg_used = 100.0 * raw_used / raw_total if human_readable: raw_used = size_suffix(raw_used) raw_avail = size_suffix(raw_avail) raw_total = size_suffix(raw_total) print("Disk usage: space used: %s of %s" % (raw_used, raw_total)) print("Disk usage: space free: %s of %s" % (raw_avail, raw_total)) print("Disk usage: lowest: %s%%, highest: %s%%, avg: %s%%" % (low, high, avg_used)) else: print("No hosts returned valid data.") print("=" * 79) if top_percents: print('TOP %s' % top) for ident, used in top_percents: if ident: url, device = ident.split() host = urlparse(url).netloc.split(':')[0] print('%.02f%% %s' % (used, '%-15s %s' % (host, device))) if low_percents: print('LOWEST %s' % lowest) for ident, used in low_percents: if ident: url, device = ident.split() host = urlparse(url).netloc.split(':')[0] print('%.02f%% %s' % (used, '%-15s %s' % (host, device))) def time_check(self, hosts, jitter=0.0): """ Check a time synchronization of hosts with current time :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) :param jitter: Maximal allowed time jitter """ jitter = abs(jitter) matches = 0 errors = 0 recon = Scout("time", self.verbose, self.suppress_errors, self.timeout) print("[%s] Checking time-sync" % self._ptime()) for url, ts_remote, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status != 200: errors = errors + 1 continue if (ts_remote + jitter < ts_start or ts_remote - jitter > ts_end): diff = abs(ts_end - ts_remote) ts_end_f = self._ptime(ts_end) ts_remote_f = self._ptime(ts_remote) print("!! %s current time is %s, but remote is %s, " "differs by %.4f sec" % ( url, ts_end_f, ts_remote_f, diff)) continue matches += 1 if self.verbose: print("-> %s matches." % url) print("%s/%s hosts matched, %s error[s] while checking hosts." % ( matches, len(hosts), errors)) print("=" * 79) def version_check(self, hosts): """ Check OS Swift version of hosts. Inform if differs. :param hosts: set of hosts to check. in the format of: set([('127.0.0.1', 6020), ('127.0.0.2', 6030)]) """ versions = set() errors = 0 print("[%s] Checking versions" % self._ptime()) recon = Scout("version", self.verbose, self.suppress_errors, self.timeout) for url, response, status, ts_start, ts_end in self.pool.imap( recon.scout, hosts): if status != 200: errors = errors + 1 continue versions.add(response['version']) if self.verbose: print("-> %s installed version %s" % ( url, response['version'])) if not len(versions): print("No hosts returned valid data.") elif len(versions) == 1: print("Versions matched (%s), " "%s error[s] while checking hosts." % ( versions.pop(), errors)) else: print("Versions not matched (%s), " "%s error[s] while checking hosts." % ( ", ".join(sorted(versions)), errors)) print("=" * 79) def _get_ring_names(self, policy=None): """ Retrieve name of ring files. If no policy is passed and the server type is object, the ring names of all storage-policies are retrieved. :param policy: name or index of storage policy, only applicable with server_type==object. :returns: list of ring names. 
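Example (an illustrative sketch, not upstream text; the policy names are assumed): with a default policy (index 0) named 'gold' and a second policy (index 1) named 'silver', an object-server run gives

    self._get_ring_names()        # -> ['object', 'object-1']
    self._get_ring_names('gold')  # -> ['object']
    self._get_ring_names('1')     # -> ['object-1']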
""" if self.server_type == 'object': ring_names = [p.ring_name for p in POLICIES if ( p.name == policy or not policy or ( policy.isdigit() and int(policy) == int(p) or (isinstance(policy, string_types) and policy in p.aliases)))] else: ring_names = [self.server_type] return ring_names def main(self): """ Retrieve and report cluster info from hosts running recon middleware. """ print("=" * 79) usage = ''' usage: %prog [ []] [-v] [--suppress] [-a] [-r] [-u] [-d] [-l] [-T] [--md5] [--auditor] [--updater] [--expirer] [--sockstat] [--human-readable] \taccount|container|object Defaults to object server. ex: %prog container -l --auditor ''' args = optparse.OptionParser(usage) args.add_option('--verbose', '-v', action="store_true", help="Print verbose info") args.add_option('--suppress', action="store_true", help="Suppress most connection related errors") args.add_option('--async', '-a', action="store_true", help="Get async stats") args.add_option('--replication', '-r', action="store_true", help="Get replication stats") args.add_option('--auditor', action="store_true", help="Get auditor stats") args.add_option('--updater', action="store_true", help="Get updater stats") args.add_option('--expirer', action="store_true", help="Get expirer stats") args.add_option('--unmounted', '-u', action="store_true", help="Check cluster for unmounted devices") args.add_option('--diskusage', '-d', action="store_true", help="Get disk usage stats") args.add_option('--human-readable', action="store_true", help="Use human readable suffix for disk usage stats") args.add_option('--loadstats', '-l', action="store_true", help="Get cluster load average stats") args.add_option('--quarantined', '-q', action="store_true", help="Get cluster quarantine stats") args.add_option('--validate-servers', action="store_true", help="Validate servers on the ring") args.add_option('--md5', action="store_true", help="Get md5sum of servers ring and compare to " "local copy") args.add_option('--sockstat', action="store_true", help="Get cluster socket usage stats") args.add_option('--driveaudit', action="store_true", help="Get drive audit error stats") args.add_option('--time', '-T', action="store_true", help="Check time synchronization") args.add_option('--jitter', type="float", default=0.0, help="Maximal allowed time jitter") args.add_option('--swift-versions', action="store_true", help="Check swift versions") args.add_option('--top', type='int', metavar='COUNT', default=0, help='Also show the top COUNT entries in rank order.') args.add_option('--lowest', type='int', metavar='COUNT', default=0, help='Also show the lowest COUNT entries in rank \ order.') args.add_option('--all', action="store_true", help="Perform all checks. 
Equal to \t\t\t-arudlqT " "--md5 --sockstat --auditor --updater --expirer " "--driveaudit --validate-servers --swift-versions") args.add_option('--region', type="int", help="Only query servers in specified region") args.add_option('--zone', '-z', type="int", help="Only query servers in specified zone") args.add_option('--timeout', '-t', type="int", metavar="SECONDS", help="Time to wait for a response from a server", default=5) args.add_option('--swiftdir', default="/etc/swift", help="Default = /etc/swift") args.add_option('--policy', '-p', help='Only query object servers in specified ' 'storage policy (specified as name or index).') options, arguments = args.parse_args() if len(sys.argv) <= 1 or len(arguments) > len(self.check_types): args.print_help() sys.exit(0) if arguments: arguments = set(arguments) if arguments.issubset(self.check_types): server_types = arguments else: print("Invalid Server Type") args.print_help() sys.exit(1) else: # default server_types = ['object'] swift_dir = options.swiftdir if set_swift_dir(swift_dir): reload_storage_policies() self.verbose = options.verbose self.suppress_errors = options.suppress self.timeout = options.timeout for server_type in server_types: self.server_type = server_type ring_names = self._get_ring_names(options.policy) if not ring_names: print('Invalid Storage Policy: %s' % options.policy) args.print_help() sys.exit(0) hosts = self.get_hosts(options.region, options.zone, swift_dir, ring_names) print("--> Starting reconnaissance on %s hosts (%s)" % (len(hosts), self.server_type)) print("=" * 79) if options.all: if self.server_type == 'object': self.async_check(hosts) self.object_auditor_check(hosts) self.updater_check(hosts) self.expirer_check(hosts) elif self.server_type == 'container': self.auditor_check(hosts) self.updater_check(hosts) elif self.server_type == 'account': self.auditor_check(hosts) self.replication_check(hosts) self.umount_check(hosts) self.load_check(hosts) self.disk_usage(hosts, options.top, options.lowest, options.human_readable) self.get_ringmd5(hosts, swift_dir) self.get_swiftconfmd5(hosts) self.quarantine_check(hosts) self.socket_usage(hosts) self.server_type_check(hosts) self.driveaudit_check(hosts) self.time_check(hosts, options.jitter) self.version_check(hosts) else: if options.async: if self.server_type == 'object': self.async_check(hosts) else: print("Error: Can't check asyncs on non object " "servers.") print("=" * 79) if options.unmounted: self.umount_check(hosts) if options.replication: self.replication_check(hosts) if options.auditor: if self.server_type == 'object': self.object_auditor_check(hosts) else: self.auditor_check(hosts) if options.updater: if self.server_type == 'account': print("Error: Can't check updaters on account " "servers.") print("=" * 79) else: self.updater_check(hosts) if options.expirer: if self.server_type == 'object': self.expirer_check(hosts) else: print("Error: Can't check expirers on non object " "servers.") print("=" * 79) if options.validate_servers: self.server_type_check(hosts) if options.loadstats: self.load_check(hosts) if options.diskusage: self.disk_usage(hosts, options.top, options.lowest, options.human_readable) if options.md5: self.get_ringmd5(hosts, swift_dir) self.get_swiftconfmd5(hosts) if options.quarantined: self.quarantine_check(hosts) if options.sockstat: self.socket_usage(hosts) if options.driveaudit: self.driveaudit_check(hosts) if options.time: self.time_check(hosts, options.jitter) if options.swift_versions: self.version_check(hosts) def main(): try: reconnoiter
= SwiftRecon() reconnoiter.main() except KeyboardInterrupt: print('\n') swift-2.17.1/swift/cli/dispersion_report.py0000666000175000017500000004235413435012003021010 0ustar zuulzuul00000000000000#!/usr/bin/env python # Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import json from collections import defaultdict from six.moves.configparser import ConfigParser from optparse import OptionParser from sys import exit, stdout, stderr from time import time from eventlet import GreenPool, hubs, patcher, Timeout from eventlet.pools import Pool from swift.common import direct_client try: from swiftclient import get_auth except ImportError: from swift.common.internal_client import get_auth from swift.common.internal_client import SimpleClient from swift.common.ring import Ring from swift.common.exceptions import ClientException from swift.common.utils import compute_eta, get_time_units, config_true_value from swift.common.storage_policy import POLICIES unmounted = [] notfound = [] json_output = False debug = False insecure = False def get_error_log(prefix): def error_log(msg_or_exc): global debug, unmounted, notfound if hasattr(msg_or_exc, 'http_status'): identifier = '%s:%s/%s' % (msg_or_exc.http_host, msg_or_exc.http_port, msg_or_exc.http_device) if msg_or_exc.http_status == 507: if identifier not in unmounted: unmounted.append(identifier) print('ERROR: %s is unmounted -- This will ' 'cause replicas designated for that device to be ' 'considered missing until resolved or the ring is ' 'updated.' % (identifier), file=stderr) stderr.flush() if debug and identifier not in notfound: notfound.append(identifier) print('ERROR: %s returned a 404' % (identifier), file=stderr) stderr.flush() if not hasattr(msg_or_exc, 'http_status') or \ msg_or_exc.http_status not in (404, 507): print('ERROR: %s: %s' % (prefix, msg_or_exc), file=stderr) stderr.flush() return error_log def container_dispersion_report(coropool, connpool, account, container_ring, retries, output_missing_partitions, policy): with connpool.item() as conn: containers = [c['name'] for c in conn.get_account( prefix='dispersion_%d' % policy.idx, full_listing=True)[1]] containers_listed = len(containers) if not containers_listed: print('No containers to query. 
Has ' 'swift-dispersion-populate been run?', file=stderr) stderr.flush() return retries_done = [0] containers_queried = [0] container_copies_missing = defaultdict(int) container_copies_found = [0] container_copies_expected = [0] begun = time() next_report = [time() + 2] def direct(container, part, nodes): found_count = 0 for node in nodes: error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node) try: attempts, _junk = direct_client.retry( direct_client.direct_head_container, node, part, account, container, error_log=error_log, retries=retries) retries_done[0] += attempts - 1 found_count += 1 except ClientException as err: if err.http_status not in (404, 507): error_log('Giving up on /%s/%s/%s: %s' % (part, account, container, err)) except (Exception, Timeout) as err: error_log('Giving up on /%s/%s/%s: %s' % (part, account, container, err)) if output_missing_partitions and \ found_count < len(nodes): missing = len(nodes) - found_count print('\r\x1B[K', end='') stdout.flush() print('# Container partition %s missing %s cop%s' % ( part, missing, 'y' if missing == 1 else 'ies'), file=stderr) container_copies_found[0] += found_count containers_queried[0] += 1 container_copies_missing[len(nodes) - found_count] += 1 if time() >= next_report[0]: next_report[0] = time() + 5 eta, eta_unit = compute_eta(begun, containers_queried[0], containers_listed) if not json_output: print('\r\x1B[KQuerying containers: %d of %d, %d%s left, %d ' 'retries' % (containers_queried[0], containers_listed, round(eta), eta_unit, retries_done[0]), end='') stdout.flush() container_parts = {} for container in containers: part, nodes = container_ring.get_nodes(account, container) if part not in container_parts: container_copies_expected[0] += len(nodes) container_parts[part] = part coropool.spawn(direct, container, part, nodes) coropool.waitall() distinct_partitions = len(container_parts) copies_found = container_copies_found[0] copies_expected = container_copies_expected[0] value = 100.0 * copies_found / copies_expected elapsed, elapsed_unit = get_time_units(time() - begun) container_copies_missing.pop(0, None) if not json_output: print('\r\x1B[KQueried %d containers for dispersion reporting, ' '%d%s, %d retries' % (containers_listed, round(elapsed), elapsed_unit, retries_done[0])) if containers_listed - distinct_partitions: print('There were %d overlapping partitions' % ( containers_listed - distinct_partitions)) for missing_copies, num_parts in container_copies_missing.items(): print(missing_string(num_parts, missing_copies, container_ring.replica_count)) print('%.02f%% of container copies found (%d of %d)' % ( value, copies_found, copies_expected)) print('Sample represents %.02f%% of the container partition space' % ( 100.0 * distinct_partitions / container_ring.partition_count)) stdout.flush() return None else: results = {'retries': retries_done[0], 'overlapping': containers_listed - distinct_partitions, 'pct_found': value, 'copies_found': copies_found, 'copies_expected': copies_expected} for missing_copies, num_parts in container_copies_missing.items(): results['missing_%d' % (missing_copies)] = num_parts return results def object_dispersion_report(coropool, connpool, account, object_ring, retries, output_missing_partitions, policy): container = 'dispersion_objects_%d' % policy.idx with connpool.item() as conn: try: objects = [o['name'] for o in conn.get_container( container, prefix='dispersion_', full_listing=True)[1]] except ClientException as err: if err.http_status != 404: raise print('No objects to query. 
Has ' 'swift-dispersion-populate been run?', file=stderr) stderr.flush() return objects_listed = len(objects) if not objects_listed: print('No objects to query. Has swift-dispersion-populate ' 'been run?', file=stderr) stderr.flush() return retries_done = [0] objects_queried = [0] object_copies_found = [0] object_copies_expected = [0] object_copies_missing = defaultdict(int) begun = time() next_report = [time() + 2] headers = None if policy is not None: headers = {} headers['X-Backend-Storage-Policy-Index'] = int(policy) def direct(obj, part, nodes): found_count = 0 for node in nodes: error_log = get_error_log('%(ip)s:%(port)s/%(device)s' % node) try: attempts, _junk = direct_client.retry( direct_client.direct_head_object, node, part, account, container, obj, error_log=error_log, retries=retries, headers=headers) retries_done[0] += attempts - 1 found_count += 1 except ClientException as err: if err.http_status not in (404, 507): error_log('Giving up on /%s/%s/%s/%s: %s' % (part, account, container, obj, err)) except (Exception, Timeout) as err: error_log('Giving up on /%s/%s/%s/%s: %s' % (part, account, container, obj, err)) if output_missing_partitions and \ found_count < len(nodes): missing = len(nodes) - found_count print('\r\x1B[K', end='') stdout.flush() print('# Object partition %s missing %s cop%s' % ( part, missing, 'y' if missing == 1 else 'ies'), file=stderr) object_copies_found[0] += found_count object_copies_missing[len(nodes) - found_count] += 1 objects_queried[0] += 1 if time() >= next_report[0]: next_report[0] = time() + 5 eta, eta_unit = compute_eta(begun, objects_queried[0], objects_listed) if not json_output: print('\r\x1B[KQuerying objects: %d of %d, %d%s left, %d ' 'retries' % (objects_queried[0], objects_listed, round(eta), eta_unit, retries_done[0]), end='') stdout.flush() object_parts = {} for obj in objects: part, nodes = object_ring.get_nodes(account, container, obj) if part not in object_parts: object_copies_expected[0] += len(nodes) object_parts[part] = part coropool.spawn(direct, obj, part, nodes) coropool.waitall() distinct_partitions = len(object_parts) copies_found = object_copies_found[0] copies_expected = object_copies_expected[0] value = 100.0 * copies_found / copies_expected elapsed, elapsed_unit = get_time_units(time() - begun) if not json_output: print('\r\x1B[KQueried %d objects for dispersion reporting, ' '%d%s, %d retries' % (objects_listed, round(elapsed), elapsed_unit, retries_done[0])) if objects_listed - distinct_partitions: print('There were %d overlapping partitions' % ( objects_listed - distinct_partitions)) for missing_copies, num_parts in object_copies_missing.items(): print(missing_string(num_parts, missing_copies, object_ring.replica_count)) print('%.02f%% of object copies found (%d of %d)' % (value, copies_found, copies_expected)) print('Sample represents %.02f%% of the object partition space' % ( 100.0 * distinct_partitions / object_ring.partition_count)) stdout.flush() return None else: results = {'retries': retries_done[0], 'overlapping': objects_listed - distinct_partitions, 'pct_found': value, 'copies_found': copies_found, 'copies_expected': copies_expected} for missing_copies, num_parts in object_copies_missing.items(): results['missing_%d' % (missing_copies,)] = num_parts return results def missing_string(partition_count, missing_copies, copy_count): exclamations = '' missing_string = str(missing_copies) if missing_copies == copy_count: exclamations = '!!! 
' missing_string = 'all' elif copy_count - missing_copies == 1: exclamations = '! ' verb_string = 'was' partition_string = 'partition' if partition_count > 1: verb_string = 'were' partition_string = 'partitions' copy_string = 'copies' if missing_copies == 1: copy_string = 'copy' return '%sThere %s %d %s missing %s %s.' % ( exclamations, verb_string, partition_count, partition_string, missing_string, copy_string ) def main(): patcher.monkey_patch() hubs.get_hub().debug_exceptions = False conffile = '/etc/swift/dispersion.conf' parser = OptionParser(usage=''' Usage: %%prog [options] [conf_file] [conf_file] defaults to %s'''.strip() % conffile) parser.add_option('-j', '--dump-json', action='store_true', default=False, help='dump dispersion report in json format') parser.add_option('-d', '--debug', action='store_true', default=False, help='print 404s to standard error') parser.add_option('-p', '--partitions', action='store_true', default=False, help='print missing partitions to standard error') parser.add_option('--container-only', action='store_true', default=False, help='Only run container report') parser.add_option('--object-only', action='store_true', default=False, help='Only run object report') parser.add_option('--insecure', action='store_true', default=False, help='Allow accessing insecure keystone server. ' 'The keystone\'s certificate will not be verified.') parser.add_option('-P', '--policy-name', dest='policy_name', help="Specify storage policy name") options, args = parser.parse_args() if args: conffile = args.pop(0) if options.debug: global debug debug = True c = ConfigParser() if not c.read(conffile): exit('Unable to read config file: %s' % conffile) conf = dict(c.items('dispersion')) if options.dump_json: conf['dump_json'] = 'yes' if options.object_only: conf['container_report'] = 'no' if options.container_only: conf['object_report'] = 'no' if options.insecure: conf['keystone_api_insecure'] = 'yes' if options.partitions: conf['partitions'] = 'yes' output = generate_report(conf, options.policy_name) if json_output: print(json.dumps(output)) def generate_report(conf, policy_name=None): global json_output json_output = config_true_value(conf.get('dump_json', 'no')) if policy_name is None: policy = POLICIES.default else: policy = POLICIES.get_by_name(policy_name) if policy is None: exit('Unable to find policy: %s' % policy_name) if not json_output: print('Using storage policy: %s ' % policy.name) swift_dir = conf.get('swift_dir', '/etc/swift') retries = int(conf.get('retries', 5)) concurrency = int(conf.get('concurrency', 25)) endpoint_type = str(conf.get('endpoint_type', 'publicURL')) region_name = str(conf.get('region_name', '')) container_report = config_true_value(conf.get('container_report', 'yes')) object_report = config_true_value(conf.get('object_report', 'yes')) if not (object_report or container_report): exit("Neither container or object report is set to run") user_domain_name = str(conf.get('user_domain_name', '')) project_domain_name = str(conf.get('project_domain_name', '')) project_name = str(conf.get('project_name', '')) insecure = config_true_value(conf.get('keystone_api_insecure', 'no')) coropool = GreenPool(size=concurrency) os_options = {'endpoint_type': endpoint_type} if user_domain_name: os_options['user_domain_name'] = user_domain_name if project_domain_name: os_options['project_domain_name'] = project_domain_name if project_name: os_options['project_name'] = project_name if region_name: os_options['region_name'] = region_name url, token = 
get_auth(conf['auth_url'], conf['auth_user'], conf['auth_key'], auth_version=conf.get('auth_version', '1.0'), os_options=os_options, insecure=insecure) account = url.rsplit('/', 1)[1] connpool = Pool(max_size=concurrency) connpool.create = lambda: SimpleClient( url=url, token=token, retries=retries) container_ring = Ring(swift_dir, ring_name='container') object_ring = Ring(swift_dir, ring_name=policy.ring_name) output = {} if container_report: output['container'] = container_dispersion_report( coropool, connpool, account, container_ring, retries, conf.get('partitions'), policy) if object_report: output['object'] = object_dispersion_report( coropool, connpool, account, object_ring, retries, conf.get('partitions'), policy) return output if __name__ == '__main__': main() swift-2.17.1/swift/common/0000775000175000017500000000000013435012120015373 5ustar zuulzuul00000000000000swift-2.17.1/swift/common/base_storage_server.py0000666000175000017500000000550713435012003022002 0ustar zuulzuul00000000000000# Copyright (c) 2010-2014 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect from swift import __version__ as swift_version from swift.common.utils import public, timing_stats, config_true_value from swift.common.swob import Response class BaseStorageServer(object): """ Implements common OPTIONS method for object, account, container servers. 
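A sketch of the subclassing pattern this enables (the class name and attribute value here are illustrative assumptions, not taken from this file):

    class ObjectController(BaseStorageServer):
        server_type = 'object-server'

With replication_server unset in the config, the OPTIONS response then advertises every publicly accessible method of the subclass.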
""" def __init__(self, conf, **kwargs): self._allowed_methods = None replication_server = conf.get('replication_server', None) if replication_server is not None: replication_server = config_true_value(replication_server) self.replication_server = replication_server @property def server_type(self): raise NotImplementedError( 'Storage nodes have not implemented the Server type.') @property def allowed_methods(self): if self._allowed_methods is None: self._allowed_methods = [] all_methods = inspect.getmembers(self, predicate=callable) if self.replication_server is True: for name, m in all_methods: if (getattr(m, 'publicly_accessible', False) and getattr(m, 'replication', False)): self._allowed_methods.append(name) elif self.replication_server is False: for name, m in all_methods: if (getattr(m, 'publicly_accessible', False) and not getattr(m, 'replication', False)): self._allowed_methods.append(name) elif self.replication_server is None: for name, m in all_methods: if getattr(m, 'publicly_accessible', False): self._allowed_methods.append(name) self._allowed_methods.sort() return self._allowed_methods @public @timing_stats() def OPTIONS(self, req): """ Base handler for OPTIONS requests :param req: swob.Request object :returns: swob.Response object """ # Prepare the default response headers = {'Allow': ', '.join(self.allowed_methods), 'Server': '%s/%s' % (self.server_type, swift_version)} resp = Response(status=200, request=req, headers=headers) return resp swift-2.17.1/swift/common/constraints.py0000666000175000017500000003675513435012015020341 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import functools import os from os.path import isdir # tighter scoped import for mocking import six from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError from six.moves import urllib from swift.common import utils, exceptions from swift.common.swob import HTTPBadRequest, HTTPLengthRequired, \ HTTPRequestEntityTooLarge, HTTPPreconditionFailed, HTTPNotImplemented, \ HTTPException MAX_FILE_SIZE = 5368709122 MAX_META_NAME_LENGTH = 128 MAX_META_VALUE_LENGTH = 256 MAX_META_COUNT = 90 MAX_META_OVERALL_SIZE = 4096 MAX_HEADER_SIZE = 8192 MAX_OBJECT_NAME_LENGTH = 1024 CONTAINER_LISTING_LIMIT = 10000 ACCOUNT_LISTING_LIMIT = 10000 MAX_ACCOUNT_NAME_LENGTH = 256 MAX_CONTAINER_NAME_LENGTH = 256 VALID_API_VERSIONS = ["v1", "v1.0"] EXTRA_HEADER_COUNT = 0 # If adding an entry to DEFAULT_CONSTRAINTS, note that # these constraints are automatically published by the # proxy server in responses to /info requests, with values # updated by reload_constraints() DEFAULT_CONSTRAINTS = { 'max_file_size': MAX_FILE_SIZE, 'max_meta_name_length': MAX_META_NAME_LENGTH, 'max_meta_value_length': MAX_META_VALUE_LENGTH, 'max_meta_count': MAX_META_COUNT, 'max_meta_overall_size': MAX_META_OVERALL_SIZE, 'max_header_size': MAX_HEADER_SIZE, 'max_object_name_length': MAX_OBJECT_NAME_LENGTH, 'container_listing_limit': CONTAINER_LISTING_LIMIT, 'account_listing_limit': ACCOUNT_LISTING_LIMIT, 'max_account_name_length': MAX_ACCOUNT_NAME_LENGTH, 'max_container_name_length': MAX_CONTAINER_NAME_LENGTH, 'valid_api_versions': VALID_API_VERSIONS, 'extra_header_count': EXTRA_HEADER_COUNT, } SWIFT_CONSTRAINTS_LOADED = False OVERRIDE_CONSTRAINTS = {} # any constraints overridden by SWIFT_CONF_FILE EFFECTIVE_CONSTRAINTS = {} # populated by reload_constraints def reload_constraints(): """ Parse SWIFT_CONF_FILE and reset module level global constraint attrs, populating OVERRIDE_CONSTRAINTS AND EFFECTIVE_CONSTRAINTS along the way. """ global SWIFT_CONSTRAINTS_LOADED, OVERRIDE_CONSTRAINTS SWIFT_CONSTRAINTS_LOADED = False OVERRIDE_CONSTRAINTS = {} constraints_conf = ConfigParser() if constraints_conf.read(utils.SWIFT_CONF_FILE): SWIFT_CONSTRAINTS_LOADED = True for name in DEFAULT_CONSTRAINTS: try: value = constraints_conf.get('swift-constraints', name) except NoOptionError: pass except NoSectionError: # We are never going to find the section for another option break else: try: value = int(value) except ValueError: value = utils.list_from_csv(value) OVERRIDE_CONSTRAINTS[name] = value for name, default in DEFAULT_CONSTRAINTS.items(): value = OVERRIDE_CONSTRAINTS.get(name, default) EFFECTIVE_CONSTRAINTS[name] = value # "globals" in this context is module level globals, always. globals()[name.upper()] = value reload_constraints() # Maximum slo segments in buffer MAX_BUFFERED_SLO_SEGMENTS = 10000 # By default the maximum number of allowed headers depends on the number of max # allowed metadata settings plus a default value of 36 for swift internally # generated headers and regular http headers. If for some reason this is not # enough (custom middleware for example) it can be increased with the # extra_header_count constraint. MAX_HEADER_COUNT = MAX_META_COUNT + 36 + max(EXTRA_HEADER_COUNT, 0) def check_metadata(req, target_type): """ Check metadata sent in the request headers. This should only check that the metadata in the request given is valid. Checks against account/container overall metadata should be forwarded on to its respective server to be checked. 
:param req: request object :param target_type: str: one of: object, container, or account: indicates which type the target storage for the metadata is :returns: HTTPBadRequest with bad metadata otherwise None """ target_type = target_type.lower() prefix = 'x-%s-meta-' % target_type meta_count = 0 meta_size = 0 for key, value in req.headers.items(): if (isinstance(value, six.string_types) and len(value) > MAX_HEADER_SIZE): return HTTPBadRequest(body='Header value too long: %s' % key[:MAX_META_NAME_LENGTH], request=req, content_type='text/plain') if not key.lower().startswith(prefix): continue key = key[len(prefix):] if not key: return HTTPBadRequest(body='Metadata name cannot be empty', request=req, content_type='text/plain') bad_key = not check_utf8(key) bad_value = value and not check_utf8(value) if target_type in ('account', 'container') and (bad_key or bad_value): return HTTPBadRequest(body='Metadata must be valid UTF-8', request=req, content_type='text/plain') meta_count += 1 meta_size += len(key) + len(value) if len(key) > MAX_META_NAME_LENGTH: return HTTPBadRequest( body='Metadata name too long: %s%s' % (prefix, key), request=req, content_type='text/plain') if len(value) > MAX_META_VALUE_LENGTH: return HTTPBadRequest( body='Metadata value longer than %d: %s%s' % ( MAX_META_VALUE_LENGTH, prefix, key), request=req, content_type='text/plain') if meta_count > MAX_META_COUNT: return HTTPBadRequest( body='Too many metadata items; max %d' % MAX_META_COUNT, request=req, content_type='text/plain') if meta_size > MAX_META_OVERALL_SIZE: return HTTPBadRequest( body='Total metadata too large; max %d' % MAX_META_OVERALL_SIZE, request=req, content_type='text/plain') return None def check_object_creation(req, object_name): """ Check to ensure that everything is alright about an object to be created. 
:param req: HTTP request object :param object_name: name of object to be created :returns: HTTPRequestEntityTooLarge -- the object is too large :returns: HTTPLengthRequired -- missing content-length header and not a chunked request :returns: HTTPBadRequest -- missing or bad content-type header, or bad metadata :returns: HTTPNotImplemented -- unsupported transfer-encoding header value """ try: ml = req.message_length() except ValueError as e: return HTTPBadRequest(request=req, content_type='text/plain', body=str(e)) except AttributeError as e: return HTTPNotImplemented(request=req, content_type='text/plain', body=str(e)) if ml is not None and ml > MAX_FILE_SIZE: return HTTPRequestEntityTooLarge(body='Your request is too large.', request=req, content_type='text/plain') if req.content_length is None and \ req.headers.get('transfer-encoding') != 'chunked': return HTTPLengthRequired(body='Missing Content-Length header.', request=req, content_type='text/plain') if len(object_name) > MAX_OBJECT_NAME_LENGTH: return HTTPBadRequest(body='Object name length of %d longer than %d' % (len(object_name), MAX_OBJECT_NAME_LENGTH), request=req, content_type='text/plain') if 'Content-Type' not in req.headers: return HTTPBadRequest(request=req, content_type='text/plain', body='No content type') try: req = check_delete_headers(req) except HTTPException as e: return HTTPBadRequest(request=req, body=e.body, content_type='text/plain') if not check_utf8(req.headers['Content-Type']): return HTTPBadRequest(request=req, body='Invalid Content-Type', content_type='text/plain') return check_metadata(req, 'object') def check_dir(root, drive): """ Verify that the path to the device is a directory and is a lesser constraint that is enforced when a full mount_check isn't possible with, for instance, a VM using loopback or partitions. :param root: base path where the dir is :param drive: drive name to be checked :returns: full path to the device, or None if drive fails to validate """ return check_drive(root, drive, False) def check_mount(root, drive): """ Verify that the path to the device is a mount point and mounted. This allows us to fast fail on drives that have been unmounted because of issues, and also prevents us for accidentally filling up the root partition. :param root: base path where the devices are mounted :param drive: drive name to be checked :returns: full path to the device, or None if drive fails to validate """ return check_drive(root, drive, True) def check_drive(root, drive, mount_check): """ Validate the path given by root and drive is a valid existing directory. :param root: base path where the devices are mounted :param drive: drive name to be checked :param mount_check: additionally require path is mounted :returns: full path to the device, or None if drive fails to validate """ if not (urllib.parse.quote_plus(drive) == drive): return None path = os.path.join(root, drive) if mount_check: if utils.ismount(path): return path else: if isdir(path): return path return None def check_float(string): """ Helper function for checking if a string can be converted to a float. :param string: string to be verified as a float :returns: True if the string can be converted to a float, False otherwise """ try: float(string) return True except ValueError: return False def valid_timestamp(request): """ Helper function to extract a timestamp from requests that require one. 
:param request: the swob request object :returns: a valid Timestamp instance :raises HTTPBadRequest: on missing or invalid X-Timestamp """ try: return request.timestamp except exceptions.InvalidTimestamp as e: raise HTTPBadRequest(body=str(e), request=request, content_type='text/plain') def check_delete_headers(request): """ Check that 'x-delete-after' and 'x-delete-at' headers have valid values. Values should be positive integers and correspond to a time greater than the request timestamp. If the 'x-delete-after' header is found then its value is used to compute an 'x-delete-at' value which takes precedence over any existing 'x-delete-at' header. :param request: the swob request object :raises: HTTPBadRequest in case of invalid values :returns: the swob request object """ now = float(valid_timestamp(request)) if 'x-delete-after' in request.headers: try: x_delete_after = int(request.headers['x-delete-after']) except ValueError: raise HTTPBadRequest(request=request, content_type='text/plain', body='Non-integer X-Delete-After') actual_del_time = utils.normalize_delete_at_timestamp( now + x_delete_after) if int(actual_del_time) <= now: raise HTTPBadRequest(request=request, content_type='text/plain', body='X-Delete-After in past') request.headers['x-delete-at'] = actual_del_time del request.headers['x-delete-after'] if 'x-delete-at' in request.headers: try: x_delete_at = int(utils.normalize_delete_at_timestamp( int(request.headers['x-delete-at']))) except ValueError: raise HTTPBadRequest(request=request, content_type='text/plain', body='Non-integer X-Delete-At') if x_delete_at <= now and not utils.config_true_value( request.headers.get('x-backend-replication', 'f')): raise HTTPBadRequest(request=request, content_type='text/plain', body='X-Delete-At in past') return request def check_utf8(string): """ Validate if a string is valid UTF-8 str or unicode and that it does not contain any null character. :param string: string to be validated :returns: True if the string is valid utf-8 str or unicode and contains no null characters, False otherwise """ if not string: return False try: if isinstance(string, six.text_type): string.encode('utf-8') else: decoded = string.decode('UTF-8') if decoded.encode('UTF-8') != string: return False # A UTF-8 string with surrogates in it is invalid. if any(0xD800 <= ord(codepoint) <= 0xDFFF for codepoint in decoded): return False return '\x00' not in string # If string is unicode, decode() will raise UnicodeEncodeError # So, we should catch both UnicodeDecodeError & UnicodeEncodeError except UnicodeError: return False def check_name_format(req, name, target_type): """ Validate that the header contains valid account or container name. :param req: HTTP request object :param name: header value to validate :param target_type: which header is being validated (Account or Container) :returns: A properly encoded account name or container name :raise HTTPPreconditionFailed: if account header is not well formatted. """ if not name: raise HTTPPreconditionFailed( request=req, body='%s name cannot be empty' % target_type) if isinstance(name, six.text_type): name = name.encode('utf-8') if '/' in name: raise HTTPPreconditionFailed( request=req, body='%s name cannot contain slashes' % target_type) return name check_account_format = functools.partial(check_name_format, target_type='Account') check_container_format = functools.partial(check_name_format, target_type='Container') def valid_api_version(version): """ Checks if the requested version is valid. 
Currently Swift only supports "v1" and "v1.0". """ global VALID_API_VERSIONS if not isinstance(VALID_API_VERSIONS, list): VALID_API_VERSIONS = [str(VALID_API_VERSIONS)] return version in VALID_API_VERSIONS swift-2.17.1/swift/common/request_helpers.py0000666000175000017500000006755413435012015021205 0ustar zuulzuul00000000000000# Copyright (c) 2010-2013 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Miscellaneous utility functions for use in generating responses. Why not swift.common.utils, you ask? Because this way we can import things from swob in here without creating circular imports. """ import hashlib import itertools import sys import time import six from six.moves.urllib.parse import unquote from swift.common.header_key_dict import HeaderKeyDict from swift import gettext_ as _ from swift.common.storage_policy import POLICIES from swift.common.exceptions import ListingIterError, SegmentError from swift.common.http import is_success from swift.common.swob import HTTPBadRequest, \ HTTPServiceUnavailable, Range, is_chunked, multi_range_iterator, \ HTTPPreconditionFailed from swift.common.utils import split_path, validate_device_partition, \ close_if_possible, maybe_multipart_byteranges_to_document_iters, \ multipart_byteranges_to_document_iters, parse_content_type, \ parse_content_range, csv_append, list_from_csv, Spliterator from swift.common.wsgi import make_subrequest OBJECT_TRANSIENT_SYSMETA_PREFIX = 'x-object-transient-sysmeta-' def get_param(req, name, default=None): """ Get parameters from an HTTP request ensuring proper handling UTF-8 encoding. :param req: request object :param name: parameter name :param default: result to return if the parameter is not found :returns: HTTP request parameter value (as UTF-8 encoded str, not unicode object) :raises HTTPBadRequest: if param not valid UTF-8 byte sequence """ value = req.params.get(name, default) if value and not isinstance(value, six.text_type): try: value.decode('utf8') # Ensure UTF8ness except UnicodeDecodeError: raise HTTPBadRequest( request=req, content_type='text/plain', body='"%s" parameter not valid UTF-8' % name) return value def get_name_and_placement(request, minsegs=1, maxsegs=None, rest_with_last=False): """ Utility function to split and validate the request path and storage policy. The storage policy index is extracted from the headers of the request and converted to a StoragePolicy instance. The remaining args are passed through to :meth:`split_and_validate_path`. :returns: a list, result of :meth:`split_and_validate_path` with the BaseStoragePolicy instance appended on the end :raises HTTPServiceUnavailable: if the path is invalid or no policy exists with the extracted policy_index. 
""" policy_index = request.headers.get('X-Backend-Storage-Policy-Index') policy = POLICIES.get_by_index(policy_index) if not policy: raise HTTPServiceUnavailable( body=_("No policy with index %s") % policy_index, request=request, content_type='text/plain') results = split_and_validate_path(request, minsegs=minsegs, maxsegs=maxsegs, rest_with_last=rest_with_last) results.append(policy) return results def split_and_validate_path(request, minsegs=1, maxsegs=None, rest_with_last=False): """ Utility function to split and validate the request path. :returns: result of :meth:`~swift.common.utils.split_path` if everything's okay :raises HTTPBadRequest: if something's not okay """ try: segs = split_path(unquote(request.path), minsegs, maxsegs, rest_with_last) validate_device_partition(segs[0], segs[1]) return segs except ValueError as err: raise HTTPBadRequest(body=str(err), request=request, content_type='text/plain') def is_user_meta(server_type, key): """ Tests if a header key starts with and is longer than the user metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= 8 + len(server_type): return False return key.lower().startswith(get_user_meta_prefix(server_type)) def is_sys_meta(server_type, key): """ Tests if a header key starts with and is longer than the system metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= 11 + len(server_type): return False return key.lower().startswith(get_sys_meta_prefix(server_type)) def is_sys_or_user_meta(server_type, key): """ Tests if a header key starts with and is longer than the user or system metadata prefix for given server type. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: True if the key satisfies the test, False otherwise """ return is_user_meta(server_type, key) or is_sys_meta(server_type, key) def is_object_transient_sysmeta(key): """ Tests if a header key starts with and is longer than the prefix for object transient system metadata. :param key: header key :returns: True if the key satisfies the test, False otherwise """ if len(key) <= len(OBJECT_TRANSIENT_SYSMETA_PREFIX): return False return key.lower().startswith(OBJECT_TRANSIENT_SYSMETA_PREFIX) def strip_user_meta_prefix(server_type, key): """ Removes the user metadata prefix for a given server type from the start of a header key. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: stripped header key """ if not is_user_meta(server_type, key): raise ValueError('Key is not user meta') return key[len(get_user_meta_prefix(server_type)):] def strip_sys_meta_prefix(server_type, key): """ Removes the system metadata prefix for a given server type from the start of a header key. :param server_type: type of backend server i.e. [account|container|object] :param key: header key :returns: stripped header key """ if not is_sys_meta(server_type, key): raise ValueError('Key is not sysmeta') return key[len(get_sys_meta_prefix(server_type)):] def strip_object_transient_sysmeta_prefix(key): """ Removes the object transient system metadata prefix from the start of a header key. 
:param key: header key :returns: stripped header key """ if not is_object_transient_sysmeta(key): raise ValueError('Key is not object transient sysmeta') return key[len(OBJECT_TRANSIENT_SYSMETA_PREFIX):] def get_user_meta_prefix(server_type): """ Returns the prefix for user metadata headers for given server type. This prefix defines the namespace for headers that will be persisted by backend servers. :param server_type: type of backend server i.e. [account|container|object] :returns: prefix string for server type's user metadata headers """ return 'x-%s-%s-' % (server_type.lower(), 'meta') def get_sys_meta_prefix(server_type): """ Returns the prefix for system metadata headers for given server type. This prefix defines the namespace for headers that will be persisted by backend servers. :param server_type: type of backend server i.e. [account|container|object] :returns: prefix string for server type's system metadata headers """ return 'x-%s-%s-' % (server_type.lower(), 'sysmeta') def get_object_transient_sysmeta(key): """ Returns the Object Transient System Metadata header for key. The Object Transient System Metadata namespace will be persisted by backend object servers. These headers are treated in the same way as object user metadata i.e. all headers in this namespace will be replaced on every POST request. :param key: metadata key :returns: the entire object transient system metadata header for key """ return '%s%s' % (OBJECT_TRANSIENT_SYSMETA_PREFIX, key) def remove_items(headers, condition): """ Removes items from a dict whose keys satisfy the given condition. :param headers: a dict of headers :param condition: a function that will be passed the header key as a single argument and should return True if the header is to be removed. :returns: a dict, possibly empty, of headers that have been removed """ removed = {} keys = filter(condition, headers) removed.update((key, headers.pop(key)) for key in keys) return removed def copy_header_subset(from_r, to_r, condition): """ Will copy desired subset of headers from from_r to to_r. :param from_r: a swob Request or Response :param to_r: a swob Request or Response :param condition: a function that will be passed the header key as a single argument and should return True if the header is to be copied. """ for k, v in from_r.headers.items(): if condition(k): to_r.headers[k] = v def check_path_header(req, name, length, error_msg): """ Validate that the value of path-like header is well formatted. We assume the caller ensures that specific header is present in req.headers. :param req: HTTP request object :param name: header name :param length: length of path segment check :param error_msg: error message for client :returns: A tuple with path parts according to length :raise: HTTPPreconditionFailed if header value is not well formatted. """ hdr = unquote(req.headers.get(name)) if not hdr.startswith('/'): hdr = '/' + hdr try: return split_path(hdr, length, length, True) except ValueError: raise HTTPPreconditionFailed( request=req, body=error_msg) class SegmentedIterable(object): """ Iterable that returns the object contents for a large object. :param req: original request object :param app: WSGI application from which segments will come :param listing_iter: iterable yielding the object segments to fetch, along with the byte subranges to fetch, in the form of a 5-tuple (object-path, object-etag, object-size, first-byte, last-byte). If object-etag is None, no MD5 verification will be done. If object-size is None, no length verification will be done. 
If first-byte and last-byte are None, then the entire object will be fetched. :param max_get_time: maximum permitted duration of a GET request (seconds) :param logger: logger object :param swift_source: value of swift.source in subrequest environ (just for logging) :param ua_suffix: string to append to user-agent. :param name: name of manifest (used in logging only) :param response_body_length: optional response body length for the response being sent to the client. """ def __init__(self, req, app, listing_iter, max_get_time, logger, ua_suffix, swift_source, name='', response_body_length=None): self.req = req self.app = app self.listing_iter = listing_iter self.max_get_time = max_get_time self.logger = logger self.ua_suffix = " " + ua_suffix self.swift_source = swift_source self.name = name self.response_body_length = response_body_length self.peeked_chunk = None self.app_iter = self._internal_iter() self.validated_first_segment = False self.current_resp = None def _coalesce_requests(self): start_time = time.time() pending_req = pending_etag = pending_size = None try: for seg_dict in self.listing_iter: if 'raw_data' in seg_dict: if pending_req: yield pending_req, pending_etag, pending_size to_yield = seg_dict['raw_data'][ seg_dict['first_byte']:seg_dict['last_byte'] + 1] yield to_yield, None, len(seg_dict['raw_data']) pending_req = pending_etag = pending_size = None continue seg_path, seg_etag, seg_size, first_byte, last_byte = ( seg_dict['path'], seg_dict.get('hash'), seg_dict.get('bytes'), seg_dict['first_byte'], seg_dict['last_byte']) if seg_size is not None: seg_size = int(seg_size) first_byte = first_byte or 0 go_to_end = last_byte is None or ( seg_size is not None and last_byte == seg_size - 1) if time.time() - start_time > self.max_get_time: raise SegmentError( 'While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) # The "multipart-manifest=get" query param ensures that the # segment is a plain old object, not some flavor of large # object; therefore, its etag is its MD5sum and hence we can # check it. path = seg_path + '?multipart-manifest=get' seg_req = make_subrequest( self.req.environ, path=path, method='GET', headers={'x-auth-token': self.req.headers.get( 'x-auth-token')}, agent=('%(orig)s ' + self.ua_suffix), swift_source=self.swift_source) seg_req_rangeval = None if first_byte != 0 or not go_to_end: seg_req_rangeval = "%s-%s" % ( first_byte, '' if go_to_end else last_byte) seg_req.headers['Range'] = "bytes=" + seg_req_rangeval # We can only coalesce if paths match and we know the segment # size (so we can check that the ranges will be allowed) if pending_req and pending_req.path == seg_req.path and \ seg_size is not None: # Make a new Range object so that we don't goof up the # existing one in case of invalid ranges. Note that a # range set with too many individual byteranges is # invalid, so we can combine N valid byteranges and 1 # valid byterange and get an invalid range set. if pending_req.range: new_range_str = str(pending_req.range) else: new_range_str = "bytes=0-%d" % (seg_size - 1) if seg_req.range: new_range_str += "," + seg_req_rangeval else: new_range_str += ",0-%d" % (seg_size - 1) if Range(new_range_str).ranges_for_length(seg_size): # Good news! We can coalesce the requests pending_req.headers['Range'] = new_range_str continue # else, Too many ranges, or too much backtracking, or ... 
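                # Illustrative example (hypothetical values): if the pending
                # request covers bytes 0-1023 of /a/c/seg and the next
                # segment wants bytes 2048-3071 of that same object, the two
                # collapse into one subrequest with
                # "Range: bytes=0-1023,2048-3071". Segments on different
                # object paths are never coalesced.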
if pending_req: yield pending_req, pending_etag, pending_size pending_req = seg_req pending_etag = seg_etag pending_size = seg_size except ListingIterError: e_type, e_value, e_traceback = sys.exc_info() if time.time() - start_time > self.max_get_time: raise SegmentError( 'While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) if pending_req: yield pending_req, pending_etag, pending_size six.reraise(e_type, e_value, e_traceback) if time.time() - start_time > self.max_get_time: raise SegmentError( 'While processing manifest %s, ' 'max LO GET time of %ds exceeded' % (self.name, self.max_get_time)) if pending_req: yield pending_req, pending_etag, pending_size def _internal_iter(self): bytes_left = self.response_body_length try: for data_or_req, seg_etag, seg_size in self._coalesce_requests(): if isinstance(data_or_req, bytes): chunk = data_or_req # ugly, awful overloading if bytes_left is None: yield chunk elif bytes_left >= len(chunk): yield chunk bytes_left -= len(chunk) else: yield chunk[:bytes_left] continue seg_req = data_or_req seg_resp = seg_req.get_response(self.app) if not is_success(seg_resp.status_int): close_if_possible(seg_resp.app_iter) raise SegmentError( 'While processing manifest %s, ' 'got %d while retrieving %s' % (self.name, seg_resp.status_int, seg_req.path)) elif ((seg_etag and (seg_resp.etag != seg_etag)) or (seg_size and (seg_resp.content_length != seg_size) and not seg_req.range)): # The content-length check is for security reasons. Seems # possible that an attacker could upload a >1mb object and # then replace it with a much smaller object with same # etag. Then create a big nested SLO that calls that # object many times which would hammer our obj servers. If # this is a range request, don't check content-length # because it won't match. close_if_possible(seg_resp.app_iter) raise SegmentError( 'Object segment no longer valid: ' '%(path)s etag: %(r_etag)s != %(s_etag)s or ' '%(r_size)s != %(s_size)s.' 
                        % {'path': seg_req.path, 'r_etag': seg_resp.etag,
                           'r_size': seg_resp.content_length,
                           's_etag': seg_etag, 's_size': seg_size})
                else:
                    self.current_resp = seg_resp

                seg_hash = None
                if seg_resp.etag and not seg_req.headers.get('Range'):
                    # Only calculate the MD5 if we can use it to validate
                    seg_hash = hashlib.md5()

                document_iters = maybe_multipart_byteranges_to_document_iters(
                    seg_resp.app_iter,
                    seg_resp.headers['Content-Type'])

                for chunk in itertools.chain.from_iterable(document_iters):
                    if seg_hash:
                        seg_hash.update(chunk)

                    if bytes_left is None:
                        yield chunk
                    elif bytes_left >= len(chunk):
                        yield chunk
                        bytes_left -= len(chunk)
                    else:
                        yield chunk[:bytes_left]
                        bytes_left -= len(chunk)
                        close_if_possible(seg_resp.app_iter)
                        raise SegmentError(
                            'Too many bytes for %(name)s; truncating in '
                            '%(seg)s with %(left)d bytes left'
                            % {'name': self.name, 'seg': seg_req.path,
                               'left': bytes_left})
                close_if_possible(seg_resp.app_iter)

                if seg_hash and seg_hash.hexdigest() != seg_resp.etag:
                    raise SegmentError(
                        "Bad MD5 checksum in %(name)s for %(seg)s: headers had"
                        " %(etag)s, but object MD5 was actually %(actual)s"
                        % {'seg': seg_req.path, 'etag': seg_resp.etag,
                           'name': self.name,
                           'actual': seg_hash.hexdigest()})

            if bytes_left:
                raise SegmentError(
                    'Not enough bytes for %s; closing connection' % self.name)
        except (ListingIterError, SegmentError) as err:
            self.logger.error(err)
            if not self.validated_first_segment:
                raise
        finally:
            if self.current_resp:
                close_if_possible(self.current_resp.app_iter)

    def app_iter_range(self, *a, **kw):
        """
        swob.Response will only respond with a 206 status in certain cases;
        one of those is if the body iterator responds to .app_iter_range().

        However, this object (or really, its listing iter) is smart enough to
        handle the range stuff internally, so we just no-op this out for swob.
        """
        return self

    def app_iter_ranges(self, ranges, content_type, boundary, content_size):
        """
        This method assumes that iter(self) yields all the data bytes that
        go into the response, but none of the MIME stuff. For example, if
        the response will contain three MIME docs with data "abcd", "efgh",
        and "ijkl", then iter(self) will give out the bytes "abcdefghijkl".
        This method inserts the MIME stuff around the data bytes.
        """
        si = Spliterator(self)
        mri = multi_range_iterator(
            ranges, content_type, boundary, content_size,
            lambda start, end_plus_one: si.take(end_plus_one - start))
        try:
            for x in mri:
                yield x
        finally:
            self.close()

    def validate_first_segment(self):
        """
        Start fetching object data to ensure that the first segment (if any)
        is valid. This is to catch cases like "first segment is missing" or
        "first segment's etag doesn't match manifest".

        Note: this does not validate that you have any segments. A
        zero-segment large object is not erroneous; it is just empty.
        """
        if self.validated_first_segment:
            return

        try:
            self.peeked_chunk = next(self.app_iter)
        except StopIteration:
            pass
        finally:
            self.validated_first_segment = True

    def __iter__(self):
        if self.peeked_chunk is not None:
            pc = self.peeked_chunk
            self.peeked_chunk = None
            return itertools.chain([pc], self.app_iter)
        else:
            return self.app_iter

    def close(self):
        """
        Called when the client disconnects. Ensure that the connection to
        the backend server is closed.
        """
        close_if_possible(self.app_iter)


def http_response_to_document_iters(response, read_chunk_size=4096):
    """
    Takes a successful object-GET HTTP response and turns it into an
    iterator of (first-byte, last-byte, length, headers, body-file)
    5-tuples.
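    For example, a 206 response carrying ``Content-Range: bytes 5-10/1000``
    yields the single tuple ``(5, 10, 1000, headers, body)``.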
The response must either be a 200 or a 206; if you feed in a 204 or something similar, this probably won't work. :param response: HTTP response, like from bufferedhttp.http_connect(), not a swob.Response. """ chunked = is_chunked(dict(response.getheaders())) if response.status == 200: if chunked: # Single "range" that's the whole object with an unknown length return iter([(0, None, None, response.getheaders(), response)]) # Single "range" that's the whole object content_length = int(response.getheader('Content-Length')) return iter([(0, content_length - 1, content_length, response.getheaders(), response)]) content_type, params_list = parse_content_type( response.getheader('Content-Type')) if content_type != 'multipart/byteranges': # Single range; no MIME framing, just the bytes. The start and end # byte indices are in the Content-Range header. start, end, length = parse_content_range( response.getheader('Content-Range')) return iter([(start, end, length, response.getheaders(), response)]) else: # Multiple ranges; the response body is a multipart/byteranges MIME # document, and we have to parse it using the MIME boundary # extracted from the Content-Type header. params = dict(params_list) return multipart_byteranges_to_document_iters( response, params['boundary'], read_chunk_size) def update_etag_is_at_header(req, name): """ Helper function to update an X-Backend-Etag-Is-At header whose value is a list of alternative header names at which the actual object etag may be found. This informs the object server where to look for the actual object etag when processing conditional requests. Since the proxy server and/or middleware may set alternative etag header names, the value of X-Backend-Etag-Is-At is a comma separated list which the object server inspects in order until it finds an etag value. :param req: a swob Request :param name: name of a sysmeta where alternative etag may be found """ if ',' in name: # HTTP header names should not have commas but we'll check anyway raise ValueError('Header name must not contain commas') existing = req.headers.get("X-Backend-Etag-Is-At") req.headers["X-Backend-Etag-Is-At"] = csv_append( existing, name) def resolve_etag_is_at_header(req, metadata): """ Helper function to resolve an alternative etag value that may be stored in metadata under an alternate name. The value of the request's X-Backend-Etag-Is-At header (if it exists) is a comma separated list of alternate names in the metadata at which an alternate etag value may be found. This list is processed in order until an alternate etag is found. The left most value in X-Backend-Etag-Is-At will have been set by the left most middleware, or if no middleware, by ECObjectController, if an EC policy is in use. The left most middleware is assumed to be the authority on what the etag value of the object content is. The resolver will work from left to right in the list until it finds a value that is a name in the given metadata. So the left most wins, IF it exists in the metadata. By way of example, assume the encrypter middleware is installed. If an object is *not* encrypted then the resolver will not find the encrypter middleware's alternate etag sysmeta (X-Object-Sysmeta-Crypto-Etag) but will then find the EC alternate etag (if EC policy). But if the object *is* encrypted then X-Object-Sysmeta-Crypto-Etag is found and used, which is correct because it should be preferred over X-Object-Sysmeta-Ec-Etag. 
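For example, given ``X-Backend-Etag-Is-At: X-Object-Sysmeta-Crypto-Etag, X-Object-Sysmeta-Ec-Etag``, the crypto etag is returned whenever that sysmeta is present in the metadata, and the EC etag is consulted only otherwise.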
:param req: a swob Request :param metadata: a dict containing object metadata :return: an alternate etag value if any is found, otherwise None """ alternate_etag = None metadata = HeaderKeyDict(metadata) if "X-Backend-Etag-Is-At" in req.headers: names = list_from_csv(req.headers["X-Backend-Etag-Is-At"]) for name in names: if name in metadata: alternate_etag = metadata[name] break return alternate_etag swift-2.17.1/swift/common/__init__.py0000666000175000017500000000004313435012003017503 0ustar zuulzuul00000000000000"""Code common to all of Swift.""" swift-2.17.1/swift/common/memcached.py0000666000175000017500000004603213435012015017665 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """ Why our own memcache client? By Michael Barton python-memcached doesn't use consistent hashing, so adding or removing a memcache server from the pool invalidates a huge percentage of cached items. If you keep a pool of python-memcached client objects, each client object has its own connection to every memcached server, only one of which is ever in use. So you wind up with n * m open sockets and almost all of them idle. This client effectively has a pool for each server, so the number of backend connections is hopefully greatly reduced. python-memcache uses pickle to store things, and there was already a huge stink about Swift using pickles in memcache (http://osvdb.org/show/osvdb/86581). That seemed sort of unfair, since nova and keystone and everyone else use pickles for memcache too, but it's hidden behind a "standard" library. But changing would be a security regression at this point. Also, pylibmc wouldn't work for us because it needs to use python sockets in order to play nice with eventlet. Lucid comes with memcached: v1.4.2. Protocol documentation for that version is at: http://github.com/memcached/memcached/blob/1.4.2/doc/protocol.txt """ import six.moves.cPickle as pickle import json import logging import time from bisect import bisect from hashlib import md5 from eventlet.green import socket from eventlet.pools import Pool from eventlet import Timeout from six.moves import range from swift.common import utils DEFAULT_MEMCACHED_PORT = 11211 CONN_TIMEOUT = 0.3 POOL_TIMEOUT = 1.0 # WAG IO_TIMEOUT = 2.0 PICKLE_FLAG = 1 JSON_FLAG = 2 NODE_WEIGHT = 50 PICKLE_PROTOCOL = 2 TRY_COUNT = 3 # if ERROR_LIMIT_COUNT errors occur in ERROR_LIMIT_TIME seconds, the server # will be considered failed for ERROR_LIMIT_DURATION seconds. ERROR_LIMIT_COUNT = 10 ERROR_LIMIT_TIME = 60 ERROR_LIMIT_DURATION = 60 def md5hash(key): return md5(key).hexdigest() def sanitize_timeout(timeout): """ Sanitize a timeout value to use an absolute expiration time if the delta is greater than 30 days (in seconds). Note that the memcached server translates negative values to mean a delta of 30 days in seconds (and 1 additional second), client beware. 
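For example, a timeout of 3600 is passed through unchanged, while 2592001 (30 days plus one second) is converted to an absolute Unix timestamp before being sent to the server.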
""" if timeout > (30 * 24 * 60 * 60): timeout += time.time() return timeout class MemcacheConnectionError(Exception): pass class MemcachePoolTimeout(Timeout): pass class MemcacheConnPool(Pool): """ Connection pool for Memcache Connections The *server* parameter can be a hostname, an IPv4 address, or an IPv6 address with an optional port. See :func:`swift.common.utils.parse_socket_string` for details. """ def __init__(self, server, size, connect_timeout): Pool.__init__(self, max_size=size) self.host, self.port = utils.parse_socket_string( server, DEFAULT_MEMCACHED_PORT) self._connect_timeout = connect_timeout def create(self): addrs = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM) family, socktype, proto, canonname, sockaddr = addrs[0] sock = socket.socket(family, socket.SOCK_STREAM) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) with Timeout(self._connect_timeout): sock.connect(sockaddr) return (sock.makefile(), sock) def get(self): fp, sock = super(MemcacheConnPool, self).get() if fp is None: # An error happened previously, so we need a new connection fp, sock = self.create() return fp, sock class MemcacheRing(object): """ Simple, consistent-hashed memcache client. """ def __init__(self, servers, connect_timeout=CONN_TIMEOUT, io_timeout=IO_TIMEOUT, pool_timeout=POOL_TIMEOUT, tries=TRY_COUNT, allow_pickle=False, allow_unpickle=False, max_conns=2): self._ring = {} self._errors = dict(((serv, []) for serv in servers)) self._error_limited = dict(((serv, 0) for serv in servers)) for server in sorted(servers): for i in range(NODE_WEIGHT): self._ring[md5hash('%s-%s' % (server, i))] = server self._tries = tries if tries <= len(servers) else len(servers) self._sorted = sorted(self._ring) self._client_cache = dict(((server, MemcacheConnPool(server, max_conns, connect_timeout)) for server in servers)) self._connect_timeout = connect_timeout self._io_timeout = io_timeout self._pool_timeout = pool_timeout self._allow_pickle = allow_pickle self._allow_unpickle = allow_unpickle or allow_pickle def _exception_occurred(self, server, e, action='talking', sock=None, fp=None, got_connection=True): if isinstance(e, Timeout): logging.error("Timeout %(action)s to memcached: %(server)s", {'action': action, 'server': server}) elif isinstance(e, (socket.error, MemcacheConnectionError)): logging.error("Error %(action)s to memcached: %(server)s: %(err)s", {'action': action, 'server': server, 'err': e}) else: logging.exception("Error %(action)s to memcached: %(server)s", {'action': action, 'server': server}) try: if fp: fp.close() del fp except Exception: pass try: if sock: sock.close() del sock except Exception: pass if got_connection: # We need to return something to the pool # A new connection will be created the next time it is retrieved self._return_conn(server, None, None) now = time.time() self._errors[server].append(time.time()) if len(self._errors[server]) > ERROR_LIMIT_COUNT: self._errors[server] = [err for err in self._errors[server] if err > now - ERROR_LIMIT_TIME] if len(self._errors[server]) > ERROR_LIMIT_COUNT: self._error_limited[server] = now + ERROR_LIMIT_DURATION logging.error('Error limiting server %s', server) def _get_conns(self, key): """ Retrieves a server conn from the pool, or connects a new one. Chooses the server based on a consistent hash of "key". 
""" pos = bisect(self._sorted, key) served = [] while len(served) < self._tries: pos = (pos + 1) % len(self._sorted) server = self._ring[self._sorted[pos]] if server in served: continue served.append(server) if self._error_limited[server] > time.time(): continue sock = None try: with MemcachePoolTimeout(self._pool_timeout): fp, sock = self._client_cache[server].get() yield server, fp, sock except MemcachePoolTimeout as e: self._exception_occurred( server, e, action='getting a connection', got_connection=False) except (Exception, Timeout) as e: # Typically a Timeout exception caught here is the one raised # by the create() method of this server's MemcacheConnPool # object. self._exception_occurred( server, e, action='connecting', sock=sock) def _return_conn(self, server, fp, sock): """Returns a server connection to the pool.""" self._client_cache[server].put((fp, sock)) def set(self, key, value, serialize=True, time=0, min_compress_len=0): """ Set a key/value pair in memcache :param key: key :param value: value :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) :param time: the time to live :param min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. This implementation ignores it. """ key = md5hash(key) timeout = sanitize_timeout(time) flags = 0 if serialize and self._allow_pickle: value = pickle.dumps(value, PICKLE_PROTOCOL) flags |= PICKLE_FLAG elif serialize: value = json.dumps(value) flags |= JSON_FLAG for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('set %s %d %d %s\r\n%s\r\n' % (key, flags, timeout, len(value), value)) # Wait for the set to complete fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def get(self, key): """ Gets the object specified by key. It will also unserialize the object before returning if it is serialized in memcache with JSON, or if it is pickled and unpickling is allowed. :param key: key :returns: value of the key in memcache """ key = md5hash(key) value = None for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('get %s\r\n' % key) line = fp.readline().strip().split() while True: if not line: raise MemcacheConnectionError('incomplete read') if line[0].upper() == 'END': break if line[0].upper() == 'VALUE' and line[1] == key: size = int(line[3]) value = fp.read(size) if int(line[2]) & PICKLE_FLAG: if self._allow_unpickle: value = pickle.loads(value) else: value = None elif int(line[2]) & JSON_FLAG: value = json.loads(value) fp.readline() line = fp.readline().strip().split() self._return_conn(server, fp, sock) return value except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def incr(self, key, delta=1, time=0): """ Increments a key which has a numeric value by delta. If the key can't be found, it's added as delta or 0 if delta < 0. If passed a negative number, will use memcached's decr. Returns the int stored in memcached Note: The data memcached stores as the result of incr/decr is an unsigned int. decr's that result in a number below 0 are stored as 0. 
:param key: key :param delta: amount to add to the value of key (or set as the value if the key is not found) will be cast to an int :param time: the time to live :returns: result of incrementing :raises MemcacheConnectionError: """ key = md5hash(key) command = 'incr' if delta < 0: command = 'decr' delta = str(abs(int(delta))) timeout = sanitize_timeout(time) for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('%s %s %s\r\n' % (command, key, delta)) line = fp.readline().strip().split() if not line: raise MemcacheConnectionError('incomplete read') if line[0].upper() == 'NOT_FOUND': add_val = delta if command == 'decr': add_val = '0' sock.sendall('add %s %d %d %s\r\n%s\r\n' % (key, 0, timeout, len(add_val), add_val)) line = fp.readline().strip().split() if line[0].upper() == 'NOT_STORED': sock.sendall('%s %s %s\r\n' % (command, key, delta)) line = fp.readline().strip().split() ret = int(line[0].strip()) else: ret = int(add_val) else: ret = int(line[0].strip()) self._return_conn(server, fp, sock) return ret except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) raise MemcacheConnectionError("No Memcached connections succeeded.") def decr(self, key, delta=1, time=0): """ Decrements a key which has a numeric value by delta. Calls incr with -delta. :param key: key :param delta: amount to subtract to the value of key (or set the value to 0 if the key is not found) will be cast to an int :param time: the time to live :returns: result of decrementing :raises MemcacheConnectionError: """ return self.incr(key, delta=-delta, time=time) def delete(self, key): """ Deletes a key/value pair from memcache. :param key: key to be deleted """ key = md5hash(key) for (server, fp, sock) in self._get_conns(key): try: with Timeout(self._io_timeout): sock.sendall('delete %s\r\n' % key) # Wait for the delete to complete fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def set_multi(self, mapping, server_key, serialize=True, time=0, min_compress_len=0): """ Sets multiple key/value pairs in memcache. :param mapping: dictionary of keys and values to be set in memcache :param server_key: key to use in determining which server in the ring is used :param serialize: if True, value is serialized with JSON before sending to memcache, or with pickle if configured to use pickle instead of JSON (to avoid cache poisoning) :param time: the time to live :min_compress_len: minimum compress length, this parameter was added to keep the signature compatible with python-memcached interface. This implementation ignores it """ server_key = md5hash(server_key) timeout = sanitize_timeout(time) msg = '' for key, value in mapping.items(): key = md5hash(key) flags = 0 if serialize and self._allow_pickle: value = pickle.dumps(value, PICKLE_PROTOCOL) flags |= PICKLE_FLAG elif serialize: value = json.dumps(value) flags |= JSON_FLAG msg += ('set %s %d %d %s\r\n%s\r\n' % (key, flags, timeout, len(value), value)) for (server, fp, sock) in self._get_conns(server_key): try: with Timeout(self._io_timeout): sock.sendall(msg) # Wait for the set to complete for line in range(len(mapping)): fp.readline() self._return_conn(server, fp, sock) return except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) def get_multi(self, keys, server_key): """ Gets multiple values from memcache for the given keys. 
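Values are returned in the same order as ``keys``; ``None`` is returned in place of any key that is not found.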
:param keys: keys for values to be retrieved from memcache :param server_key: key to use in determining which server in the ring is used :returns: list of values """ server_key = md5hash(server_key) keys = [md5hash(key) for key in keys] for (server, fp, sock) in self._get_conns(server_key): try: with Timeout(self._io_timeout): sock.sendall('get %s\r\n' % ' '.join(keys)) line = fp.readline().strip().split() responses = {} while True: if not line: raise MemcacheConnectionError('incomplete read') if line[0].upper() == 'END': break if line[0].upper() == 'VALUE': size = int(line[3]) value = fp.read(size) if int(line[2]) & PICKLE_FLAG: if self._allow_unpickle: value = pickle.loads(value) else: value = None elif int(line[2]) & JSON_FLAG: value = json.loads(value) responses[line[1]] = value fp.readline() line = fp.readline().strip().split() values = [] for key in keys: if key in responses: values.append(responses[key]) else: values.append(None) self._return_conn(server, fp, sock) return values except (Exception, Timeout) as e: self._exception_occurred(server, e, sock=sock, fp=fp) swift-2.17.1/swift/common/daemon.py0000666000175000017500000002604113435012015017220 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import errno import os import sys import time import signal from re import sub import eventlet.debug from eventlet.hubs import use_hub from swift.common import utils class Daemon(object): """ Daemon base class A daemon has a run method that accepts a ``once`` kwarg and will dispatch to :meth:`run_once` or :meth:`run_forever`. A subclass of Daemon must implement :meth:`run_once` and :meth:`run_forever`. A subclass of Daemon may override :meth:`get_worker_args` to dispatch arguments to individual child process workers and :meth:`is_healthy` to perform context specific periodic wellness checks which can reset worker arguments. Implementations of Daemon do not know *how* to daemonize, or execute multiple daemonized workers, they simply provide the behavior of the daemon and context specific knowledge about how workers should be started. """ def __init__(self, conf): self.conf = conf self.logger = utils.get_logger(conf, log_route='daemon') def run_once(self, *args, **kwargs): """Override this to run the script once""" raise NotImplementedError('run_once not implemented') def run_forever(self, *args, **kwargs): """Override this to run forever""" raise NotImplementedError('run_forever not implemented') def run(self, once=False, **kwargs): if once: self.run_once(**kwargs) else: self.run_forever(**kwargs) def get_worker_args(self, once=False, **kwargs): """ For each worker yield a (possibly empty) dict of kwargs to pass along to the daemon's :meth:`run` method after fork. The length of elements returned from this method will determine the number of processes created. If the returned iterable is empty, the Strategy will fallback to run-inline strategy. 
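For example (purely illustrative), returning ``[{'device': 'sda'}, {'device': 'sdb'}]`` would fork two workers, each receiving its dict as the kwargs to its :meth:`run` call.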
:param once: False if the worker(s) will be daemonized, True if the worker(s) will be run once :param kwargs: plumbed through via command line argparser :returns: an iterable of dicts, each element represents the kwargs to be passed to a single worker's :meth:`run` method after fork. """ return [] def is_healthy(self): """ This method is called very frequently on the instance of the daemon held by the parent process. If it returns False, all child workers are terminated, and new workers will be created. :returns: a boolean, True only if all workers should continue to run """ return True class DaemonStrategy(object): """ This is the execution strategy for using subclasses of Daemon. The default behavior is to invoke the daemon's :meth:`Daemon.run` method from within the parent process. When the :meth:`Daemon.run` method returns the parent process will exit. However, if the Daemon returns a non-empty iterable from :meth:`Daemon.get_worker_args`, the daemon's :meth:`Daemon.run` method will be invoked in child processes, with the arguments provided from the parent process's instance of the daemon. If a child process exits it will be restarted with the same options, unless it was executed in once mode. :param daemon: an instance of a :class:`Daemon` (has a `run` method) :param logger: a logger instance """ def __init__(self, daemon, logger): self.daemon = daemon self.logger = logger self.running = False # only used by multi-worker strategy self.options_by_pid = {} self.unspawned_worker_options = [] def setup(self, **kwargs): utils.validate_configuration() utils.drop_privileges(self.daemon.conf.get('user', 'swift')) utils.capture_stdio(self.logger, **kwargs) def kill_children(*args): self.running = False self.logger.info('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) os.killpg(0, signal.SIGTERM) os._exit(0) signal.signal(signal.SIGTERM, kill_children) self.running = True def _run_inline(self, once=False, **kwargs): """Run the daemon""" self.daemon.run(once=once, **kwargs) def run(self, once=False, **kwargs): """Daemonize and execute our strategy""" self.setup(**kwargs) try: self._run(once=once, **kwargs) except KeyboardInterrupt: self.logger.notice('User quit') finally: self.cleanup() self.running = False def _fork(self, once, **kwargs): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) self.daemon.run(once, **kwargs) self.logger.debug('Forked worker %s finished', os.getpid()) # do not return from this stack, nor execute any finally blocks os._exit(0) else: self.register_worker_start(pid, kwargs) return pid def iter_unspawned_workers(self): while True: try: per_worker_options = self.unspawned_worker_options.pop() except IndexError: return yield per_worker_options def spawned_pids(self): return self.options_by_pid.keys() def register_worker_start(self, pid, per_worker_options): self.logger.debug('Spawned worker %s with %r', pid, per_worker_options) self.options_by_pid[pid] = per_worker_options def register_worker_exit(self, pid): self.unspawned_worker_options.append(self.options_by_pid.pop(pid)) def ask_daemon_to_prepare_workers(self, once, **kwargs): self.unspawned_worker_options = list( self.daemon.get_worker_args(once=once, **kwargs)) def abort_workers_if_daemon_would_like(self): if not self.daemon.is_healthy(): self.logger.debug( 'Daemon needs to change options, aborting workers') self.cleanup() return True return False def check_on_all_running_workers(self): for p in self.spawned_pids(): try: pid, status 
= os.waitpid(p, os.WNOHANG) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise self.logger.notice('Worker %s died', p) else: if pid == 0: # child still running continue self.logger.debug('Worker %s exited', p) self.register_worker_exit(p) def _run(self, once, **kwargs): self.ask_daemon_to_prepare_workers(once, **kwargs) if not self.unspawned_worker_options: return self._run_inline(once, **kwargs) for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 while self.running: if self.abort_workers_if_daemon_would_like(): self.ask_daemon_to_prepare_workers(once, **kwargs) self.check_on_all_running_workers() if not once: for per_worker_options in self.iter_unspawned_workers(): if self._fork(once, **per_worker_options) == 0: return 0 else: if not self.spawned_pids(): self.logger.notice('Finished %s', os.getpid()) break time.sleep(0.1) return 0 def cleanup(self): for p in self.spawned_pids(): try: os.kill(p, signal.SIGTERM) except OSError as err: if err.errno not in (errno.ESRCH, errno.EINTR, errno.ECHILD): raise self.register_worker_exit(p) self.logger.debug('Cleaned up worker %s', p) def run_daemon(klass, conf_file, section_name='', once=False, **kwargs): """ Loads settings from conf, then instantiates daemon ``klass`` and runs the daemon with the specified ``once`` kwarg. The section_name will be derived from the daemon ``klass`` if not provided (e.g. ObjectReplicator => object-replicator). :param klass: Class to instantiate, subclass of :class:`Daemon` :param conf_file: Path to configuration file :param section_name: Section name from conf file to load config from :param once: Passed to daemon :meth:`Daemon.run` method """ # very often the config section_name is based on the class name # the None singleton will be passed through to readconf as is if section_name is '': section_name = sub(r'([a-z])([A-Z])', r'\1-\2', klass.__name__).lower() try: conf = utils.readconf(conf_file, section_name, log_name=kwargs.get('log_name')) except (ValueError, IOError) as e: # The message will be printed to stderr # and results in an exit code of 1. sys.exit(e) use_hub(utils.get_hub()) # once on command line (i.e. daemonize=false) will over-ride config once = once or not utils.config_true_value(conf.get('daemonize', 'true')) # pre-configure logger if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger = utils.get_logger(conf, conf.get('log_name', section_name), log_to_console=kwargs.pop('verbose', False), log_route=section_name) # optional nice/ionice priority scheduling utils.modify_priority(conf, logger) # disable fallocate if desired if utils.config_true_value(conf.get('disable_fallocate', 'no')): utils.disable_fallocate() # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # By default, disable eventlet printing stacktraces eventlet_debug = utils.config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on # some platforms. This locks in reported times to UTC. 
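    # Note the POSIX sign convention: 'UTC+0' means a zero offset, i.e.
    # local time plus zero hours equals UTC.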
os.environ['TZ'] = 'UTC+0' time.tzset() logger.notice('Starting %s', os.getpid()) try: DaemonStrategy(klass(conf), logger).run(once=once, **kwargs) except KeyboardInterrupt: logger.info('User quit') logger.notice('Exited %s', os.getpid()) swift-2.17.1/swift/common/linkat.py0000666000175000017500000000421713435012015017240 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import os import ctypes from ctypes.util import find_library __all__ = ['linkat'] class Linkat(object): # From include/uapi/linux/fcntl.h AT_FDCWD = -100 AT_SYMLINK_FOLLOW = 0x400 __slots__ = '_c_linkat' def __init__(self): libc = ctypes.CDLL(find_library('c'), use_errno=True) try: c_linkat = libc.linkat except AttributeError: self._c_linkat = None return c_linkat.argtypes = [ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_int] c_linkat.restype = ctypes.c_int def errcheck(result, func, arguments): if result == -1: errno = ctypes.set_errno(0) raise IOError(errno, 'linkat: %s' % os.strerror(errno)) else: return result c_linkat.errcheck = errcheck self._c_linkat = c_linkat @property def available(self): return self._c_linkat is not None def __call__(self, olddirfd, oldpath, newdirfd, newpath, flags): """ linkat() creates a new link (also known as a hard link) to an existing file. See `man 2 linkat` for more info. """ if not self.available: raise EnvironmentError('linkat not available') if not isinstance(olddirfd, int) or not isinstance(newdirfd, int): raise TypeError("fd must be an integer.") return self._c_linkat(olddirfd, oldpath, newdirfd, newpath, flags) linkat = Linkat() del Linkat swift-2.17.1/swift/common/storage_policy.py0000666000175000017500000010453213435012015021002 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import logging import os import string import sys import textwrap import six from six.moves.configparser import ConfigParser from swift.common.utils import ( config_true_value, quorum_size, whataremyips, list_from_csv, config_positive_int_value, get_zero_indexed_base_string) from swift.common.ring import Ring, RingData from swift.common import utils from swift.common.exceptions import RingLoadError from pyeclib.ec_iface import ECDriver, ECDriverError, VALID_EC_TYPES LEGACY_POLICY_NAME = 'Policy-0' VALID_CHARS = '-' + string.ascii_letters + string.digits DEFAULT_POLICY_TYPE = REPL_POLICY = 'replication' EC_POLICY = 'erasure_coding' DEFAULT_EC_OBJECT_SEGMENT_SIZE = 1048576 class BindPortsCache(object): def __init__(self, swift_dir, bind_ip): self.swift_dir = swift_dir self.mtimes_by_ring_path = {} self.portsets_by_ring_path = {} self.my_ips = set(whataremyips(bind_ip)) def all_bind_ports_for_node(self): """ Given an iterable of IP addresses identifying a storage backend server, return a set of all bind ports defined in all rings for this storage backend server. The caller is responsible for not calling this method (which performs at least a stat on all ring files) too frequently. """ # NOTE: we don't worry about disappearing rings here because you can't # ever delete a storage policy. for policy in POLICIES: # NOTE: we must NOT use policy.load_ring to load the ring. Users # of this utility function will not need the actual ring data, just # the bind ports. # # This is duplicated with Ring.__init__ just a bit... serialized_path = os.path.join(self.swift_dir, policy.ring_name + '.ring.gz') try: new_mtime = os.path.getmtime(serialized_path) except OSError: continue old_mtime = self.mtimes_by_ring_path.get(serialized_path) if not old_mtime or old_mtime != new_mtime: self.portsets_by_ring_path[serialized_path] = set( dev['port'] for dev in RingData.load(serialized_path, metadata_only=True).devs if dev and dev['ip'] in self.my_ips) self.mtimes_by_ring_path[serialized_path] = new_mtime # No "break" here so that the above line will update the # mtimes_by_ring_path entry for any ring that changes, not just # the first one we notice. # Return the requested set of ports from our (now-freshened) cache return six.moves.reduce(set.union, self.portsets_by_ring_path.values(), set()) class PolicyError(ValueError): def __init__(self, msg, index=None): if index is not None: msg += ', for index %r' % index super(PolicyError, self).__init__(msg) def _get_policy_string(base, policy_index): return get_zero_indexed_base_string(base, policy_index) def get_policy_string(base, policy_or_index): """ Helper function to construct a string from a base and the policy. Used to encode the policy index into either a file name or a directory name by various modules. :param base: the base string :param policy_or_index: StoragePolicy instance, or an index (string or int), if None the legacy storage Policy-0 is assumed. :returns: base name with policy index added :raises PolicyError: if no policy exists with the given policy_index """ if isinstance(policy_or_index, BaseStoragePolicy): policy = policy_or_index else: policy = POLICIES.get_by_index(policy_or_index) if policy is None: raise PolicyError("Unknown policy", index=policy_or_index) return _get_policy_string(base, int(policy)) def split_policy_string(policy_string): """ Helper function to convert a string representing a base and a policy. Used to decode the policy from either a file name or a directory name by various modules. 
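For example, ``split_policy_string('objects-1')`` returns ``('objects', POLICIES[1])`` and ``split_policy_string('objects')`` returns ``('objects', POLICIES[0])``, assuming policies with those indexes exist.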
:param policy_string: base name with policy index added :raises PolicyError: if given index does not map to a valid policy :returns: a tuple, in the form (base, policy) where base is the base string and policy is the StoragePolicy instance for the index encoded in the policy_string. """ if '-' in policy_string: base, policy_index = policy_string.rsplit('-', 1) else: base, policy_index = policy_string, None policy = POLICIES.get_by_index(policy_index) if get_policy_string(base, policy) != policy_string: raise PolicyError("Unknown policy", index=policy_index) return base, policy class BaseStoragePolicy(object): """ Represents a storage policy. Not meant to be instantiated directly; implement a derived subclasses (e.g. StoragePolicy, ECStoragePolicy, etc) or use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. The object_ring property is lazy loaded once the service's ``swift_dir`` is known via :meth:`~StoragePolicyCollection.get_object_ring`, but it may be over-ridden via object_ring kwarg at create time for testing or actively loaded with :meth:`~StoragePolicy.load_ring`. """ policy_type_to_policy_cls = {} def __init__(self, idx, name='', is_default=False, is_deprecated=False, object_ring=None, aliases=''): # do not allow BaseStoragePolicy class to be instantiated directly if type(self) == BaseStoragePolicy: raise TypeError("Can't instantiate BaseStoragePolicy directly") # policy parameter validation try: self.idx = int(idx) except ValueError: raise PolicyError('Invalid index', idx) if self.idx < 0: raise PolicyError('Invalid index', idx) self.alias_list = [] self.add_name(name) if aliases: names_list = list_from_csv(aliases) for alias in names_list: if alias == name: continue self.add_name(alias) self.is_deprecated = config_true_value(is_deprecated) self.is_default = config_true_value(is_default) if self.policy_type not in BaseStoragePolicy.policy_type_to_policy_cls: raise PolicyError('Invalid type', self.policy_type) if self.is_deprecated and self.is_default: raise PolicyError('Deprecated policy can not be default. ' 'Invalid config', self.idx) self.ring_name = _get_policy_string('object', self.idx) self.object_ring = object_ring @property def name(self): return self.alias_list[0] @name.setter def name_setter(self, name): self._validate_policy_name(name) self.alias_list[0] = name @property def aliases(self): return ", ".join(self.alias_list) def __int__(self): return self.idx def __cmp__(self, other): return cmp(self.idx, int(other)) def __repr__(self): return ("%s(%d, %r, is_default=%s, " "is_deprecated=%s, policy_type=%r)") % \ (self.__class__.__name__, self.idx, self.alias_list, self.is_default, self.is_deprecated, self.policy_type) @classmethod def register(cls, policy_type): """ Decorator for Storage Policy implementations to register their StoragePolicy class. This will also set the policy_type attribute on the registered implementation. """ def register_wrapper(policy_cls): if policy_type in cls.policy_type_to_policy_cls: raise PolicyError( '%r is already registered for the policy_type %r' % ( cls.policy_type_to_policy_cls[policy_type], policy_type)) cls.policy_type_to_policy_cls[policy_type] = policy_cls policy_cls.policy_type = policy_type return policy_cls return register_wrapper @classmethod def _config_options_map(cls): """ Map config option name to StoragePolicy parameter name. 
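Subclasses extend the returned map with their own config options (see :meth:`ECStoragePolicy._config_options_map` below).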
""" return { 'name': 'name', 'aliases': 'aliases', 'policy_type': 'policy_type', 'default': 'is_default', 'deprecated': 'is_deprecated', } @classmethod def from_config(cls, policy_index, options): config_to_policy_option_map = cls._config_options_map() policy_options = {} for config_option, value in options.items(): try: policy_option = config_to_policy_option_map[config_option] except KeyError: raise PolicyError('Invalid option %r in ' 'storage-policy section' % config_option, index=policy_index) policy_options[policy_option] = value return cls(policy_index, **policy_options) def get_info(self, config=False): """ Return the info dict and conf file options for this policy. :param config: boolean, if True all config options are returned """ info = {} for config_option, policy_attribute in \ self._config_options_map().items(): info[config_option] = getattr(self, policy_attribute) if not config: # remove some options for public consumption if not self.is_default: info.pop('default') if not self.is_deprecated: info.pop('deprecated') info.pop('policy_type') return info def _validate_policy_name(self, name): """ Helper function to determine the validity of a policy name. Used to check policy names before setting them. :param name: a name string for a single policy name. :raises PolicyError: if the policy name is invalid. """ if not name: raise PolicyError('Invalid name %r' % name, self.idx) # this is defensively restrictive, but could be expanded in the future if not all(c in VALID_CHARS for c in name): msg = 'Names are used as HTTP headers, and can not ' \ 'reliably contain any characters not in %r. ' \ 'Invalid name %r' % (VALID_CHARS, name) raise PolicyError(msg, self.idx) if name.upper() == LEGACY_POLICY_NAME.upper() and self.idx != 0: msg = 'The name %s is reserved for policy index 0. ' \ 'Invalid name %r' % (LEGACY_POLICY_NAME, name) raise PolicyError(msg, self.idx) if name.upper() in (existing_name.upper() for existing_name in self.alias_list): msg = 'The name %s is already assigned to this policy.' % name raise PolicyError(msg, self.idx) def add_name(self, name): """ Adds an alias name to the storage policy. Shouldn't be called directly from the storage policy but instead through the storage policy collection class, so lookups by name resolve correctly. :param name: a new alias for the storage policy """ self._validate_policy_name(name) self.alias_list.append(name) def remove_name(self, name): """ Removes an alias name from the storage policy. Shouldn't be called directly from the storage policy but instead through the storage policy collection class, so lookups by name resolve correctly. If the name removed is the primary name then the next available alias will be adopted as the new primary name. :param name: a name assigned to the storage policy """ if name not in self.alias_list: raise PolicyError("%s is not a name assigned to policy %s" % (name, self.idx)) if len(self.alias_list) == 1: raise PolicyError("Cannot remove only name %s from policy %s. " "Policies must have at least one name." % (name, self.idx)) else: self.alias_list.remove(name) def change_primary_name(self, name): """ Changes the primary/default name of the policy to a specified name. :param name: a string name to replace the current primary name. """ if name == self.name: return elif name in self.alias_list: self.remove_name(name) else: self._validate_policy_name(name) self.alias_list.insert(0, name) def load_ring(self, swift_dir): """ Load the ring for this policy immediately. 
:param swift_dir: path to rings """ if self.object_ring: return self.object_ring = Ring(swift_dir, ring_name=self.ring_name) @property def quorum(self): """ Number of successful backend requests needed for the proxy to consider the client request successful. """ raise NotImplementedError() @BaseStoragePolicy.register(REPL_POLICY) class StoragePolicy(BaseStoragePolicy): """ Represents a storage policy of type 'replication'. Default storage policy class unless otherwise overridden from swift.conf. Not meant to be instantiated directly; use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. """ @property def quorum(self): """ Quorum concept in the replication case: floor(number of replica / 2) + 1 """ if not self.object_ring: raise PolicyError('Ring is not loaded') return quorum_size(self.object_ring.replica_count) @BaseStoragePolicy.register(EC_POLICY) class ECStoragePolicy(BaseStoragePolicy): """ Represents a storage policy of type 'erasure_coding'. Not meant to be instantiated directly; use :func:`~swift.common.storage_policy.reload_storage_policies` to load POLICIES from ``swift.conf``. """ def __init__(self, idx, name='', aliases='', is_default=False, is_deprecated=False, object_ring=None, ec_segment_size=DEFAULT_EC_OBJECT_SEGMENT_SIZE, ec_type=None, ec_ndata=None, ec_nparity=None, ec_duplication_factor=1): super(ECStoragePolicy, self).__init__( idx=idx, name=name, aliases=aliases, is_default=is_default, is_deprecated=is_deprecated, object_ring=object_ring) # Validate erasure_coding policy specific members # ec_type is one of the EC implementations supported by PyEClib if ec_type is None: raise PolicyError('Missing ec_type') if ec_type not in VALID_EC_TYPES: raise PolicyError('Wrong ec_type %s for policy %s, should be one' ' of "%s"' % (ec_type, self.name, ', '.join(VALID_EC_TYPES))) self._ec_type = ec_type # Define _ec_ndata as the number of EC data fragments # Accessible as the property "ec_ndata" try: value = int(ec_ndata) if value <= 0: raise ValueError self._ec_ndata = value except (TypeError, ValueError): raise PolicyError('Invalid ec_num_data_fragments %r' % ec_ndata, index=self.idx) # Define _ec_nparity as the number of EC parity fragments # Accessible as the property "ec_nparity" try: value = int(ec_nparity) if value <= 0: raise ValueError self._ec_nparity = value except (TypeError, ValueError): raise PolicyError('Invalid ec_num_parity_fragments %r' % ec_nparity, index=self.idx) # Define _ec_segment_size as the encode segment unit size # Accessible as the property "ec_segment_size" try: value = int(ec_segment_size) if value <= 0: raise ValueError self._ec_segment_size = value except (TypeError, ValueError): raise PolicyError('Invalid ec_object_segment_size %r' % ec_segment_size, index=self.idx) if self._ec_type == 'isa_l_rs_vand' and self._ec_nparity >= 5: logger = logging.getLogger("swift.common.storage_policy") if not logger.handlers: # If nothing else, log to stderr logger.addHandler(logging.StreamHandler(sys.__stderr__)) logger.warning( 'Storage policy %s uses an EC configuration known to harm ' 'data durability. Any data in this policy should be migrated. ' 'See https://bugs.launchpad.net/swift/+bug/1639691 for ' 'more information.' % self.name) if not is_deprecated: raise PolicyError( 'Storage policy %s uses an EC configuration known to harm ' 'data durability. This policy MUST be deprecated.' 
% self.name) # Initialize PyECLib EC backend try: self.pyeclib_driver = \ ECDriver(k=self._ec_ndata, m=self._ec_nparity, ec_type=self._ec_type) except ECDriverError as e: raise PolicyError("Error creating EC policy (%s)" % e, index=self.idx) # quorum size in the EC case depends on the choice of EC scheme. self._ec_quorum_size = \ self._ec_ndata + self.pyeclib_driver.min_parity_fragments_needed() self._fragment_size = None self._ec_duplication_factor = \ config_positive_int_value(ec_duplication_factor) @property def ec_type(self): return self._ec_type @property def ec_ndata(self): return self._ec_ndata @property def ec_nparity(self): return self._ec_nparity @property def ec_n_unique_fragments(self): return self._ec_ndata + self._ec_nparity @property def ec_segment_size(self): return self._ec_segment_size @property def fragment_size(self): """ Maximum length of a fragment, including header. NB: a fragment archive is a sequence of 0 or more max-length fragments followed by one possibly-shorter fragment. """ # Technically pyeclib's get_segment_info signature calls for # (data_len, segment_size) but on a ranged GET we don't know the # ec-content-length header before we need to compute where in the # object we should request to align with the fragment size. So we # tell pyeclib a lie - from it's perspective, as long as data_len >= # segment_size it'll give us the answer we want. From our # perspective, because we only use this answer to calculate the # *minimum* size we should read from an object body even if data_len < # segment_size we'll still only read *the whole one and only last # fragment* and pass than into pyeclib who will know what to do with # it just as it always does when the last fragment is < fragment_size. if self._fragment_size is None: self._fragment_size = self.pyeclib_driver.get_segment_info( self.ec_segment_size, self.ec_segment_size)['fragment_size'] return self._fragment_size @property def ec_scheme_description(self): """ This short hand form of the important parts of the ec schema is stored in Object System Metadata on the EC Fragment Archives for debugging. """ return "%s %d+%d" % (self._ec_type, self._ec_ndata, self._ec_nparity) @property def ec_duplication_factor(self): return self._ec_duplication_factor def __repr__(self): extra_info = '' if self.ec_duplication_factor != 1: extra_info = ', ec_duplication_factor=%d' % \ self.ec_duplication_factor return ("%s, EC config(ec_type=%s, ec_segment_size=%d, " "ec_ndata=%d, ec_nparity=%d%s)") % \ (super(ECStoragePolicy, self).__repr__(), self.ec_type, self.ec_segment_size, self.ec_ndata, self.ec_nparity, extra_info) @classmethod def _config_options_map(cls): options = super(ECStoragePolicy, cls)._config_options_map() options.update({ 'ec_type': 'ec_type', 'ec_object_segment_size': 'ec_segment_size', 'ec_num_data_fragments': 'ec_ndata', 'ec_num_parity_fragments': 'ec_nparity', 'ec_duplication_factor': 'ec_duplication_factor', }) return options def get_info(self, config=False): info = super(ECStoragePolicy, self).get_info(config=config) if not config: info.pop('ec_object_segment_size') info.pop('ec_num_data_fragments') info.pop('ec_num_parity_fragments') info.pop('ec_type') info.pop('ec_duplication_factor') return info @property def quorum(self): """ Number of successful backend requests needed for the proxy to consider the client PUT request successful. 
The quorum size for EC policies defines the minimum number of data + parity elements required to be able to guarantee the desired fault tolerance, which is the number of data elements supplemented by the minimum number of parity elements required by the chosen erasure coding scheme. For example, for Reed-Solomon, the minimum number parity elements required is 1, and thus the quorum_size requirement is ec_ndata + 1. Given the number of parity elements required is not the same for every erasure coding scheme, consult PyECLib for min_parity_fragments_needed() """ return self._ec_quorum_size * self.ec_duplication_factor def load_ring(self, swift_dir): """ Load the ring for this policy immediately. :param swift_dir: path to rings """ if self.object_ring: return def validate_ring_data(ring_data): """ EC specific validation Replica count check - we need _at_least_ (#data + #parity) replicas configured. Also if the replica count is larger than exactly that number there's a non-zero risk of error for code that is considering the number of nodes in the primary list from the ring. """ configured_fragment_count = ring_data.replica_count required_fragment_count = \ (self.ec_n_unique_fragments) * self.ec_duplication_factor if configured_fragment_count != required_fragment_count: raise RingLoadError( 'EC ring for policy %s needs to be configured with ' 'exactly %d replicas. Got %s.' % ( self.name, required_fragment_count, configured_fragment_count)) self.object_ring = Ring( swift_dir, ring_name=self.ring_name, validation_hook=validate_ring_data) def get_backend_index(self, node_index): """ Backend index for PyECLib :param node_index: integer of node index :return: integer of actual fragment index. if param is not an integer, return None instead """ try: node_index = int(node_index) except ValueError: return None return node_index % self.ec_n_unique_fragments class StoragePolicyCollection(object): """ This class represents the collection of valid storage policies for the cluster and is instantiated as :class:`StoragePolicy` objects are added to the collection when ``swift.conf`` is parsed by :func:`parse_storage_policies`. When a StoragePolicyCollection is created, the following validation is enforced: * If a policy with index 0 is not declared and no other policies defined, Swift will create one * The policy index must be a non-negative integer * If no policy is declared as the default and no other policies are defined, the policy with index 0 is set as the default * Policy indexes must be unique * Policy names are required * Policy names are case insensitive * Policy names must contain only letters, digits or a dash * Policy names must be unique * The policy name 'Policy-0' can only be used for the policy with index 0 * If any policies are defined, exactly one policy must be declared default * Deprecated policies can not be declared the default """ def __init__(self, pols): self.default = [] self.by_name = {} self.by_index = {} self._validate_policies(pols) def _add_policy(self, policy): """ Add pre-validated policies to internal indexes. 
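Both primary names and aliases are indexed, upper-cased, which is what makes lookups by name case-insensitive.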
""" for name in policy.alias_list: self.by_name[name.upper()] = policy self.by_index[int(policy)] = policy def __repr__(self): return (textwrap.dedent(""" StoragePolicyCollection([ %s ]) """) % ',\n '.join(repr(p) for p in self)).strip() def __len__(self): return len(self.by_index) def __getitem__(self, key): return self.by_index[key] def __iter__(self): return iter(self.by_index.values()) def _validate_policies(self, policies): """ :param policies: list of policies """ for policy in policies: if int(policy) in self.by_index: raise PolicyError('Duplicate index %s conflicts with %s' % ( policy, self.get_by_index(int(policy)))) for name in policy.alias_list: if name.upper() in self.by_name: raise PolicyError('Duplicate name %s conflicts with %s' % ( policy, self.get_by_name(name))) if policy.is_default: if not self.default: self.default = policy else: raise PolicyError( 'Duplicate default %s conflicts with %s' % ( policy, self.default)) self._add_policy(policy) # If a 0 policy wasn't explicitly given, or nothing was # provided, create the 0 policy now if 0 not in self.by_index: if len(self) != 0: raise PolicyError('You must specify a storage policy ' 'section for policy index 0 in order ' 'to define multiple policies') self._add_policy(StoragePolicy(0, name=LEGACY_POLICY_NAME)) # at least one policy must be enabled enabled_policies = [p for p in self if not p.is_deprecated] if not enabled_policies: raise PolicyError("Unable to find policy that's not deprecated!") # if needed, specify default if not self.default: if len(self) > 1: raise PolicyError("Unable to find default policy") self.default = self[0] self.default.is_default = True def get_by_name(self, name): """ Find a storage policy by its name. :param name: name of the policy :returns: storage policy, or None """ return self.by_name.get(name.upper()) def get_by_index(self, index): """ Find a storage policy by its index. An index of None will be treated as 0. :param index: numeric index of the storage policy :returns: storage policy, or None if no such policy """ # makes it easier for callers to just pass in a header value if index in ('', None): index = 0 else: try: index = int(index) except ValueError: return None return self.by_index.get(index) @property def legacy(self): return self.get_by_index(None) def get_object_ring(self, policy_idx, swift_dir): """ Get the ring object to use to handle a request based on its policy. An index of None will be treated as 0. :param policy_idx: policy index as defined in swift.conf :param swift_dir: swift_dir used by the caller :returns: appropriate ring object """ policy = self.get_by_index(policy_idx) if not policy: raise PolicyError("No policy with index %s" % policy_idx) if not policy.object_ring: policy.load_ring(swift_dir) return policy.object_ring def get_policy_info(self): """ Build info about policies for the /info endpoint :returns: list of dicts containing relevant policy information """ policy_info = [] for pol in self: # delete from /info if deprecated if pol.is_deprecated: continue policy_entry = pol.get_info() policy_info.append(policy_entry) return policy_info def add_policy_alias(self, policy_index, *aliases): """ Adds a new name or names to a policy :param policy_index: index of a policy in this policy collection. :param aliases: arbitrary number of string policy names to add. 
""" policy = self.get_by_index(policy_index) for alias in aliases: if alias.upper() in self.by_name: raise PolicyError('Duplicate name %s in use ' 'by policy %s' % (alias, self.get_by_name(alias))) else: policy.add_name(alias) self.by_name[alias.upper()] = policy def remove_policy_alias(self, *aliases): """ Removes a name or names from a policy. If the name removed is the primary name then the next available alias will be adopted as the new primary name. :param aliases: arbitrary number of existing policy names to remove. """ for alias in aliases: policy = self.get_by_name(alias) if not policy: raise PolicyError('No policy with name %s exists.' % alias) if len(policy.alias_list) == 1: raise PolicyError('Policy %s with name %s has only one name. ' 'Policies must have at least one name.' % ( policy, alias)) else: policy.remove_name(alias) del self.by_name[alias.upper()] def change_policy_primary_name(self, policy_index, new_name): """ Changes the primary or default name of a policy. The new primary name can be an alias that already belongs to the policy or a completely new name. :param policy_index: index of a policy in this policy collection. :param new_name: a string name to set as the new default name. """ policy = self.get_by_index(policy_index) name_taken = self.get_by_name(new_name) # if the name belongs to some other policy in the collection if name_taken and name_taken != policy: raise PolicyError('Other policy %s with name %s exists.' % (self.get_by_name(new_name).idx, new_name)) else: policy.change_primary_name(new_name) self.by_name[new_name.upper()] = policy def parse_storage_policies(conf): """ Parse storage policies in ``swift.conf`` - note that validation is done when the :class:`StoragePolicyCollection` is instantiated. :param conf: ConfigParser parser object for swift.conf """ policies = [] for section in conf.sections(): if not section.startswith('storage-policy:'): continue policy_index = section.split(':', 1)[1] config_options = dict(conf.items(section)) policy_type = config_options.pop('policy_type', DEFAULT_POLICY_TYPE) policy_cls = BaseStoragePolicy.policy_type_to_policy_cls[policy_type] policy = policy_cls.from_config(policy_index, config_options) policies.append(policy) return StoragePolicyCollection(policies) class StoragePolicySingleton(object): """ An instance of this class is the primary interface to storage policies exposed as a module level global named ``POLICIES``. This global reference wraps ``_POLICIES`` which is normally instantiated by parsing ``swift.conf`` and will result in an instance of :class:`StoragePolicyCollection`. You should never patch this instance directly, instead patch the module level ``_POLICIES`` instance so that swift code which imported ``POLICIES`` directly will reference the patched :class:`StoragePolicyCollection`. """ def __iter__(self): return iter(_POLICIES) def __len__(self): return len(_POLICIES) def __getitem__(self, key): return _POLICIES[key] def __getattribute__(self, name): return getattr(_POLICIES, name) def __repr__(self): return repr(_POLICIES) def reload_storage_policies(): """ Reload POLICIES from ``swift.conf``. 
""" global _POLICIES policy_conf = ConfigParser() policy_conf.read(utils.SWIFT_CONF_FILE) try: _POLICIES = parse_storage_policies(policy_conf) except PolicyError as e: raise SystemExit('ERROR: Invalid Storage Policy Configuration ' 'in %s (%s)' % (utils.SWIFT_CONF_FILE, e)) # parse configuration and setup singleton _POLICIES = None reload_storage_policies() POLICIES = StoragePolicySingleton() swift-2.17.1/swift/common/http.py0000666000175000017500000001076713435012003016741 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. def is_informational(status): """ Check if HTTP status code is informational. :param status: http status code :returns: True if status is successful, else False """ return 100 <= status <= 199 def is_success(status): """ Check if HTTP status code is successful. :param status: http status code :returns: True if status is successful, else False """ return 200 <= status <= 299 def is_redirection(status): """ Check if HTTP status code is redirection. :param status: http status code :returns: True if status is redirection, else False """ return 300 <= status <= 399 def is_client_error(status): """ Check if HTTP status code is client error. :param status: http status code :returns: True if status is client error, else False """ return 400 <= status <= 499 def is_server_error(status): """ Check if HTTP status code is server error. 
:param status: http status code :returns: True if status is server error, else False """ return 500 <= status <= 599 # List of HTTP status codes ############################################################################### # 1xx Informational ############################################################################### HTTP_CONTINUE = 100 HTTP_SWITCHING_PROTOCOLS = 101 HTTP_PROCESSING = 102 # WebDAV HTTP_CHECKPOINT = 103 HTTP_REQUEST_URI_TOO_LONG = 122 ############################################################################### # 2xx Success ############################################################################### HTTP_OK = 200 HTTP_CREATED = 201 HTTP_ACCEPTED = 202 HTTP_NON_AUTHORITATIVE_INFORMATION = 203 HTTP_NO_CONTENT = 204 HTTP_RESET_CONTENT = 205 HTTP_PARTIAL_CONTENT = 206 HTTP_MULTI_STATUS = 207 # WebDAV HTTP_IM_USED = 226 ############################################################################### # 3xx Redirection ############################################################################### HTTP_MULTIPLE_CHOICES = 300 HTTP_MOVED_PERMANENTLY = 301 HTTP_FOUND = 302 HTTP_SEE_OTHER = 303 HTTP_NOT_MODIFIED = 304 HTTP_USE_PROXY = 305 HTTP_SWITCH_PROXY = 306 HTTP_TEMPORARY_REDIRECT = 307 HTTP_RESUME_INCOMPLETE = 308 ############################################################################### # 4xx Client Error ############################################################################### HTTP_BAD_REQUEST = 400 HTTP_UNAUTHORIZED = 401 HTTP_PAYMENT_REQUIRED = 402 HTTP_FORBIDDEN = 403 HTTP_NOT_FOUND = 404 HTTP_METHOD_NOT_ALLOWED = 405 HTTP_NOT_ACCEPTABLE = 406 HTTP_PROXY_AUTHENTICATION_REQUIRED = 407 HTTP_REQUEST_TIMEOUT = 408 HTTP_CONFLICT = 409 HTTP_GONE = 410 HTTP_LENGTH_REQUIRED = 411 HTTP_PRECONDITION_FAILED = 412 HTTP_REQUEST_ENTITY_TOO_LARGE = 413 HTTP_REQUEST_URI_TOO_LONG = 414 HTTP_UNSUPPORTED_MEDIA_TYPE = 415 HTTP_REQUESTED_RANGE_NOT_SATISFIABLE = 416 HTTP_EXPECTATION_FAILED = 417 HTTP_IM_A_TEAPOT = 418 HTTP_UNPROCESSABLE_ENTITY = 422 # WebDAV HTTP_LOCKED = 423 # WebDAV HTTP_FAILED_DEPENDENCY = 424 # WebDAV HTTP_UNORDERED_COLLECTION = 425 HTTP_UPGRADE_REQUIED = 426 HTTP_PRECONDITION_REQUIRED = 428 HTTP_TOO_MANY_REQUESTS = 429 HTTP_REQUEST_HEADER_FIELDS_TOO_LARGE = 431 HTTP_NO_RESPONSE = 444 HTTP_RETRY_WITH = 449 HTTP_BLOCKED_BY_WINDOWS_PARENTAL_CONTROLS = 450 HTTP_CLIENT_CLOSED_REQUEST = 499 ############################################################################### # 5xx Server Error ############################################################################### HTTP_INTERNAL_SERVER_ERROR = 500 HTTP_NOT_IMPLEMENTED = 501 HTTP_BAD_GATEWAY = 502 HTTP_SERVICE_UNAVAILABLE = 503 HTTP_GATEWAY_TIMEOUT = 504 HTTP_VERSION_NOT_SUPPORTED = 505 HTTP_VARIANT_ALSO_NEGOTIATES = 506 HTTP_INSUFFICIENT_STORAGE = 507 # WebDAV HTTP_BANDWIDTH_LIMIT_EXCEEDED = 509 HTTP_NOT_EXTENDED = 510 HTTP_NETWORK_AUTHENTICATION_REQUIRED = 511 HTTP_NETWORK_READ_TIMEOUT_ERROR = 598 # not used in RFC HTTP_NETWORK_CONNECT_TIMEOUT_ERROR = 599 # not used in RFC swift-2.17.1/swift/common/wsgi.py0000666000175000017500000012646713435012015016743 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """WSGI tools for use with swift.""" from __future__ import print_function import errno import inspect import os import signal import time from swift import gettext_ as _ from textwrap import dedent import eventlet import eventlet.debug from eventlet import greenio, GreenPool, sleep, wsgi, listen, Timeout from paste.deploy import loadwsgi from eventlet.green import socket, ssl, os as green_os import six from six import BytesIO from six import StringIO from six.moves.urllib.parse import unquote if six.PY2: import mimetools from swift.common import utils, constraints from swift.common.storage_policy import BindPortsCache from swift.common.swob import Request from swift.common.utils import capture_stdio, disable_fallocate, \ drop_privileges, get_logger, NullLogger, config_true_value, \ validate_configuration, get_hub, config_auto_int_value, \ reiterate # Set maximum line size of message headers to be accepted. wsgi.MAX_HEADER_LINE = constraints.MAX_HEADER_SIZE try: import multiprocessing CPU_COUNT = multiprocessing.cpu_count() or 1 except (ImportError, NotImplementedError): CPU_COUNT = 1 class NamedConfigLoader(loadwsgi.ConfigLoader): """ Patch paste.deploy's ConfigLoader so each context object will know what config section it came from. """ def get_context(self, object_type, name=None, global_conf=None): context = super(NamedConfigLoader, self).get_context( object_type, name=name, global_conf=global_conf) context.name = name context.local_conf['__name__'] = name return context loadwsgi.ConfigLoader = NamedConfigLoader class ConfigDirLoader(NamedConfigLoader): """ Read configuration from multiple files under the given path. """ def __init__(self, conf_dir): # parent class uses filename attribute when building error messages self.filename = conf_dir = conf_dir.strip() defaults = { 'here': os.path.normpath(os.path.abspath(conf_dir)), '__file__': os.path.abspath(conf_dir) } self.parser = loadwsgi.NicerConfigParser(conf_dir, defaults=defaults) self.parser.optionxform = str # Don't lower-case keys utils.read_conf_dir(self.parser, conf_dir) def _loadconfigdir(object_type, uri, path, name, relative_to, global_conf): if relative_to: path = os.path.normpath(os.path.join(relative_to, path)) loader = ConfigDirLoader(path) if global_conf: loader.update_defaults(global_conf, overwrite=False) return loader.get_context(object_type, name, global_conf) # add config_dir parsing to paste.deploy loadwsgi._loaders['config_dir'] = _loadconfigdir class ConfigString(NamedConfigLoader): """ Wrap a raw config string up for paste.deploy. If you give one of these to our loadcontext (e.g. give it to our appconfig) we'll intercept it and get it routed to the right loader. 
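A minimal sketch of the intended use (the pipeline contents here are
illustrative; ``loadapp`` and ``ConfigString`` come from
``swift.common.wsgi``):

    conf_body = '''
    [pipeline:main]
    pipeline = healthcheck proxy-server
    [app:proxy-server]
    use = egg:swift#proxy
    [filter:healthcheck]
    use = egg:swift#healthcheck
    '''
    app = loadapp(ConfigString(conf_body))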
""" def __init__(self, config_string): self.contents = StringIO(dedent(config_string)) self.filename = "string" defaults = { 'here': "string", '__file__': self.contents, } self.parser = loadwsgi.NicerConfigParser("string", defaults=defaults) self.parser.optionxform = str # Don't lower-case keys self.parser.readfp(self.contents) def wrap_conf_type(f): """ Wrap a function whos first argument is a paste.deploy style config uri, such that you can pass it an un-adorned raw filesystem path (or config string) and the config directive (either config:, config_dir:, or config_str:) will be added automatically based on the type of entity (either a file or directory, or if no such entity on the file system - just a string) before passing it through to the paste.deploy function. """ def wrapper(conf_path, *args, **kwargs): if os.path.isdir(conf_path): conf_type = 'config_dir' else: conf_type = 'config' conf_uri = '%s:%s' % (conf_type, conf_path) return f(conf_uri, *args, **kwargs) return wrapper appconfig = wrap_conf_type(loadwsgi.appconfig) def monkey_patch_mimetools(): """ mimetools.Message defaults content-type to "text/plain" This changes it to default to None, so we can detect missing headers. """ if six.PY3: # The mimetools has been removed from Python 3 return orig_parsetype = mimetools.Message.parsetype def parsetype(self): if not self.typeheader: self.type = None self.maintype = None self.subtype = None self.plisttext = '' else: orig_parsetype(self) parsetype.patched = True if not getattr(mimetools.Message.parsetype, 'patched', None): mimetools.Message.parsetype = parsetype def get_socket(conf): """Bind socket to bind ip:port in conf :param conf: Configuration dict to read settings from :returns: a socket object as returned from socket.listen or ssl.wrap_socket if conf specifies cert_file """ try: bind_port = int(conf['bind_port']) except (ValueError, KeyError, TypeError): raise ConfigFilePortError() bind_addr = (conf.get('bind_ip', '0.0.0.0'), bind_port) address_family = [addr[0] for addr in socket.getaddrinfo( bind_addr[0], bind_addr[1], socket.AF_UNSPEC, socket.SOCK_STREAM) if addr[0] in (socket.AF_INET, socket.AF_INET6)][0] sock = None bind_timeout = int(conf.get('bind_timeout', 30)) retry_until = time.time() + bind_timeout warn_ssl = False while not sock and time.time() < retry_until: try: sock = listen(bind_addr, backlog=int(conf.get('backlog', 4096)), family=address_family) if 'cert_file' in conf: warn_ssl = True sock = ssl.wrap_socket(sock, certfile=conf['cert_file'], keyfile=conf['key_file']) except socket.error as err: if err.args[0] != errno.EADDRINUSE: raise sleep(0.1) if not sock: raise Exception(_('Could not bind to %(addr)s:%(port)s ' 'after trying for %(timeout)s seconds') % { 'addr': bind_addr[0], 'port': bind_addr[1], 'timeout': bind_timeout}) # in my experience, sockets can hang around forever without keepalive sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if hasattr(socket, 'TCP_KEEPIDLE'): sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 600) if warn_ssl: ssl_warning_message = _('WARNING: SSL should only be enabled for ' 'testing purposes. 
Use external SSL ' 'termination for a production deployment.') get_logger(conf).warning(ssl_warning_message) print(ssl_warning_message) return sock class RestrictedGreenPool(GreenPool): """ Works the same as GreenPool, but if the size is specified as one, then the spawn_n() method will invoke waitall() before returning to prevent the caller from doing any other work (like calling accept()). """ def __init__(self, size=1024): super(RestrictedGreenPool, self).__init__(size=size) self._rgp_do_wait = (size == 1) def spawn_n(self, *args, **kwargs): super(RestrictedGreenPool, self).spawn_n(*args, **kwargs) if self._rgp_do_wait: self.waitall() def pipeline_property(name, **kwargs): """ Create a property accessor for the given name. The property will dig through the bound instance on which it was accessed for an attribute "app" and check that object for an attribute of the given name. If the "app" object does not have such an attribute, it will look for an attribute "app" on THAT object and continue its search from there. If the named attribute cannot be found, accessing the property will raise AttributeError. If a default kwarg is provided you get that instead of the AttributeError. When found, the attribute will be cached on the instance with the property accessor using the same name as the attribute prefixed with a leading underscore. """ cache_attr_name = '_%s' % name def getter(self): cached_value = getattr(self, cache_attr_name, None) if cached_value: return cached_value app = self # first app is on self while True: app = getattr(app, 'app', None) if not app: break try: value = getattr(app, name) except AttributeError: continue setattr(self, cache_attr_name, value) return value if 'default' in kwargs: return kwargs['default'] raise AttributeError('No apps in pipeline have a ' '%s attribute' % name) return property(getter) class PipelineWrapper(object): """ This class provides a number of utility methods for modifying the composition of a wsgi pipeline. """ def __init__(self, context): self.context = context def __contains__(self, entry_point_name): try: self.index(entry_point_name) return True except ValueError: return False def startswith(self, entry_point_name): """ Tests if the pipeline starts with the given entry point name. :param entry_point_name: entry point of middleware or app (Swift only) :returns: True if entry_point_name is first in pipeline, False otherwise """ try: first_ctx = self.context.filter_contexts[0] except IndexError: first_ctx = self.context.app_context return first_ctx.entry_point_name == entry_point_name def _format_for_display(self, ctx): # Contexts specified by pipeline= have .name set in NamedConfigLoader. if hasattr(ctx, 'name'): return ctx.name # This should not happen: a foreign context. Let's not crash. return "" def __str__(self): parts = [self._format_for_display(ctx) for ctx in self.context.filter_contexts] parts.append(self._format_for_display(self.context.app_context)) return " ".join(parts) def create_filter(self, entry_point_name): """ Creates a context for a filter that can subsequently be added to a pipeline context. :param entry_point_name: entry point of the middleware (Swift only) :returns: a filter context """ spec = 'egg:swift#' + entry_point_name ctx = loadwsgi.loadcontext(loadwsgi.FILTER, spec, global_conf=self.context.global_conf) ctx.protocol = 'paste.filter_factory' ctx.name = entry_point_name return ctx def index(self, entry_point_name): """ Returns the first index of the given entry point name in the pipeline.
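For example, given a wrapper ``pipe`` around the pipeline
``catch_errors proxy-logging proxy-server`` (illustrative):

    >>> pipe.index('proxy-logging')
    1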
Raises ValueError if the given module is not in the pipeline. """ for i, ctx in enumerate(self.context.filter_contexts): if ctx.entry_point_name == entry_point_name: return i raise ValueError("%s is not in pipeline" % (entry_point_name,)) def insert_filter(self, ctx, index=0): """ Inserts a filter module into the pipeline context. :param ctx: the context to be inserted :param index: (optional) index at which filter should be inserted in the list of pipeline filters. Default is 0, which means the start of the pipeline. """ self.context.filter_contexts.insert(index, ctx) def loadcontext(object_type, uri, name=None, relative_to=None, global_conf=None): if isinstance(uri, loadwsgi.ConfigLoader): # bypass loadcontext's uri parsing and loader routing and # just directly return the context if global_conf: uri.update_defaults(global_conf, overwrite=False) return uri.get_context(object_type, name, global_conf) add_conf_type = wrap_conf_type(lambda x: x) return loadwsgi.loadcontext(object_type, add_conf_type(uri), name=name, relative_to=relative_to, global_conf=global_conf) def _add_pipeline_properties(app, *names): for property_name in names: if not hasattr(app, property_name): setattr(app.__class__, property_name, pipeline_property(property_name)) def loadapp(conf_file, global_conf=None, allow_modify_pipeline=True): """ Loads a context from a config file, and if the context is a pipeline then presents the app with the opportunity to modify the pipeline. """ global_conf = global_conf or {} ctx = loadcontext(loadwsgi.APP, conf_file, global_conf=global_conf) if ctx.object_type.name == 'pipeline': # give app the opportunity to modify the pipeline context app = ctx.app_context.create() func = getattr(app, 'modify_wsgi_pipeline', None) if func and allow_modify_pipeline: func(PipelineWrapper(ctx)) return ctx.create() def load_app_config(conf_file): """ Read the app config section from a config file. :param conf_file: path to a config file :return: a dict """ app_conf = {} try: ctx = loadcontext(loadwsgi.APP, conf_file) except LookupError: pass else: app_conf.update(ctx.app_context.global_conf) app_conf.update(ctx.app_context.local_conf) return app_conf def run_server(conf, logger, sock, global_conf=None): # Ensure TZ environment variable exists to avoid stat('/etc/localtime') on # some platforms. This locks in reported times to UTC. os.environ['TZ'] = 'UTC+0' time.tzset() wsgi.HttpProtocol.default_request_version = "HTTP/1.0" # Turn off logging requests by the underlying WSGI software. wsgi.HttpProtocol.log_request = lambda *a: None # Redirect logging other messages by the underlying WSGI software. wsgi.HttpProtocol.log_message = \ lambda s, f, *a: logger.error('ERROR WSGI: ' + f % a) wsgi.WRITE_TIMEOUT = int(conf.get('client_timeout') or 60) eventlet.hubs.use_hub(get_hub()) eventlet_debug = config_true_value(conf.get('eventlet_debug', 'no')) eventlet.debug.hub_exceptions(eventlet_debug) wsgi_logger = NullLogger() if eventlet_debug: # let eventlet.wsgi.server log to stderr wsgi_logger = None # utils.LogAdapter stashes name in server; fallback on unadapted loggers if not global_conf: if hasattr(logger, 'server'): log_name = logger.server else: log_name = logger.name global_conf = {'log_name': log_name} app = loadapp(conf['__file__'], global_conf=global_conf) max_clients = int(conf.get('max_clients', '1024')) pool = RestrictedGreenPool(size=max_clients) try: # Disable capitalizing headers in Eventlet if possible. This is # necessary for the AWS SDK to work with swift3 middleware. 
argspec = inspect.getargspec(wsgi.server) if 'capitalize_response_headers' in argspec.args: wsgi.server(sock, app, wsgi_logger, custom_pool=pool, capitalize_response_headers=False) else: wsgi.server(sock, app, wsgi_logger, custom_pool=pool) except socket.error as err: if err[0] != errno.EINVAL: raise pool.waitall() class WorkersStrategy(object): """ WSGI server management strategy object for a single bind port and listen socket shared by a configured number of forked-off workers. Used in :py:func:`run_wsgi`. :param dict conf: Server configuration dictionary. :param logger: The server's :py:class:`~swift.common.utils.LogAdapter` object. """ def __init__(self, conf, logger): self.conf = conf self.logger = logger self.sock = None self.children = [] self.worker_count = config_auto_int_value(conf.get('workers'), CPU_COUNT) def loop_timeout(self): """ We want to keep from busy-waiting, but we also need a non-None value so the main loop gets a chance to tell whether it should keep running or not (e.g. SIGHUP received). So we return 0.5. """ return 0.5 def do_bind_ports(self): """ Bind the one listen socket for this strategy and drop privileges (since the parent process will never need to bind again). """ try: self.sock = get_socket(self.conf) except ConfigFilePortError: msg = 'bind_port wasn\'t properly set in the config file. ' \ 'It must be explicitly set to a valid port number.' return msg drop_privileges(self.conf.get('user', 'swift')) def no_fork_sock(self): """ Return a server listen socket if the server should run in the foreground (no fork). """ # Useful for profiling [no forks]. if self.worker_count == 0: return self.sock def new_worker_socks(self): """ Yield a sequence of (socket, opaque_data) tuples for each server which should be forked-off and started. The opaque_data item for each socket will be passed into the :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods, where it will be ignored. """ while len(self.children) < self.worker_count: yield self.sock, None def post_fork_hook(self): """ Perform any initialization in a forked-off child process prior to starting the wsgi server. """ pass def log_sock_exit(self, sock, _unused): """ Log a server's exit. :param socket sock: The listen socket for the worker just started. :param _unused: The socket's opaque_data yielded by :py:meth:`new_worker_socks`. """ self.logger.notice('Child %d exiting normally' % os.getpid()) def register_worker_start(self, sock, _unused, pid): """ Called when a new worker is started. :param socket sock: The listen socket for the worker just started. :param _unused: The socket's opaque_data yielded by new_worker_socks(). :param int pid: The new worker process' PID """ self.logger.notice('Started child %s' % pid) self.children.append(pid) def register_worker_exit(self, pid): """ Called when a worker has exited. :param int pid: The PID of the worker that exited. """ self.logger.error('Removing dead child %s' % pid) self.children.remove(pid) def shutdown_sockets(self): """ Shutdown any listen sockets. """ greenio.shutdown_safe(self.sock) self.sock.close() class PortPidState(object): """ A helper class for :py:class:`ServersPerPortStrategy` to track listen sockets and PIDs for each port. :param int servers_per_port: The configured number of servers per port.
:param logger: The server's :py:class:`~swift.common.utils.LogAdapter` """ def __init__(self, servers_per_port, logger): self.servers_per_port = servers_per_port self.logger = logger self.sock_data_by_port = {} def sock_for_port(self, port): """ :param int port: The port whose socket is desired. :returns: The bound listen socket for the given port. """ return self.sock_data_by_port[port]['sock'] def port_for_sock(self, sock): """ :param socket sock: A tracked bound listen socket :returns: The port the socket is bound to. """ for port, sock_data in self.sock_data_by_port.items(): if sock_data['sock'] == sock: return port def _pid_to_port_and_index(self, pid): for port, sock_data in self.sock_data_by_port.items(): for server_idx, a_pid in enumerate(sock_data['pids']): if pid == a_pid: return port, server_idx def port_index_pairs(self): """ Returns current (port, server index) pairs. :returns: A set of (port, server_idx) tuples for currently-tracked ports, sockets, and PIDs. """ current_port_index_pairs = set() for port, pid_state in self.sock_data_by_port.items(): current_port_index_pairs |= set( (port, i) for i, pid in enumerate(pid_state['pids']) if pid is not None) return current_port_index_pairs def track_port(self, port, sock): """ Start tracking servers for the given port and listen socket. :param int port: The port to start tracking :param socket sock: The bound listen socket for the port. """ self.sock_data_by_port[port] = { 'sock': sock, 'pids': [None] * self.servers_per_port, } def not_tracking(self, port): """ Return True if the specified port is not being tracked. :param int port: A port to check. """ return port not in self.sock_data_by_port def all_socks(self): """ Yield all current listen sockets. """ for orphan_data in self.sock_data_by_port.values(): yield orphan_data['sock'] def forget_port(self, port): """ Idempotently forget a port, closing the listen socket at most once. """ orphan_data = self.sock_data_by_port.pop(port, None) if orphan_data: greenio.shutdown_safe(orphan_data['sock']) orphan_data['sock'].close() self.logger.notice('Closing unnecessary sock for port %d', port) def add_pid(self, port, index, pid): self.sock_data_by_port[port]['pids'][index] = pid def forget_pid(self, pid): """ Idempotently forget a PID. It's okay if the PID is no longer in our data structure (it could have been removed by the "orphan port" removal in :py:meth:`new_worker_socks`). :param int pid: The PID which exited. """ port_server_idx = self._pid_to_port_and_index(pid) if port_server_idx is None: # This method can lose a race with the "orphan port" removal, when # a ring reload no longer contains a port. So it's okay if we were # unable to find a (port, server_idx) pair. return dead_port, server_idx = port_server_idx self.logger.error('Removing dead child %d (PID: %s) for port %s', server_idx, pid, dead_port) self.sock_data_by_port[dead_port]['pids'][server_idx] = None class ServersPerPortStrategy(object): """ WSGI server management strategy object for an object-server with one listen port per unique local port in the storage policy rings. The `servers_per_port` integer config setting determines how many workers are run per port. Used in :py:func:`run_wsgi`. :param dict conf: Server configuration dictionary. :param logger: The server's :py:class:`~swift.common.utils.LogAdapter` object. :param int servers_per_port: The number of workers to run per port.
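This strategy is selected by :py:func:`run_wsgi` only when an
object-server config sets ``servers_per_port`` to a value greater than
zero, e.g. (illustrative config excerpt):

    [DEFAULT]
    servers_per_port = 4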
""" def __init__(self, conf, logger, servers_per_port): self.conf = conf self.logger = logger self.servers_per_port = servers_per_port self.swift_dir = conf.get('swift_dir', '/etc/swift') self.ring_check_interval = int(conf.get('ring_check_interval', 15)) self.port_pid_state = PortPidState(servers_per_port, logger) bind_ip = conf.get('bind_ip', '0.0.0.0') self.cache = BindPortsCache(self.swift_dir, bind_ip) def _reload_bind_ports(self): self.bind_ports = self.cache.all_bind_ports_for_node() def _bind_port(self, port): new_conf = self.conf.copy() new_conf['bind_port'] = port sock = get_socket(new_conf) self.port_pid_state.track_port(port, sock) def loop_timeout(self): """ Return timeout before checking for reloaded rings. :returns: The time to wait for a child to exit before checking for reloaded rings (new ports). """ return self.ring_check_interval def do_bind_ports(self): """ Bind one listen socket per unique local storage policy ring port. Then do all the work of drop_privileges except the actual dropping of privileges (each forked-off worker will do that post-fork in :py:meth:`post_fork_hook`). """ self._reload_bind_ports() for port in self.bind_ports: self._bind_port(port) # The workers strategy drops privileges here, which we obviously cannot # do if we want to support binding to low ports. But we do want some # of the actions that drop_privileges did. try: os.setsid() except OSError: pass # In case you need to rmdir where you started the daemon: os.chdir('/') # Ensure files are created with the correct privileges: os.umask(0o22) def no_fork_sock(self): """ This strategy does not support running in the foreground. """ pass def new_worker_socks(self): """ Yield a sequence of (socket, server_idx) tuples for each server which should be forked-off and started. Any sockets for "orphaned" ports no longer in any ring will be closed (causing their associated workers to gracefully exit) after all new sockets have been yielded. The server_idx item for each socket will passed into the :py:meth:`log_sock_exit` and :py:meth:`register_worker_start` methods. """ self._reload_bind_ports() desired_port_index_pairs = set( (p, i) for p in self.bind_ports for i in range(self.servers_per_port)) current_port_index_pairs = self.port_pid_state.port_index_pairs() if desired_port_index_pairs != current_port_index_pairs: # Orphan ports are ports which had object-server processes running, # but which no longer appear in the ring. We'll kill them after we # start missing workers. orphan_port_index_pairs = current_port_index_pairs - \ desired_port_index_pairs # Fork off worker(s) for every port who's supposed to have # worker(s) but doesn't missing_port_index_pairs = desired_port_index_pairs - \ current_port_index_pairs for port, server_idx in sorted(missing_port_index_pairs): if self.port_pid_state.not_tracking(port): try: self._bind_port(port) except Exception as e: self.logger.critical('Unable to bind to port %d: %s', port, e) continue yield self.port_pid_state.sock_for_port(port), server_idx for orphan_pair in orphan_port_index_pairs: # For any port in orphan_port_index_pairs, it is guaranteed # that there should be no listen socket for that port, so we # can close and forget them. self.port_pid_state.forget_port(orphan_pair[0]) def post_fork_hook(self): """ Called in each child process, prior to starting the actual wsgi server, to drop privileges. """ drop_privileges(self.conf.get('user', 'swift'), call_setsid=False) def log_sock_exit(self, sock, server_idx): """ Log a server's exit. 
""" port = self.port_pid_state.port_for_sock(sock) self.logger.notice('Child %d (PID %d, port %d) exiting normally', server_idx, os.getpid(), port) def register_worker_start(self, sock, server_idx, pid): """ Called when a new worker is started. :param socket sock: The listen socket for the worker just started. :param server_idx: The socket's server_idx as yielded by :py:meth:`new_worker_socks`. :param int pid: The new worker process' PID """ port = self.port_pid_state.port_for_sock(sock) self.logger.notice('Started child %d (PID %d) for port %d', server_idx, pid, port) self.port_pid_state.add_pid(port, server_idx, pid) def register_worker_exit(self, pid): """ Called when a worker has exited. :param int pid: The PID of the worker that exited. """ self.port_pid_state.forget_pid(pid) def shutdown_sockets(self): """ Shutdown any listen sockets. """ for sock in self.port_pid_state.all_socks(): greenio.shutdown_safe(sock) sock.close() def run_wsgi(conf_path, app_section, *args, **kwargs): """ Runs the server according to some strategy. The default strategy runs a specified number of workers in pre-fork model. The object-server (only) may use a servers-per-port strategy if its config has a servers_per_port setting with a value greater than zero. :param conf_path: Path to paste.deploy style configuration file/directory :param app_section: App name from conf file to load config from :returns: 0 if successful, nonzero otherwise """ # Load configuration, Set logger and Load request processor try: (conf, logger, log_name) = \ _initrp(conf_path, app_section, *args, **kwargs) except ConfigFileError as e: print(e) return 1 # optional nice/ionice priority scheduling utils.modify_priority(conf, logger) servers_per_port = int(conf.get('servers_per_port', '0') or 0) # NOTE: for now servers_per_port is object-server-only; future work could # be done to test and allow it to be used for account and container # servers, but that has not been done yet. if servers_per_port and app_section == 'object-server': strategy = ServersPerPortStrategy( conf, logger, servers_per_port=servers_per_port) else: strategy = WorkersStrategy(conf, logger) # patch event before loadapp utils.eventlet_monkey_patch() # Ensure the configuration and application can be loaded before proceeding. global_conf = {'log_name': log_name} if 'global_conf_callback' in kwargs: kwargs['global_conf_callback'](conf, global_conf) loadapp(conf_path, global_conf=global_conf) # set utils.FALLOCATE_RESERVE if desired utils.FALLOCATE_RESERVE, utils.FALLOCATE_IS_PERCENT = \ utils.config_fallocate_value(conf.get('fallocate_reserve', '1%')) # Start listening on bind_addr/port error_msg = strategy.do_bind_ports() if error_msg: logger.error(error_msg) print(error_msg) return 1 # Redirect errors to logger and close stdio. Do this *after* binding ports; # we use this to signal that the service is ready to accept connections. 
capture_stdio(logger) no_fork_sock = strategy.no_fork_sock() if no_fork_sock: run_server(conf, logger, no_fork_sock, global_conf=global_conf) return 0 def kill_children(*args): """Kills the entire process group.""" logger.error('SIGTERM received') signal.signal(signal.SIGTERM, signal.SIG_IGN) running[0] = False os.killpg(0, signal.SIGTERM) def hup(*args): """Shuts down the server, but allows running requests to complete""" logger.error('SIGHUP received') signal.signal(signal.SIGHUP, signal.SIG_IGN) running[0] = False running = [True] signal.signal(signal.SIGTERM, kill_children) signal.signal(signal.SIGHUP, hup) while running[0]: for sock, sock_info in strategy.new_worker_socks(): pid = os.fork() if pid == 0: signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL) strategy.post_fork_hook() run_server(conf, logger, sock) strategy.log_sock_exit(sock, sock_info) return 0 else: strategy.register_worker_start(sock, sock_info, pid) # The strategy may need to pay attention to something in addition to # child process exits (like new ports showing up in a ring). # # NOTE: a timeout value of None will just instantiate the Timeout # object and not actually schedule it, which is equivalent to no # timeout for the green_os.wait(). loop_timeout = strategy.loop_timeout() with Timeout(loop_timeout, exception=False): try: try: pid, status = green_os.wait() if os.WIFEXITED(status) or os.WIFSIGNALED(status): strategy.register_worker_exit(pid) except OSError as err: if err.errno not in (errno.EINTR, errno.ECHILD): raise if err.errno == errno.ECHILD: # If there are no children at all (ECHILD), then # there's nothing to actually wait on. We sleep # for a little bit to avoid a tight CPU spin # and still are able to catch any KeyboardInterrupt # events that happen. The value of 0.01 matches the # value in eventlet's waitpid(). sleep(0.01) except KeyboardInterrupt: logger.notice('User quit') running[0] = False break strategy.shutdown_sockets() logger.notice('Exited') return 0 class ConfigFileError(Exception): pass class ConfigFilePortError(ConfigFileError): pass def _initrp(conf_path, app_section, *args, **kwargs): try: conf = appconfig(conf_path, name=app_section) except Exception as e: raise ConfigFileError("Error trying to load config from %s: %s" % (conf_path, e)) validate_configuration() # pre-configure logger log_name = conf.get('log_name', app_section) if 'logger' in kwargs: logger = kwargs.pop('logger') else: logger = get_logger(conf, log_name, log_to_console=kwargs.pop('verbose', False), log_route='wsgi') # disable fallocate if desired if config_true_value(conf.get('disable_fallocate', 'no')): disable_fallocate() monkey_patch_mimetools() return (conf, logger, log_name) def init_request_processor(conf_path, app_section, *args, **kwargs): """ Loads common settings from conf Sets the logger Loads the request processor :param conf_path: Path to paste.deploy style configuration file/directory :param app_section: App name from conf file to load config from :returns: the loaded application entry point :raises ConfigFileError: Exception is raised for config file error """ (conf, logger, log_name) = _initrp(conf_path, app_section, *args, **kwargs) app = loadapp(conf_path, global_conf={'log_name': log_name}) return (app, conf, logger, log_name) class WSGIContext(object): """ This class provides a means to provide context (scope) for a middleware filter to have access to the wsgi start_response results like the request status and headers. 
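A typical middleware built on this class looks like the following
(a minimal sketch; ``ExampleMiddleware`` is a hypothetical name):

    class ExampleMiddleware(WSGIContext):
        def __call__(self, env, start_response):
            app_iter = self._app_call(env)
            # the captured status/headers are now available for inspection
            if self._get_status_int() == 404:
                pass  # e.g. rewrite or augment the response here
            start_response(self._response_status, self._response_headers,
                           self._response_exc_info)
            return app_iter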
""" def __init__(self, wsgi_app): self.app = wsgi_app def _start_response(self, status, headers, exc_info=None): """ Saves response info without sending it to the remote client. Uses the same semantics as the usual WSGI start_response. """ self._response_status = status self._response_headers = headers self._response_exc_info = exc_info def _app_call(self, env): """ Ensures start_response has been called before returning. """ self._response_status = None self._response_headers = None self._response_exc_info = None resp = self.app(env, self._start_response) # if start_response has not been called, iterate until we've got a # non-empty chunk, by which time the app *should* have called it if self._response_status is None: resp = reiterate(resp) return resp def _get_status_int(self): """ Returns the HTTP status int from the last called self._start_response result. """ return int(self._response_status.split(' ', 1)[0]) def _response_header_value(self, key): "Returns str of value for given header key or None" for h_key, val in self._response_headers: if h_key.lower() == key.lower(): return val return None def update_content_length(self, new_total_len): self._response_headers = [ (h, v) for h, v in self._response_headers if h.lower() != 'content-length'] self._response_headers.append(('Content-Length', str(new_total_len))) def make_env(env, method=None, path=None, agent='Swift', query_string=None, swift_source=None): """ Returns a new fresh WSGI environment. :param env: The WSGI environment to base the new environment on. :param method: The new REQUEST_METHOD or None to use the original. :param path: The new path_info or none to use the original. path should NOT be quoted. When building a url, a Webob Request (in accordance with wsgi spec) will quote env['PATH_INFO']. url += quote(environ['PATH_INFO']) :param query_string: The new query_string or none to use the original. When building a url, a Webob Request will append the query string directly to the url. url += '?' + env['QUERY_STRING'] :param agent: The HTTP user agent to use; default 'Swift'. You can put %(orig)s in the agent to have it replaced with the original env's HTTP_USER_AGENT, such as '%(orig)s StaticWeb'. You also set agent to None to use the original env's HTTP_USER_AGENT or '' to have no HTTP_USER_AGENT. :param swift_source: Used to mark the request as originating out of middleware. Will be logged in proxy logs. :returns: Fresh WSGI environment. 
""" newenv = {} for name in ('HTTP_USER_AGENT', 'HTTP_HOST', 'PATH_INFO', 'QUERY_STRING', 'REMOTE_USER', 'REQUEST_METHOD', 'SCRIPT_NAME', 'SERVER_NAME', 'SERVER_PORT', 'HTTP_ORIGIN', 'HTTP_ACCESS_CONTROL_REQUEST_METHOD', 'SERVER_PROTOCOL', 'swift.cache', 'swift.source', 'swift.trans_id', 'swift.authorize_override', 'swift.authorize', 'HTTP_X_USER_ID', 'HTTP_X_PROJECT_ID', 'HTTP_REFERER', 'swift.infocache'): if name in env: newenv[name] = env[name] if method: newenv['REQUEST_METHOD'] = method if path: newenv['PATH_INFO'] = path newenv['SCRIPT_NAME'] = '' if query_string is not None: newenv['QUERY_STRING'] = query_string if agent: newenv['HTTP_USER_AGENT'] = ( agent % {'orig': env.get('HTTP_USER_AGENT', '')}).strip() elif agent == '' and 'HTTP_USER_AGENT' in newenv: del newenv['HTTP_USER_AGENT'] if swift_source: newenv['swift.source'] = swift_source newenv['wsgi.input'] = BytesIO() if 'SCRIPT_NAME' not in newenv: newenv['SCRIPT_NAME'] = '' return newenv def make_subrequest(env, method=None, path=None, body=None, headers=None, agent='Swift', swift_source=None, make_env=make_env): """ Makes a new swob.Request based on the current env but with the parameters specified. :param env: The WSGI environment to base the new request on. :param method: HTTP method of new request; default is from the original env. :param path: HTTP path of new request; default is from the original env. path should be compatible with what you would send to Request.blank. path should be quoted and it can include a query string. for example: '/a%20space?unicode_str%E8%AA%9E=y%20es' :param body: HTTP body of new request; empty by default. :param headers: Extra HTTP headers of new request; None by default. :param agent: The HTTP user agent to use; default 'Swift'. You can put %(orig)s in the agent to have it replaced with the original env's HTTP_USER_AGENT, such as '%(orig)s StaticWeb'. You also set agent to None to use the original env's HTTP_USER_AGENT or '' to have no HTTP_USER_AGENT. :param swift_source: Used to mark the request as originating out of middleware. Will be logged in proxy logs. :param make_env: make_subrequest calls this make_env to help build the swob.Request. :returns: Fresh swob.Request object. """ query_string = None path = path or '' if path and '?' 
in path: path, query_string = path.split('?', 1) newenv = make_env(env, method, path=unquote(path), agent=agent, query_string=query_string, swift_source=swift_source) if not headers: headers = {} if body: return Request.blank(path, environ=newenv, body=body, headers=headers) else: return Request.blank(path, environ=newenv, headers=headers) def make_pre_authed_env(env, method=None, path=None, agent='Swift', query_string=None, swift_source=None): """Same as :py:func:`make_env` but with preauthorization.""" newenv = make_env( env, method=method, path=path, agent=agent, query_string=query_string, swift_source=swift_source) newenv['swift.authorize'] = lambda req: None newenv['swift.authorize_override'] = True newenv['REMOTE_USER'] = '.wsgi.pre_authed' return newenv def make_pre_authed_request(env, method=None, path=None, body=None, headers=None, agent='Swift', swift_source=None): """Same as :py:func:`make_subrequest` but with preauthorization.""" return make_subrequest( env, method=method, path=path, body=body, headers=headers, agent=agent, swift_source=swift_source, make_env=make_pre_authed_env) swift-2.17.1/swift/common/internal_client.py0000666000175000017500000010753613435012015021140 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from eventlet import sleep, Timeout from eventlet.green import httplib, socket import json import six from six.moves import range from six.moves import urllib import struct from sys import exc_info, exit import zlib from time import gmtime, strftime, time from zlib import compressobj from swift.common.exceptions import ClientException from swift.common.http import (HTTP_NOT_FOUND, HTTP_MULTIPLE_CHOICES, is_server_error) from swift.common.swob import Request from swift.common.utils import quote, closing_if_possible from swift.common.wsgi import loadapp, pipeline_property if six.PY3: from eventlet.green.urllib import request as urllib2 else: from eventlet.green import urllib2 class UnexpectedResponse(Exception): """ Exception raised on invalid responses to InternalClient.make_request(). :param message: Exception message. :param resp: The unexpected response. """ def __init__(self, message, resp): super(UnexpectedResponse, self).__init__(message) self.resp = resp class CompressingFileReader(object): """ Wrapper for file object to compress object while reading. Can be used to wrap file objects passed to InternalClient.upload_object(). Used in testing of InternalClient. :param file_obj: File object to wrap. :param compresslevel: Compression level, defaults to 9. :param chunk_size: Size of chunks read when iterating using object, defaults to 4096. """ def __init__(self, file_obj, compresslevel=9, chunk_size=4096): self._f = file_obj self.compresslevel = compresslevel self.chunk_size = chunk_size self.set_initial_state() def set_initial_state(self): """ Sets the object to the state needed for the first read. 
""" self._f.seek(0) self._compressor = compressobj( self.compresslevel, zlib.DEFLATED, -zlib.MAX_WBITS, zlib.DEF_MEM_LEVEL, 0) self.done = False self.first = True self.crc32 = 0 self.total_size = 0 def read(self, *a, **kw): """ Reads a chunk from the file object. Params are passed directly to the underlying file object's read(). :returns: Compressed chunk from file object. """ if self.done: return '' x = self._f.read(*a, **kw) if x: self.crc32 = zlib.crc32(x, self.crc32) & 0xffffffff self.total_size += len(x) compressed = self._compressor.compress(x) if not compressed: compressed = self._compressor.flush(zlib.Z_SYNC_FLUSH) else: compressed = self._compressor.flush(zlib.Z_FINISH) crc32 = struct.pack("= HTTP_MULTIPLE_CHOICES: ''.join(resp.app_iter) break data = json.loads(resp.body) if not data: break for item in data: yield item marker = data[-1]['name'].encode('utf8') def make_path(self, account, container=None, obj=None): """ Returns a swift path for a request quoting and utf-8 encoding the path parts as need be. :param account: swift account :param container: container, defaults to None :param obj: object, defaults to None :raises ValueError: Is raised if obj is specified and container is not. """ path = '/v1/%s' % quote(account) if container: path += '/%s' % quote(container) if obj: path += '/%s' % quote(obj) elif obj: raise ValueError('Object specified without container') return path def _set_metadata( self, path, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets metadata on path using metadata_prefix to set values in headers of POST request. :param path: Path to do POST on. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ headers = {} for k, v in metadata.items(): if k.lower().startswith(metadata_prefix): headers[k] = v else: headers['%s%s' % (metadata_prefix, k)] = v self.make_request('POST', path, headers, acceptable_statuses) # account methods def iter_containers( self, account, marker='', end_marker='', prefix='', acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns an iterator of containers dicts from an account. :param account: Account on which to do the container listing. :param marker: Prefix of first desired item, defaults to ''. :param end_marker: Last item returned will be 'less' than this, defaults to ''. :param prefix: Prefix of containers :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) return self._iter_items(path, marker, end_marker, prefix, acceptable_statuses) def get_account_info( self, account, acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns (container_count, object_count) for an account. :param account: Account on which to get the information. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). 
:raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) resp = self.make_request('HEAD', path, {}, acceptable_statuses) if not resp.status_int // 100 == 2: return (0, 0) return (int(resp.headers.get('x-account-container-count', 0)), int(resp.headers.get('x-account-object-count', 0))) def get_account_metadata( self, account, metadata_prefix='', acceptable_statuses=(2,)): """Gets account metadata. :param account: Account on which to get the metadata. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :returns: Returns dict of account metadata. Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) return self._get_metadata(path, metadata_prefix, acceptable_statuses) def set_account_metadata( self, account, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets account metadata. A call to this will add to the account metadata and not overwrite all of it with values in the metadata dict. To clear an account metadata value, pass an empty string as the value for the key in the metadata dict. :param account: Account on which to get the metadata. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) # container methods def container_exists(self, account, container): """Checks to see if a container exists. :param account: The container's account. :param container: Container to check. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. :returns: True if container exists, false otherwise. """ path = self.make_path(account, container) resp = self.make_request('HEAD', path, {}, (2, HTTP_NOT_FOUND)) return not resp.status_int == HTTP_NOT_FOUND def create_container( self, account, container, headers=None, acceptable_statuses=(2,)): """ Creates container. :param account: The container's account. :param container: Container to create. :param headers: Defaults to empty dict. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ headers = headers or {} path = self.make_path(account, container) self.make_request('PUT', path, headers, acceptable_statuses) def delete_container( self, account, container, acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Deletes a container. :param account: The container's account. 
:param container: Container to delete. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) self.make_request('DELETE', path, {}, acceptable_statuses) def get_container_metadata( self, account, container, metadata_prefix='', acceptable_statuses=(2,)): """Gets container metadata. :param account: The container's account. :param container: Container to get metadata on. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :returns: Returns dict of container metadata. Keys will be lowercase. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) return self._get_metadata(path, metadata_prefix, acceptable_statuses) def iter_objects( self, account, container, marker='', end_marker='', prefix='', acceptable_statuses=(2, HTTP_NOT_FOUND)): """ Returns an iterator of object dicts from a container. :param account: The container's account. :param container: Container to iterate objects on. :param marker: Prefix of first desired item, defaults to ''. :param end_marker: Last item returned will be 'less' than this, defaults to ''. :param prefix: Prefix of objects :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) return self._iter_items(path, marker, end_marker, prefix, acceptable_statuses) def set_container_metadata( self, account, container, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets container metadata. A call to this will add to the container metadata and not overwrite all of it with values in the metadata dict. To clear a container metadata value, pass an empty string as the value for the key in the metadata dict. :param account: The container's account. :param container: Container to set metadata on. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) # object methods def delete_object( self, account, container, obj, acceptable_statuses=(2, HTTP_NOT_FOUND), headers=None): """ Deletes an object. :param account: The object's account. :param container: The object's container. :param obj: The object. :param acceptable_statuses: List of status for valid responses, defaults to (2, HTTP_NOT_FOUND). 
:param headers: extra headers to send with request :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) self.make_request('DELETE', path, (headers or {}), acceptable_statuses) def get_object_metadata( self, account, container, obj, metadata_prefix='', acceptable_statuses=(2,), headers=None): """Gets object metadata. :param account: The object's account. :param container: The object's container. :param obj: The object. :param metadata_prefix: Used to filter values from the headers returned. Will strip that prefix from the keys in the dict returned. Defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :param headers: extra headers to send with request :returns: Dict of object metadata. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) return self._get_metadata(path, metadata_prefix, acceptable_statuses, headers=headers) def get_object(self, account, container, obj, headers, acceptable_statuses=(2,), params=None): """ Gets an object. :param account: The object's account. :param container: The object's container. :param obj: The object name. :param headers: Headers to send with request, defaults to empty dict. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :param params: A dict of params to be set in request query string, defaults to None. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. :returns: A 3-tuple (status, headers, iterator of object body) """ headers = headers or {} path = self.make_path(account, container, obj) resp = self.make_request( 'GET', path, headers, acceptable_statuses, params=params) return (resp.status_int, resp.headers, resp.app_iter) def iter_object_lines( self, account, container, obj, headers=None, acceptable_statuses=(2,)): """ Returns an iterator of object lines from an uncompressed or compressed text object. Uncompress object as it is read if the object's name ends with '.gz'. :param account: The object's account. :param container: The object's container. :param obj: The object. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. 
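
        A minimal usage sketch (the account, container and object names
        below are hypothetical, and ``client`` stands for an instance of
        this class)::

            for line in client.iter_object_lines(
                    'AUTH_test', 'logs', 'server.log.gz'):
                handle(line)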
""" headers = headers or {} path = self.make_path(account, container, obj) resp = self.make_request('GET', path, headers, acceptable_statuses) if not resp.status_int // 100 == 2: return last_part = '' compressed = obj.endswith('.gz') # magic in the following zlib.decompressobj argument is courtesy of # Python decompressing gzip chunk-by-chunk # http://stackoverflow.com/questions/2423866 d = zlib.decompressobj(16 + zlib.MAX_WBITS) for chunk in resp.app_iter: if compressed: chunk = d.decompress(chunk) parts = chunk.split('\n') if len(parts) == 1: last_part = last_part + parts[0] else: parts[0] = last_part + parts[0] for part in parts[:-1]: yield part last_part = parts[-1] if last_part: yield last_part def set_object_metadata( self, account, container, obj, metadata, metadata_prefix='', acceptable_statuses=(2,)): """ Sets an object's metadata. The object's metadata will be overwritten by the values in the metadata dict. :param account: The object's account. :param container: The object's container. :param obj: The object. :param metadata: Dict of metadata to set. :param metadata_prefix: Prefix used to set metadata values in headers of requests, used to prefix keys in metadata when setting metadata, defaults to ''. :param acceptable_statuses: List of status for valid responses, defaults to (2,). :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. """ path = self.make_path(account, container, obj) self._set_metadata( path, metadata, metadata_prefix, acceptable_statuses) def upload_object( self, fobj, account, container, obj, headers=None): """ :param fobj: File object to read object's content from. :param account: The object's account. :param container: The object's container. :param obj: The object. :param headers: Headers to send with request, defaults to empty dict. :raises UnexpectedResponse: Exception raised when requests fail to get a response with an acceptable status :raises Exception: Exception is raised when code fails in an unexpected way. 
""" headers = dict(headers or {}) if 'Content-Length' not in headers: headers['Transfer-Encoding'] = 'chunked' path = self.make_path(account, container, obj) self.make_request('PUT', path, headers, (2,), fobj) def get_auth(url, user, key, auth_version='1.0', **kwargs): if auth_version != '1.0': exit('ERROR: swiftclient missing, only auth v1.0 supported') req = urllib2.Request(url) req.add_header('X-Auth-User', user) req.add_header('X-Auth-Key', key) conn = urllib2.urlopen(req) headers = conn.info() return ( headers.getheader('X-Storage-Url'), headers.getheader('X-Auth-Token')) class SimpleClient(object): """ Simple client that is used in bin/swift-dispersion-* and container sync """ def __init__(self, url=None, token=None, starting_backoff=1, max_backoff=5, retries=5): self.url = url self.token = token self.attempts = 0 # needed in swif-dispersion-populate self.starting_backoff = starting_backoff self.max_backoff = max_backoff self.retries = retries def base_request(self, method, container=None, name=None, prefix=None, headers=None, proxy=None, contents=None, full_listing=None, logger=None, additional_info=None, timeout=None, marker=None): # Common request method trans_start = time() url = self.url if full_listing: info, body_data = self.base_request( method, container, name, prefix, headers, proxy, timeout=timeout, marker=marker) listing = body_data while listing: marker = listing[-1]['name'] info, listing = self.base_request( method, container, name, prefix, headers, proxy, timeout=timeout, marker=marker) if listing: body_data.extend(listing) return [info, body_data] if headers is None: headers = {} if self.token: headers['X-Auth-Token'] = self.token if container: url = '%s/%s' % (url.rstrip('/'), quote(container)) if name: url = '%s/%s' % (url.rstrip('/'), quote(name)) else: params = ['format=json'] if prefix: params.append('prefix=%s' % prefix) if marker: params.append('marker=%s' % quote(marker)) url += '?' 
+ '&'.join(params) req = urllib2.Request(url, headers=headers, data=contents) if proxy: proxy = urllib.parse.urlparse(proxy) req.set_proxy(proxy.netloc, proxy.scheme) req.get_method = lambda: method conn = urllib2.urlopen(req, timeout=timeout) body = conn.read() info = conn.info() try: body_data = json.loads(body) except ValueError: body_data = None trans_stop = time() if logger: sent_content_length = 0 for n, v in headers.items(): nl = n.lower() if nl == 'content-length': try: sent_content_length = int(v) break except ValueError: pass logger.debug("-> " + " ".join( quote(str(x) if x else "-", ":/") for x in ( strftime('%Y-%m-%dT%H:%M:%S', gmtime(trans_stop)), method, url, conn.getcode(), sent_content_length, info['content-length'], trans_start, trans_stop, trans_stop - trans_start, additional_info ))) return [info, body_data] def retry_request(self, method, **kwargs): retries = kwargs.pop('retries', self.retries) self.attempts = 0 backoff = self.starting_backoff while self.attempts <= retries: self.attempts += 1 try: return self.base_request(method, **kwargs) except (socket.error, httplib.HTTPException, urllib2.URLError) \ as err: if self.attempts > retries: if isinstance(err, urllib2.HTTPError): raise ClientException('Raise too many retries', http_status=err.getcode()) else: raise sleep(backoff) backoff = min(backoff * 2, self.max_backoff) def get_account(self, *args, **kwargs): # Used in swift-dispersion-populate return self.retry_request('GET', **kwargs) def put_container(self, container, **kwargs): # Used in swift-dispersion-populate return self.retry_request('PUT', container=container, **kwargs) def get_container(self, container, **kwargs): # Used in swift-dispersion-populate return self.retry_request('GET', container=container, **kwargs) def put_object(self, container, name, contents, **kwargs): # Used in swift-dispersion-populate return self.retry_request('PUT', container=container, name=name, contents=contents.read(), **kwargs) def head_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) return client.retry_request('HEAD', **kwargs) def put_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) client.retry_request('PUT', **kwargs) def delete_object(url, **kwargs): """For usage with container sync """ client = SimpleClient(url=url) client.retry_request('DELETE', **kwargs) swift-2.17.1/swift/common/exceptions.py0000666000175000017500000001270513435012015020140 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
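
# A minimal usage sketch for the SimpleClient defined above (kept as a
# comment; the endpoint and credentials are hypothetical). get_auth()
# trades v1.0 credentials for a storage URL and token, and SimpleClient
# then retries each request with exponential backoff:
#
#     url, token = get_auth('http://127.0.0.1:8080/auth/v1.0',
#                           'test:tester', 'testing')
#     client = SimpleClient(url=url, token=token, retries=3)
#     info, listing = client.retry_request('GET', full_listing=True)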
from eventlet import Timeout import swift.common.utils class MessageTimeout(Timeout): def __init__(self, seconds=None, msg=None): Timeout.__init__(self, seconds=seconds) self.msg = msg def __str__(self): return '%s: %s' % (Timeout.__str__(self), self.msg) class SwiftException(Exception): pass class PutterConnectError(Exception): def __init__(self, status=None): self.status = status class InvalidTimestamp(SwiftException): pass class InsufficientStorage(SwiftException): pass class FooterNotSupported(SwiftException): pass class MultiphasePUTNotSupported(SwiftException): pass class SuffixSyncError(SwiftException): pass class RangeAlreadyComplete(SwiftException): pass class DiskFileError(SwiftException): pass class DiskFileNotOpen(DiskFileError): pass class DiskFileQuarantined(DiskFileError): pass class DiskFileCollision(DiskFileError): pass class DiskFileNotExist(DiskFileError): pass class DiskFileDeleted(DiskFileNotExist): def __init__(self, metadata=None): self.metadata = metadata or {} self.timestamp = swift.common.utils.Timestamp( self.metadata.get('X-Timestamp', 0)) class DiskFileExpired(DiskFileDeleted): pass class DiskFileNoSpace(DiskFileError): pass class DiskFileDeviceUnavailable(DiskFileError): pass class DiskFileXattrNotSupported(DiskFileError): pass class DiskFileBadMetadataChecksum(DiskFileError): pass class DeviceUnavailable(SwiftException): pass class InvalidAccountInfo(SwiftException): pass class PathNotDir(OSError): pass class ChunkReadError(SwiftException): pass class ChunkReadTimeout(Timeout): pass class ChunkWriteTimeout(Timeout): pass class ConnectionTimeout(Timeout): pass class ResponseTimeout(Timeout): pass class DriveNotMounted(SwiftException): pass class LockTimeout(MessageTimeout): pass class RingLoadError(SwiftException): pass class RingBuilderError(SwiftException): pass class RingValidationError(RingBuilderError): pass class EmptyRingError(RingBuilderError): pass class DuplicateDeviceError(RingBuilderError): pass class UnPicklingError(SwiftException): pass class FileNotFoundError(SwiftException): pass class PermissionError(SwiftException): pass class ListingIterError(SwiftException): pass class ListingIterNotFound(ListingIterError): pass class ListingIterNotAuthorized(ListingIterError): def __init__(self, aresp): self.aresp = aresp class SegmentError(SwiftException): pass class LinkIterError(SwiftException): pass class ReplicationException(Exception): pass class ReplicationLockTimeout(LockTimeout): pass class MimeInvalid(SwiftException): pass class APIVersionError(SwiftException): pass class EncryptionException(SwiftException): pass class ClientException(Exception): def __init__(self, msg, http_scheme='', http_host='', http_port='', http_path='', http_query='', http_status=None, http_reason='', http_device='', http_response_content='', http_headers=None): super(ClientException, self).__init__(msg) self.msg = msg self.http_scheme = http_scheme self.http_host = http_host self.http_port = http_port self.http_path = http_path self.http_query = http_query self.http_status = http_status self.http_reason = http_reason self.http_device = http_device self.http_response_content = http_response_content self.http_headers = http_headers or {} def __str__(self): a = self.msg b = '' if self.http_scheme: b += '%s://' % self.http_scheme if self.http_host: b += self.http_host if self.http_port: b += ':%s' % self.http_port if self.http_path: b += self.http_path if self.http_query: b += '?%s' % self.http_query if self.http_status: if b: b = '%s %s' % (b, self.http_status) else: b = 
str(self.http_status)
        if self.http_reason:
            if b:
                b = '%s %s' % (b, self.http_reason)
            else:
                b = '- %s' % self.http_reason
        if self.http_device:
            if b:
                b = '%s: device %s' % (b, self.http_device)
            else:
                b = 'device %s' % self.http_device
        if self.http_response_content:
            if len(self.http_response_content) <= 60:
                b += ' %s' % self.http_response_content
            else:
                b += ' [first 60 chars of response] %s' \
                    % self.http_response_content[:60]
        return b and '%s: %s' % (a, b) or a


class InvalidPidFileException(Exception):
    pass
swift-2.17.1/swift/common/direct_client.py0000666000175000017500000005265213435012015020574 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Internal client library for making calls directly to the servers rather
than through the proxy.
"""

import json
import os
import socket

from eventlet import sleep, Timeout
import six
import six.moves.cPickle as pickle
from six.moves.http_client import HTTPException

from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ClientException
from swift.common.utils import Timestamp, FileLikeIter
from swift.common.http import HTTP_NO_CONTENT, HTTP_INSUFFICIENT_STORAGE, \
    is_success, is_server_error
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import quote


class DirectClientException(ClientException):

    def __init__(self, stype, method, node, part, path, resp, host=None):
        # host can be used to override the node ip and port reported in
        # the exception
        host = host if host is not None else node
        if not isinstance(path, six.text_type):
            path = path.decode("utf-8")
        full_path = quote('/%s/%s%s' % (node['device'], part, path))
        msg = '%s server %s:%s direct %s %r gave status %s' % (
            stype, host['ip'], host['port'], method, full_path, resp.status)
        headers = HeaderKeyDict(resp.getheaders())
        super(DirectClientException, self).__init__(
            msg, http_host=host['ip'], http_port=host['port'],
            http_device=node['device'], http_status=resp.status,
            http_reason=resp.reason, http_headers=headers)


def _make_req(node, part, method, path, _headers, stype,
              conn_timeout=5, response_timeout=15):
    """
    Make a request to a backend storage node (i.e. 'Account', 'Container',
    'Object').

    :param node: a node dict from a ring
    :param part: an integer, the partition number
    :param method: a string, the HTTP method (e.g. 'PUT', 'DELETE', etc)
    :param path: a string, the request path
    :param _headers: a dict, header name => value
    :param stype: a string, describing the type of service
    :returns: an HTTPResponse object
    """
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            method, path, headers=_headers)
    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if not is_success(resp.status):
        raise DirectClientException(stype, method, node, part, path, resp)
    return resp


def _get_direct_account_container(path, stype, node, part,
                                  marker=None, limit=None,
                                  prefix=None, delimiter=None,
                                  conn_timeout=5, response_timeout=15,
                                  end_marker=None, reverse=None):
    """Base function for direct GET requests to the account and container
    servers.

    Do not use directly; use direct_get_account or direct_get_container
    instead.
    """
    params = ['format=json']
    if marker:
        params.append('marker=%s' % quote(marker))
    if limit:
        params.append('limit=%d' % limit)
    if prefix:
        params.append('prefix=%s' % quote(prefix))
    if delimiter:
        params.append('delimiter=%s' % quote(delimiter))
    if end_marker:
        params.append('end_marker=%s' % quote(end_marker))
    if reverse:
        params.append('reverse=%s' % quote(reverse))
    qs = '&'.join(params)
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'GET', path, query_string=qs,
                            headers=gen_headers())
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        resp.read()
        raise DirectClientException(stype, 'GET', node, part, path, resp)

    resp_headers = HeaderKeyDict()
    for header, value in resp.getheaders():
        resp_headers[header] = value
    if resp.status == HTTP_NO_CONTENT:
        resp.read()
        return resp_headers, []
    return resp_headers, json.loads(resp.read())


def gen_headers(hdrs_in=None, add_ts=False):
    hdrs_out = HeaderKeyDict(hdrs_in) if hdrs_in else HeaderKeyDict()
    if add_ts:
        hdrs_out['X-Timestamp'] = Timestamp.now().internal
    hdrs_out['User-Agent'] = 'direct-client %s' % os.getpid()
    return hdrs_out


def direct_get_account(node, part, account, marker=None, limit=None,
                       prefix=None, delimiter=None, conn_timeout=5,
                       response_timeout=15, end_marker=None, reverse=None):
    """
    Get listings directly from the account server.

    :param node: node dictionary from the ring
    :param part: partition the account is on
    :param account: account name
    :param marker: marker query
    :param limit: query limit
    :param prefix: prefix query
    :param delimiter: delimiter for the query
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param end_marker: end_marker query
    :param reverse: reverse the returned listing
    :returns: a tuple of (response headers, a list of containers) The
              response headers will be a HeaderKeyDict.
    """
    path = '/' + account
    return _get_direct_account_container(path, "Account", node, part,
                                         marker=marker, limit=limit,
                                         prefix=prefix, delimiter=delimiter,
                                         end_marker=end_marker,
                                         reverse=reverse,
                                         conn_timeout=conn_timeout,
                                         response_timeout=response_timeout)


def direct_delete_account(node, part, account, conn_timeout=5,
                          response_timeout=15, headers=None):
    if headers is None:
        headers = {}

    path = '/%s' % account
    _make_req(node, part, 'DELETE', path, gen_headers(headers, True),
              'Account', conn_timeout, response_timeout)


def direct_head_container(node, part, account, container, conn_timeout=5,
                          response_timeout=15):
    """
    Request container information directly from the container server.
:param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :returns: a dict containing the response's headers in a HeaderKeyDict :raises ClientException: HTTP HEAD request failed """ path = '/%s/%s' % (account, container) resp = _make_req(node, part, 'HEAD', path, gen_headers(), 'Container', conn_timeout, response_timeout) resp_headers = HeaderKeyDict() for header, value in resp.getheaders(): resp_headers[header] = value return resp_headers def direct_get_container(node, part, account, container, marker=None, limit=None, prefix=None, delimiter=None, conn_timeout=5, response_timeout=15, end_marker=None, reverse=None): """ Get container listings directly from the container server. :param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param marker: marker query :param limit: query limit :param prefix: prefix query :param delimiter: delimiter for the query :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :param end_marker: end_marker query :param reverse: reverse the returned listing :returns: a tuple of (response headers, a list of objects) The response headers will be a HeaderKeyDict. """ path = '/%s/%s' % (account, container) return _get_direct_account_container(path, "Container", node, part, marker=marker, limit=limit, prefix=prefix, delimiter=delimiter, end_marker=end_marker, reverse=reverse, conn_timeout=conn_timeout, response_timeout=response_timeout) def direct_delete_container(node, part, account, container, conn_timeout=5, response_timeout=15, headers=None): """ Delete container directly from the container server. 
:param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :param headers: dict to be passed into HTTPConnection headers :raises ClientException: HTTP DELETE request failed """ if headers is None: headers = {} path = '/%s/%s' % (account, container) add_timestamp = 'x-timestamp' not in (k.lower() for k in headers) _make_req(node, part, 'DELETE', path, gen_headers(headers, add_timestamp), 'Container', conn_timeout, response_timeout) def direct_put_container_object(node, part, account, container, obj, conn_timeout=5, response_timeout=15, headers=None): if headers is None: headers = {} have_x_timestamp = 'x-timestamp' in (k.lower() for k in headers) path = '/%s/%s/%s' % (account, container, obj) _make_req(node, part, 'PUT', path, gen_headers(headers, add_ts=(not have_x_timestamp)), 'Container', conn_timeout, response_timeout) def direct_delete_container_object(node, part, account, container, obj, conn_timeout=5, response_timeout=15, headers=None): if headers is None: headers = {} headers = gen_headers(headers, add_ts='x-timestamp' not in ( k.lower() for k in headers)) path = '/%s/%s/%s' % (account, container, obj) _make_req(node, part, 'DELETE', path, headers, 'Container', conn_timeout, response_timeout) def direct_head_object(node, part, account, container, obj, conn_timeout=5, response_timeout=15, headers=None): """ Request object information directly from the object server. :param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param obj: object name :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :param headers: dict to be passed into HTTPConnection headers :returns: a dict containing the response's headers in a HeaderKeyDict :raises ClientException: HTTP HEAD request failed """ if headers is None: headers = {} headers = gen_headers(headers) path = '/%s/%s/%s' % (account, container, obj) resp = _make_req(node, part, 'HEAD', path, headers, 'Object', conn_timeout, response_timeout) resp_headers = HeaderKeyDict() for header, value in resp.getheaders(): resp_headers[header] = value return resp_headers def direct_get_object(node, part, account, container, obj, conn_timeout=5, response_timeout=15, resp_chunk_size=None, headers=None): """ Get object directly from the object server. :param node: node dictionary from the ring :param part: partition the container is on :param account: account name :param container: container name :param obj: object name :param conn_timeout: timeout in seconds for establishing the connection :param response_timeout: timeout in seconds for getting the response :param resp_chunk_size: if defined, chunk size of data to read. :param headers: dict to be passed into HTTPConnection headers :returns: a tuple of (response headers, the object's contents) The response headers will be a HeaderKeyDict. 
    :raises ClientException: HTTP GET request failed
    """
    if headers is None:
        headers = {}

    path = '/%s/%s/%s' % (account, container, obj)
    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'GET', path, headers=gen_headers(headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        resp.read()
        raise DirectClientException('Object', 'GET', node, part, path, resp)

    if resp_chunk_size:

        def _object_body():
            buf = resp.read(resp_chunk_size)
            while buf:
                yield buf
                buf = resp.read(resp_chunk_size)
        object_body = _object_body()
    else:
        object_body = resp.read()
    resp_headers = HeaderKeyDict()
    for header, value in resp.getheaders():
        resp_headers[header] = value
    return resp_headers, object_body


def direct_put_object(node, part, account, container, name, contents,
                      content_length=None, etag=None, content_type=None,
                      headers=None, conn_timeout=5, response_timeout=15,
                      chunk_size=65535):
    """
    Put object directly to the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param contents: an iterable or string to read object data from
    :param content_length: value to send as content-length header
    :param etag: etag of contents
    :param content_type: value to send as content-type header
    :param headers: additional headers to include in the request
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param chunk_size: if defined, chunk size of data to send.
    :returns: etag from the server response
    :raises ClientException: HTTP PUT request failed
    """
    path = '/%s/%s/%s' % (account, container, name)
    if headers is None:
        headers = {}
    if etag:
        headers['ETag'] = etag.strip('"')
    if content_length is not None:
        headers['Content-Length'] = str(content_length)
    else:
        for n, v in headers.items():
            if n.lower() == 'content-length':
                content_length = int(v)
    if content_type is not None:
        headers['Content-Type'] = content_type
    else:
        headers['Content-Type'] = 'application/octet-stream'
    if not contents:
        headers['Content-Length'] = '0'
    if isinstance(contents, six.string_types):
        contents = [contents]
    # In case the caller wants to insert an object with a specific age
    add_ts = 'X-Timestamp' not in headers

    if content_length is None:
        headers['Transfer-Encoding'] = 'chunked'

    with Timeout(conn_timeout):
        conn = http_connect(node['ip'], node['port'], node['device'], part,
                            'PUT', path, headers=gen_headers(headers, add_ts))

    contents_f = FileLikeIter(contents)

    if content_length is None:
        chunk = contents_f.read(chunk_size)
        while chunk:
            conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
            chunk = contents_f.read(chunk_size)
        conn.send('0\r\n\r\n')
    else:
        left = content_length
        while left > 0:
            size = chunk_size
            if size > left:
                size = left
            chunk = contents_f.read(size)
            if not chunk:
                break
            conn.send(chunk)
            left -= len(chunk)

    with Timeout(response_timeout):
        resp = conn.getresponse()
        resp.read()
    if not is_success(resp.status):
        raise DirectClientException('Object', 'PUT',
                                    node, part, path, resp)
    return resp.getheader('etag').strip('"')


def direct_post_object(node, part, account, container, name, headers,
                       conn_timeout=5, response_timeout=15):
    """
    Direct update to object metadata on object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param name: object name
    :param headers: headers to store as metadata
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP POST request failed
    """
    path = '/%s/%s/%s' % (account, container, name)
    _make_req(node, part, 'POST', path, gen_headers(headers, True),
              'Object', conn_timeout, response_timeout)


def direct_delete_object(node, part, account, container, obj,
                         conn_timeout=5, response_timeout=15, headers=None):
    """
    Delete object directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param account: account name
    :param container: container name
    :param obj: object name
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :raises ClientException: HTTP DELETE request failed
    """
    if headers is None:
        headers = {}

    headers = gen_headers(headers, add_ts='x-timestamp' not in (
        k.lower() for k in headers))

    path = '/%s/%s/%s' % (account, container, obj)
    _make_req(node, part, 'DELETE', path, headers,
              'Object', conn_timeout, response_timeout)


def direct_get_suffix_hashes(node, part, suffixes, conn_timeout=5,
                             response_timeout=15, headers=None):
    """
    Get suffix hashes directly from the object server.

    :param node: node dictionary from the ring
    :param part: partition the container is on
    :param conn_timeout: timeout in seconds for establishing the connection
    :param response_timeout: timeout in seconds for getting the response
    :param headers: dict to be passed into HTTPConnection headers
    :returns: dict of suffix hashes
    :raises ClientException: HTTP REPLICATE request failed
    """
    if headers is None:
        headers = {}

    path = '/%s' % '-'.join(suffixes)
    with Timeout(conn_timeout):
        conn = http_connect(node['replication_ip'], node['replication_port'],
                            node['device'], part, 'REPLICATE', path,
                            headers=gen_headers(headers))
    with Timeout(response_timeout):
        resp = conn.getresponse()
    if not is_success(resp.status):
        raise DirectClientException('Object', 'REPLICATE',
                                    node, part, path, resp,
                                    host={'ip': node['replication_ip'],
                                          'port': node['replication_port']}
                                    )
    return pickle.loads(resp.read())


def retry(func, *args, **kwargs):
    """
    Helper function to retry a given function a number of times.

    :param func: callable to be called
    :param retries: number of retries
    :param error_log: logger for errors
    :param args: arguments to send to func
    :param kwargs: keyword arguments to send to func (if retries or
                   error_log are sent, they will be deleted from kwargs
                   before sending on to func)
    :returns: a tuple of (attempts made, result of func)
    :raises ClientException: all retries failed
    """
    retries = kwargs.pop('retries', 5)
    error_log = kwargs.pop('error_log', None)
    attempts = 0
    backoff = 1
    while attempts <= retries:
        attempts += 1
        try:
            return attempts, func(*args, **kwargs)
        except (socket.error, HTTPException, Timeout) as err:
            if error_log:
                error_log(err)
            if attempts > retries:
                raise
        except ClientException as err:
            if error_log:
                error_log(err)
            if attempts > retries or not is_server_error(err.http_status) or \
                    err.http_status == HTTP_INSUFFICIENT_STORAGE:
                raise
        sleep(backoff)
        backoff *= 2
    # Shouldn't actually get down here, but just in case.
    if args and 'ip' in args[0]:
        raise ClientException('Raise too many retries',
                              http_host=args[0]['ip'],
                              http_port=args[0]['port'],
                              http_device=args[0]['device'])
    else:
        raise ClientException('Raise too many retries')
swift-2.17.1/swift/common/middleware/0000775000175000017500000000000013435012120017510 5ustar zuulzuul00000000000000swift-2.17.1/swift/common/middleware/container_quotas.py0000666000175000017500000001250113435012003023441 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The ``container_quotas`` middleware implements simple quotas that can be
imposed on swift containers by a user with the ability to set container
metadata, most likely the account administrator. This can be useful for
limiting the scope of containers that are delegated to non-admin users,
exposed to ``formpost`` uploads, or just as a self-imposed sanity check.

Any object PUT operations that exceed these quotas return a 413 response
(request entity too large) with a descriptive body.

Quotas are subject to several limitations: eventual consistency, the
timeliness of the cached container_info (60 second ttl by default), and
the inability to reject chunked transfer uploads that exceed the quota
(though once the quota is exceeded, new chunked transfers will be
refused).

Quotas are set by adding meta values to the container, and are validated
when set:

+---------------------------------------------+-------------------------------+
|Metadata                                     | Use                           |
+=============================================+===============================+
| X-Container-Meta-Quota-Bytes                | Maximum size of the           |
|                                             | container, in bytes.          |
+---------------------------------------------+-------------------------------+
| X-Container-Meta-Quota-Count                | Maximum object count of the   |
|                                             | container.                    |
+---------------------------------------------+-------------------------------+

The ``container_quotas`` middleware should be added to the pipeline in your
``/etc/swift/proxy-server.conf`` file just after any auth middleware.
For example::

    [pipeline:main]
    pipeline = catch_errors cache tempauth container_quotas proxy-server

    [filter:container_quotas]
    use = egg:swift#container_quotas
"""
from swift.common.http import is_success
from swift.common.swob import HTTPRequestEntityTooLarge, HTTPBadRequest, \
    wsgify
from swift.common.utils import register_swift_info
from swift.proxy.controllers.base import get_container_info


class ContainerQuotaMiddleware(object):
    def __init__(self, app, *args, **kwargs):
        self.app = app

    def bad_response(self, req, container_info):
        # 401 if the user couldn't have PUT this object in the first place.
        # This prevents leaking the container's existence to unauthed users.
        if 'swift.authorize' in req.environ:
            req.acl = container_info['write_acl']
            aresp = req.environ['swift.authorize'](req)
            if aresp:
                return aresp

        return HTTPRequestEntityTooLarge(body='Upload exceeds quota.')

    @wsgify
    def __call__(self, req):
        try:
            (version, account, container, obj) = req.split_path(3, 4, True)
        except ValueError:
            return self.app

        # verify new quota headers are properly formatted
        if not obj and req.method in ('PUT', 'POST'):
            val = req.headers.get('X-Container-Meta-Quota-Bytes')
            if val and not val.isdigit():
                return HTTPBadRequest(body='Invalid bytes quota.')
            val = req.headers.get('X-Container-Meta-Quota-Count')
            if val and not val.isdigit():
                return HTTPBadRequest(body='Invalid count quota.')

        # check user uploads against quotas
        elif obj and req.method in ('PUT',):
            container_info = get_container_info(
                req.environ, self.app, swift_source='CQ')
            if not container_info or not is_success(container_info['status']):
                # this will hopefully 404 later
                return self.app

            if 'quota-bytes' in container_info.get('meta', {}) and \
                    'bytes' in container_info and \
                    container_info['meta']['quota-bytes'].isdigit():
                content_length = (req.content_length or 0)
                new_size = int(container_info['bytes']) + content_length
                if int(container_info['meta']['quota-bytes']) < new_size:
                    return self.bad_response(req, container_info)

            if 'quota-count' in container_info.get('meta', {}) and \
                    'object_count' in container_info and \
                    container_info['meta']['quota-count'].isdigit():
                new_count = int(container_info['object_count']) + 1
                if int(container_info['meta']['quota-count']) < new_count:
                    return self.bad_response(req, container_info)

        return self.app


def filter_factory(global_conf, **local_conf):
    register_swift_info('container_quotas')

    def container_quota_filter(app):
        return ContainerQuotaMiddleware(app)
    return container_quota_filter
swift-2.17.1/swift/common/middleware/memcache.py0000666000175000017500000001111113435012003021621 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os

from six.moves.configparser import ConfigParser, NoSectionError, NoOptionError

from swift.common.memcached import (MemcacheRing, CONN_TIMEOUT, POOL_TIMEOUT,
                                    IO_TIMEOUT, TRY_COUNT)


class MemcacheMiddleware(object):
    """
    Caching middleware that manages caching in swift.
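
    A sketch of how this filter is typically wired into the proxy pipeline
    via paste.deploy (values are illustrative; see memcache.conf-sample and
    proxy-server.conf-sample for the full set of options)::

        [pipeline:main]
        pipeline = catch_errors cache proxy-server

        [filter:cache]
        use = egg:swift#memcache
        memcache_servers = 127.0.0.1:11211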
""" def __init__(self, app, conf): self.app = app self.memcache_servers = conf.get('memcache_servers') serialization_format = conf.get('memcache_serialization_support') try: # Originally, while we documented using memcache_max_connections # we only accepted max_connections max_conns = int(conf.get('memcache_max_connections', conf.get('max_connections', 0))) except ValueError: max_conns = 0 memcache_options = {} if (not self.memcache_servers or serialization_format is None or max_conns <= 0): path = os.path.join(conf.get('swift_dir', '/etc/swift'), 'memcache.conf') memcache_conf = ConfigParser() if memcache_conf.read(path): # if memcache.conf exists we'll start with those base options try: memcache_options = dict(memcache_conf.items('memcache')) except NoSectionError: pass if not self.memcache_servers: try: self.memcache_servers = \ memcache_conf.get('memcache', 'memcache_servers') except (NoSectionError, NoOptionError): pass if serialization_format is None: try: serialization_format = \ memcache_conf.get('memcache', 'memcache_serialization_support') except (NoSectionError, NoOptionError): pass if max_conns <= 0: try: new_max_conns = \ memcache_conf.get('memcache', 'memcache_max_connections') max_conns = int(new_max_conns) except (NoSectionError, NoOptionError, ValueError): pass # while memcache.conf options are the base for the memcache # middleware, if you set the same option also in the filter # section of the proxy config it is more specific. memcache_options.update(conf) connect_timeout = float(memcache_options.get( 'connect_timeout', CONN_TIMEOUT)) pool_timeout = float(memcache_options.get( 'pool_timeout', POOL_TIMEOUT)) tries = int(memcache_options.get('tries', TRY_COUNT)) io_timeout = float(memcache_options.get('io_timeout', IO_TIMEOUT)) if not self.memcache_servers: self.memcache_servers = '127.0.0.1:11211' if max_conns <= 0: max_conns = 2 if serialization_format is None: serialization_format = 2 else: serialization_format = int(serialization_format) self.memcache = MemcacheRing( [s.strip() for s in self.memcache_servers.split(',') if s.strip()], connect_timeout=connect_timeout, pool_timeout=pool_timeout, tries=tries, io_timeout=io_timeout, allow_pickle=(serialization_format == 0), allow_unpickle=(serialization_format <= 1), max_conns=max_conns) def __call__(self, env, start_response): env['swift.cache'] = self.memcache return self.app(env, start_response) def filter_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) def cache_filter(app): return MemcacheMiddleware(app, conf) return cache_filter swift-2.17.1/swift/common/middleware/__init__.py0000666000175000017500000000261313435012003021625 0ustar zuulzuul00000000000000# Copyright (c) 2010-2017 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
import re from swift.common.wsgi import WSGIContext class RewriteContext(WSGIContext): base_re = None def __init__(self, app, requested, rewritten): super(RewriteContext, self).__init__(app) self.requested = requested self.rewritten_re = re.compile(self.base_re % re.escape(rewritten)) def handle_request(self, env, start_response): resp_iter = self._app_call(env) for i, (header, value) in enumerate(self._response_headers): if header.lower() in ('location', 'content-location'): self._response_headers[i] = (header, self.rewritten_re.sub( r'\1%s\2' % self.requested, value)) start_response(self._response_status, self._response_headers, self._response_exc_info) return resp_iter swift-2.17.1/swift/common/middleware/keystoneauth.py0000666000175000017500000006122313435012003022613 0ustar zuulzuul00000000000000# Copyright 2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from swift.common import utils as swift_utils from swift.common.http import is_success from swift.common.middleware import acl as swift_acl from swift.common.request_helpers import get_sys_meta_prefix from swift.common.swob import HTTPNotFound, HTTPForbidden, HTTPUnauthorized from swift.common.utils import config_read_reseller_options, list_from_csv from swift.proxy.controllers.base import get_account_info import functools PROJECT_DOMAIN_ID_HEADER = 'x-account-project-domain-id' PROJECT_DOMAIN_ID_SYSMETA_HEADER = \ get_sys_meta_prefix('account') + 'project-domain-id' # a string that is unique w.r.t valid ids UNKNOWN_ID = '_unknown' class KeystoneAuth(object): """Swift middleware to Keystone authorization system. In Swift's proxy-server.conf add this keystoneauth middleware and the authtoken middleware to your pipeline. Make sure you have the authtoken middleware before the keystoneauth middleware. The authtoken middleware will take care of validating the user and keystoneauth will authorize access. The sample proxy-server.conf shows a sample pipeline that uses keystone. :download:`proxy-server.conf-sample ` The authtoken middleware is shipped with keystonemiddleware - it does not have any other dependencies than itself so you can either install it by copying the file directly in your python path or by installing keystonemiddleware. If support is required for unvalidated users (as with anonymous access) or for formpost/staticweb/tempurl middleware, authtoken will need to be configured with ``delay_auth_decision`` set to true. See the Keystone documentation for more detail on how to configure the authtoken middleware. In proxy-server.conf you will need to have the setting account auto creation to true:: [app:proxy-server] account_autocreate = true And add a swift authorization filter section, such as:: [filter:keystoneauth] use = egg:swift#keystoneauth operator_roles = admin, swiftoperator The user who is able to give ACL / create Containers permissions will be the user with a role listed in the ``operator_roles`` setting which by default includes the admin and the swiftoperator roles. 
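
    For example, to grant owner rights via an additional deployment-specific
    role (the extra role name here is purely illustrative), list it in the
    comma-separated value::

        operator_roles = admin, swiftoperator, project_admin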
    The keystoneauth middleware maps a Keystone project/tenant to an account
    in Swift by adding a prefix (``AUTH_`` by default) to the tenant/project
    id. For example, if the project id is ``1234``, the path is
    ``/v1/AUTH_1234``.

    If you need to have a different reseller_prefix to be able to mix
    different auth servers you can configure the option ``reseller_prefix``
    in your keystoneauth entry like this::

        reseller_prefix = NEWAUTH

    Don't forget to also update the Keystone service endpoint configuration
    to use NEWAUTH in the path.

    It is possible to have several accounts associated with the same project.
    This is done by listing several prefixes as shown in the following
    example::

        reseller_prefix = AUTH, SERVICE

    This means that for project id '1234', the paths '/v1/AUTH_1234' and
    '/v1/SERVICE_1234' are associated with the project and are authorized
    using roles that a user has with that project. The core use of this
    feature is that it is possible to provide different rules for each
    account prefix. The following parameters may be prefixed with the
    appropriate prefix::

        operator_roles
        service_roles

    For backward compatibility, if either of these parameters is specified
    without a prefix then it applies to all reseller_prefixes. Here is an
    example, using two prefixes::

        reseller_prefix = AUTH, SERVICE
        # The next three lines have identical effects (since the first
        # applies to both prefixes).
        operator_roles = admin, swiftoperator
        AUTH_operator_roles = admin, swiftoperator
        SERVICE_operator_roles = admin, swiftoperator
        # The next line only applies to accounts with the SERVICE prefix
        SERVICE_operator_roles = admin, some_other_role

    X-Service-Token tokens are supported by the inclusion of the service_roles
    configuration option. When present, this option requires that the
    X-Service-Token header supply a token from a user who has a role listed
    in service_roles. Here is an example configuration::

        reseller_prefix = AUTH, SERVICE
        AUTH_operator_roles = admin, swiftoperator
        SERVICE_operator_roles = admin, swiftoperator
        SERVICE_service_roles = service

    The keystoneauth middleware supports cross-tenant access control using
    the syntax ``<tenant>:<user>`` to specify a grantee in container Access
    Control Lists (ACLs). For a request to be granted by an ACL, the grantee
    ``<tenant>`` must match the UUID of the tenant to which the request
    X-Auth-Token is scoped and the grantee ``<user>`` must match the UUID of
    the user authenticated by that token.

    Note that names must no longer be used in cross-tenant ACLs because with
    the introduction of domains in keystone names are no longer globally
    unique.

    For backwards compatibility, ACLs using names will be granted by
    keystoneauth when it can be established that the grantee tenant, the
    grantee user and the tenant being accessed are either not yet in a
    domain (e.g. the X-Auth-Token has been obtained via the keystone v2
    API) or are all in the default domain to which legacy accounts would
    have been migrated. The default domain is identified by its UUID, which
    by default has the value ``default``. This can be changed by setting
    the ``default_domain_id`` option in the keystoneauth configuration::

        default_domain_id = default

    The backwards compatible behavior can be disabled by setting the config
    option ``allow_names_in_acls`` to false::

        allow_names_in_acls = false

    To enable this backwards compatibility, keystoneauth will attempt to
    determine the domain id of a tenant when any new account is created,
    and persist this as account metadata.
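
    For example, a cross-tenant read grant written with ids (the values
    below are illustrative; real grantees are UUIDs) looks like::

        X-Container-Read: 1234:5678

    Under the rules above, the id-based form is always eligible for
    matching, while the name-based form ``tenant_name:user_name`` is only
    matched under the default-domain conditions just described.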
If an account is created for a tenant using a token with reselleradmin role that is not scoped on that tenant, keystoneauth is unable to determine the domain id of the tenant; keystoneauth will assume that the tenant may not be in the default domain and therefore not match names in ACLs for that account. By default, middleware higher in the WSGI pipeline may override auth processing, useful for middleware such as tempurl and formpost. If you know you're not going to use such middleware and you want a bit of extra security you can disable this behaviour by setting the ``allow_overrides`` option to ``false``:: allow_overrides = false :param app: The next WSGI app in the pipeline :param conf: The dict of configuration values """ def __init__(self, app, conf): self.app = app self.conf = conf self.logger = swift_utils.get_logger(conf, log_route='keystoneauth') self.reseller_prefixes, self.account_rules = \ config_read_reseller_options(conf, dict(operator_roles=['admin', 'swiftoperator'], service_roles=[])) self.reseller_admin_role = conf.get('reseller_admin_role', 'ResellerAdmin').lower() config_is_admin = conf.get('is_admin', "false").lower() if swift_utils.config_true_value(config_is_admin): self.logger.warning("The 'is_admin' option for keystoneauth is no " "longer supported. Remove the 'is_admin' " "option from your keystoneauth config") config_overrides = conf.get('allow_overrides', 't').lower() self.allow_overrides = swift_utils.config_true_value(config_overrides) self.default_domain_id = conf.get('default_domain_id', 'default') self.allow_names_in_acls = swift_utils.config_true_value( conf.get('allow_names_in_acls', 'true')) def __call__(self, environ, start_response): env_identity = self._keystone_identity(environ) # Check if one of the middleware like tempurl or formpost have # set the swift.authorize_override environ and want to control the # authentication if (self.allow_overrides and environ.get('swift.authorize_override', False)): msg = 'Authorizing from an overriding middleware' self.logger.debug(msg) return self.app(environ, start_response) if env_identity: self.logger.debug('Using identity: %r', env_identity) environ['REMOTE_USER'] = env_identity.get('tenant') environ['keystone.identity'] = env_identity environ['swift.authorize'] = functools.partial( self.authorize, env_identity) user_roles = (r.lower() for r in env_identity.get('roles', [])) if self.reseller_admin_role in user_roles: environ['reseller_request'] = True else: self.logger.debug('Authorizing as anonymous') environ['swift.authorize'] = self.authorize_anonymous environ['swift.clean_acl'] = swift_acl.clean_acl def keystone_start_response(status, response_headers, exc_info=None): project_domain_id = None for key, val in response_headers: if key.lower() == PROJECT_DOMAIN_ID_SYSMETA_HEADER: project_domain_id = val break if project_domain_id: response_headers.append((PROJECT_DOMAIN_ID_HEADER, project_domain_id)) return start_response(status, response_headers, exc_info) return self.app(environ, keystone_start_response) def _keystone_identity(self, environ): """Extract the identity from the Keystone auth component.""" if (environ.get('HTTP_X_IDENTITY_STATUS') != 'Confirmed' or environ.get( 'HTTP_X_SERVICE_IDENTITY_STATUS') not in (None, 'Confirmed')): return roles = list_from_csv(environ.get('HTTP_X_ROLES', '')) service_roles = list_from_csv(environ.get('HTTP_X_SERVICE_ROLES', '')) identity = {'user': (environ.get('HTTP_X_USER_ID'), environ.get('HTTP_X_USER_NAME')), 'tenant': (environ.get('HTTP_X_PROJECT_ID', 
environ.get('HTTP_X_TENANT_ID')), environ.get('HTTP_X_PROJECT_NAME', environ.get('HTTP_X_TENANT_NAME'))), 'roles': roles, 'service_roles': service_roles} token_info = environ.get('keystone.token_info', {}) auth_version = 0 user_domain = project_domain = (None, None) if 'access' in token_info: # ignore any domain id headers that authtoken may have set auth_version = 2 elif 'token' in token_info: auth_version = 3 user_domain = (environ.get('HTTP_X_USER_DOMAIN_ID'), environ.get('HTTP_X_USER_DOMAIN_NAME')) project_domain = (environ.get('HTTP_X_PROJECT_DOMAIN_ID'), environ.get('HTTP_X_PROJECT_DOMAIN_NAME')) identity['user_domain'] = user_domain identity['project_domain'] = project_domain identity['auth_version'] = auth_version return identity def _get_account_name(self, prefix, tenant_id): return '%s%s' % (prefix, tenant_id) def _account_matches_tenant(self, account, tenant_id): """Check if account belongs to a project/tenant""" for prefix in self.reseller_prefixes: if self._get_account_name(prefix, tenant_id) == account: return True return False def _get_account_prefix(self, account): """Get the prefix of an account""" # Empty prefix matches everything, so try to match others first for prefix in [pre for pre in self.reseller_prefixes if pre != '']: if account.startswith(prefix): return prefix if '' in self.reseller_prefixes: return '' return None def _get_project_domain_id(self, environ): info = get_account_info(environ, self.app, 'KS') domain_id = info.get('sysmeta', {}).get('project-domain-id') exists = (is_success(info.get('status', 0)) and info.get('account_really_exists', True)) return exists, domain_id def _set_project_domain_id(self, req, path_parts, env_identity): ''' Try to determine the project domain id and save it as account metadata. Do this for a PUT or POST to the account, and also for a container PUT in case that causes the account to be auto-created. ''' if PROJECT_DOMAIN_ID_SYSMETA_HEADER in req.headers: return version, account, container, obj = path_parts method = req.method if (obj or (container and method != 'PUT') or method not in ['PUT', 'POST']): return tenant_id, tenant_name = env_identity['tenant'] exists, sysmeta_id = self._get_project_domain_id(req.environ) req_has_id, req_id, new_id = False, None, None if self._account_matches_tenant(account, tenant_id): # domain id can be inferred from request (may be None) req_has_id = True req_id = env_identity['project_domain'][0] if not exists: # new account so set a domain id new_id = req_id if req_has_id else UNKNOWN_ID elif sysmeta_id is None and req_id == self.default_domain_id: # legacy account, update if default domain id in req new_id = req_id elif sysmeta_id == UNKNOWN_ID and req_has_id: # unknown domain, update if req confirms domain new_id = req_id or '' elif req_has_id and sysmeta_id != req_id: self.logger.warning("Inconsistent project domain id: " + "%s in token vs %s in account metadata." 
% (req_id, sysmeta_id)) if new_id is not None: req.headers[PROJECT_DOMAIN_ID_SYSMETA_HEADER] = new_id def _is_name_allowed_in_acl(self, req, path_parts, identity): if not self.allow_names_in_acls: return False user_domain_id = identity['user_domain'][0] if user_domain_id and user_domain_id != self.default_domain_id: return False proj_domain_id = identity['project_domain'][0] if proj_domain_id and proj_domain_id != self.default_domain_id: return False # request user and scoped project are both in default domain tenant_id, tenant_name = identity['tenant'] version, account, container, obj = path_parts if self._account_matches_tenant(account, tenant_id): # account == scoped project, so account is also in default domain allow = True else: # retrieve account project domain id from account sysmeta exists, acc_domain_id = self._get_project_domain_id(req.environ) allow = exists and acc_domain_id in [self.default_domain_id, None] if allow: self.logger.debug("Names allowed in acls.") return allow def _authorize_cross_tenant(self, user_id, user_name, tenant_id, tenant_name, roles, allow_names=True): """Check cross-tenant ACLs. Match tenant:user, tenant and user could be its id, name or '*' :param user_id: The user id from the identity token. :param user_name: The user name from the identity token. :param tenant_id: The tenant ID from the identity token. :param tenant_name: The tenant name from the identity token. :param roles: The given container ACL. :param allow_names: If True then attempt to match tenant and user names as well as id's. :returns: matched string if tenant(name/id/*):user(name/id/*) matches the given ACL. None otherwise. """ tenant_match = [tenant_id, '*'] user_match = [user_id, '*'] if allow_names: tenant_match = tenant_match + [tenant_name] user_match = user_match + [user_name] for tenant in tenant_match: for user in user_match: s = '%s:%s' % (tenant, user) if s in roles: return s return None def authorize(self, env_identity, req): # Cleanup - make sure that a previously set swift_owner setting is # cleared now. This might happen for example with COPY requests. req.environ.pop('swift_owner', None) tenant_id, tenant_name = env_identity['tenant'] user_id, user_name = env_identity['user'] referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None)) # allow OPTIONS requests to proceed as normal if req.method == 'OPTIONS': return try: part = req.split_path(1, 4, True) version, account, container, obj = part except ValueError: return HTTPNotFound(request=req) self._set_project_domain_id(req, part, env_identity) user_roles = [r.lower() for r in env_identity.get('roles', [])] user_service_roles = [r.lower() for r in env_identity.get( 'service_roles', [])] # Give unconditional access to a user with the reseller_admin # role. if self.reseller_admin_role in user_roles: msg = 'User %s has reseller admin authorizing' self.logger.debug(msg, tenant_id) req.environ['swift_owner'] = True return # If we are not reseller admin and user is trying to delete its own # account then deny it. 
        if not container and not obj and req.method == 'DELETE':
            # User is not allowed to issue a DELETE on its own account
            msg = 'User %s:%s is not allowed to delete its own account'
            self.logger.debug(msg, tenant_name, user_name)
            return self.denied_response(req)

        # cross-tenant authorization
        matched_acl = None
        if roles:
            allow_names = self._is_name_allowed_in_acl(req, part,
                                                       env_identity)
            matched_acl = self._authorize_cross_tenant(user_id, user_name,
                                                       tenant_id, tenant_name,
                                                       roles, allow_names)
        if matched_acl is not None:
            log_msg = 'user %s allowed in ACL authorizing.'
            self.logger.debug(log_msg, matched_acl)
            return

        acl_authorized = self._authorize_unconfirmed_identity(req, obj,
                                                              referrers,
                                                              roles)
        if acl_authorized:
            return

        # Check if a user tries to access an account that does not match
        # their token
        if not self._account_matches_tenant(account, tenant_id):
            log_msg = 'tenant mismatch: %s != %s'
            self.logger.debug(log_msg, account, tenant_id)
            return self.denied_response(req)

        # Compare roles from tokens against the configuration options:
        #
        # X-Auth-Token role  Has specified  X-Service-Token role  Grant
        # in operator_roles? service_roles? in service_roles?     swift_owner?
        # ------------------ -------------- --------------------- ------------
        # yes                yes            yes                   yes
        # yes                yes            no                    no
        # yes                no             don't care            yes
        # no                 don't care     don't care            no
        # ------------------ -------------- --------------------- ------------
        account_prefix = self._get_account_prefix(account)
        operator_roles = self.account_rules[account_prefix]['operator_roles']
        have_operator_role = set(operator_roles).intersection(
            set(user_roles))
        service_roles = self.account_rules[account_prefix]['service_roles']
        have_service_role = set(service_roles).intersection(
            set(user_service_roles))
        allowed = False
        if have_operator_role and (service_roles and have_service_role):
            allowed = True
        elif have_operator_role and not service_roles:
            allowed = True
        if allowed:
            log_msg = 'allow user with role(s) %s as account admin'
            self.logger.debug(log_msg, ','.join(have_operator_role.union(
                have_service_role)))
            req.environ['swift_owner'] = True
            return

        if acl_authorized is not None:
            return self.denied_response(req)

        # Check if we have the role in the user roles and allow it
        for user_role in user_roles:
            if user_role in (r.lower() for r in roles):
                log_msg = 'user %s:%s allowed in ACL: %s authorizing'
                self.logger.debug(log_msg, tenant_name, user_name,
                                  user_role)
                return

        return self.denied_response(req)

    def authorize_anonymous(self, req):
        """
        Authorize an anonymous request.

        :returns: None if authorization is granted, an error page otherwise.
        """
        try:
            part = req.split_path(1, 4, True)
            version, account, container, obj = part
        except ValueError:
            return HTTPNotFound(request=req)

        # allow OPTIONS requests to proceed as normal
        if req.method == 'OPTIONS':
            return

        is_authoritative_authz = (account and
                                  (self._get_account_prefix(account) in
                                   self.reseller_prefixes))
        if not is_authoritative_authz:
            return self.denied_response(req)

        referrers, roles = swift_acl.parse_acl(getattr(req, 'acl', None))
        authorized = self._authorize_unconfirmed_identity(req, obj,
                                                          referrers, roles)
        if not authorized:
            return self.denied_response(req)

    def _authorize_unconfirmed_identity(self, req, obj, referrers, roles):
        """
        Perform authorization for access that does not require a
        confirmed identity.

        :returns: True if authorization is granted, False if it is denied,
                  or None if a determination could not be made.
        """
        # Allow container sync.
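        # (Container-sync requests authenticate with the container's shared
        # sync key, sent in the x-container-sync-key header together with an
        # x-timestamp, instead of a user token; 'swift_sync_key' holds the
        # value the cluster has on record for this container.)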
        if (req.environ.get('swift_sync_key')
                and (req.environ['swift_sync_key'] ==
                     req.headers.get('x-container-sync-key', None))
                and 'x-timestamp' in req.headers):
            log_msg = 'allowing proxy %s for container-sync'
            self.logger.debug(log_msg, req.remote_addr)
            return True

        # Check if referrer is allowed.
        if swift_acl.referrer_allowed(req.referer, referrers):
            if obj or '.rlistings' in roles:
                log_msg = 'authorizing %s via referer ACL'
                self.logger.debug(log_msg, req.referrer)
                return True
        return False

    def denied_response(self, req):
        """Deny WSGI Response.

        Returns a standard WSGI response callable with the status of 403 or
        401 depending on whether the REMOTE_USER is set or not.
        """
        if req.remote_user:
            return HTTPForbidden(request=req)
        else:
            return HTTPUnauthorized(request=req)


def filter_factory(global_conf, **local_conf):
    """Returns a WSGI filter app for use with paste.deploy."""
    conf = global_conf.copy()
    conf.update(local_conf)

    def auth_filter(app):
        return KeystoneAuth(app, conf)
    return auth_filter
swift-2.17.1/swift/common/middleware/tempauth.py0000666000175000017500000010720013435012015021716 0ustar zuulzuul00000000000000# Copyright (c) 2011-2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Test authentication and authorization system.

Add to your pipeline in proxy-server.conf, such as::

    [pipeline:main]
    pipeline = catch_errors cache tempauth proxy-server

Set account auto creation to true in proxy-server.conf::

    [app:proxy-server]
    account_autocreate = true

And add a tempauth filter section, such as::

    [filter:tempauth]
    use = egg:swift#tempauth
    user_admin_admin = admin .admin .reseller_admin
    user_test_tester = testing .admin
    user_test2_tester2 = testing2 .admin
    user_test_tester3 = testing3
    # To allow accounts/users with underscores you can base64 encode them.
    # Here is the account "under_score" and username "a_b" (note the lack
    # of padding equal signs):
    user64_dW5kZXJfc2NvcmU_YV9i = testing4

See the proxy-server.conf-sample for more information.

Account/User List
^^^^^^^^^^^^^^^^^

All accounts/users are listed in the filter section. The format is::

    user_<account>_<user> = <key> [group] [group] [...] [storage_url]

If you want to be able to include underscores in the ``<account>`` or
``<user>`` portions, you can base64 encode them (with *no* equal signs) in
a line like this::

    user64_<account_b64>_<user_b64> = <key> [group] [...] [storage_url]

There are two special groups:

* ``.reseller_admin`` -- can do anything to any account for this auth
* ``.admin`` -- can do anything within the account

If neither of these groups is specified, the user can only access
containers that have been explicitly allowed for them by a ``.admin`` or
``.reseller_admin``.

The trailing optional ``storage_url`` allows you to specify an alternate
URL to hand back to the user upon authentication. If not specified, this
defaults to::

    $HOST/v1/<reseller_prefix>_<account>

Where ``$HOST`` will do its best to resolve to what the requester would
need to use to reach this host, ``<reseller_prefix>`` is from this section,
and ``<account>`` is from the ``user_<account>_<user>`` name.
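As an illustration only (the account, key, and URL here are made-up
values), a user line handing back an explicit storage URL might look
like::

    user_test_tester = testing .admin https://lb.example.com/v1/AUTH_test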
Note that ``$HOST`` cannot possibly handle when you have a load balancer
in front of it that does https while TempAuth itself runs with http; in
such a case, you'll have to specify the ``storage_url_scheme``
configuration value as an override.

Multiple Reseller Prefix Items
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The reseller prefix specifies which parts of the account namespace this
middleware is responsible for managing authentication and authorization.
By default, the prefix is 'AUTH' so accounts and tokens are prefixed by
'AUTH\_'. When a request's token and/or path start with 'AUTH\_', this
middleware knows it is responsible.

We allow the reseller prefix to be a list. In tempauth, the first item in
the list is used as the prefix for tokens and user groups. The other
prefixes provide alternate accounts that users can access. For example if
the reseller prefix list is 'AUTH, OTHER', a user with admin access to
'AUTH_account' also has admin access to 'OTHER_account'.

Required Group
^^^^^^^^^^^^^^

The group ``.admin`` is normally needed to access an account (ACLs provide
an additional way to access an account). If you specify the
``require_group`` parameter, users must also be in the named group in
order to access an account. If you have several reseller prefix items,
prefix the ``require_group`` parameter with the appropriate prefix.

X-Service-Token
^^^^^^^^^^^^^^^

If an ``X-Service-Token`` is presented in the request headers, the groups
derived from the token are appended to the roles derived from
``X-Auth-Token``. If ``X-Auth-Token`` is missing or invalid,
``X-Service-Token`` is not processed.

The ``X-Service-Token`` is useful when combined with multiple reseller
prefix items. In the following configuration, accounts prefixed
``SERVICE\_`` are only accessible if ``X-Auth-Token`` is from the end-user
and ``X-Service-Token`` is from the ``glance`` user::

    [filter:tempauth]
    use = egg:swift#tempauth
    reseller_prefix = AUTH, SERVICE
    SERVICE_require_group = .service
    user_admin_admin = admin .admin .reseller_admin
    user_joeacct_joe = joepw .admin
    user_maryacct_mary = marypw .admin
    user_glance_glance = glancepw .service

The name ``.service`` is an example. Unlike ``.admin`` and
``.reseller_admin`` it is not a reserved name.

Please note that ACLs can be set on service accounts and are matched
against the identity validated by ``X-Auth-Token``. As such ACLs can grant
access to a service account's container without needing to provide a
service token, just like any other cross-reseller request using ACLs.

Account ACLs
^^^^^^^^^^^^

If a swift_owner issues a POST or PUT to the account with the
``X-Account-Access-Control`` header set in the request, then this may
allow certain types of access for additional users.

* Read-Only: Users with read-only access can list containers in the
  account, list objects in any container, retrieve objects, and view
  unprivileged account/container/object metadata.
* Read-Write: Users with read-write access can (in addition to the
  read-only privileges) create objects, overwrite existing objects,
  create new containers, and set unprivileged container/object metadata.
* Admin: Users with admin access are swift_owners and can perform any
  action, including viewing/setting privileged metadata (e.g. changing
  account ACLs).
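As a purely illustrative example (the user names are invented), an ACL
granting ``bob`` read-only access and making ``alice`` a full co-owner is
a JSON mapping like::

    X-Account-Access-Control: {"read-only": ["bob"], "admin": ["alice"]}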
To generate headers for setting an account ACL::

    from swift.common.middleware.acl import format_acl
    acl_data = { 'admin': ['alice'], 'read-write': ['bob', 'carol'] }
    header_value = format_acl(version=2, acl_dict=acl_data)

To generate a curl command line from the above (note the embedded Python
uses double quotes so that it survives inside the single-quoted shell
argument)::

    token=...
    storage_url=...
    python -c '
    from swift.common.middleware.acl import format_acl
    acl_data = { "admin": ["alice"], "read-write": ["bob", "carol"] }
    headers = {"X-Account-Access-Control":
               format_acl(version=2, acl_dict=acl_data)}
    header_str = " ".join(["-H \\"%s: %s\\"" % (k, v)
                           for k, v in headers.items()])
    print("curl -D- -X POST -H \\"x-auth-token: $token\\" %s "
          "$storage_url" % header_str)
    '
"""

from __future__ import print_function

from time import time
from traceback import format_exc
from uuid import uuid4
import base64

from eventlet import Timeout
import six

from swift.common.swob import Response, Request
from swift.common.swob import HTTPBadRequest, HTTPForbidden, HTTPNotFound, \
    HTTPUnauthorized

from swift.common.request_helpers import get_sys_meta_prefix
from swift.common.middleware.acl import (
    clean_acl, parse_acl, referrer_allowed, acls_from_account_info)
from swift.common.utils import cache_from_env, get_logger, \
    split_path, config_true_value, register_swift_info
from swift.common.utils import config_read_reseller_options
from swift.proxy.controllers.base import get_account_info


DEFAULT_TOKEN_LIFE = 86400


class TempAuth(object):
    """
    :param app: The next WSGI app in the pipeline
    :param conf: The dict of configuration values from the Paste config file
    """

    def __init__(self, app, conf):
        self.app = app
        self.conf = conf
        self.logger = get_logger(conf, log_route='tempauth')
        self.log_headers = config_true_value(conf.get('log_headers', 'f'))
        self.reseller_prefixes, self.account_rules = \
            config_read_reseller_options(conf, dict(require_group=''))
        self.reseller_prefix = self.reseller_prefixes[0]
        self.logger.set_statsd_prefix('tempauth.%s' % (
            self.reseller_prefix if self.reseller_prefix else 'NONE',))
        self.auth_prefix = conf.get('auth_prefix', '/auth/')
        if not self.auth_prefix or not self.auth_prefix.strip('/'):
            self.logger.warning('Rewriting invalid auth prefix "%s" to '
                                '"/auth/" (Non-empty auth prefix path '
                                'is required)' % self.auth_prefix)
            self.auth_prefix = '/auth/'
        if not self.auth_prefix.startswith('/'):
            self.auth_prefix = '/' + self.auth_prefix
        if not self.auth_prefix.endswith('/'):
            self.auth_prefix += '/'
        self.token_life = int(conf.get('token_life', DEFAULT_TOKEN_LIFE))
        self.allow_overrides = config_true_value(
            conf.get('allow_overrides', 't'))
        self.storage_url_scheme = conf.get('storage_url_scheme', 'default')
        self.users = {}
        for conf_key in conf:
            if conf_key.startswith('user_') or conf_key.startswith('user64_'):
                account, username = conf_key.split('_', 1)[1].split('_')
                if conf_key.startswith('user64_'):
                    # Because trailing equal signs would screw up config file
                    # parsing, we auto-pad with '=' chars.
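                    # (For example, 'dW5kZXJfc2NvcmU' from the sample config
                    # above is 15 characters long, so '=' chars are appended
                    # before b64decode() turns it back into 'under_score'.)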
account += '=' * (len(account) % 4) account = base64.b64decode(account) username += '=' * (len(username) % 4) username = base64.b64decode(username) values = conf[conf_key].split() if not values: raise ValueError('%s has no key set' % conf_key) key = values.pop(0) if values and ('://' in values[-1] or '$HOST' in values[-1]): url = values.pop() else: url = '$HOST/v1/%s%s' % (self.reseller_prefix, account) self.users[account + ':' + username] = { 'key': key, 'url': url, 'groups': values} def __call__(self, env, start_response): """ Accepts a standard WSGI application call, authenticating the request and installing callback hooks for authorization and ACL header validation. For an authenticated request, REMOTE_USER will be set to a comma separated list of the user's groups. With a non-empty reseller prefix, acts as the definitive auth service for just tokens and accounts that begin with that prefix, but will deny requests outside this prefix if no other auth middleware overrides it. With an empty reseller prefix, acts as the definitive auth service only for tokens that validate to a non-empty set of groups. For all other requests, acts as the fallback auth service when no other auth middleware overrides it. Alternatively, if the request matches the self.auth_prefix, the request will be routed through the internal auth request handler (self.handle). This is to handle granting tokens, etc. """ if self.allow_overrides and env.get('swift.authorize_override', False): return self.app(env, start_response) if env.get('PATH_INFO', '').startswith(self.auth_prefix): return self.handle(env, start_response) s3 = env.get('swift3.auth_details') token = env.get('HTTP_X_AUTH_TOKEN', env.get('HTTP_X_STORAGE_TOKEN')) service_token = env.get('HTTP_X_SERVICE_TOKEN') if s3 or (token and token.startswith(self.reseller_prefix)): # Note: Empty reseller_prefix will match all tokens. groups = self.get_groups(env, token) if service_token: service_groups = self.get_groups(env, service_token) if groups and service_groups: groups += ',' + service_groups if groups: user = groups and groups.split(',', 1)[0] or '' trans_id = env.get('swift.trans_id') self.logger.debug('User: %s uses token %s (trans_id %s)' % (user, 's3' if s3 else token, trans_id)) env['REMOTE_USER'] = groups env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl if '.reseller_admin' in groups: env['reseller_request'] = True else: # Unauthorized token if self.reseller_prefix and not s3: # Because I know I'm the definitive auth for this token, I # can deny it outright. self.logger.increment('unauthorized') try: vrs, realm, rest = split_path(env['PATH_INFO'], 2, 3, True) except ValueError: realm = 'unknown' return HTTPUnauthorized(headers={ 'Www-Authenticate': 'Swift realm="%s"' % realm})( env, start_response) # Because I'm not certain if I'm the definitive auth for empty # reseller_prefixed tokens, I won't overwrite swift.authorize. elif 'swift.authorize' not in env: env['swift.authorize'] = self.denied_response else: if self._is_definitive_auth(env.get('PATH_INFO', '')): # Handle anonymous access to accounts I'm the definitive # auth for. env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl elif self.reseller_prefix == '': # Because I'm not certain if I'm the definitive auth, I won't # overwrite swift.authorize. 
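                # (With an empty reseller prefix every account path looks
                # like one of ours, so another auth middleware later in the
                # pipeline may still legitimately claim the request.)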
if 'swift.authorize' not in env: env['swift.authorize'] = self.authorize env['swift.clean_acl'] = clean_acl else: # Not my token, not my account, I can't authorize this request, # deny all is a good idea if not already set... if 'swift.authorize' not in env: env['swift.authorize'] = self.denied_response return self.app(env, start_response) def _is_definitive_auth(self, path): """ Determine if we are the definitive auth Determines if we are the definitive auth for a given path. If the account name is prefixed with something matching one of the reseller_prefix items, then we are the auth (return True) Non-matching: we are not the auth. However, one of the reseller_prefix items can be blank. If so, we cannot always be definite so return False. :param path: A path (e.g., /v1/AUTH_joesaccount/c/o) :return:True if we are definitive auth """ try: version, account, rest = split_path(path, 1, 3, True) except ValueError: return False if account: return bool(self._get_account_prefix(account)) return False def _non_empty_reseller_prefixes(self): return iter([pre for pre in self.reseller_prefixes if pre != '']) def _get_account_prefix(self, account): """ Get the prefix of an account Determines which reseller prefix matches the account and returns that prefix. If account does not start with one of the known reseller prefixes, returns None. :param account: Account name (e.g., AUTH_joesaccount) or None :return: The prefix string (examples: 'AUTH_', 'SERVICE_', '') If we can't match the prefix of the account, return None """ if account is None: return None # Empty prefix matches everything, so try to match others first for prefix in self._non_empty_reseller_prefixes(): if account.startswith(prefix): return prefix if '' in self.reseller_prefixes: return '' return None def _dot_account(self, account): """ Detect if account starts with dot character after the prefix :param account: account in path (e.g., AUTH_joesaccount) :return:True if name starts with dot character """ prefix = self._get_account_prefix(account) return prefix is not None and account[len(prefix)] == '.' def _get_user_groups(self, account, account_user, account_id): """ :param account: example: test :param account_user: example: test:tester :param account_id: example: AUTH_test :return: a comma separated string of group names. The group names are as follows: account,account_user,groups... If .admin is in the groups, this is replaced by all the possible account ids. For example, for user joe, account acct and resellers AUTH_, OTHER_, the returned string is as follows: acct,acct:joe,AUTH_acct,OTHER_acct """ groups = [account, account_user] groups.extend(self.users[account_user]['groups']) if '.admin' in groups: groups.remove('.admin') for prefix in self._non_empty_reseller_prefixes(): groups.append('%s%s' % (prefix, account)) if account_id not in groups: groups.append(account_id) groups = ','.join(groups) return groups def get_groups(self, env, token): """ Get groups for the given token. :param env: The current WSGI environment dictionary. :param token: Token to validate and return a group string for. :returns: None if the token is invalid or a string containing a comma separated list of groups the authenticated user is a member of. The first group in the list is also considered a unique identifier for that user. 
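                  For example, with the sample configuration from the module
                  docstring, a token for ``test:tester`` maps to something
                  like ``test,test:tester,AUTH_test``.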
""" groups = None memcache_client = cache_from_env(env) if not memcache_client: raise Exception('Memcache required') memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token) cached_auth_data = memcache_client.get(memcache_token_key) if cached_auth_data: expires, groups = cached_auth_data if expires < time(): groups = None s3_auth_details = env.get('swift3.auth_details') if s3_auth_details: if 'check_signature' not in s3_auth_details: self.logger.warning( 'Swift3 did not provide a check_signature function; ' 'upgrade Swift3 if you want to use it with tempauth') return None account_user = s3_auth_details['access_key'] if account_user not in self.users: return None user = self.users[account_user] account = account_user.split(':', 1)[0] account_id = user['url'].rsplit('/', 1)[-1] if not s3_auth_details['check_signature'](user['key']): return None env['PATH_INFO'] = env['PATH_INFO'].replace( account_user, account_id, 1) groups = self._get_user_groups(account, account_user, account_id) return groups def account_acls(self, req): """ Return a dict of ACL data from the account server via get_account_info. Auth systems may define their own format, serialization, structure, and capabilities implemented in the ACL headers and persisted in the sysmeta data. However, auth systems are strongly encouraged to be interoperable with Tempauth. Account ACLs are set and retrieved via the header X-Account-Access-Control For header format and syntax, see: * :func:`swift.common.middleware.acl.parse_acl()` * :func:`swift.common.middleware.acl.format_acl()` """ info = get_account_info(req.environ, self.app, swift_source='TA') try: acls = acls_from_account_info(info) except ValueError as e1: self.logger.warning("Invalid ACL stored in metadata: %r" % e1) return None except NotImplementedError as e2: self.logger.warning( "ACL version exceeds middleware version: %r" % e2) return None return acls def extract_acl_and_report_errors(self, req): """ Return a user-readable string indicating the errors in the input ACL, or None if there are no errors. """ acl_header = 'x-account-access-control' acl_data = req.headers.get(acl_header) result = parse_acl(version=2, data=acl_data) if result is None: return 'Syntax error in input (%r)' % acl_data tempauth_acl_keys = 'admin read-write read-only'.split() for key in result: # While it is possible to construct auth systems that collaborate # on ACLs, TempAuth is not such an auth system. At this point, # it thinks it is authoritative. if key not in tempauth_acl_keys: return "Key '%s' not recognized" % key for key in tempauth_acl_keys: if key not in result: continue if not isinstance(result[key], list): return "Value for key '%s' must be a list" % key for grantee in result[key]: if not isinstance(grantee, six.string_types): return "Elements of '%s' list must be strings" % key # Everything looks fine, no errors found internal_hdr = get_sys_meta_prefix('account') + 'core-access-control' req.headers[internal_hdr] = req.headers.pop(acl_header) return None def authorize(self, req): """ Returns None if the request is authorized to continue or a standard WSGI response callable if not. """ try: _junk, account, container, obj = req.split_path(1, 4, True) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if self._get_account_prefix(account) is None: self.logger.debug("Account name: %s doesn't start with " "reseller_prefix(s): %s." 
% (account, ','.join(self.reseller_prefixes))) return self.denied_response(req) # At this point, TempAuth is convinced that it is authoritative. # If you are sending an ACL header, it must be syntactically valid # according to TempAuth's rules for ACL syntax. acl_data = req.headers.get('x-account-access-control') if acl_data is not None: error = self.extract_acl_and_report_errors(req) if error: msg = 'X-Account-Access-Control invalid: %s\n\nInput: %s\n' % ( error, acl_data) headers = [('Content-Type', 'text/plain; charset=UTF-8')] return HTTPBadRequest(request=req, headers=headers, body=msg) user_groups = (req.remote_user or '').split(',') account_user = user_groups[1] if len(user_groups) > 1 else None if '.reseller_admin' in user_groups and \ account not in self.reseller_prefixes and \ not self._dot_account(account): req.environ['swift_owner'] = True self.logger.debug("User %s has reseller admin authorizing." % account_user) return None if account in user_groups and \ (req.method not in ('DELETE', 'PUT') or container): # The user is admin for the account and is not trying to do an # account DELETE or PUT account_prefix = self._get_account_prefix(account) require_group = self.account_rules.get(account_prefix).get( 'require_group') if require_group and require_group in user_groups: req.environ['swift_owner'] = True self.logger.debug("User %s has admin and %s group." " Authorizing." % (account_user, require_group)) return None elif not require_group: req.environ['swift_owner'] = True self.logger.debug("User %s has admin authorizing." % account_user) return None if (req.environ.get('swift_sync_key') and (req.environ['swift_sync_key'] == req.headers.get('x-container-sync-key', None)) and 'x-timestamp' in req.headers): self.logger.debug("Allow request with container sync-key: %s." % req.environ['swift_sync_key']) return None if req.method == 'OPTIONS': # allow OPTIONS requests to proceed as normal self.logger.debug("Allow OPTIONS request.") return None referrers, groups = parse_acl(getattr(req, 'acl', None)) if referrer_allowed(req.referer, referrers): if obj or '.rlistings' in groups: self.logger.debug("Allow authorizing %s via referer ACL." % req.referer) return None for user_group in user_groups: if user_group in groups: self.logger.debug("User %s allowed in ACL: %s authorizing." % (account_user, user_group)) return None # Check for access via X-Account-Access-Control acct_acls = self.account_acls(req) if acct_acls: # At least one account ACL is set in this account's sysmeta data, # so we should see whether this user is authorized by the ACLs. user_group_set = set(user_groups) if user_group_set.intersection(acct_acls['admin']): req.environ['swift_owner'] = True self.logger.debug('User %s allowed by X-Account-Access-Control' ' (admin)' % account_user) return None if (user_group_set.intersection(acct_acls['read-write']) and (container or req.method in ('GET', 'HEAD'))): # The RW ACL allows all operations to containers/objects, but # only GET/HEAD to accounts (and OPTIONS, above) self.logger.debug('User %s allowed by X-Account-Access-Control' ' (read-write)' % account_user) return None if (user_group_set.intersection(acct_acls['read-only']) and req.method in ('GET', 'HEAD')): self.logger.debug('User %s allowed by X-Account-Access-Control' ' (read-only)' % account_user) return None return self.denied_response(req) def denied_response(self, req): """ Returns a standard WSGI response callable with the status of 403 or 401 depending on whether the REMOTE_USER is set or not. 
""" if req.remote_user: self.logger.increment('forbidden') return HTTPForbidden(request=req) else: self.logger.increment('unauthorized') return HTTPUnauthorized(request=req) def handle(self, env, start_response): """ WSGI entry point for auth requests (ones that match the self.auth_prefix). Wraps env in swob.Request object and passes it down. :param env: WSGI environment dictionary :param start_response: WSGI callable """ try: req = Request(env) if self.auth_prefix: req.path_info_pop() req.bytes_transferred = '-' req.client_disconnect = False if 'x-storage-token' in req.headers and \ 'x-auth-token' not in req.headers: req.headers['x-auth-token'] = req.headers['x-storage-token'] return self.handle_request(req)(env, start_response) except (Exception, Timeout): print("EXCEPTION IN handle: %s: %s" % (format_exc(), env)) self.logger.increment('errors') start_response('500 Server Error', [('Content-Type', 'text/plain')]) return ['Internal server error.\n'] def handle_request(self, req): """ Entry point for auth requests (ones that match the self.auth_prefix). Should return a WSGI-style callable (such as swob.Response). :param req: swob.Request object """ req.start_time = time() handler = None try: version, account, user, _junk = split_path(req.path_info, 1, 4, True) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if version in ('v1', 'v1.0', 'auth'): if req.method == 'GET': handler = self.handle_get_token if not handler: self.logger.increment('errors') req.response = HTTPBadRequest(request=req) else: req.response = handler(req) return req.response def handle_get_token(self, req): """ Handles the various `request for token and service end point(s)` calls. There are various formats to support the various auth servers in the past. Examples:: GET /v1//auth X-Auth-User: : or X-Storage-User: X-Auth-Key: or X-Storage-Pass: GET /auth X-Auth-User: : or X-Storage-User: : X-Auth-Key: or X-Storage-Pass: GET /v1.0 X-Auth-User: : or X-Storage-User: : X-Auth-Key: or X-Storage-Pass: On successful authentication, the response will have X-Auth-Token and X-Storage-Token set to the token to use with Swift and X-Storage-URL set to the URL to the default Swift cluster to use. :param req: The swob.Request to process. :returns: swob.Response, 2xx on success with data set as explained above. 
""" # Validate the request info try: pathsegs = split_path(req.path_info, 1, 3, True) except ValueError: self.logger.increment('errors') return HTTPNotFound(request=req) if pathsegs[0] == 'v1' and pathsegs[2] == 'auth': account = pathsegs[1] user = req.headers.get('x-storage-user') if not user: user = req.headers.get('x-auth-user') if not user or ':' not in user: self.logger.increment('token_denied') auth = 'Swift realm="%s"' % account return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) account2, user = user.split(':', 1) if account != account2: self.logger.increment('token_denied') auth = 'Swift realm="%s"' % account return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) key = req.headers.get('x-storage-pass') if not key: key = req.headers.get('x-auth-key') elif pathsegs[0] in ('auth', 'v1.0'): user = req.headers.get('x-auth-user') if not user: user = req.headers.get('x-storage-user') if not user or ':' not in user: self.logger.increment('token_denied') auth = 'Swift realm="unknown"' return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) account, user = user.split(':', 1) key = req.headers.get('x-auth-key') if not key: key = req.headers.get('x-storage-pass') else: return HTTPBadRequest(request=req) if not all((account, user, key)): self.logger.increment('token_denied') realm = account or 'unknown' return HTTPUnauthorized(request=req, headers={'Www-Authenticate': 'Swift realm="%s"' % realm}) # Authenticate user account_user = account + ':' + user if account_user not in self.users: self.logger.increment('token_denied') auth = 'Swift realm="%s"' % account return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) if self.users[account_user]['key'] != key: self.logger.increment('token_denied') auth = 'Swift realm="unknown"' return HTTPUnauthorized(request=req, headers={'Www-Authenticate': auth}) account_id = self.users[account_user]['url'].rsplit('/', 1)[-1] # Get memcache client memcache_client = cache_from_env(req.environ) if not memcache_client: raise Exception('Memcache required') # See if a token already exists and hasn't expired token = None memcache_user_key = '%s/user/%s' % (self.reseller_prefix, account_user) candidate_token = memcache_client.get(memcache_user_key) if candidate_token: memcache_token_key = \ '%s/token/%s' % (self.reseller_prefix, candidate_token) cached_auth_data = memcache_client.get(memcache_token_key) if cached_auth_data: expires, old_groups = cached_auth_data old_groups = old_groups.split(',') new_groups = self._get_user_groups(account, account_user, account_id) if expires > time() and \ set(old_groups) == set(new_groups.split(',')): token = candidate_token # Create a new token if one didn't exist if not token: # Generate new token token = '%stk%s' % (self.reseller_prefix, uuid4().hex) expires = time() + self.token_life groups = self._get_user_groups(account, account_user, account_id) # Save token memcache_token_key = '%s/token/%s' % (self.reseller_prefix, token) memcache_client.set(memcache_token_key, (expires, groups), time=float(expires - time())) # Record the token with the user info for future use. 
memcache_user_key = \ '%s/user/%s' % (self.reseller_prefix, account_user) memcache_client.set(memcache_user_key, token, time=float(expires - time())) resp = Response(request=req, headers={ 'x-auth-token': token, 'x-storage-token': token, 'x-auth-token-expires': str(int(expires - time()))}) url = self.users[account_user]['url'].replace('$HOST', resp.host_url) if self.storage_url_scheme != 'default': url = self.storage_url_scheme + ':' + url.split(':', 1)[1] resp.headers['x-storage-url'] = url return resp def filter_factory(global_conf, **local_conf): """Returns a WSGI filter app for use with paste.deploy.""" conf = global_conf.copy() conf.update(local_conf) register_swift_info('tempauth', account_acls=True) def auth_filter(app): return TempAuth(app, conf) return auth_filter swift-2.17.1/swift/common/middleware/x_profile/0000775000175000017500000000000013435012120021477 5ustar zuulzuul00000000000000swift-2.17.1/swift/common/middleware/x_profile/__init__.py0000666000175000017500000000000013435012003023600 0ustar zuulzuul00000000000000swift-2.17.1/swift/common/middleware/x_profile/exceptions.py0000666000175000017500000000204113435012003024231 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from swift import gettext_ as _ class ProfileException(Exception): def __init__(self, msg): self.msg = msg def __str__(self): return _('Profiling Error: %s') % self.msg class NotFoundException(ProfileException): pass class MethodNotAllowed(ProfileException): pass class ODFLIBNotInstalled(ProfileException): pass class PLOTLIBNotInstalled(ProfileException): pass class DataLoadFailure(ProfileException): pass swift-2.17.1/swift/common/middleware/x_profile/profile_model.py0000666000175000017500000002377513435012003024711 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. 
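# This module prepares profiling data for the xprofile middleware's viewer:
# Stats2 extends pstats.Stats with JSON, CSV and ODS exporters, and
# ProfileLog manages the dump files written by the profiler.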
import glob import json import os import pstats import tempfile import time from swift import gettext_ as _ from swift.common.middleware.x_profile.exceptions import ODFLIBNotInstalled ODFLIB_INSTALLED = True try: from odf.opendocument import OpenDocumentSpreadsheet from odf.table import Table, TableRow, TableCell from odf.text import P except ImportError: ODFLIB_INSTALLED = False class Stats2(pstats.Stats): def __init__(self, *args, **kwds): pstats.Stats.__init__(self, *args, **kwds) def func_to_dict(self, func): return {'module': func[0], 'line': func[1], 'function': func[2]} def func_std_string(self, func): return pstats.func_std_string(func) def to_json(self, *selection): d = dict() d['files'] = [f for f in self.files] d['prim_calls'] = (self.prim_calls) d['total_calls'] = (self.total_calls) if hasattr(self, 'sort_type'): d['sort_type'] = self.sort_type else: d['sort_type'] = 'random' d['total_tt'] = (self.total_tt) if self.fcn_list: stat_list = self.fcn_list[:] else: stat_list = self.stats.keys() for s in selection: stat_list, __ = self.eval_print_amount(s, stat_list, '') self.calc_callees() function_calls = [] for func in stat_list: cc, nc, tt, ct, callers = self.stats[func] fdict = dict() fdict.update(self.func_to_dict(func)) fdict.update({'cc': (cc), 'nc': (nc), 'tt': (tt), 'ct': (ct)}) if self.all_callees: fdict.update({'callees': []}) for key in self.all_callees[func]: cee = self.func_to_dict(key) metric = self.all_callees[func][key] # FIXME: eventlet profiler don't provide full list of # the metrics if type(metric) is tuple: cc1, nc1, tt1, ct1 = metric cee.update({'cc': cc1, 'nc': nc1, 'tt': tt1, 'ct': ct1}) else: cee['nc'] = metric fdict['callees'].append(cee) cer = [] for caller in callers: fd = self.func_to_dict(caller) metric2 = callers[caller] if isinstance(metric2, tuple): cc2, nc2, tt2, ct2 = metric2 fd.update({'cc': cc2, 'nc': nc2, 'tt': tt2, 'ct': ct2}) else: fd.update({'nc': metric2}) cer.append(fd) fdict.update({'callers': cer}) function_calls.append(fdict) d['stats'] = function_calls return json.dumps(d, indent=2) def to_csv(self, *selection): if self.fcn_list: stat_list = self.fcn_list[:] order_text = "Ordered by: " + self.sort_type + '\r\n' else: stat_list = self.stats.keys() order_text = "Random listing order was used\r\n" for s in selection: stat_list, __ = self.eval_print_amount(s, stat_list, '') csv = '%d function calls (%d primitive calls) in %.6f seconds.' 
% ( self.total_calls, self.prim_calls, self.total_tt) csv = csv + order_text + 'call count(nc), primitive call count(cc), \ total time(tt), time per call, \ cumulative time(ct), time per call, \ function\r\n' for func in stat_list: cc, nc, tt, ct, __ = self.stats[func] tpc = '' if nc == 0 else '%3f' % (tt / nc) cpc = '' if cc == 0 else '%3f' % (ct / cc) fn = '%s:%d(%s)' % (func[0], func[1], func[2]) csv = csv + '%d,%d,%3f,%s,%3f,%s,%s\r\n' % ( nc, cc, tt, tpc, ct, cpc, fn) return csv def to_ods(self, *selection): if not ODFLIB_INSTALLED: raise ODFLIBNotInstalled(_('odfpy not installed.')) if self.fcn_list: stat_list = self.fcn_list[:] order_text = " Ordered by: " + self.sort_type + '\n' else: stat_list = self.stats.keys() order_text = " Random listing order was used\n" for s in selection: stat_list, __ = self.eval_print_amount(s, stat_list, '') spreadsheet = OpenDocumentSpreadsheet() table = Table(name="Profile") for fn in self.files: tcf = TableCell() tcf.addElement(P(text=fn)) trf = TableRow() trf.addElement(tcf) table.addElement(trf) tc_summary = TableCell() summary_text = '%d function calls (%d primitive calls) in %.6f \ seconds' % (self.total_calls, self.prim_calls, self.total_tt) tc_summary.addElement(P(text=summary_text)) tr_summary = TableRow() tr_summary.addElement(tc_summary) table.addElement(tr_summary) tc_order = TableCell() tc_order.addElement(P(text=order_text)) tr_order = TableRow() tr_order.addElement(tc_order) table.addElement(tr_order) tr_header = TableRow() tc_cc = TableCell() tc_cc.addElement(P(text='Total Call Count')) tr_header.addElement(tc_cc) tc_pc = TableCell() tc_pc.addElement(P(text='Primitive Call Count')) tr_header.addElement(tc_pc) tc_tt = TableCell() tc_tt.addElement(P(text='Total Time(seconds)')) tr_header.addElement(tc_tt) tc_pc = TableCell() tc_pc.addElement(P(text='Time Per call(seconds)')) tr_header.addElement(tc_pc) tc_ct = TableCell() tc_ct.addElement(P(text='Cumulative Time(seconds)')) tr_header.addElement(tc_ct) tc_pt = TableCell() tc_pt.addElement(P(text='Cumulative Time per call(seconds)')) tr_header.addElement(tc_pt) tc_nfl = TableCell() tc_nfl.addElement(P(text='filename:lineno(function)')) tr_header.addElement(tc_nfl) table.addElement(tr_header) for func in stat_list: cc, nc, tt, ct, __ = self.stats[func] tr_header = TableRow() tc_nc = TableCell() tc_nc.addElement(P(text=nc)) tr_header.addElement(tc_nc) tc_pc = TableCell() tc_pc.addElement(P(text=cc)) tr_header.addElement(tc_pc) tc_tt = TableCell() tc_tt.addElement(P(text=tt)) tr_header.addElement(tc_tt) tc_tpc = TableCell() tc_tpc.addElement(P(text=(None if nc == 0 else float(tt) / nc))) tr_header.addElement(tc_tpc) tc_ct = TableCell() tc_ct.addElement(P(text=ct)) tr_header.addElement(tc_ct) tc_tpt = TableCell() tc_tpt.addElement(P(text=(None if cc == 0 else float(ct) / cc))) tr_header.addElement(tc_tpt) tc_nfl = TableCell() tc_nfl.addElement(P(text=func)) tr_header.addElement(tc_nfl) table.addElement(tr_header) spreadsheet.spreadsheet.addElement(table) with tempfile.TemporaryFile() as tmp_ods: spreadsheet.write(tmp_ods) tmp_ods.seek(0) data = tmp_ods.read() return data class ProfileLog(object): def __init__(self, log_filename_prefix, dump_timestamp): self.log_filename_prefix = log_filename_prefix self.dump_timestamp = dump_timestamp def get_all_pids(self): profile_ids = [l.replace(self.log_filename_prefix, '') for l in glob.glob(self.log_filename_prefix + '*') if not l.endswith('.tmp')] return sorted(profile_ids, reverse=True) def get_logfiles(self, id_or_name): # The first file with 
timestamp in the sorted log_files # (PREFIX)(PROCESS_ID)-(TIMESTAMP) if id_or_name in ['all']: if self.dump_timestamp: latest_dict = {} for pid in self.get_all_pids(): [process_id, __] = pid.split('-') if process_id not in latest_dict.keys(): latest_dict[process_id] = self.log_filename_prefix +\ pid log_files = latest_dict.values() else: log_files = [l for l in glob.glob(self.log_filename_prefix + '*') if not l.endswith('.tmp')] else: pid = str(os.getpid()) if id_or_name in [None, '', 'current']\ else id_or_name log_files = [l for l in glob.glob(self.log_filename_prefix + pid + '*') if not l.endswith('.tmp')] if len(log_files) > 0: log_files = sorted(log_files, reverse=True)[0:1] return log_files def dump_profile(self, profiler, pid): if self.log_filename_prefix: pfn = self.log_filename_prefix + str(pid) if self.dump_timestamp: pfn = pfn + "-" + str(time.time()) tmpfn = pfn + ".tmp" profiler.dump_stats(tmpfn) os.rename(tmpfn, pfn) return pfn def clear(self, id_or_name): log_files = self.get_logfiles(id_or_name) for l in log_files: os.path.exists(l) and os.remove(l) swift-2.17.1/swift/common/middleware/x_profile/html_viewer.py0000666000175000017500000005105013435012015024404 0ustar zuulzuul00000000000000# Copyright (c) 2010-2012 OpenStack, LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import cgi import os import random import re import string import tempfile from swift import gettext_ as _ from exceptions import PLOTLIBNotInstalled, ODFLIBNotInstalled,\ NotFoundException, MethodNotAllowed, DataLoadFailure, ProfileException from profile_model import Stats2 PLOTLIB_INSTALLED = True try: import matplotlib # use agg backend for writing to file, not for rendering in a window. # otherwise some platform will complain "no display name and $DISPLAY # environment variable" matplotlib.use('agg') import matplotlib.pyplot as plt except ImportError: PLOTLIB_INSTALLED = False empty_description = """ The default profile of current process or the profile you requested is empty. """ profile_tmpl = """ """ sort_tmpl = """ """ limit_tmpl = """ """ fulldirs_tmpl = """ """ mode_tmpl = """ """ nfl_filter_tmpl = """ """ formelements_tmpl = """
  <div>
    <table>
      <tr>
        <th>Profile</th>
        <th>Sort</th>
        <th>Limit</th>
        <th>Full Path</th>
        <th>Filter</th>
        <th>Plot Metric</th>
        <th>Plot Type</th>
        <th>Format</th>
      </tr>
      <tr>
        <td>${profile}</td>
        <td>${sort}</td>
        <td>${limit}</td>
        <td>${fulldirs}</td>
        <td>${nfl_filter}</td>
        <td>
          <select name="metric">
            <option value="cc">Call Count</option>
            <option value="nc">Total Call Count</option>
            <option value="tt">Total Time</option>
            <option value="ct">Cumulative Time</option>
          </select>
        </td>
        <td>
          <select name="plottype">
            <option value="bar">bar</option>
            <option value="pie">pie</option>
          </select>
          <input type="submit" name="plot" value="Plot"/>
        </td>
        <td>
          <select name="format">
            <option value="default">binary</option>
            <option value="json">json</option>
            <option value="csv">csv</option>
            <option value="ods">ods</option>
          </select>
          <input type="submit" name="download" value="Download"/>
          <input type="submit" name="clear" value="Clear"/>
        </td>
      </tr>
    </table>
  </div>
"""

index_tmpl = """
<html>
  <head>
    <title>profile results</title>
  </head>
  <body>
    ${description}
    <form method="POST">
      ${formelements}
    </form>
    <pre>
${profilehtml}
    </pre>
  </body>
</html>
""" class HTMLViewer(object): format_dict = {'default': 'application/octet-stream', 'json': 'application/json', 'csv': 'text/csv', 'ods': 'application/vnd.oasis.opendocument.spreadsheet', 'python': 'text/html'} def __init__(self, app_path, profile_module, profile_log): self.app_path = app_path self.profile_module = profile_module self.profile_log = profile_log def _get_param(self, query_dict, key, default=None, multiple=False): value = query_dict.get(key, default) if value is None or value == '': return default if multiple: return value if isinstance(value, list): return eval(value[0]) if isinstance(default, int) else value[0] else: return value def render(self, url, method, path_entry, query_dict, clear_callback): plot = self._get_param(query_dict, 'plot', None) download = self._get_param(query_dict, 'download', None) clear = self._get_param(query_dict, 'clear', None) action = plot or download or clear profile_id = self._get_param(query_dict, 'profile', 'current') sort = self._get_param(query_dict, 'sort', 'time') limit = self._get_param(query_dict, 'limit', -1) fulldirs = self._get_param(query_dict, 'fulldirs', 0) nfl_filter = self._get_param(query_dict, 'nfl_filter', '').strip() metric_selected = self._get_param(query_dict, 'metric', 'cc') plot_type = self._get_param(query_dict, 'plottype', 'bar') download_format = self._get_param(query_dict, 'format', 'default') content = '' # GET /__profile, POST /__profile if len(path_entry) == 2 and method in ['GET', 'POST']: log_files = self.profile_log.get_logfiles(profile_id) if action == 'plot': content, headers = self.plot(log_files, sort, limit, nfl_filter, metric_selected, plot_type) elif action == 'download': content, headers = self.download(log_files, sort, limit, nfl_filter, download_format) else: if action == 'clear': self.profile_log.clear(profile_id) clear_callback and clear_callback() content, headers = self.index_page(log_files, sort, limit, fulldirs, nfl_filter, profile_id, url) # GET /__profile__/all # GET /__profile__/current # GET /__profile__/profile_id # GET /__profile__/profile_id/ # GET /__profile__/profile_id/account.py:50(GETorHEAD) # GET /__profile__/profile_id/swift/proxy/controllers # /account.py:50(GETorHEAD) # with QUERY_STRING: ?format=[default|json|csv|ods] elif len(path_entry) > 2 and method == 'GET': profile_id = path_entry[2] log_files = self.profile_log.get_logfiles(profile_id) pids = self.profile_log.get_all_pids() # return all profiles in a json format by default. 
# GET /__profile__/ if profile_id == '': content = '{"profile_ids": ["' + '","'.join(pids) + '"]}' headers = [('content-type', self.format_dict['json'])] else: if len(path_entry) > 3 and path_entry[3] != '': nfl_filter = '/'.join(path_entry[3:]) if path_entry[-1].find(':0') == -1: nfl_filter = '/' + nfl_filter content, headers = self.download(log_files, sort, -1, nfl_filter, download_format) headers.append(('Access-Control-Allow-Origin', '*')) else: raise MethodNotAllowed(_('method %s is not allowed.') % method) return content, headers def index_page(self, log_files=None, sort='time', limit=-1, fulldirs=0, nfl_filter='', profile_id='current', url='#'): headers = [('content-type', 'text/html')] if len(log_files) == 0: return empty_description, headers try: stats = Stats2(*log_files) except (IOError, ValueError): raise DataLoadFailure(_('Can not load profile data from %s.') % log_files) if not fulldirs: stats.strip_dirs() stats.sort_stats(sort) nfl_filter_esc =\ nfl_filter.replace('(', '\(').replace(')', '\)') amount = [nfl_filter_esc, limit] if nfl_filter_esc else [limit] profile_html = self.generate_stats_html(stats, self.app_path, profile_id, *amount) description = "Profiling information is generated by using\ '%s' profiler." % self.profile_module sort_repl = '' % (p, p) for p in self.profile_log.get_all_pids()]) profile_element = string.Template(profile_tmpl).substitute( {'profile_list': plist}) profile_repl = '